diff --git a/Readme.md b/Readme.md new file mode 100644 index 0000000000..e7818c7696 --- /dev/null +++ b/Readme.md @@ -0,0 +1,71 @@ +This repository contains Intel's Linux LTS quilt releases. + +GPG Signed Releases +------------------- + +i) Check if a release tag is GPG-signed or not + +if a tag is not signed, when you run ‘git tag -v ’ command, you get the result as: + +$ git tag -v lts-v4.19.272-android_t-230316T041640Z +object 7150c8b4efa2baf0bef3a3da3850d29715c6fcbb +type commit +tag lts-v4.19.272-android_t-230316T041640Z +tagger sys_oak sys_oak@intel.com 1679296599 -0700 + +release Kernel 4.19 for android T Dessert +error: no signature found + +You can see ‘error: no signature found’ if the tag is not signed + +If the tag is signed - please follow the below steps to get the public key and verify the tag - + +ii) Download public key + +Open https://keys.openpgp.org/, input Full Key ID (i.e., EB4D99E5113E284368955757F18D9D84E60D69E7), or, +short Key ID (i.e., F18D9D84E60D69E7, the Last 16 digitals). or, the tagger email address(i.e., sys_oak@intel.com), +Click ‘Search’, then you can download the pub key file (i.e., EB4D99E5113E284368955757F18D9D84E60D69E7.asc). +The md5sum checksum is 40b0222665a5f6c70ca9d990b4014f43 for the pub key file: +$ md5sum EB4D99E5113E284368955757F18D9D84E60D69E7.asc +40b0222665a5f6c70ca9d990b4014f43 EB4D99E5113E284368955757F18D9D84E60D69E7.asc + +Once your checksum is correct, please do next step. 
+ +iii) Configure your Linux Environment and verify the GPG signature of a tag ( one time setup) + +After you get the right pub key, please import it: +$ gpg --import EB4D99E5113E284368955757F18D9D84E60D69E7.asc + +Now, when you check the tag GPG signature, you can see ‘Good signature’ with a WARNING: +$ git tag -v lts-v4.19.282-android_t-230509T073627Z +object 180df1199944ebd8928f320a1bd16c8a87dba2ed +type commit +tag lts-v4.19.282-android_t-230509T073627Z +tagger sys_oak sys_oak@intel.com 1683864457 -0700 + +release Kernel 4.19 for android T Dessert +gpg: Signature made Fri 12 May 2023 12:07:37 AM EDT +gpg: using RSA key EB4D99E5113E284368955757F18D9D84E60D69E7 +gpg: Good signature from "sys_oak (NSWE) sys_oak@intel.com" [unknown] +gpg: WARNING: This key is not certified with a trusted signature! +gpg: There is no indication that the signature belongs to the owner. +Primary key fingerprint: EB4D 99E5 113E 2843 6895 5757 F18D 9D84 E60D 69E7 + +To deal with the WARNING, let the pub key be trusted, run ‘gpg --edit-key ’ to edit it ( one time setup) +$ gpg --edit-key F18D9D84E60D69E7 +input trust +input 5 +input y +input quit + +Now, when you check the tag GPG signature again , you can see ‘Good signature’ without warnings: +$ git tag -v lts-v4.19.282-android_t-230509T073627Z +object 180df1199944ebd8928f320a1bd16c8a87dba2ed +type commit +tag lts-v4.19.282-android_t-230509T073627Z +tagger sys_oak sys_oak@intel.com 1683864457 -0700 + +release Kernel 4.19 for android T Dessert +gpg: Signature made Fri 12 May 2023 12:07:37 AM EDT +gpg: using RSA key EB4D99E5113E284368955757F18D9D84E60D69E7 +gpg: Good signature from "sys_oak (NSWE) sys_oak@intel.com" [ultimate] diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..373608b6a6 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,5 @@ +# Security Policy +Intel is committed to rapidly addressing security vulnerabilities affecting our customers and providing clear guidance on the solution, impact, severity and 
mitigation. + +## Reporting a Vulnerability +Please report any security vulnerabilities in this project utilizing the guidelines [here](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html). diff --git a/patches/0001-ASoC-Intel-Skylake-Relocate-ipc-structs-to-appropria.audio b/patches/0001-ASoC-Intel-Skylake-Relocate-ipc-structs-to-appropria.audio new file mode 100644 index 0000000000..af281fbcb4 --- /dev/null +++ b/patches/0001-ASoC-Intel-Skylake-Relocate-ipc-structs-to-appropria.audio @@ -0,0 +1,316 @@ +From d2f37965bb576bf4785dfe965a472d82ec2e7b5e Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 24 Apr 2019 13:56:21 +0200 +Subject: [PATCH 001/193] ASoC: Intel: Skylake: Relocate ipc structs to + appropriate header + +IPC related structs never belonged to topology domain, these are +strictly part of DSP firmware communication interface. Relocate them. +While at it, update skl_connector_node_id declaration to reflect changes +made in firmware. 
+ +Change-Id: I66e69bc9504f626ce0c3f8367e637e6d3e0bf5b3 +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-sst-ipc.h | 117 ++++++++++++++++++++++++ + sound/soc/intel/skylake/skl-topology.h | 118 ------------------------- + 2 files changed, 117 insertions(+), 118 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h +index 08ac31778325..f48898fc1436 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.h ++++ b/sound/soc/intel/skylake/skl-sst-ipc.h +@@ -11,10 +11,127 @@ + #include + #include "../common/sst-ipc.h" + #include "skl-sst-dsp.h" ++#include + + struct sst_dsp; + struct sst_generic_ipc; + ++union skl_connector_node_id { ++ u32 val; ++ struct { ++ u32 vindex:8; ++ u32 dma_type:5; ++ u32 rsvd:19; ++ } node; ++}; ++ ++enum skl_channel_index { ++ SKL_CHANNEL_LEFT = 0, ++ SKL_CHANNEL_RIGHT = 1, ++ SKL_CHANNEL_CENTER = 2, ++ SKL_CHANNEL_LEFT_SURROUND = 3, ++ SKL_CHANNEL_CENTER_SURROUND = 3, ++ SKL_CHANNEL_RIGHT_SURROUND = 4, ++ SKL_CHANNEL_LFE = 7, ++ SKL_CHANNEL_INVALID = 0xF, ++}; ++ ++enum skl_bitdepth { ++ SKL_DEPTH_8BIT = 8, ++ SKL_DEPTH_16BIT = 16, ++ SKL_DEPTH_24BIT = 24, ++ SKL_DEPTH_32BIT = 32, ++ SKL_DEPTH_INVALID ++}; ++ ++enum skl_s_freq { ++ SKL_FS_8000 = 8000, ++ SKL_FS_11025 = 11025, ++ SKL_FS_12000 = 12000, ++ SKL_FS_16000 = 16000, ++ SKL_FS_22050 = 22050, ++ SKL_FS_24000 = 24000, ++ SKL_FS_32000 = 32000, ++ SKL_FS_44100 = 44100, ++ SKL_FS_48000 = 48000, ++ SKL_FS_64000 = 64000, ++ SKL_FS_88200 = 88200, ++ SKL_FS_96000 = 96000, ++ SKL_FS_128000 = 128000, ++ SKL_FS_176400 = 176400, ++ SKL_FS_192000 = 192000, ++ SKL_FS_INVALID ++}; ++ ++struct skl_audio_data_format { ++ enum skl_s_freq s_freq; ++ enum skl_bitdepth bit_depth; ++ u32 channel_map; ++ enum skl_ch_cfg ch_cfg; ++ enum skl_interleaving interleaving; ++ u8 number_of_channels; ++ u8 valid_bit_depth; ++ u8 sample_type; ++ u8 reserved[1]; ++} __packed; ++ ++struct skl_base_cfg { ++ u32 cpc; ++ u32 ibs; ++ u32 obs; ++ u32 
is_pages; ++ struct skl_audio_data_format audio_fmt; ++}; ++ ++struct skl_cpr_gtw_cfg { ++ u32 node_id; ++ u32 dma_buffer_size; ++ u32 config_length; ++ /* not mandatory; required only for DMIC/I2S */ ++ u32 config_data[1]; ++} __packed; ++ ++struct skl_cpr_cfg { ++ struct skl_base_cfg base_cfg; ++ struct skl_audio_data_format out_fmt; ++ u32 cpr_feature_mask; ++ struct skl_cpr_gtw_cfg gtw_cfg; ++} __packed; ++ ++struct skl_cpr_pin_fmt { ++ u32 sink_id; ++ struct skl_audio_data_format src_fmt; ++ struct skl_audio_data_format dst_fmt; ++} __packed; ++ ++struct skl_src_module_cfg { ++ struct skl_base_cfg base_cfg; ++ enum skl_s_freq src_cfg; ++} __packed; ++ ++/* Maximum number of coefficients up down mixer module */ ++#define UP_DOWN_MIXER_MAX_COEFF 8 ++ ++struct skl_up_down_mixer_cfg { ++ struct skl_base_cfg base_cfg; ++ enum skl_ch_cfg out_ch_cfg; ++ /* This should be set to 1 if user coefficients are required */ ++ u32 coeff_sel; ++ /* Pass the user coeff in this array */ ++ s32 coeff[UP_DOWN_MIXER_MAX_COEFF]; ++ u32 ch_map; ++} __packed; ++ ++struct skl_algo_cfg { ++ struct skl_base_cfg base_cfg; ++ char params[0]; ++} __packed; ++ ++struct skl_base_outfmt_cfg { ++ struct skl_base_cfg base_cfg; ++ struct skl_audio_data_format out_fmt; ++} __packed; ++ + enum skl_ipc_pipeline_state { + PPL_INVALID_STATE = 0, + PPL_UNINITIALIZED = 1, +diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h +index e967800dbb62..dbc416c30846 100644 +--- a/sound/soc/intel/skylake/skl-topology.h ++++ b/sound/soc/intel/skylake/skl-topology.h +@@ -16,7 +16,6 @@ + + #include + #include +-#include + #include "skl.h" + + #define BITS_PER_BYTE 8 +@@ -24,9 +23,6 @@ + #define MAX_DMIC_TS_GROUPS 4 + #define MAX_FIXED_DMIC_PARAMS_SIZE 727 + +-/* Maximum number of coefficients up down mixer module */ +-#define UP_DOWN_MIXER_MAX_COEFF 8 +- + #define MODULE_MAX_IN_PINS 8 + #define MODULE_MAX_OUT_PINS 8 + +@@ -42,45 +38,6 @@ + #define SKL_MAX_MODULE_FORMATS 32 + 
#define SKL_MAX_MODULE_RESOURCES 32 + +-enum skl_channel_index { +- SKL_CHANNEL_LEFT = 0, +- SKL_CHANNEL_RIGHT = 1, +- SKL_CHANNEL_CENTER = 2, +- SKL_CHANNEL_LEFT_SURROUND = 3, +- SKL_CHANNEL_CENTER_SURROUND = 3, +- SKL_CHANNEL_RIGHT_SURROUND = 4, +- SKL_CHANNEL_LFE = 7, +- SKL_CHANNEL_INVALID = 0xF, +-}; +- +-enum skl_bitdepth { +- SKL_DEPTH_8BIT = 8, +- SKL_DEPTH_16BIT = 16, +- SKL_DEPTH_24BIT = 24, +- SKL_DEPTH_32BIT = 32, +- SKL_DEPTH_INVALID +-}; +- +- +-enum skl_s_freq { +- SKL_FS_8000 = 8000, +- SKL_FS_11025 = 11025, +- SKL_FS_12000 = 12000, +- SKL_FS_16000 = 16000, +- SKL_FS_22050 = 22050, +- SKL_FS_24000 = 24000, +- SKL_FS_32000 = 32000, +- SKL_FS_44100 = 44100, +- SKL_FS_48000 = 48000, +- SKL_FS_64000 = 64000, +- SKL_FS_88200 = 88200, +- SKL_FS_96000 = 96000, +- SKL_FS_128000 = 128000, +- SKL_FS_176400 = 176400, +- SKL_FS_192000 = 192000, +- SKL_FS_INVALID +-}; +- + enum skl_widget_type { + SKL_WIDGET_VMIXER = 1, + SKL_WIDGET_MIXER = 2, +@@ -88,78 +45,12 @@ enum skl_widget_type { + SKL_WIDGET_MUX = 4 + }; + +-struct skl_audio_data_format { +- enum skl_s_freq s_freq; +- enum skl_bitdepth bit_depth; +- u32 channel_map; +- enum skl_ch_cfg ch_cfg; +- enum skl_interleaving interleaving; +- u8 number_of_channels; +- u8 valid_bit_depth; +- u8 sample_type; +- u8 reserved[1]; +-} __packed; +- +-struct skl_base_cfg { +- u32 cpc; +- u32 ibs; +- u32 obs; +- u32 is_pages; +- struct skl_audio_data_format audio_fmt; +-}; +- +-struct skl_cpr_gtw_cfg { +- u32 node_id; +- u32 dma_buffer_size; +- u32 config_length; +- /* not mandatory; required only for DMIC/I2S */ +- u32 config_data[1]; +-} __packed; +- + struct skl_dma_control { + u32 node_id; + u32 config_length; + u32 config_data[0]; + } __packed; + +-struct skl_cpr_cfg { +- struct skl_base_cfg base_cfg; +- struct skl_audio_data_format out_fmt; +- u32 cpr_feature_mask; +- struct skl_cpr_gtw_cfg gtw_cfg; +-} __packed; +- +-struct skl_cpr_pin_fmt { +- u32 sink_id; +- struct skl_audio_data_format src_fmt; +- struct 
skl_audio_data_format dst_fmt; +-} __packed; +- +-struct skl_src_module_cfg { +- struct skl_base_cfg base_cfg; +- enum skl_s_freq src_cfg; +-} __packed; +- +-struct skl_up_down_mixer_cfg { +- struct skl_base_cfg base_cfg; +- enum skl_ch_cfg out_ch_cfg; +- /* This should be set to 1 if user coefficients are required */ +- u32 coeff_sel; +- /* Pass the user coeff in this array */ +- s32 coeff[UP_DOWN_MIXER_MAX_COEFF]; +- u32 ch_map; +-} __packed; +- +-struct skl_algo_cfg { +- struct skl_base_cfg base_cfg; +- char params[0]; +-} __packed; +- +-struct skl_base_outfmt_cfg { +- struct skl_base_cfg base_cfg; +- struct skl_audio_data_format out_fmt; +-} __packed; +- + enum skl_dma_type { + SKL_DMA_HDA_HOST_OUTPUT_CLASS = 0, + SKL_DMA_HDA_HOST_INPUT_CLASS = 1, +@@ -180,15 +71,6 @@ union skl_ssp_dma_node { + } dma_node; + }; + +-union skl_connector_node_id { +- u32 val; +- struct { +- u32 vindex:8; +- u32 dma_type:4; +- u32 rsvd:20; +- } node; +-}; +- + struct skl_module_fmt { + u32 channels; + u32 s_freq; +-- +2.17.1 + diff --git a/patches/0001-EDAC-Add-three-new-memory-types.edac b/patches/0001-EDAC-Add-three-new-memory-types.edac new file mode 100644 index 0000000000..7f6da2f994 --- /dev/null +++ b/patches/0001-EDAC-Add-three-new-memory-types.edac @@ -0,0 +1,83 @@ +From 19c04f8f7fedb5a30602e6b129432faff9e2525b Mon Sep 17 00:00:00 2001 +From: Qiuxu Zhuo +Date: Sat, 27 Oct 2018 23:35:25 +0800 +Subject: [PATCH 1/5] EDAC: Add three new memory types + +There are {Low-Power DDR3/4, WIO2} types of memory. +Add new entries to 'enum mem_type' and new strings to +'edac_mem_types[]' for the new types. 
+ +Signed-off-by: Qiuxu Zhuo +--- + drivers/edac/edac_mc.c | 3 +++ + include/linux/edac.h | 9 +++++++++ + 2 files changed, 12 insertions(+) + +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c +index e6fd079783bd..49e9ff3018ee 100644 +--- a/drivers/edac/edac_mc.c ++++ b/drivers/edac/edac_mc.c +@@ -211,10 +211,13 @@ const char * const edac_mem_types[] = { + [MEM_DDR3] = "Unbuffered-DDR3", + [MEM_RDDR3] = "Registered-DDR3", + [MEM_LRDDR3] = "Load-Reduced-DDR3-RAM", ++ [MEM_LPDDR3] = "Low-Power-DDR3-RAM", + [MEM_DDR4] = "Unbuffered-DDR4", + [MEM_RDDR4] = "Registered-DDR4", ++ [MEM_LPDDR4] = "Low-Power-DDR4-RAM", + [MEM_LRDDR4] = "Load-Reduced-DDR4-RAM", + [MEM_NVDIMM] = "Non-volatile-RAM", ++ [MEM_WIO2] = "Wide-IO-2", + }; + EXPORT_SYMBOL_GPL(edac_mem_types); + +diff --git a/include/linux/edac.h b/include/linux/edac.h +index c19483b90079..e908376b874a 100644 +--- a/include/linux/edac.h ++++ b/include/linux/edac.h +@@ -183,11 +183,14 @@ static inline char *mc_event_error_type(const unsigned int err_type) + * @MEM_RDDR3: Registered DDR3 RAM + * This is a variant of the DDR3 memories. + * @MEM_LRDDR3: Load-Reduced DDR3 memory. ++ * @MEM_LPDDR3: Low-Power DDR3 memory. + * @MEM_DDR4: Unbuffered DDR4 RAM + * @MEM_RDDR4: Registered DDR4 RAM + * This is a variant of the DDR4 memories. + * @MEM_LRDDR4: Load-Reduced DDR4 memory. ++ * @MEM_LPDDR4: Low-Power DDR4 memory. + * @MEM_NVDIMM: Non-volatile RAM ++ * @MEM_WIO2: Wide I/O 2. 
+ */ + enum mem_type { + MEM_EMPTY = 0, +@@ -208,10 +211,13 @@ enum mem_type { + MEM_DDR3, + MEM_RDDR3, + MEM_LRDDR3, ++ MEM_LPDDR3, + MEM_DDR4, + MEM_RDDR4, + MEM_LRDDR4, ++ MEM_LPDDR4, + MEM_NVDIMM, ++ MEM_WIO2, + }; + + #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) +@@ -231,10 +237,13 @@ enum mem_type { + #define MEM_FLAG_XDR BIT(MEM_XDR) + #define MEM_FLAG_DDR3 BIT(MEM_DDR3) + #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) ++#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3) + #define MEM_FLAG_DDR4 BIT(MEM_DDR4) + #define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) + #define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) ++#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4) + #define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) ++#define MEM_FLAG_WIO2 BIT(MEM_WIO2) + + /** + * enum edac-type - Error Detection and Correction capabilities and mode +-- +2.17.1 + diff --git a/patches/0001-PCI-add-pci_devices_ignore-cmdline-option.acrn b/patches/0001-PCI-add-pci_devices_ignore-cmdline-option.acrn new file mode 100644 index 0000000000..7836caaa8c --- /dev/null +++ b/patches/0001-PCI-add-pci_devices_ignore-cmdline-option.acrn @@ -0,0 +1,112 @@ +From 53ef7791cd649b9eb57c298d325ec2a6b66a6493 Mon Sep 17 00:00:00 2001 +From: Yonghua Huang +Date: Fri, 31 Aug 2018 10:58:54 +0800 +Subject: [PATCH 001/150] PCI: add pci_devices_ignore cmdline option + +some PCI devices may be occupied by hypervisor and do not want to +enable in linux guest. +add cmdline option "pci_devices_ignore=(B1:D1:F1),(B2:D2:F2, ...)" +to ignore PCI devices when system doing pci scan. 
+ +Change-Id: I506efef0a9d3a20b207770c744c70a013b10de13 +Tracked-On:218445 +Signed-off-by: Yonghua Huang +Signed-off-by: Jason Chen CJ +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/pci/probe.c | 69 +++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 69 insertions(+) + +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 3d5271a7a849..32d7903201e3 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -42,6 +42,70 @@ struct pci_domain_busn_res { + int domain_nr; + }; + ++#define PCI_IGNORE_MAX 8 ++ ++static u16 devices_ignore_table[PCI_IGNORE_MAX]; ++static int devices_ignore_cnt; ++ ++static void parse_ignore_device(char *bdf_str) ++{ ++ int fields; ++ unsigned int bus; ++ unsigned int dev; ++ unsigned int func; ++ ++ if (devices_ignore_cnt >= PCI_IGNORE_MAX - 1) ++ return; ++ ++ fields = sscanf(bdf_str, "%x:%x:%x", &bus, &dev, &func); ++ if (fields != 3) ++ return; ++ ++ devices_ignore_table[devices_ignore_cnt++] = ++ PCI_DEVID(bus, PCI_DEVFN(dev, func)); ++} ++ ++static int __init pci_deivces_ignore(char *str) ++{ ++ int len; ++ char *start, *end; ++ char bdf[16]; ++ ++ devices_ignore_cnt = 0; ++ ++ while ((start = strchr(str, '('))) { ++ ++ end = strchr(start, ')'); ++ if (end == NULL) ++ break; ++ ++ len = end - start - 1; ++ if (len >= 16) /*invalid string*/ ++ break; ++ ++ memcpy((void *)bdf, (void *)(start+1), len); ++ bdf[len] = '\0'; ++ parse_ignore_device(bdf); ++ str = end + 1; ++ } ++ ++ return 1; ++} ++__setup("pci_devices_ignore=", pci_deivces_ignore); ++ ++static bool device_on_ignore_list(int bus, int dev, int func) ++{ ++ int i; ++ ++ for (i = 0; i < devices_ignore_cnt; i++) ++ if ((PCI_BUS_NUM(devices_ignore_table[i]) == bus) && ++ (PCI_SLOT(devices_ignore_table[i]) == dev) && ++ (PCI_FUNC(devices_ignore_table[i]) == func)) ++ return true; ++ ++ return false; ++} ++ + static struct resource *get_pci_domain_busn_res(int domain_nr) + { + struct 
pci_domain_busn_res *r; +@@ -2442,6 +2506,11 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) + return dev; + } + ++ if (device_on_ignore_list(bus->number, ++ PCI_SLOT(devfn), ++ PCI_FUNC(devfn))) ++ return NULL; ++ + dev = pci_scan_device(bus, devfn); + if (!dev) + return NULL; +-- +2.17.1 + diff --git a/patches/0001-drm-i915-psr-Make-PSR-registers-relative-to-transcoder.drm b/patches/0001-drm-i915-psr-Make-PSR-registers-relative-to-transcoder.drm new file mode 100644 index 0000000000..b4ab63bbca --- /dev/null +++ b/patches/0001-drm-i915-psr-Make-PSR-registers-relative-to-transcoder.drm @@ -0,0 +1,513 @@ +From 53b6bd1115b299bf4c82f6c847508170340c51f3 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= +Date: Tue, 20 Aug 2019 15:33:23 -0700 +Subject: [PATCH 001/690] drm/i915/psr: Make PSR registers relative to + transcoders +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +PSR registers are a mess, some have the full address while others just +have the additional offset from psr_mmio_base. + +For BDW+ psr_mmio_base is nothing more than TRANSCODER_EDP_OFFSET + +0x800 and using it makes more difficult for people with an PSR +register address or PSR register name from from BSpec as i915 also +don't match the BSpec names. +For HSW psr_mmio_base is _DDI_BUF_CTL_A + 0x800 and PSR registers are +only available in DDIA. + +Other reason to make relative to transcoder is that since BDW every +transcoder have PSR registers, so in theory it should be possible to +have PSR enabled in a non-eDP transcoder. 
+ +So for BDW+ we can use _TRANS2() to get the register offset of any +PSR register in any transcoder while for HSW we have _HSW_PSR_ADJ +that will calculate the register offset for the single PSR instance, +noting that we are already guarded about trying to enable PSR in other +port than DDIA on HSW by the 'if (dig_port->base.port != PORT_A)' in +intel_psr_compute_config(), this check should only be valid for HSW +and will be changed in future. +PSR2 registers and PSR_EVENT was added after Haswell so that is why +_PSR_ADJ() is not used in some macros. + +The only registers that can not be relative to transcoder are +PSR_IMR and PSR_IIR that are not relative to anything, so keeping it +hardcoded. That changed for TGL but it will be handled in another +patch. + +Also removing BDW_EDP_PSR_BASE from GVT because it is not used as it +is the only PSR register that GVT have. + +v5: +- Macros changed to be more explicit about HSW (Dhinakaran) +- Squashed with the patch that added the tran parameter to the +macros (Dhinakaran) + +v6: +- Checking for interruption errors after module reload in the +transcoder that will be used (Dhinakaran) +- Using lowercase to the registers offsets + +v7: +- Removing IS_HASWELL() from registers macros(Jani) + +Cc: Dhinakaran Pandiyan +Cc: Rodrigo Vivi +Cc: Jani Nikula +Cc: Ville Syrjälä +Cc: Zhi Wang +Reviewed-by: Lucas De Marchi +Signed-off-by: José Roberto de Souza +Signed-off-by: Lucas De Marchi +Link: https://patchwork.freedesktop.org/patch/msgid/20190820223325.27490-1-jose.souza@intel.com +--- + drivers/gpu/drm/i915/display/intel_psr.c | 104 +++++++++++++---------- + drivers/gpu/drm/i915/gvt/handlers.c | 2 +- + drivers/gpu/drm/i915/i915_debugfs.c | 18 ++-- + drivers/gpu/drm/i915/i915_drv.h | 5 +- + drivers/gpu/drm/i915/i915_reg.h | 57 +++++++++---- + 5 files changed, 113 insertions(+), 73 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c +index 3bfb720560c2..77232f6bca17 
100644 +--- a/drivers/gpu/drm/i915/display/intel_psr.c ++++ b/drivers/gpu/drm/i915/display/intel_psr.c +@@ -390,7 +390,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) + + BUILD_BUG_ON(sizeof(aux_msg) > 20); + for (i = 0; i < sizeof(aux_msg); i += 4) +- I915_WRITE(EDP_PSR_AUX_DATA(i >> 2), ++ I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2), + intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); + + aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); +@@ -401,7 +401,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) + + /* Select only valid bits for SRD_AUX_CTL */ + aux_ctl &= psr_aux_mask; +- I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl); ++ I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl); + } + + static void intel_psr_enable_sink(struct intel_dp *intel_dp) +@@ -491,8 +491,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) + if (INTEL_GEN(dev_priv) >= 8) + val |= EDP_PSR_CRC_ENABLE; + +- val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK; +- I915_WRITE(EDP_PSR_CTL, val); ++ val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & ++ EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK); ++ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); + } + + static void hsw_activate_psr2(struct intel_dp *intel_dp) +@@ -528,9 +529,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) + * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is + * recommending keep this bit unset while PSR2 is enabled. + */ +- I915_WRITE(EDP_PSR_CTL, 0); ++ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0); + +- I915_WRITE(EDP_PSR2_CTL, val); ++ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); + } + + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, +@@ -606,10 +607,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, + + /* + * HSW spec explicitly says PSR is tied to port A. 
+- * BDW+ platforms with DDI implementation of PSR have different +- * PSR registers per transcoder and we only implement transcoder EDP +- * ones. Since by Display design transcoder EDP is tied to port A +- * we can safely escape based on the port A. ++ * BDW+ platforms have a instance of PSR registers per transcoder but ++ * for now it only supports one instance of PSR, so lets keep it ++ * hardcoded to PORT_A + */ + if (dig_port->base.port != PORT_A) { + DRM_DEBUG_KMS("PSR condition failed: Port not supported\n"); +@@ -649,8 +649,8 @@ static void intel_psr_activate(struct intel_dp *intel_dp) + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (INTEL_GEN(dev_priv) >= 9) +- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); +- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); ++ WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); ++ WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); + WARN_ON(dev_priv->psr.active); + lockdep_assert_held(&dev_priv->psr.lock); + +@@ -720,19 +720,37 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, + if (INTEL_GEN(dev_priv) < 11) + mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; + +- I915_WRITE(EDP_PSR_DEBUG, mask); ++ I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask); + } + + static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *crtc_state) + { + struct intel_dp *intel_dp = dev_priv->psr.dp; ++ u32 val; + + WARN_ON(dev_priv->psr.enabled); + + dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); + dev_priv->psr.busy_frontbuffer_bits = 0; + dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; ++ dev_priv->psr.transcoder = crtc_state->cpu_transcoder; ++ ++ /* ++ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR ++ * will still keep the error set even after the reset done in the ++ * irq_preinstall and irq_uninstall hooks. 
++ * And enabling in this situation cause the screen to freeze in the ++ * first time that PSR HW tries to activate so lets keep PSR disabled ++ * to avoid any rendering problems. ++ */ ++ val = I915_READ(EDP_PSR_IIR); ++ val &= EDP_PSR_ERROR(edp_psr_shift(dev_priv->psr.transcoder)); ++ if (val) { ++ dev_priv->psr.sink_not_reliable = true; ++ DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n"); ++ return; ++ } + + DRM_DEBUG_KMS("Enabling PSR%s\n", + dev_priv->psr.psr2_enabled ? "2" : "1"); +@@ -782,20 +800,27 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv) + u32 val; + + if (!dev_priv->psr.active) { +- if (INTEL_GEN(dev_priv) >= 9) +- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); +- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); ++ if (INTEL_GEN(dev_priv) >= 9) { ++ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); ++ WARN_ON(val & EDP_PSR2_ENABLE); ++ } ++ ++ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); ++ WARN_ON(val & EDP_PSR_ENABLE); ++ + return; + } + + if (dev_priv->psr.psr2_enabled) { +- val = I915_READ(EDP_PSR2_CTL); ++ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); + WARN_ON(!(val & EDP_PSR2_ENABLE)); +- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); ++ val &= ~EDP_PSR2_ENABLE; ++ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); + } else { +- val = I915_READ(EDP_PSR_CTL); ++ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); + WARN_ON(!(val & EDP_PSR_ENABLE)); +- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); ++ val &= ~EDP_PSR_ENABLE; ++ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); + } + dev_priv->psr.active = false; + } +@@ -817,10 +842,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) + intel_psr_exit(dev_priv); + + if (dev_priv->psr.psr2_enabled) { +- psr_status = EDP_PSR2_STATUS; ++ psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder); + psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; + } else { +- psr_status = EDP_PSR_STATUS; ++ 
psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder); + psr_status_mask = EDP_PSR_STATUS_STATE_MASK; + } + +@@ -963,7 +988,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, + * defensive enough to cover everything. + */ + +- return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS, ++ return __intel_wait_for_register(&dev_priv->uncore, ++ EDP_PSR_STATUS(dev_priv->psr.transcoder), + EDP_PSR_STATUS_STATE_MASK, + EDP_PSR_STATUS_STATE_IDLE, 2, 50, + out_value); +@@ -979,10 +1005,10 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) + return false; + + if (dev_priv->psr.psr2_enabled) { +- reg = EDP_PSR2_STATUS; ++ reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder); + mask = EDP_PSR2_STATUS_STATE_MASK; + } else { +- reg = EDP_PSR_STATUS; ++ reg = EDP_PSR_STATUS(dev_priv->psr.transcoder); + mask = EDP_PSR_STATUS_STATE_MASK; + } + +@@ -1208,36 +1234,24 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, + */ + void intel_psr_init(struct drm_i915_private *dev_priv) + { +- u32 val; +- + if (!HAS_PSR(dev_priv)) + return; + +- dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? +- HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; +- + if (!dev_priv->psr.sink_support) + return; + ++ if (IS_HASWELL(dev_priv)) ++ /* ++ * HSW don't have PSR registers on the same space as transcoder ++ * so set this to a value that when subtract to the register ++ * in transcoder space results in the right offset for HSW ++ */ ++ dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE; ++ + if (i915_modparams.enable_psr == -1) + if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) + i915_modparams.enable_psr = 0; + +- /* +- * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR +- * will still keep the error set even after the reset done in the +- * irq_preinstall and irq_uninstall hooks. 
+- * And enabling in this situation cause the screen to freeze in the +- * first time that PSR HW tries to activate so lets keep PSR disabled +- * to avoid any rendering problems. +- */ +- val = I915_READ(EDP_PSR_IIR); +- val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP)); +- if (val) { +- DRM_DEBUG_KMS("PSR interruption error set\n"); +- dev_priv->psr.sink_not_reliable = true; +- } +- + /* Set link_standby x link_off defaults */ + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + /* HSW and BDW require workarounds that we don't implement. */ +diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c +index 25f78196b964..45a9124e53b6 100644 +--- a/drivers/gpu/drm/i915/gvt/handlers.c ++++ b/drivers/gpu/drm/i915/gvt/handlers.c +@@ -2796,7 +2796,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) + MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); + + MMIO_D(WM_MISC, D_BDW); +- MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW); ++ MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW); + + MMIO_D(_MMIO(0x6671c), D_BDW_PLUS); + MMIO_D(_MMIO(0x66c00), D_BDW_PLUS); +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index b0f51591f2e4..e103fcba6435 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -2133,7 +2133,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) + "BUF_ON", + "TG_ON" + }; +- val = I915_READ(EDP_PSR2_STATUS); ++ val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder)); + status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> + EDP_PSR2_STATUS_STATE_SHIFT; + if (status_val < ARRAY_SIZE(live_status)) +@@ -2149,7 +2149,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) + "SRDOFFACK", + "SRDENT_ON", + }; +- val = I915_READ(EDP_PSR_STATUS); ++ val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder)); + status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> + EDP_PSR_STATUS_STATE_SHIFT; + if (status_val < 
ARRAY_SIZE(live_status)) +@@ -2192,10 +2192,10 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) + goto unlock; + + if (psr->psr2_enabled) { +- val = I915_READ(EDP_PSR2_CTL); ++ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); + enabled = val & EDP_PSR2_ENABLE; + } else { +- val = I915_READ(EDP_PSR_CTL); ++ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); + enabled = val & EDP_PSR_ENABLE; + } + seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", +@@ -2208,7 +2208,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) + * SKL+ Perf counter is reset to 0 everytime DC state is entered + */ + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { +- val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK; ++ val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder)); ++ val &= EDP_PSR_PERF_CNT_MASK; + seq_printf(m, "Performance counter: %u\n", val); + } + +@@ -2226,8 +2227,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) + * Reading all 3 registers before hand to minimize crossing a + * frame boundary between register reads + */ +- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) +- su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame)); ++ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { ++ val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder, ++ frame)); ++ su_frames_val[frame / 3] = val; ++ } + + seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); + +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index 772154e4073e..d9fadc38fcfa 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -479,6 +479,7 @@ struct i915_psr { + bool enabled; + struct intel_dp *dp; + enum pipe pipe; ++ enum transcoder transcoder; + bool active; + struct work_struct work; + unsigned busy_frontbuffer_bits; +@@ -1331,11 +1332,11 @@ struct drm_i915_private { + */ + u32 gpio_mmio_base; + ++ u32 hsw_psr_mmio_adjust; ++ + /* MMIO base address for MIPI 
regs */ + u32 mipi_mmio_base; + +- u32 psr_mmio_base; +- + u32 pps_mmio_base; + + wait_queue_head_t gmbus_wait_queue; +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 2abd199093c5..a092b34c269d 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -4186,10 +4186,17 @@ enum { + #define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC) + #define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A) + +-/* HSW+ eDP PSR registers */ +-#define HSW_EDP_PSR_BASE 0x64800 +-#define BDW_EDP_PSR_BASE 0x6f800 +-#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0) ++/* ++ * HSW+ eDP PSR registers ++ * ++ * HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one ++ * instance of it ++ */ ++#define _HSW_EDP_PSR_BASE 0x64800 ++#define _SRD_CTL_A 0x60800 ++#define _SRD_CTL_EDP 0x6f800 ++#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust) ++#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A)) + #define EDP_PSR_ENABLE (1 << 31) + #define BDW_PSR_SINGLE_FRAME (1 << 30) + #define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */ +@@ -4226,16 +4233,22 @@ enum { + #define EDP_PSR_TRANSCODER_A_SHIFT 8 + #define EDP_PSR_TRANSCODER_EDP_SHIFT 0 + +-#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) ++#define _SRD_AUX_CTL_A 0x60810 ++#define _SRD_AUX_CTL_EDP 0x6f810 ++#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A)) + #define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26) + #define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20) + #define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16) + #define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11) + #define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff) + +-#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */ ++#define _SRD_AUX_DATA_A 0x60814 ++#define _SRD_AUX_DATA_EDP 0x6f814 ++#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) 
+ 4) /* 5 registers */ + +-#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40) ++#define _SRD_STATUS_A 0x60840 ++#define _SRD_STATUS_EDP 0x6f840 ++#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A)) + #define EDP_PSR_STATUS_STATE_MASK (7 << 29) + #define EDP_PSR_STATUS_STATE_SHIFT 29 + #define EDP_PSR_STATUS_STATE_IDLE (0 << 29) +@@ -4260,10 +4273,15 @@ enum { + #define EDP_PSR_STATUS_SENDING_TP1 (1 << 4) + #define EDP_PSR_STATUS_IDLE_MASK 0xf + +-#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44) ++#define _SRD_PERF_CNT_A 0x60844 ++#define _SRD_PERF_CNT_EDP 0x6f844 ++#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A)) + #define EDP_PSR_PERF_CNT_MASK 0xffffff + +-#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */ ++/* PSR_MASK on SKL+ */ ++#define _SRD_DEBUG_A 0x60860 ++#define _SRD_DEBUG_EDP 0x6f860 ++#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A)) + #define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28) + #define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) + #define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) +@@ -4271,7 +4289,9 @@ enum { + #define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */ + #define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ + +-#define EDP_PSR2_CTL _MMIO(0x6f900) ++#define _PSR2_CTL_A 0x60900 ++#define _PSR2_CTL_EDP 0x6f900 ++#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A) + #define EDP_PSR2_ENABLE (1 << 31) + #define EDP_SU_TRACK_ENABLE (1 << 30) + #define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */ +@@ -4293,8 +4313,8 @@ enum { + #define _PSR_EVENT_TRANS_B 0x61848 + #define _PSR_EVENT_TRANS_C 0x62848 + #define _PSR_EVENT_TRANS_D 0x63848 +-#define _PSR_EVENT_TRANS_EDP 0x6F848 +-#define PSR_EVENT(trans) _MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A) ++#define _PSR_EVENT_TRANS_EDP 0x6f848 ++#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A) + #define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17) + #define 
PSR_EVENT_PSR2_DISABLED (1 << 16) + #define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15) +@@ -4312,15 +4332,16 @@ enum { + #define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) + #define PSR_EVENT_PSR_DISABLE (1 << 0) + +-#define EDP_PSR2_STATUS _MMIO(0x6f940) ++#define _PSR2_STATUS_A 0x60940 ++#define _PSR2_STATUS_EDP 0x6f940 ++#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) + #define EDP_PSR2_STATUS_STATE_MASK (0xf << 28) + #define EDP_PSR2_STATUS_STATE_SHIFT 28 + +-#define _PSR2_SU_STATUS_0 0x6F914 +-#define _PSR2_SU_STATUS_1 0x6F918 +-#define _PSR2_SU_STATUS_2 0x6F91C +-#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1)) +-#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3)) ++#define _PSR2_SU_STATUS_A 0x60914 ++#define _PSR2_SU_STATUS_EDP 0x6f914 ++#define _PSR2_SU_STATUS(tran, index) _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4) ++#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3)) + #define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10) + #define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame)) + #define PSR2_SU_STATUS_FRAMES 8 +-- +2.17.1 + diff --git a/patches/0001-fTPM-add-OPTEE-fTPM-driver.security b/patches/0001-fTPM-add-OPTEE-fTPM-driver.security new file mode 100644 index 0000000000..0340b10ec7 --- /dev/null +++ b/patches/0001-fTPM-add-OPTEE-fTPM-driver.security @@ -0,0 +1,567 @@ +From e70e9e2b2dea7ec43f0551021c89a14d18e7d7cc Mon Sep 17 00:00:00 2001 +From: Vincent Cao +Date: Wed, 28 Nov 2018 16:30:52 -0800 +Subject: [PATCH 01/65] fTPM: add OPTEE fTPM driver + +This driver provides support to communicate to a fTPM Trusted Application (TA) +running within a ARM TEE, in this case OP-TEE. It uses the tee framework +for SMC trigger into the TEE OS and provides TCG CRB like interfaces to communicate +to the fTPM TA running with the TEE. 
+ +will require TCG_FTPM_OPTEE to be enabled and .compatible devicetree node "tcg,fwtpm" +to be set for initialize the driver. + +Change-Id: Ic5ada2a5b629dc48ff31061603f600ccd0ab17c9 +Signed-off-by: Vincent Cao +Signed-off-by: Igor Opaniuk +--- + README.kmb_optee_ftpm | 15 ++ + drivers/char/tpm/Kconfig | 8 + + drivers/char/tpm/Makefile | 1 + + drivers/char/tpm/tpm_ftpm_optee.c | 390 ++++++++++++++++++++++++++++++ + drivers/char/tpm/tpm_ftpm_optee.h | 71 ++++++ + drivers/tee/tee_core.c | 1 + + 6 files changed, 486 insertions(+) + create mode 100644 README.kmb_optee_ftpm + create mode 100644 drivers/char/tpm/tpm_ftpm_optee.c + create mode 100644 drivers/char/tpm/tpm_ftpm_optee.h + +diff --git a/README.kmb_optee_ftpm b/README.kmb_optee_ftpm +new file mode 100644 +index 000000000000..75fe989592a9 +--- /dev/null ++++ b/README.kmb_optee_ftpm +@@ -0,0 +1,15 @@ ++IP Block: OPTEE core driver ++Platform Affect: (ARM64) IOTG KeemBay ++BugFix: None ++ ++ ++This branch pull implements a driver provides support to communicate to a fTPM Trusted ++Application (TA) running within a ARM TEE, in this case OP-TEE. It uses the tee framework ++for SMC trigger into the TEE OS and provides TCG CRB like interfaces to communicate ++to the fTPM TA running with the TEE. ++ ++The follow defconfig must be enabled for driver to compile ++ONFIG_TCG_FTPM_OPTEE ++CONFIG_TCG_TPM ++ ++compatible devicetree node "tcg,fwtpm" to be set for initialize the driver. +diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig +index 9c37047f4b56..e9a9f2944401 100644 +--- a/drivers/char/tpm/Kconfig ++++ b/drivers/char/tpm/Kconfig +@@ -155,6 +155,14 @@ config TCG_CRB + from within Linux. To compile this driver as a module, choose + M here; the module will be called tpm_crb. 
+ ++config TCG_FTPM_OPTEE ++ tristate "TPM 2.0 FTPM OP-TEE Interface" ++ depends on OPTEE ++ ---help--- ++ This driver proxies for a firmware TPM 2.0 implementation, which ++ leverages ARM Trustzone and OP-TEE Trusted Execution Environment. ++ To compile this driver as a module, choose M here; ++ + config TCG_VTPM_PROXY + tristate "VTPM Proxy Interface" + depends on TCG_TPM +diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile +index c354cdff9c62..316a5f538bf6 100644 +--- a/drivers/char/tpm/Makefile ++++ b/drivers/char/tpm/Makefile +@@ -32,5 +32,6 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o + obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/ + obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o + obj-$(CONFIG_TCG_CRB) += tpm_crb.o ++obj-$(CONFIG_TCG_FTPM_OPTEE) += tpm_ftpm_optee.o + obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o + obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o +diff --git a/drivers/char/tpm/tpm_ftpm_optee.c b/drivers/char/tpm/tpm_ftpm_optee.c +new file mode 100644 +index 000000000000..f724722e44d1 +--- /dev/null ++++ b/drivers/char/tpm/tpm_ftpm_optee.c +@@ -0,0 +1,390 @@ ++/* ++ * Copyright (C) 2018 Intel Corporation ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "tpm.h" ++#include "tpm_ftpm_optee.h" ++ ++static struct platform_device *fwtpm_pdev; ++ ++static int fwtpm_optee_match(struct tee_ioctl_version_data *data, ++ const void *vers) ++{ ++ return 1; ++} ++ ++static int fwtpm_optee_init_session(struct fwtpm_device_priv *priv) ++{ ++ struct tee_ioctl_open_session_arg sess_arg; ++ struct tee_ioctl_version_data vers = { ++ .impl_id = TEE_OPTEE_CAP_TZ, ++ .impl_caps = TEE_IMPL_ID_OPTEE, ++ .gen_caps = TEE_GEN_CAP_GP, ++ }; ++ struct teec_uuid uuid = TA_FTPM_UUID; ++ int rc = 0; ++ ++ if (priv->status & FTPM_SESSION_INITED) ++ return rc; ++ priv->ctx = tee_client_open_context(NULL, fwtpm_optee_match, ++ NULL, &vers); ++ if (IS_ERR(priv->ctx)) ++ return -EINVAL; ++ ++ memset(&sess_arg, 0, sizeof(sess_arg)); ++ memcpy(&sess_arg.uuid, &uuid, sizeof(uuid)); ++ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; ++ ++ rc = tee_client_open_session(priv->ctx, &sess_arg, NULL); ++ if (!rc && sess_arg.ret) ++ return -EIO; ++ if (rc) { ++ tee_client_close_context(priv->ctx); ++ return rc; ++ } ++ priv->sess = sess_arg.session; ++ ++ priv->cmd_buf_shm = tee_shm_alloc(priv->ctx, TPM_BUFSIZE, ++ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); ++ if (!priv->cmd_buf_shm) { ++ rc = -ENOMEM; ++ goto err_close_sess; ++ } ++ ++ priv->resp_buf_shm = tee_shm_alloc(priv->ctx, TPM_BUFSIZE, ++ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); ++ if (!priv->resp_buf_shm) { ++ rc = -ENOMEM; ++ goto err_shm_free; ++ } ++ ++ priv->cmd_buf = tee_shm_get_va(priv->cmd_buf_shm, 0); ++ if (IS_ERR(priv->cmd_buf)) { ++ rc = -ERESTART; ++ goto err_shm_free2; ++ } ++ ++ priv->resp_buf = tee_shm_get_va(priv->resp_buf_shm, 0); ++ if (IS_ERR(priv->resp_buf)) { ++ rc = -ERESTART; ++ goto err_shm_free2; ++ } ++ ++ priv->status |= FTPM_SESSION_INITED; ++ return rc; ++ ++err_shm_free2: ++ tee_shm_free(priv->resp_buf_shm); ++ ++err_shm_free: ++ tee_shm_free(priv->cmd_buf_shm); ++ ++err_close_sess: ++ 
tee_client_close_session(priv->ctx, priv->sess); ++ tee_client_close_context(priv->ctx); ++ ++ return rc; ++} ++ ++static int fwtpm_optee_cmd(struct fwtpm_device_priv *priv, ++ enum fwtpm_optee_cmd cmd, ++ u8 *buf, ++ size_t count) ++{ ++ struct tee_ioctl_invoke_arg arg; ++ struct tee_param param[2]; ++ struct tpm_header *out_header; ++ int rc = 0; ++ __be32 len; ++ ++ if (!(priv->status & FTPM_SESSION_INITED)) ++ return -EIO; ++ ++ priv->cmd_len = count; ++ memcpy(priv->cmd_buf, buf, count); ++ ++ memset(&arg, 0, sizeof(arg)); ++ arg.func = cmd; ++ arg.session = priv->sess; ++ arg.num_params = 2; ++ ++ /* ++ * change cancellation id for each new operation, ++ * it should be a unique value to identify this request ++ */ ++ arg.cancel_id = ++(priv->cancel_id); ++ priv->is_canceled = false; ++ ++ memset(param, 0, sizeof(param)); ++ ++ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; ++ param[0].u.memref.shm = priv->cmd_buf_shm; ++ param[0].u.memref.size = TPM_BUFSIZE; ++ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT; ++ param[1].u.memref.shm = priv->resp_buf_shm; ++ param[1].u.memref.size = TPM_BUFSIZE; ++ ++ rc = tee_client_invoke_func(priv->ctx, &arg, param); ++ ++ if (!rc && arg.ret) ++ return -EIO; ++ ++ out_header = (struct tpm_header *)priv->resp_buf; ++ len = out_header->length; ++ ++ priv->resp_len = be32_to_cpu(len); ++ ++ return 0; ++} ++ ++static int fwtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) ++{ ++ struct fwtpm_device_priv *priv = dev_get_drvdata(&chip->dev); ++ int rc = 0; ++ size_t len; ++ ++ /* sanity check */ ++ if (count < 6) ++ return -EIO; ++ ++ len = priv->resp_len; ++ if (count < len) { ++ dev_err(&chip->dev, ++ "Invalid size in recv: count=%zd, resp_len=%zd\n", ++ count, len); ++ rc = -EIO; ++ goto out; ++ } ++ ++ memcpy(buf, priv->resp_buf, len); ++ priv->resp_len = 0; ++ ++out: ++ return rc ? 
rc : len; ++} ++ ++static int fwtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) ++{ ++ struct fwtpm_device_priv *priv = dev_get_drvdata(&chip->dev); ++ int rc = 0; ++ ++ if (!(priv->status & FTPM_SESSION_INITED)) { ++ rc = fwtpm_optee_init_session(priv); ++ if (rc) { ++ dev_err(&chip->dev, ++ "Can't init session to swTPM TA\n"); ++ return -EIO; ++ } ++ } ++ ++ if (fwtpm_optee_cmd(priv, FTPM_HANDLE_CMD_SUBMIT, buf, count)) { ++ dev_err(&chip->dev, ++ "Sending command to swTPM TA failed\n"); ++ return -EIO; ++ } ++ ++ return rc; ++} ++ ++static void fwtpm_cancel(struct tpm_chip *chip) ++{ ++ struct fwtpm_device_priv *priv = dev_get_drvdata(&chip->dev); ++ struct tee_ioctl_cancel_arg arg; ++ ++ if (!(priv->status & FTPM_SESSION_INITED)) { ++ dev_err(&chip->dev, ++ "Session is not inited, nothing to cancel\n"); ++ return; ++ } ++ ++ arg.cancel_id = priv->cancel_id; ++ arg.session = priv->sess; ++ ++ if (tee_client_cancel_req(priv->ctx, &arg)) { ++ dev_err(&chip->dev, ++ "Cancel request failed\n"); ++ return; ++ } ++ ++ priv->is_canceled = true; ++} ++ ++static u8 fwtpm_status(struct tpm_chip *chip) ++{ ++ return BIT(0); ++} ++ ++static bool fwtpm_req_canceled(struct tpm_chip *chip, u8 status) ++{ ++ struct fwtpm_device_priv *priv = dev_get_drvdata(&chip->dev); ++ ++ return priv->is_canceled; ++} ++ ++static int fwtpm_suspend(struct device *dev) ++{ ++ struct tpm_chip *chip = dev_get_drvdata(dev); ++ struct fwtpm_device_priv *priv = dev_get_drvdata(&chip->dev); ++ struct fwtpm_pm_data pm_data; ++ int rc = 0; ++ ++ if (!(priv->status & FTPM_SESSION_INITED)) { ++ rc = fwtpm_optee_init_session(priv); ++ if (rc) { ++ dev_err(&chip->dev, ++ "Can't init session to swTPM TA\n"); ++ return -EIO; ++ } ++ } ++ ++ pm_data.pm_event = FTPM_PM_SUSPEND; ++ ++ if (fwtpm_optee_cmd(priv, FTPM_HANDLE_PM, ++ (u8 *)&pm_data, sizeof(pm_data))) { ++ dev_err(&chip->dev, ++ "Sending command to swTPM TA failed\n"); ++ return -EIO; ++ } ++ ++ return rc; ++} ++ ++static int 
fwtpm_resume(struct device *dev) ++{ ++ struct tpm_chip *chip = dev_get_drvdata(dev); ++ struct fwtpm_device_priv *priv = dev_get_drvdata(&chip->dev); ++ struct fwtpm_pm_data pm_data; ++ int rc = 0; ++ ++ if (!(priv->status & FTPM_SESSION_INITED)) { ++ rc = fwtpm_optee_init_session(priv); ++ if (rc) { ++ dev_err(&chip->dev, ++ "Can't init session to swTPM TA\n"); ++ return -EIO; ++ } ++ } ++ ++ pm_data.pm_event = FTPM_PM_RESUME; ++ ++ if (fwtpm_optee_cmd(priv, FTPM_HANDLE_PM, ++ (u8 *)&pm_data, sizeof(pm_data))) { ++ dev_err(&chip->dev, ++ "Sending command to swTPM TA failed\n"); ++ return -EIO; ++ } ++ ++ return rc; ++} ++ ++static const struct tpm_class_ops fwtpm_ops = { ++ .status = fwtpm_status, ++ .recv = fwtpm_recv, ++ .send = fwtpm_send, ++ .cancel = fwtpm_cancel, ++ .req_canceled = fwtpm_req_canceled, ++}; ++ ++static const struct dev_pm_ops fwtpm_pm_ops = { ++ .suspend = fwtpm_suspend, ++ .resume = fwtpm_resume, ++}; ++ ++static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume); ++ ++static int fwtpm_probe(struct platform_device *pdev) ++{ ++ struct tpm_chip *chip; ++ struct fwtpm_device_priv *priv; ++ int rc = 0; ++ ++ fwtpm_pdev = platform_device_register_simple("optee_fwtpm", ++ -1, NULL, 0); ++ if (IS_ERR(fwtpm_pdev)) { ++ rc = PTR_ERR(fwtpm_pdev); ++ return rc; ++ } ++ ++ priv = devm_kzalloc(&fwtpm_pdev->dev, sizeof(struct fwtpm_device_priv), ++ GFP_KERNEL); ++ if (!priv) { ++ rc = -ENOMEM; ++ goto err_unreg_dev; ++ } ++ ++ chip = tpmm_chip_alloc(&fwtpm_pdev->dev, &fwtpm_ops); ++ if (IS_ERR(chip)) { ++ rc = PTR_ERR(chip); ++ goto err_unreg_dev; ++ } ++ ++ dev_set_drvdata(&chip->dev, priv); ++ ++ ++ rc = tpm_chip_register(chip); ++ if (rc) ++ goto err_unreg_dev; ++ ++ return rc; ++ ++err_unreg_dev: ++ platform_device_unregister(pdev); ++ ++ return rc; ++} ++ ++static const struct of_device_id fwtpm_of_match[] = { ++ { .compatible = "tcg,fwtpm", }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, fwtpm_of_match); ++ ++static struct platform_driver 
fwtpm_pdrv = { ++ .probe = fwtpm_probe, ++ .driver = { ++ .name = "optee_fwtpm;", ++ .of_match_table = fwtpm_of_match, ++ .pm = &fwtpm_pm_ops, ++ }, ++}; ++ ++static int __init fwtpm_driver_init(void) ++{ ++ int rc = 0; ++ ++ rc = platform_driver_register(&fwtpm_pdrv); ++ if (rc) ++ return rc; ++ ++ return rc; ++} ++ ++static void __exit fwtpm_driver_exit(void) ++{ ++ platform_driver_unregister(&fwtpm_pdrv); ++} ++ ++module_init(fwtpm_driver_init); ++module_exit(fwtpm_driver_exit); ++ ++MODULE_AUTHOR("Igor Opaniuk "); ++MODULE_AUTHOR("Vincent T Cao "); ++MODULE_DESCRIPTION("fTPM 2.0 TEE Driver"); ++MODULE_VERSION("1.0"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/char/tpm/tpm_ftpm_optee.h b/drivers/char/tpm/tpm_ftpm_optee.h +new file mode 100644 +index 000000000000..2f5d09b1be05 +--- /dev/null ++++ b/drivers/char/tpm/tpm_ftpm_optee.h +@@ -0,0 +1,71 @@ ++ ++/* ++ * Copyright (C) 2018 Intel Corporation ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#ifndef __TPM_FTPM_OPTEE__ ++#define __TPM_FTPM_OPTEE__ ++ ++#include "tpm.h" ++ ++#define FTPM_DEBUG_HEAD "[fTPM]" ++#define kdebug(FMT, ...) 
\ ++ pr_info(FTPM_DEBUG_HEAD " ==> %s(" FMT ")\n", \ ++ __func__, ##__VA_ARGS__) ++ ++#define TA_FTPM_UUID \ ++ { 0x71d950bc, 0xc9d4, 0xc442, \ ++ { 0x82, 0xcb, 0x34, 0x3f, 0xb7, 0xf3, 0x78, 0x96 } } ++ ++struct teec_uuid { ++ u32 timeLow; ++ u16 timeMid; ++ u16 timeHiAndVersion; ++ u8 clockSeqAndNode[8]; ++}; ++ ++enum fwtpm_status { ++ FTPM_SESSION_INITED = BIT(0), ++}; ++ ++struct fwtpm_device_priv { ++ unsigned int status; ++ struct tee_context *ctx; ++ u32 cancel_id; ++ struct tee_shm *cmd_buf_shm; ++ struct tee_shm *resp_buf_shm; ++ u32 sess; ++ bool is_canceled; ++ size_t cmd_len; ++ size_t resp_len; ++ char *cmd_buf; ++ char *resp_buf; ++ ++}; ++ ++enum fwtpm_pm_event { ++ FTPM_PM_RESUME = 0, ++ FTPM_PM_SUSPEND = 1, ++}; ++ ++struct fwtpm_pm_data { ++ u64 pm_event; ++}; ++ ++enum fwtpm_optee_cmd { ++ FTPM_HANDLE_CMD_SUBMIT = 0, ++ FTPM_HANDLE_PPI = 1, ++ FTPM_HANDLE_PM = 2, ++}; ++ ++#endif /* __TPM_FTPM_OPTEE__ */ +diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c +index 0f16d9ffd8d1..3ca3d2ea92f8 100644 +--- a/drivers/tee/tee_core.c ++++ b/drivers/tee/tee_core.c +@@ -1040,6 +1040,7 @@ int tee_client_cancel_req(struct tee_context *ctx, + return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id, + arg->session); + } ++EXPORT_SYMBOL_GPL(tee_client_cancel_req); + + static int tee_client_device_match(struct device *dev, + struct device_driver *drv) +-- +2.17.1 + diff --git a/patches/0001-iio-adc-add-support-for-Intel-ADC.felipeb-5.4 b/patches/0001-iio-adc-add-support-for-Intel-ADC.felipeb-5.4 new file mode 100644 index 0000000000..1fec48439e --- /dev/null +++ b/patches/0001-iio-adc-add-support-for-Intel-ADC.felipeb-5.4 @@ -0,0 +1,483 @@ +From 72bd1442bb00614fea6281bc7a38674912270b8c Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Fri, 15 Jun 2018 15:31:21 +0300 +Subject: [PATCH 01/14] iio: adc: add support for Intel ADC + +Recent Intel SoCs have an integrated 14-bit, 4 MS/sec ADC. This patch +adds support for that controller. 
+ +Signed-off-by: Felipe Balbi +--- + drivers/iio/adc/Kconfig | 9 + + drivers/iio/adc/Makefile | 1 + + drivers/iio/adc/intel-adc.c | 426 ++++++++++++++++++++++++++++++++++++ + 3 files changed, 436 insertions(+) + create mode 100644 drivers/iio/adc/intel-adc.c + +diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig +index f0af3a42f53c..eb305f30c6d5 100644 +--- a/drivers/iio/adc/Kconfig ++++ b/drivers/iio/adc/Kconfig +@@ -432,6 +432,15 @@ config INGENIC_ADC + This driver can also be built as a module. If so, the module will be + called ingenic_adc. + ++config INTEL_ADC ++ tristate "Intel ADC IIO driver" ++ depends on PCI ++ select IIO_BUFFER ++ select IIO_TRIGGERED_BUFFER ++ help ++ Say yes here to build support for Intel ADC available on ++ recent SoCs. ++ + config IMX7D_ADC + tristate "Freescale IMX7D ADC driver" + depends on ARCH_MXC || COMPILE_TEST +diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile +index ef9cc485fb67..f04e1bf89826 100644 +--- a/drivers/iio/adc/Makefile ++++ b/drivers/iio/adc/Makefile +@@ -42,6 +42,7 @@ obj-$(CONFIG_HX711) += hx711.o + obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o + obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o + obj-$(CONFIG_INGENIC_ADC) += ingenic-adc.o ++obj-$(CONFIG_INTEL_ADC) += intel-adc.o + obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o + obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o + obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o +diff --git a/drivers/iio/adc/intel-adc.c b/drivers/iio/adc/intel-adc.c +new file mode 100644 +index 000000000000..9c834cba762b +--- /dev/null ++++ b/drivers/iio/adc/intel-adc.c +@@ -0,0 +1,426 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/** ++ * intel-adc.c - Intel ADC Driver ++ * ++ * Copyright (C) 2018 Intel Corporation ++ * ++ * Author: Felipe Balbi ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define ADC_DMA_CTRL 0x0000 ++#define ADC_FIFO_STTS 0x0004 ++#define ADC_DMA_DEBUG 0x0008 ++#define ADC_PWR_STAT 
0x000c ++ ++#define ADC_CTRL 0x0400 ++#define ADC_LOOP_CTRL 0x0404 ++#define ADC_LOOP_SEQ 0x0408 ++#define ADC_LOOP_DELAY_0 0x040c ++#define ADC_LOOP_DELAY_1 0x0410 ++#define ADC_LOOP_DELAY_2 0x0414 ++#define ADC_LOOP_DELAY_3 0x0418 ++#define ADC_LOOP_DELAY_4 0x041c ++#define ADC_LOOP_DELAY_5 0x0420 ++#define ADC_LOOP_DELAY_6 0x0424 ++#define ADC_LOOP_DELAY_7 0x0428 ++#define ADC_CAL_CTRL 0x042c ++#define ADC_CONV_CTRL 0x0430 ++#define ADC_CONV_DELAY 0x0434 ++#define ADC_CONFIG1 0x0438 ++#define ADC_CONFIG2 0x043c ++#define ADC_FIFO_CTRL 0x0440 ++#define ADC_STAT 0x0444 ++#define ADC_FIFO_RD_POINTER 0x0448 ++#define ADC_RAW_DATA 0x044c ++#define ADC_DATA_THRESHOLD_0 0x0450 ++#define ADC_DATA_THRESHOLD_1 0x0454 ++#define ADC_DATA_THRESHOLD_2 0x0458 ++#define ADC_DATA_THRESHOLD_3 0x045c ++#define ADC_DATA_THRESHOLD_4 0x0460 ++#define ADC_DATA_THRESHOLD_5 0x0464 ++#define ADC_DATA_THRESHOLD_6 0x0468 ++#define ADC_DATA_THRESHOLD_7 0x046c ++#define ADC_THRESHOLD_CONFIG 0x0470 ++#define ADC_RIS 0x0474 ++#define ADC_IMSC 0x0478 ++#define ADC_MIS 0x047c ++#define ADC_LOOP_CFG_0 0x0480 ++#define ADC_LOOP_CFG_1 0x0484 ++#define ADC_LOOP_CFG_2 0x0488 ++#define ADC_LOOP_CFG_3 0x048c ++#define ADC_LOOP_CFG_4 0x0490 ++#define ADC_LOOP_CFG_5 0x0494 ++#define ADC_LOOP_CFG_6 0x0498 ++#define ADC_LOOP_CFG_7 0x049c ++#define ADC_FIFO_DATA 0x0800 ++ ++#define ADC_BITS 14 ++ ++/* ADC DMA Ctrl */ ++#define ADC_DMA_CTRL_EN BIT(0) ++#define ADC_DMA_CTRL_BRST_THRSLD GENMASK(10, 1) ++ ++/* ADC FIFO Status */ ++#define ADC_FIFO_STTS_COUNT GENMASK(9, 0) ++ ++/* ADC Ctrl */ ++#define ADC_CTRL_EN BIT(0) ++#define ADC_CTRL_DATA_THRSHLD_MODE(r) (((r) >> 1) & 3) ++ ++/* ADC Conversion Ctrl */ ++#define ADC_CONV_CTRL_NUM_SMPL_MASK GENMASK(17, 8) ++#define ADC_CONV_CTRL_NUM_SMPL(n) (((n) - 1) << 8) ++#define ADC_CONV_CTRL_CONV_MODE BIT(4) ++#define ADC_CONV_CTRL_REQ BIT(0) ++ ++/* ADC Config1 */ ++#define ADC_CONFIG1_ATTEN_TRIM GENMASK(31, 30) ++#define ADC_CONFIG1_INBUF_CUR GENMASK(29, 28) 
++#define ADC_CONFIG1_BG_BYPASS BIT(24) ++#define ADC_CONFIG1_BG_TRIM GENMASK(23, 19) ++#define ADC_CONFIG1_BG_CTRIM GENMASK(18, 16) ++#define ADC_CONFIG1_REF_TRIM GENMASK(15, 8) ++#define ADC_CONFIG1_ADC_RESET BIT(6) ++#define ADC_CONFIG1_REF_BYPASS_EN BIT(5) ++#define ADC_CONFIG1_REF_EN BIT(4) ++#define ADC_CONFIG1_CNL_SEL_MASK GENMASK(3, 1) ++#define ADC_CONFIG1_CNL_SEL(ch) ((ch) << 1) ++#define ADC_CONFIG1_DIFF_SE_SEL BIT(0) ++ ++/* ADC Interrupt Mask Register */ ++#define ADC_INTR_LOOP_DONE_INTR BIT(22) ++#define ADC_INTR_FIFO_EMPTY_INTR BIT(21) ++#define ADC_INTR_DMA_DONE_INTR BIT(20) ++#define ADC_INTR_DATA_THRSHLD_LOW_INTR_7 BIT(19) ++#define ADC_INTR_DATA_THRSHLD_HIGH_INTR_7 BIT(18) ++#define ADC_INTR_DATA_THRSHLD_LOW_INTR_6 BIT(17) ++#define ADC_INTR_DATA_THRSHLD_HIGH_INTR_6 BIT(16) ++#define ADC_INTR_DATA_THRSHLD_LOW_INTR_5 BIT(15) ++#define ADC_INTR_DATA_THRSHLD_HIGH_INTR_5 BIT(14) ++#define ADC_INTR_DATA_THRSHLD_LOW_INTR_4 BIT(13) ++#define ADC_INTR_DATA_THRSHLD_HIGH_INTR_4 BIT(12) ++#define ADC_INTR_DATA_THRSHLD_LOW_INTR_3 BIT(11) ++#define ADC_INTR_DATA_THRSHLD_HIGH_INTR_3 BIT(10) ++#define ADC_INTR_DATA_THRSHLD_LOW_INTR_2 BIT(9) ++#define ADC_INTR_DATA_THRSHLD_HIGH_INTR_2 BIT(8) ++#define ADC_INTR_DATA_THRSHLD_LOW_INTR_1 BIT(7) ++#define ADC_INTR_DATA_THRSHLD_HIGH_INTR_1 BIT(6) ++#define ADC_INTR_DATA_THRSHLD_LOW_INTR_0 BIT(5) ++#define ADC_INTR_DATA_THRSHLD_HIGH_INTR_0 BIT(4) ++#define ADC_INTR_PWR_DWN_EXIT_INTR BIT(3) ++#define ADC_INTR_FIFO_FULL_INTR BIT(2) ++#define ADC_INTR_SMPL_DONE_INTR BIT(0) ++ ++#define ADC_INTR_ALL_MASK (ADC_INTR_LOOP_DONE_INTR | \ ++ ADC_INTR_FIFO_EMPTY_INTR | \ ++ ADC_INTR_DMA_DONE_INTR | \ ++ ADC_INTR_DATA_THRSHLD_LOW_INTR_7 | \ ++ ADC_INTR_DATA_THRSHLD_HIGH_INTR_7 | \ ++ ADC_INTR_DATA_THRSHLD_LOW_INTR_6 | \ ++ ADC_INTR_DATA_THRSHLD_HIGH_INTR_6 | \ ++ ADC_INTR_DATA_THRSHLD_LOW_INTR_5 | \ ++ ADC_INTR_DATA_THRSHLD_HIGH_INTR_5 | \ ++ ADC_INTR_DATA_THRSHLD_LOW_INTR_4 | \ ++ ADC_INTR_DATA_THRSHLD_HIGH_INTR_4 | \ ++ 
ADC_INTR_DATA_THRSHLD_LOW_INTR_3 | \ ++ ADC_INTR_DATA_THRSHLD_HIGH_INTR_3 | \ ++ ADC_INTR_DATA_THRSHLD_LOW_INTR_2 | \ ++ ADC_INTR_DATA_THRSHLD_HIGH_INTR_2 | \ ++ ADC_INTR_DATA_THRSHLD_LOW_INTR_1 | \ ++ ADC_INTR_DATA_THRSHLD_HIGH_INTR_1 | \ ++ ADC_INTR_DATA_THRSHLD_LOW_INTR_0 | \ ++ ADC_INTR_DATA_THRSHLD_HIGH_INTR_0 | \ ++ ADC_INTR_PWR_DWN_EXIT_INTR | \ ++ ADC_INTR_FIFO_FULL_INTR | \ ++ ADC_INTR_SMPL_DONE_INTR) ++ ++#define ADC_VREF_UV 1600000 ++#define ADC_DEFAULT_CONVERSION_TIMEOUT_MS 5000 ++ ++struct intel_adc { ++ struct completion completion; ++ void __iomem *regs; ++ u32 value; ++}; ++ ++static inline void intel_adc_writel(void __iomem *base, u32 offset, u32 value) ++{ ++ writel(value, base + offset); ++} ++ ++static inline u32 intel_adc_readl(void __iomem *base, u32 offset) ++{ ++ return readl(base + offset); ++} ++ ++static void intel_adc_enable(struct intel_adc *adc) ++{ ++ u32 ctrl; ++ u32 cfg1; ++ ++ cfg1 = intel_adc_readl(adc->regs, ADC_CONFIG1); ++ cfg1 &= ~ADC_CONFIG1_ADC_RESET; ++ intel_adc_writel(adc->regs, ADC_CONFIG1, cfg1); ++ ++ ctrl = intel_adc_readl(adc->regs, ADC_CTRL); ++ ctrl |= ADC_CTRL_EN; ++ intel_adc_writel(adc->regs, ADC_CTRL, ctrl); ++ ++ cfg1 |= ADC_CONFIG1_REF_EN; ++ intel_adc_writel(adc->regs, ADC_CONFIG1, cfg1); ++ ++ /* must wait 1ms before allowing any further accesses */ ++ usleep_range(1000, 1500); ++} ++ ++static void intel_adc_disable(struct intel_adc *adc) ++{ ++ u32 ctrl; ++ ++ ctrl = intel_adc_readl(adc->regs, ADC_CTRL); ++ ctrl &= ~ADC_CTRL_EN; ++ intel_adc_writel(adc->regs, ADC_CTRL, ctrl); ++} ++ ++static int intel_adc_single_channel_conversion(struct intel_adc *adc, ++ struct iio_chan_spec const *channel, int *val) ++{ ++ u32 ctrl; ++ u32 reg; ++ ++ ctrl = intel_adc_readl(adc->regs, ADC_CONV_CTRL); ++ ctrl |= ADC_CONV_CTRL_CONV_MODE; ++ ctrl &= ~ADC_CONV_CTRL_NUM_SMPL_MASK; ++ ctrl |= ADC_CONV_CTRL_NUM_SMPL(1); ++ intel_adc_writel(adc->regs, ADC_CONV_CTRL, ctrl); ++ ++ reg = intel_adc_readl(adc->regs, ADC_CONFIG1); ++ 
reg &= ~ADC_CONFIG1_CNL_SEL_MASK; ++ reg |= ADC_CONFIG1_CNL_SEL(channel->scan_index); ++ ++ if (channel->differential) ++ reg &= ~ADC_CONFIG1_DIFF_SE_SEL; ++ else ++ reg |= ADC_CONFIG1_DIFF_SE_SEL; ++ ++ intel_adc_writel(adc->regs, ADC_CONFIG1, reg); ++ ++ ctrl |= ADC_CONV_CTRL_REQ; ++ intel_adc_writel(adc->regs, ADC_CONV_CTRL, ctrl); ++ ++ /* enable sample done IRQ event */ ++ reg = intel_adc_readl(adc->regs, ADC_IMSC); ++ reg &= ~ADC_INTR_SMPL_DONE_INTR; ++ intel_adc_writel(adc->regs, ADC_IMSC, reg); ++ ++ usleep_range(1000, 5000); ++ adc->value = intel_adc_readl(adc->regs, ADC_FIFO_DATA); ++ ++ return 0; ++} ++ ++static int intel_adc_read_raw(struct iio_dev *iio, ++ struct iio_chan_spec const *channel, int *val, int *val2, ++ long mask) ++{ ++ struct intel_adc *adc = iio_priv(iio); ++ int shift; ++ int ret; ++ ++ switch (mask) { ++ case IIO_CHAN_INFO_RAW: ++ shift = channel->scan_type.shift; ++ ++ ret = iio_device_claim_direct_mode(iio); ++ if (ret) ++ break; ++ ++ intel_adc_enable(adc); ++ ++ ret = intel_adc_single_channel_conversion(adc, channel, val); ++ if (ret) { ++ intel_adc_disable(adc); ++ iio_device_release_direct_mode(iio); ++ break; ++ } ++ intel_adc_disable(adc); ++ ret = IIO_VAL_INT; ++ iio_device_release_direct_mode(iio); ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++ return ret; ++} ++ ++static const struct iio_info intel_adc_info = { ++ .read_raw = intel_adc_read_raw, ++}; ++ ++static const struct iio_event_spec intel_adc_events[] = { ++ { ++ .type = IIO_EV_TYPE_THRESH, ++ .dir = IIO_EV_DIR_RISING, ++ .mask_separate = BIT(IIO_EV_INFO_VALUE) | ++ BIT(IIO_EV_INFO_ENABLE), ++ }, { ++ .type = IIO_EV_TYPE_THRESH, ++ .dir = IIO_EV_DIR_FALLING, ++ .mask_separate = BIT(IIO_EV_INFO_VALUE), ++ }, { ++ .type = IIO_EV_TYPE_THRESH, ++ .dir = IIO_EV_DIR_EITHER, ++ .mask_separate = BIT(IIO_EV_INFO_ENABLE) | ++ BIT(IIO_EV_INFO_PERIOD), ++ }, ++}; ++ ++#define INTEL_ADC_SINGLE_CHAN(c) \ ++{ \ ++ .type = IIO_VOLTAGE, \ ++ .indexed = 1, \ ++ .channel 
= (c), \ ++ .scan_index = (c), \ ++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ ++ .scan_type = { \ ++ .sign = 's', \ ++ .realbits = 14, \ ++ .storagebits = 32, \ ++ .endianness = IIO_CPU, \ ++ }, \ ++ .event_spec = intel_adc_events, \ ++ .num_event_specs = ARRAY_SIZE(intel_adc_events),\ ++ .datasheet_name = "ain"#c, \ ++} ++ ++static struct iio_chan_spec const intel_adc_channels[] = { ++ INTEL_ADC_SINGLE_CHAN(0), ++ INTEL_ADC_SINGLE_CHAN(1), ++ INTEL_ADC_SINGLE_CHAN(2), ++ INTEL_ADC_SINGLE_CHAN(3), ++ INTEL_ADC_SINGLE_CHAN(4), ++ INTEL_ADC_SINGLE_CHAN(5), ++ INTEL_ADC_SINGLE_CHAN(6), ++ INTEL_ADC_SINGLE_CHAN(7), ++}; ++ ++static irqreturn_t intel_adc_irq(int irq, void *_adc) ++{ ++ struct intel_adc *adc = _adc; ++ u32 status; ++ ++ status = intel_adc_readl(adc->regs, ADC_MIS); ++ ++ if (!status) ++ return IRQ_NONE; ++ ++ intel_adc_writel(adc->regs, ADC_IMSC, ADC_INTR_ALL_MASK); ++ adc->value = intel_adc_readl(adc->regs, ADC_FIFO_DATA); ++ complete(&adc->completion); ++ ++ return IRQ_HANDLED; ++} ++ ++static int intel_adc_probe(struct pci_dev *pci, const struct pci_device_id *id) ++{ ++ struct intel_adc *adc; ++ struct iio_dev *iio; ++ int ret; ++ int irq; ++ ++ iio = devm_iio_device_alloc(&pci->dev, sizeof(*adc)); ++ if (!iio) ++ return -ENOMEM; ++ ++ adc = iio_priv(iio); ++ ret = pcim_enable_device(pci); ++ if (ret) ++ return ret; ++ ++ pci_set_master(pci); ++ ++ ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci)); ++ if (ret) ++ return ret; ++ ++ adc->regs = pcim_iomap_table(pci)[0]; ++ if (!adc->regs) { ++ ret = -EFAULT; ++ return ret; ++ } ++ ++ pci_set_drvdata(pci, adc); ++ init_completion(&adc->completion); ++ iio->dev.parent = &pci->dev; ++ iio->name = dev_name(&pci->dev); ++ iio->modes = INDIO_DIRECT_MODE; ++ iio->info = &intel_adc_info; ++ iio->channels = intel_adc_channels; ++ iio->num_channels = ARRAY_SIZE(intel_adc_channels); ++ ++ ret = devm_iio_device_register(&pci->dev, iio); ++ if (ret) ++ return ret; ++ ++ ret = pci_alloc_irq_vectors(pci, 1, 
1, PCI_IRQ_ALL_TYPES); ++ if (ret < 0) ++ return ret; ++ ++ irq = pci_irq_vector(pci, 0); ++ ret = devm_request_irq(&pci->dev, irq, intel_adc_irq, ++ IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_RISING, ++ "intel-adc", adc); ++ if (ret) ++ goto err; ++ ++ pm_runtime_set_autosuspend_delay(&pci->dev, 1000); ++ pm_runtime_use_autosuspend(&pci->dev); ++ pm_runtime_put_autosuspend(&pci->dev); ++ pm_runtime_allow(&pci->dev); ++ ++ return 0; ++ ++err: ++ pci_free_irq_vectors(pci); ++ return ret; ++} ++ ++static void intel_adc_remove(struct pci_dev *pci) ++{ ++ pm_runtime_forbid(&pci->dev); ++ pm_runtime_get_noresume(&pci->dev); ++ ++ pci_free_irq_vectors(pci); ++} ++ ++static const struct pci_device_id intel_adc_id_table[] = { ++ { PCI_VDEVICE(INTEL, 0x4bb8), }, ++ { } /* Terminating Entry */ ++}; ++MODULE_DEVICE_TABLE(pci, intel_adc_id_table); ++ ++static struct pci_driver intel_adc_driver = { ++ .name = "intel-adc", ++ .probe = intel_adc_probe, ++ .remove = intel_adc_remove, ++ .id_table = intel_adc_id_table, ++}; ++module_pci_driver(intel_adc_driver); ++ ++MODULE_AUTHOR("Felipe Balbi "); ++MODULE_DESCRIPTION("Intel ADC"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0001-intel_th-pci-Add-Elkhart-Lake-SOC-support.core-ehl b/patches/0001-intel_th-pci-Add-Elkhart-Lake-SOC-support.core-ehl new file mode 100644 index 0000000000..fb73416a82 --- /dev/null +++ b/patches/0001-intel_th-pci-Add-Elkhart-Lake-SOC-support.core-ehl @@ -0,0 +1,31 @@ +From c1e6399dde3573623b21a507113bbf1103248b03 Mon Sep 17 00:00:00 2001 +From: Gayatri Kammela +Date: Tue, 22 Oct 2019 17:34:24 -0700 +Subject: [PATCH 01/12] intel_th: pci: Add Elkhart Lake SOC support + +This adds support for Intel Trace Hub in Elkhart Lake. 
+ +Signed-off-by: Alexander Shishkin +--- + drivers/hwtracing/intel_th/pci.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c +index 91dfeba62485..807d73f1237c 100644 +--- a/drivers/hwtracing/intel_th/pci.c ++++ b/drivers/hwtracing/intel_th/pci.c +@@ -209,6 +209,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, ++ { ++ /* Elkhart Lake */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), ++ .driver_data = (kernel_ulong_t)&intel_th_2x, ++ }, + { 0 }, + }; + +-- +2.17.1 + diff --git a/patches/0001-mfd-core-Propagate-software-fwnode-to-the-sub-de.usb-typec b/patches/0001-mfd-core-Propagate-software-fwnode-to-the-sub-de.usb-typec new file mode 100644 index 0000000000..1f1a988ee9 --- /dev/null +++ b/patches/0001-mfd-core-Propagate-software-fwnode-to-the-sub-de.usb-typec @@ -0,0 +1,55 @@ +From 5f8c5ecaf1625c341dcb7a7c069f7e0918a8f918 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Mon, 5 Aug 2019 14:54:37 +0300 +Subject: [PATCH 01/18] mfd: core: Propagate software fwnode to the sub devices + +When ever device properties are supplied for a sub device, a +software node (fwnode) is actually created for that sub +device. By allowing the drivers to supply the complete +software node instead of just the properties in it, the +drivers can take advantage of the other features the +software nodes have on top of supplying the device +properties. 
+ +Signed-off-by: Heikki Krogerus +--- + drivers/mfd/mfd-core.c | 8 ++++++++ + include/linux/mfd/core.h | 3 +++ + 2 files changed, 11 insertions(+) + +diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c +index 23276a80e3b4..cf71563ac7d8 100644 +--- a/drivers/mfd/mfd-core.c ++++ b/drivers/mfd/mfd-core.c +@@ -196,6 +196,14 @@ static int mfd_add_device(struct device *parent, int id, + goto fail_alias; + } + ++ if (cell->node) { ++ ret = software_node_register(cell->node); ++ if (ret) ++ goto fail_alias; ++ ++ pdev->dev.fwnode = software_node_fwnode(cell->node); ++ } ++ + ret = mfd_platform_add_cell(pdev, cell, usage_count); + if (ret) + goto fail_alias; +diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h +index b43fc5773ad7..073efcafe9da 100644 +--- a/include/linux/mfd/core.h ++++ b/include/linux/mfd/core.h +@@ -45,6 +45,9 @@ struct mfd_cell { + /* device properties passed to the sub devices drivers */ + struct property_entry *properties; + ++ /* Software fwnode for the sub device */ ++ const struct software_node *node; ++ + /* + * Device Tree compatible string + * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details +-- +2.17.1 + diff --git a/patches/0001-net-stmmac-fix-error-in-updating-rx-tail-poin.connectivity b/patches/0001-net-stmmac-fix-error-in-updating-rx-tail-poin.connectivity new file mode 100644 index 0000000000..fc20e0cd9e --- /dev/null +++ b/patches/0001-net-stmmac-fix-error-in-updating-rx-tail-poin.connectivity @@ -0,0 +1,71 @@ +From 0b8839963762037fdffc7076c35f67dacd35ab29 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Wed, 31 Jul 2019 13:55:13 +0800 +Subject: [PATCH 001/108] net: stmmac: fix error in updating rx tail pointer to + last free entry + +DMA_CH(#i)_RxDesc_Tail_Pointer points to an offset from the base and +indicates the location of the last valid descriptor. 
+ +The change introduced by "net: stmmac: Update RX Tail Pointer to last +free entry" incorrectly update the RxDesc_Tail_Pointer and causes +Rx operation to freeze. The issue is explained as follow:- + +Say, cur_rx=1 and dirty_rx=0, then dirty=1 and entry=0 before +the loop inside stmmac_rx_refill() is entered. Inside the loop, +Rx buffer[entry=0] is rightfully refilled and entry++ (now, entry=1). +Upon the loop condition check "dirty-- > 0", the condition is false +because dirty=0. + +Now, dirty_rx=entry(=1) and it is used to update the Rx Tail Pointer +instead of "0" because only Rx buffer[entry=0] is refilled. + +So, fix this by tracking the index of the most recently refilled Rx +buffer by using "last_refill". Since there the loop can also bail +out pre-maturely, we only update the Rx Tail Pointer if 'last_refill' +is valid. + +Fixes: 858a31ffc3d9 net: stmmac: Update RX Tail Pointer to last free entry + +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 14 +++++++++----- + 1 file changed, 9 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 3dfd04e0506a..e433fa8d7b7a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -3378,6 +3378,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int len, dirty = stmmac_rx_dirty(priv, queue); + unsigned int entry = rx_q->dirty_rx; ++ unsigned int last_refill = entry; + + len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; + +@@ -3428,13 +3429,16 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) + + dma_wmb(); + stmmac_set_rx_owner(priv, p, use_rx_wd); +- ++ last_refill = entry; + entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); + } +- rx_q->dirty_rx = entry; +- rx_q->rx_tail_addr = rx_q->dma_rx_phy 
+ +- (rx_q->dirty_rx * sizeof(struct dma_desc)); +- stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); ++ if (last_refill != entry) { ++ rx_q->dirty_rx = entry; ++ rx_q->rx_tail_addr = rx_q->dma_rx_phy + ++ (last_refill * sizeof(struct dma_desc)); ++ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, ++ rx_q->rx_tail_addr, queue); ++ } + } + + /** +-- +2.17.1 + diff --git a/patches/0001-pinctrl-intel-Avoid-potential-glitches-if-pin-is-in-G.lpss b/patches/0001-pinctrl-intel-Avoid-potential-glitches-if-pin-is-in-G.lpss new file mode 100644 index 0000000000..fba9e4aeb2 --- /dev/null +++ b/patches/0001-pinctrl-intel-Avoid-potential-glitches-if-pin-is-in-G.lpss @@ -0,0 +1,82 @@ +From 591d9b96c9561b983db44dcd87972a367b9fb0a8 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 30 Sep 2019 18:12:22 +0300 +Subject: [PATCH 01/40] pinctrl: intel: Avoid potential glitches if pin is in + GPIO mode + +When consumer requests pin, we, in order to be on the safest side, +switch it first to GPIO mode followed by immediate transition to +the input state. Due to posted writes it's luckily to be a single +I/O transaction. + +However, if firmware or boot loader already configures pin to +the GPIO mode, user expects no glitches for the requested pin. +We may check if the pin is pre-configured and leave it as is +till the actual consumer toggles its state to avoid glitches. 
+ +Fixes: 7981c0015af2 ("Add Intel Sunrisepoint pin controller and GPIO support") +Depends-on: f5a26acf0162 ("Initialize GPIO properly when used through irqchip") +Reported-by: Oliver Barta +Reported-by: Malin Jonsson +Signed-off-by: Andy Shevchenko +--- + drivers/pinctrl/intel/pinctrl-intel.c | 21 ++++++++++++++++++++- + 1 file changed, 20 insertions(+), 1 deletion(-) + +diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c +index 1f13bcd0e4e1..b35e9a737637 100644 +--- a/drivers/pinctrl/intel/pinctrl-intel.c ++++ b/drivers/pinctrl/intel/pinctrl-intel.c +@@ -52,6 +52,7 @@ + #define PADCFG0_GPIROUTNMI BIT(17) + #define PADCFG0_PMODE_SHIFT 10 + #define PADCFG0_PMODE_MASK GENMASK(13, 10) ++#define PADCFG0_PMODE_GPIO 0 + #define PADCFG0_GPIORXDIS BIT(9) + #define PADCFG0_GPIOTXDIS BIT(8) + #define PADCFG0_GPIORXSTATE BIT(1) +@@ -330,7 +331,7 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, + cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1)); + + mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; +- if (!mode) ++ if (mode == PADCFG0_PMODE_GPIO) + seq_puts(s, "GPIO "); + else + seq_printf(s, "mode %d ", mode); +@@ -456,6 +457,11 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) + writel(value, padcfg0); + } + ++static int intel_gpio_get_gpio_mode(void __iomem *padcfg0) ++{ ++ return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; ++} ++ + static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) + { + u32 value; +@@ -489,7 +495,20 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, + } + + padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); ++ ++ /* ++ * If pin is already configured in GPIO mode, we assume that ++ * firmware provides correct settings. In such case we avoid ++ * potential glitches on the pin. Otherwise, for alternative ++ * mode, consumer has to supply respective flags. 
++ */ ++ if (intel_gpio_get_gpio_mode(padcfg0) == PADCFG0_PMODE_GPIO) { ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ return 0; ++ } ++ + intel_gpio_set_gpio_mode(padcfg0); ++ + /* Disable TX buffer and enable RX (this will be input) */ + __intel_gpio_set_direction(padcfg0, true); + +-- +2.17.1 + diff --git a/patches/0001-platform-x86-add-sep-and-socwatch-drivers-wit.sep-socwatch b/patches/0001-platform-x86-add-sep-and-socwatch-drivers-wit.sep-socwatch new file mode 100644 index 0000000000..a44a6121db --- /dev/null +++ b/patches/0001-platform-x86-add-sep-and-socwatch-drivers-wit.sep-socwatch @@ -0,0 +1,55515 @@ +From df250d25feb41a95fb99b8d174a6a6f99e04dfc6 Mon Sep 17 00:00:00 2001 +From: Jon Moeller +Date: Tue, 13 Nov 2018 01:25:24 -0600 +Subject: [PATCH 01/27] platform/x86: add sep and socwatch drivers without + socperf. + +Signed-off-by: Jon Moeller +--- + drivers/platform/x86/Kconfig | 3 + + drivers/platform/x86/Makefile | 4 + + drivers/platform/x86/sepdk/Kconfig | 63 + + drivers/platform/x86/sepdk/Makefile | 5 + + drivers/platform/x86/sepdk/inc/apic.h | 114 + + drivers/platform/x86/sepdk/inc/asm_helper.h | 186 + + drivers/platform/x86/sepdk/inc/chap.h | 31 + + drivers/platform/x86/sepdk/inc/control.h | 510 ++ + drivers/platform/x86/sepdk/inc/core2.h | 49 + + drivers/platform/x86/sepdk/inc/cpumon.h | 53 + + .../platform/x86/sepdk/inc/ecb_iterators.h | 581 ++ + drivers/platform/x86/sepdk/inc/eventmux.h | 42 + + drivers/platform/x86/sepdk/inc/gfx.h | 39 + + drivers/platform/x86/sepdk/inc/gmch.h | 31 + + .../platform/x86/sepdk/inc/haswellunc_sa.h | 57 + + drivers/platform/x86/sepdk/inc/jkt_unc_ha.h | 37 + + .../platform/x86/sepdk/inc/jkt_unc_qpill.h | 64 + + drivers/platform/x86/sepdk/inc/linuxos.h | 79 + + drivers/platform/x86/sepdk/inc/lwpmudrv.h | 551 ++ + drivers/platform/x86/sepdk/inc/msrdefs.h | 81 + + drivers/platform/x86/sepdk/inc/output.h | 120 + + drivers/platform/x86/sepdk/inc/pci.h | 133 + + drivers/platform/x86/sepdk/inc/pebs.h | 494 ++ + 
drivers/platform/x86/sepdk/inc/perfver4.h | 51 + + drivers/platform/x86/sepdk/inc/pmi.h | 65 + + .../platform/x86/sepdk/inc/sepdrv_p_state.h | 34 + + drivers/platform/x86/sepdk/inc/silvermont.h | 41 + + drivers/platform/x86/sepdk/inc/sys_info.h | 71 + + drivers/platform/x86/sepdk/inc/unc_common.h | 161 + + drivers/platform/x86/sepdk/inc/unc_gt.h | 86 + + drivers/platform/x86/sepdk/inc/utility.h | 637 ++ + .../x86/sepdk/inc/valleyview_sochap.h | 60 + + .../x86/sepdk/include/error_reporting_utils.h | 167 + + .../x86/sepdk/include/lwpmudrv_chipset.h | 274 + + .../x86/sepdk/include/lwpmudrv_defines.h | 507 ++ + .../platform/x86/sepdk/include/lwpmudrv_ecb.h | 1116 +++ + .../platform/x86/sepdk/include/lwpmudrv_gfx.h | 33 + + .../x86/sepdk/include/lwpmudrv_ioctl.h | 284 + + .../platform/x86/sepdk/include/lwpmudrv_pwr.h | 100 + + .../x86/sepdk/include/lwpmudrv_struct.h | 2059 +++++ + .../x86/sepdk/include/lwpmudrv_types.h | 159 + + .../x86/sepdk/include/lwpmudrv_version.h | 111 + + .../platform/x86/sepdk/include/pax_shared.h | 180 + + .../platform/x86/sepdk/include/rise_errors.h | 326 + + drivers/platform/x86/sepdk/pax/Makefile | 4 + + drivers/platform/x86/sepdk/pax/pax.c | 967 +++ + drivers/platform/x86/sepdk/pax/pax.h | 33 + + drivers/platform/x86/sepdk/sep/Makefile | 67 + + drivers/platform/x86/sepdk/sep/apic.c | 228 + + drivers/platform/x86/sepdk/sep/chap.c | 474 ++ + drivers/platform/x86/sepdk/sep/control.c | 896 ++ + drivers/platform/x86/sepdk/sep/core2.c | 2137 +++++ + drivers/platform/x86/sepdk/sep/cpumon.c | 357 + + drivers/platform/x86/sepdk/sep/eventmux.c | 446 + + drivers/platform/x86/sepdk/sep/gfx.c | 261 + + drivers/platform/x86/sepdk/sep/gmch.c | 505 ++ + drivers/platform/x86/sepdk/sep/linuxos.c | 1477 ++++ + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 7537 +++++++++++++++++ + drivers/platform/x86/sepdk/sep/output.c | 1177 +++ + drivers/platform/x86/sepdk/sep/pci.c | 661 ++ + drivers/platform/x86/sepdk/sep/pebs.c | 1957 +++++ + 
drivers/platform/x86/sepdk/sep/perfver4.c | 1972 +++++ + drivers/platform/x86/sepdk/sep/pmi.c | 640 ++ + .../platform/x86/sepdk/sep/sepdrv_p_state.c | 88 + + drivers/platform/x86/sepdk/sep/silvermont.c | 1113 +++ + drivers/platform/x86/sepdk/sep/sys32.S | 200 + + drivers/platform/x86/sepdk/sep/sys64.S | 140 + + drivers/platform/x86/sepdk/sep/sys_info.c | 1111 +++ + drivers/platform/x86/sepdk/sep/unc_common.c | 388 + + drivers/platform/x86/sepdk/sep/unc_gt.c | 470 + + drivers/platform/x86/sepdk/sep/unc_mmio.c | 1083 +++ + drivers/platform/x86/sepdk/sep/unc_msr.c | 347 + + drivers/platform/x86/sepdk/sep/unc_pci.c | 491 ++ + drivers/platform/x86/sepdk/sep/unc_power.c | 444 + + drivers/platform/x86/sepdk/sep/unc_sa.c | 173 + + drivers/platform/x86/sepdk/sep/utility.c | 1157 +++ + .../x86/sepdk/sep/valleyview_sochap.c | 301 + + drivers/platform/x86/socwatch/Kconfig | 6 + + drivers/platform/x86/socwatch/Makefile | 22 + + .../platform/x86/socwatch/inc/sw_collector.h | 136 + + .../platform/x86/socwatch/inc/sw_defines.h | 156 + + .../platform/x86/socwatch/inc/sw_file_ops.h | 70 + + .../x86/socwatch/inc/sw_hardware_io.h | 118 + + .../platform/x86/socwatch/inc/sw_internal.h | 138 + + drivers/platform/x86/socwatch/inc/sw_ioctl.h | 303 + + .../x86/socwatch/inc/sw_kernel_defines.h | 164 + + drivers/platform/x86/socwatch/inc/sw_list.h | 76 + + .../platform/x86/socwatch/inc/sw_lock_defs.h | 98 + + drivers/platform/x86/socwatch/inc/sw_mem.h | 82 + + .../x86/socwatch/inc/sw_ops_provider.h | 62 + + .../x86/socwatch/inc/sw_output_buffer.h | 136 + + .../socwatch/inc/sw_overhead_measurements.h | 189 + + .../platform/x86/socwatch/inc/sw_structs.h | 500 ++ + drivers/platform/x86/socwatch/inc/sw_telem.h | 74 + + .../socwatch/inc/sw_trace_notifier_provider.h | 82 + + .../x86/socwatch/inc/sw_tracepoint_handlers.h | 142 + + drivers/platform/x86/socwatch/inc/sw_types.h | 152 + + .../platform/x86/socwatch/inc/sw_version.h | 74 + + drivers/platform/x86/socwatch/sw_collector.c | 706 ++ + 
drivers/platform/x86/socwatch/sw_driver.c | 1472 ++++ + drivers/platform/x86/socwatch/sw_file_ops.c | 364 + + .../platform/x86/socwatch/sw_hardware_io.c | 188 + + drivers/platform/x86/socwatch/sw_internal.c | 238 + + drivers/platform/x86/socwatch/sw_mem.c | 331 + + .../platform/x86/socwatch/sw_ops_provider.c | 1225 +++ + .../platform/x86/socwatch/sw_output_buffer.c | 598 ++ + drivers/platform/x86/socwatch/sw_reader.c | 163 + + drivers/platform/x86/socwatch/sw_telem.c | 493 ++ + .../x86/socwatch/sw_trace_notifier_provider.c | 2233 +++++ + .../x86/socwatch/sw_tracepoint_handlers.c | 399 + + drivers/platform/x86/socwatchhv/Kconfig | 6 + + drivers/platform/x86/socwatchhv/Makefile | 20 + + drivers/platform/x86/socwatchhv/control.c | 141 + + .../platform/x86/socwatchhv/inc/asm_helper.h | 158 + + drivers/platform/x86/socwatchhv/inc/control.h | 194 + + .../platform/x86/socwatchhv/inc/pw_types.h | 132 + + .../platform/x86/socwatchhv/inc/pw_version.h | 67 + + .../platform/x86/socwatchhv/inc/sw_defines.h | 156 + + .../platform/x86/socwatchhv/inc/sw_ioctl.h | 303 + + .../x86/socwatchhv/inc/sw_kernel_defines.h | 164 + + .../platform/x86/socwatchhv/inc/sw_structs.h | 501 ++ + .../platform/x86/socwatchhv/inc/sw_types.h | 152 + + .../platform/x86/socwatchhv/inc/sw_version.h | 74 + + .../platform/x86/socwatchhv/inc/swhv_acrn.h | 117 + + .../x86/socwatchhv/inc/swhv_acrn_sbuf.h | 186 + + .../x86/socwatchhv/inc/swhv_defines.h | 111 + + .../platform/x86/socwatchhv/inc/swhv_driver.h | 109 + + .../platform/x86/socwatchhv/inc/swhv_ioctl.h | 164 + + .../x86/socwatchhv/inc/swhv_structs.h | 234 + + drivers/platform/x86/socwatchhv/swhv_acrn.c | 747 ++ + drivers/platform/x86/socwatchhv/swhv_driver.c | 375 + + 131 files changed, 54452 insertions(+) + create mode 100755 drivers/platform/x86/sepdk/Kconfig + create mode 100755 drivers/platform/x86/sepdk/Makefile + create mode 100644 drivers/platform/x86/sepdk/inc/apic.h + create mode 100644 drivers/platform/x86/sepdk/inc/asm_helper.h + create mode 
100644 drivers/platform/x86/sepdk/inc/chap.h + create mode 100644 drivers/platform/x86/sepdk/inc/control.h + create mode 100644 drivers/platform/x86/sepdk/inc/core2.h + create mode 100644 drivers/platform/x86/sepdk/inc/cpumon.h + create mode 100644 drivers/platform/x86/sepdk/inc/ecb_iterators.h + create mode 100644 drivers/platform/x86/sepdk/inc/eventmux.h + create mode 100644 drivers/platform/x86/sepdk/inc/gfx.h + create mode 100644 drivers/platform/x86/sepdk/inc/gmch.h + create mode 100644 drivers/platform/x86/sepdk/inc/haswellunc_sa.h + create mode 100644 drivers/platform/x86/sepdk/inc/jkt_unc_ha.h + create mode 100644 drivers/platform/x86/sepdk/inc/jkt_unc_qpill.h + create mode 100644 drivers/platform/x86/sepdk/inc/linuxos.h + create mode 100644 drivers/platform/x86/sepdk/inc/lwpmudrv.h + create mode 100644 drivers/platform/x86/sepdk/inc/msrdefs.h + create mode 100644 drivers/platform/x86/sepdk/inc/output.h + create mode 100644 drivers/platform/x86/sepdk/inc/pci.h + create mode 100644 drivers/platform/x86/sepdk/inc/pebs.h + create mode 100644 drivers/platform/x86/sepdk/inc/perfver4.h + create mode 100644 drivers/platform/x86/sepdk/inc/pmi.h + create mode 100644 drivers/platform/x86/sepdk/inc/sepdrv_p_state.h + create mode 100644 drivers/platform/x86/sepdk/inc/silvermont.h + create mode 100644 drivers/platform/x86/sepdk/inc/sys_info.h + create mode 100644 drivers/platform/x86/sepdk/inc/unc_common.h + create mode 100644 drivers/platform/x86/sepdk/inc/unc_gt.h + create mode 100644 drivers/platform/x86/sepdk/inc/utility.h + create mode 100644 drivers/platform/x86/sepdk/inc/valleyview_sochap.h + create mode 100644 drivers/platform/x86/sepdk/include/error_reporting_utils.h + create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h + create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_defines.h + create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h + create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h + 
create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h + create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h + create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_struct.h + create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_types.h + create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_version.h + create mode 100644 drivers/platform/x86/sepdk/include/pax_shared.h + create mode 100644 drivers/platform/x86/sepdk/include/rise_errors.h + create mode 100755 drivers/platform/x86/sepdk/pax/Makefile + create mode 100755 drivers/platform/x86/sepdk/pax/pax.c + create mode 100755 drivers/platform/x86/sepdk/pax/pax.h + create mode 100755 drivers/platform/x86/sepdk/sep/Makefile + create mode 100755 drivers/platform/x86/sepdk/sep/apic.c + create mode 100755 drivers/platform/x86/sepdk/sep/chap.c + create mode 100755 drivers/platform/x86/sepdk/sep/control.c + create mode 100755 drivers/platform/x86/sepdk/sep/core2.c + create mode 100755 drivers/platform/x86/sepdk/sep/cpumon.c + create mode 100755 drivers/platform/x86/sepdk/sep/eventmux.c + create mode 100755 drivers/platform/x86/sepdk/sep/gfx.c + create mode 100755 drivers/platform/x86/sepdk/sep/gmch.c + create mode 100755 drivers/platform/x86/sepdk/sep/linuxos.c + create mode 100755 drivers/platform/x86/sepdk/sep/lwpmudrv.c + create mode 100755 drivers/platform/x86/sepdk/sep/output.c + create mode 100755 drivers/platform/x86/sepdk/sep/pci.c + create mode 100755 drivers/platform/x86/sepdk/sep/pebs.c + create mode 100755 drivers/platform/x86/sepdk/sep/perfver4.c + create mode 100755 drivers/platform/x86/sepdk/sep/pmi.c + create mode 100755 drivers/platform/x86/sepdk/sep/sepdrv_p_state.c + create mode 100755 drivers/platform/x86/sepdk/sep/silvermont.c + create mode 100755 drivers/platform/x86/sepdk/sep/sys32.S + create mode 100755 drivers/platform/x86/sepdk/sep/sys64.S + create mode 100755 drivers/platform/x86/sepdk/sep/sys_info.c + create mode 100755 
drivers/platform/x86/sepdk/sep/unc_common.c + create mode 100755 drivers/platform/x86/sepdk/sep/unc_gt.c + create mode 100755 drivers/platform/x86/sepdk/sep/unc_mmio.c + create mode 100755 drivers/platform/x86/sepdk/sep/unc_msr.c + create mode 100755 drivers/platform/x86/sepdk/sep/unc_pci.c + create mode 100755 drivers/platform/x86/sepdk/sep/unc_power.c + create mode 100755 drivers/platform/x86/sepdk/sep/unc_sa.c + create mode 100755 drivers/platform/x86/sepdk/sep/utility.c + create mode 100755 drivers/platform/x86/sepdk/sep/valleyview_sochap.c + create mode 100644 drivers/platform/x86/socwatch/Kconfig + create mode 100644 drivers/platform/x86/socwatch/Makefile + create mode 100644 drivers/platform/x86/socwatch/inc/sw_collector.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_defines.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_file_ops.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_hardware_io.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_internal.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_ioctl.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_kernel_defines.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_list.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_lock_defs.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_mem.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_ops_provider.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_output_buffer.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_structs.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_telem.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h + create mode 100644 drivers/platform/x86/socwatch/inc/sw_types.h + create mode 100644 
drivers/platform/x86/socwatch/inc/sw_version.h + create mode 100644 drivers/platform/x86/socwatch/sw_collector.c + create mode 100644 drivers/platform/x86/socwatch/sw_driver.c + create mode 100644 drivers/platform/x86/socwatch/sw_file_ops.c + create mode 100644 drivers/platform/x86/socwatch/sw_hardware_io.c + create mode 100644 drivers/platform/x86/socwatch/sw_internal.c + create mode 100644 drivers/platform/x86/socwatch/sw_mem.c + create mode 100644 drivers/platform/x86/socwatch/sw_ops_provider.c + create mode 100644 drivers/platform/x86/socwatch/sw_output_buffer.c + create mode 100644 drivers/platform/x86/socwatch/sw_reader.c + create mode 100644 drivers/platform/x86/socwatch/sw_telem.c + create mode 100644 drivers/platform/x86/socwatch/sw_trace_notifier_provider.c + create mode 100644 drivers/platform/x86/socwatch/sw_tracepoint_handlers.c + create mode 100644 drivers/platform/x86/socwatchhv/Kconfig + create mode 100644 drivers/platform/x86/socwatchhv/Makefile + create mode 100644 drivers/platform/x86/socwatchhv/control.c + create mode 100644 drivers/platform/x86/socwatchhv/inc/asm_helper.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/control.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/pw_types.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/pw_version.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/sw_defines.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/sw_ioctl.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/sw_structs.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/sw_types.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/sw_version.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/swhv_acrn.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/swhv_defines.h + create mode 100644 
drivers/platform/x86/socwatchhv/inc/swhv_driver.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h + create mode 100644 drivers/platform/x86/socwatchhv/inc/swhv_structs.h + create mode 100644 drivers/platform/x86/socwatchhv/swhv_acrn.c + create mode 100644 drivers/platform/x86/socwatchhv/swhv_driver.c + +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig +index ae21d08c65e8..52b941c5c2c8 100644 +--- a/drivers/platform/x86/Kconfig ++++ b/drivers/platform/x86/Kconfig +@@ -1343,3 +1343,6 @@ config PMC_ATOM + def_bool y + depends on PCI + select COMMON_CLK ++source "drivers/platform/x86/socwatch/Kconfig" ++source "drivers/platform/x86/socwatchhv/Kconfig" ++source "drivers/platform/x86/sepdk/Kconfig" +diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile +index 415104033060..f9fac98188c8 100644 +--- a/drivers/platform/x86/Makefile ++++ b/drivers/platform/x86/Makefile +@@ -100,3 +100,7 @@ obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o + obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o + obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o + obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/ ++obj-$(CONFIG_INTEL_SOCWATCH) += socwatch/ ++obj-$(CONFIG_INTEL_SOCWATCH_HV) += socwatchhv/ ++obj-$(CONFIG_INTEL_SEP) += sepdk/ ++ +diff --git a/drivers/platform/x86/sepdk/Kconfig b/drivers/platform/x86/sepdk/Kconfig +new file mode 100755 +index 000000000000..ed9c94ec3d07 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/Kconfig +@@ -0,0 +1,63 @@ ++# ++# THE SEP KERNEL DRIVER UNDER LINUX* ++# ++config INTEL_SEP ++ bool "Sampling Enabling Product (SEP)" ++ help ++ SEP is a command line tool for doing hardware-based sampling using ++ event-based sampling (EBS). 
++ depends on X86 || X86_64 ++ ++config SEP ++ tristate "SEP kernel driver" ++ depends on INTEL_SEP ++ default m ++ ++config SEP_ACRN ++ tristate "SEP kernel driver" ++ depends on INTEL_SEP && ACRN_VHM ++ default m ++ ++config SEP_PAX ++ tristate "PAX kernel driver from SEP" ++ depends on INTEL_SEP ++ depends on SEP ++ default m ++ ++config SEP_PER_USER_MODE ++ bool "Use Per User Mode on SEP" ++ depends on INTEL_SEP ++ default n ++ ++choice ++ prompt "Choose log mode" ++ default SEP_STANDARD_MODE ++ depends on INTEL_SEP ++ help ++ This option allows to select logging mode. ++ ++config SEP_STANDARD_MODE ++ bool "Use standard logging mode" ++ ++config SEP_MINLOG_MODE ++ bool "Use min logging mode" ++ help ++ WARNING: Using minimal logging mode. ++ This may make troubleshooting more difficult. ++ ++config SEP_MAXLOG_MODE ++ bool "Use max logging mode" ++ help ++ WARNING: Using maximal logging mode. ++ This may increase overhead ++ ++endchoice ++ ++config SEP_PRIVATE_BUILD ++ bool "Is this build an Internal and Private build" ++ depends on INTEL_SEP ++ default y ++ help ++ Select Yes if this is an Intel Internal Build ++ ++ +diff --git a/drivers/platform/x86/sepdk/Makefile b/drivers/platform/x86/sepdk/Makefile +new file mode 100755 +index 000000000000..c8992312a9bb +--- /dev/null ++++ b/drivers/platform/x86/sepdk/Makefile +@@ -0,0 +1,5 @@ ++ ++obj-$(CONFIG_SEP) += sep/ ++obj-$(CONFIG_SEP_PAX) += pax/ ++ ++ +diff --git a/drivers/platform/x86/sepdk/inc/apic.h b/drivers/platform/x86/sepdk/inc/apic.h +new file mode 100644 +index 000000000000..2b7f1c70dab5 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/apic.h +@@ -0,0 +1,114 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _APIC_H_ ++#define _APIC_H_ ++ ++#include ++#include ++ ++typedef U64 * PHYSICAL_ADDRESS; ++ ++/** ++ * Data Types and Macros ++ */ ++ ++/* ++ * APIC registers and constants ++ */ ++ ++// APIC base MSR ++#define DRV_APIC_BASE_MSR 0x001b ++ ++// APIC registers ++#define DRV_APIC_LCL_ID 0x0020 ++#define DRV_APIC_LCL_TSKPRI 0x0080 ++#define DRV_APIC_LCL_PPR 0x00a0 ++#define DRV_APIC_LCL_EOI 0x00b0 ++#define DRV_APIC_LCL_LDEST 0x00d0 ++#define DRV_APIC_LCL_DSTFMT 0x00e0 ++#define DRV_APIC_LCL_SVR 0x00f0 ++#define DRV_APIC_LCL_ICR 0x0300 ++#define DRV_APIC_LVT_TIMER 0x0320 ++#define DRV_APIC_LVT_PMI 0x0340 ++#define DRV_APIC_LVT_LINT0 0x0350 ++#define DRV_APIC_LVT_LINT1 0x0360 ++#define DRV_APIC_LVT_ERROR 0x0370 ++ ++#define DRV_APIC_LCL_ID_MSR 0x802 ++#define DRV_APIC_LCL_TSKPRI_MSR 0x808 ++#define DRV_APIC_LCL_PPR_MSR 0x80a ++#define DRV_APIC_LCL_EOI_MSR 0x80b ++#define DRV_APIC_LCL_LDEST_MSR 0x80d ++#define DRV_APIC_LCL_DSTFMT_MSR 0x80e ++#define DRV_APIC_LCL_SVR_MSR 0x80f ++#define DRV_APIC_LCL_ICR_MSR 0x830 ++#define DRV_APIC_LVT_TIMER_MSR 0x832 ++#define DRV_APIC_LVT_PMI_MSR 0x834 ++#define DRV_APIC_LVT_LINT0_MSR 0x835 ++#define DRV_APIC_LVT_LINT1_MSR 0x836 ++#define DRV_APIC_LVT_ERROR_MSR 0x837 ++ ++// masks for LVT ++#define DRV_LVT_MASK 0x10000 ++#define DRV_LVT_EDGE 0x00000 ++#define DRV_LVT_LEVEL 0x08000 ++#define DRV_LVT_EXTINT 0x00700 ++#define DRV_LVT_NMI 0x00400 ++ ++// task priorities ++#define DRV_APIC_TSKPRI_LO 0x0000 ++#define DRV_APIC_TSKPRI_HI 0x00f0 ++ ++#define DRV_X2APIC_ENABLED 0xc00LL ++ ++//// Interrupt vector for PMU overflow event ++// ++// Choose the highest unused IDT vector possible so that our ++// callback routine runs at the highest priority allowed; ++// must avoid using pre-defined vectors in, ++// include/asm/irq.h ++// include/asm/hw_irq.h ++// include/asm/irq_vectors.h ++// ++// FIRST_DEVICE_VECTOR should be valid for kernels 
2.6.33 and earlier ++#define CPU_PERF_VECTOR DRV_LVT_NMI ++// Has the APIC Been enabled ++#define DRV_APIC_BASE_GLOBAL_ENABLED(a) ((a)&1 << 11) ++#define DRV_APIC_VIRTUAL_WIRE_ENABLED(a) ((a)&0x100) ++ ++/** ++ * Function Declarations ++ */ ++ ++/* ++ * APIC control functions ++ */ ++extern VOID APIC_Enable_Pmi(void); ++extern VOID APIC_Init(PVOID param); ++extern VOID APIC_Install_Interrupt_Handler(PVOID param); ++extern VOID APIC_Restore_LVTPC(PVOID param); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/asm_helper.h b/drivers/platform/x86/sepdk/inc/asm_helper.h +new file mode 100644 +index 000000000000..fd4eabf95dd9 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/asm_helper.h +@@ -0,0 +1,186 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _ASM_HELPER_H_ ++#define _ASM_HELPER_H_ ++ ++#include ++ ++#if KERNEL_VERSION(4, 1, 0) > LINUX_VERSION_CODE ++ ++#include ++#include ++ ++#else ++ ++#ifdef CONFIG_AS_CFI ++ ++#define CFI_STARTPROC .cfi_startproc ++#define CFI_ENDPROC .cfi_endproc ++#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset ++#define CFI_REL_OFFSET .cfi_rel_offset ++#define CFI_RESTORE .cfi_restore ++ ++#else ++ ++.macro cfi_ignore a = 0, b = 0, c = 0, d = 0 ++.endm ++ ++#define CFI_STARTPROC cfi_ignore ++#define CFI_ENDPROC cfi_ignore ++#define CFI_ADJUST_CFA_OFFSET cfi_ignore ++#define CFI_REL_OFFSET cfi_ignore ++#define CFI_RESTORE cfi_ignore ++#endif ++ ++#ifdef CONFIG_X86_64 ++ .macro SAVE_C_REGS_HELPER offset = 0 rax = 1 rcx = 1 r8910 = 1 r11 = 1 ++ .if \r11 ++ movq %r11, 6*8+\offset(%rsp) ++ CFI_REL_OFFSET r11, \offset ++ .endif ++ .if \r8910 ++ movq %r10, 7*8+\offset(%rsp) ++ CFI_REL_OFFSET r10, \offset ++ ++ movq %r9, 8*8+\offset(%rsp) ++ CFI_REL_OFFSET r9, \offset ++ ++ movq %r8, 9*8+\offset(%rsp) ++ CFI_REL_OFFSET r8, \offset ++ .endif ++ .if \rax ++ movq %rax, 10*8+\offset(%rsp) ++ CFI_REL_OFFSET rax, \offset ++ .endif ++ .if \rcx ++ movq %rcx, 11*8+\offset(%rsp) ++ CFI_REL_OFFSET rcx, \offset ++ .endif ++ movq %rdx, 12*8+\offset(%rsp) ++ CFI_REL_OFFSET rdx, \offset ++ ++ movq %rsi, 13*8+\offset(%rsp) ++ CFI_REL_OFFSET rsi, \offset ++ ++ movq %rdi, 14*8+\offset(%rsp) ++ CFI_REL_OFFSET rdi, \offset ++ .endm ++ .macro SAVE_C_REGS offset = 0 ++ SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 ++ .endm ++ .macro SAVE_EXTRA_REGS offset = 0 ++ movq %r15, 0*8+\offset(%rsp) ++ CFI_REL_OFFSET r15, \offset ++ ++ movq %r14, 1*8+\offset(%rsp) ++ CFI_REL_OFFSET r14, \offset ++ ++ movq %r13, 2*8+\offset(%rsp) ++ CFI_REL_OFFSET r13, \offset ++ ++ movq %r12, 3*8+\offset(%rsp) ++ CFI_REL_OFFSET r12, \offset ++ ++ movq %rbp, 4*8+\offset(%rsp) ++ CFI_REL_OFFSET rbp, \offset ++ ++ movq %rbx, 
5*8+\offset(%rsp) ++ CFI_REL_OFFSET rbx, \offset ++ .endm ++ ++ .macro RESTORE_EXTRA_REGS offset = 0 ++ movq 0*8+\offset(%rsp), %r15 ++ CFI_RESTORE r15 ++ movq 1*8+\offset(%rsp), %r14 ++ CFI_RESTORE r14 ++ movq 2*8+\offset(%rsp), %r13 ++ CFI_RESTORE r13 ++ movq 3*8+\offset(%rsp), %r12 ++ CFI_RESTORE r12 ++ movq 4*8+\offset(%rsp), %rbp ++ CFI_RESTORE rbp ++ movq 5*8+\offset(%rsp), %rbx ++ CFI_RESTORE rbx ++ .endm ++ .macro RESTORE_C_REGS_HELPER rstor_rax = 1, rstor_rcx = 1, rstor_r11 = 1, rstor_r8910 = 1, rstor_rdx = 1 ++ .if \rstor_r11 ++ movq 6*8(%rsp), %r11 ++ CFI_RESTORE r11 ++ .endif ++ .if \rstor_r8910 ++ movq 7*8(%rsp), %r10 ++ CFI_RESTORE r10 ++ movq 8*8(%rsp), %r9 ++ CFI_RESTORE r9 ++ movq 9*8(%rsp), %r8 ++ CFI_RESTORE r8 ++ .endif ++ .if \rstor_rax ++ movq 10*8(%rsp), %rax ++ CFI_RESTORE rax ++ .endif ++ .if \rstor_rcx ++ movq 11*8(%rsp), %rcx ++ CFI_RESTORE rcx ++ .endif ++ .if \rstor_rdx ++ movq 12*8(%rsp), %rdx ++ CFI_RESTORE rdx ++ .endif ++ movq 13*8(%rsp), %rsi ++ CFI_RESTORE rsi ++ movq 14*8(%rsp), %rdi ++ CFI_RESTORE rdi ++ .endm ++ .macro RESTORE_C_REGS ++ RESTORE_C_REGS_HELPER 1, 1, 1, 1, 1 ++ .endm ++ ++ .macro ALLOC_PT_GPREGS_ON_STACK addskip = 0 ++ subq $15*8+\addskip, %rsp ++ CFI_ADJUST_CFA_OFFSET 15*8+\addskip ++ .endm ++ ++ .macro REMOVE_PT_GPREGS_FROM_STACK addskip = 0 ++ addq $15*8+\addskip, %rsp ++ CFI_ADJUST_CFA_OFFSET - (15*8+\addskip) ++ .endm ++ ++ .macro SAVE_ALL ++ ALLOC_PT_GPREGS_ON_STACK ++ SAVE_C_REGS ++ SAVE_EXTRA_REGS ++ .endm ++ ++ .macro RESTORE_ALL ++ RESTORE_EXTRA_REGS ++ RESTORE_C_REGS ++ REMOVE_PT_GPREGS_FROM_STACK ++ .endm ++#endif //CONFIG_X86_64 ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/chap.h b/drivers/platform/x86/sepdk/inc/chap.h +new file mode 100644 +index 000000000000..823aa9058cd5 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/chap.h +@@ -0,0 +1,31 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. 
All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _CHAP_H_ ++#define _CHAP_H_ ++ ++extern CS_DISPATCH_NODE chap_dispatch; ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/control.h b/drivers/platform/x86/sepdk/inc/control.h +new file mode 100644 +index 000000000000..ecc93ceab221 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/control.h +@@ -0,0 +1,510 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. 
++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++ ++#ifndef _CONTROL_H_ ++#define _CONTROL_H_ ++ ++#include ++#include ++#if defined(DRV_IA32) ++#include ++#endif ++#include ++#if defined(DRV_IA32) ++#include ++#endif ++#include ++#include ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv.h" ++#include "lwpmudrv_types.h" ++#if defined(BUILD_CHIPSET) ++#include "lwpmudrv_chipset.h" ++#endif ++ ++// large memory allocation will be used if the requested size (in bytes) is ++// above this threshold ++#define MAX_KMALLOC_SIZE ((1 << 17) - 1) ++ ++// check whether Linux driver should use unlocked ioctls (not protected by BKL) ++#if defined(HAVE_UNLOCKED_IOCTL) ++#define DRV_USE_UNLOCKED_IOCTL ++#endif ++#if defined(DRV_USE_UNLOCKED_IOCTL) ++#define IOCTL_OP .unlocked_ioctl ++#define IOCTL_OP_TYPE long ++#define IOCTL_USE_INODE ++#else ++#define IOCTL_OP .ioctl ++#define IOCTL_OP_TYPE S32 ++#define IOCTL_USE_INODE struct inode *inode, ++#endif ++ ++// Information about the state of the driver ++typedef struct GLOBAL_STATE_NODE_S GLOBAL_STATE_NODE; ++typedef GLOBAL_STATE_NODE * GLOBAL_STATE; ++struct 
GLOBAL_STATE_NODE_S { ++ volatile S32 cpu_count; ++ volatile S32 dpc_count; ++ ++ S32 num_cpus; // Number of CPUs in the system ++ S32 active_cpus; // Number of active CPUs - some cores can be ++ // deactivated by the user / admin ++ S32 num_em_groups; ++ S32 num_descriptors; ++ ++ volatile S32 current_phase; ++ ++ U32 num_modules; ++}; ++ ++// Access Macros ++#define GLOBAL_STATE_num_cpus(x) ((x).num_cpus) ++#define GLOBAL_STATE_active_cpus(x) ((x).active_cpus) ++#define GLOBAL_STATE_cpu_count(x) ((x).cpu_count) ++#define GLOBAL_STATE_dpc_count(x) ((x).dpc_count) ++#define GLOBAL_STATE_num_em_groups(x) ((x).num_em_groups) ++#define GLOBAL_STATE_num_descriptors(x) ((x).num_descriptors) ++#define GLOBAL_STATE_current_phase(x) ((x).current_phase) ++#define GLOBAL_STATE_sampler_id(x) ((x).sampler_id) ++#define GLOBAL_STATE_num_modules(x) ((x).num_modules) ++ ++/* ++ * ++ * ++ * CPU State data structure and access macros ++ * ++ */ ++typedef struct CPU_STATE_NODE_S CPU_STATE_NODE; ++typedef CPU_STATE_NODE * CPU_STATE; ++struct CPU_STATE_NODE_S { ++ S32 apic_id; // Processor ID on the system bus ++ PVOID apic_linear_addr; // linear address of local apic ++ PVOID apic_physical_addr; // physical address of local apic ++ ++ PVOID idt_base; // local IDT base address ++ atomic_t in_interrupt; ++ ++#if defined(DRV_IA32) ++ U64 saved_ih; // saved perfvector to restore ++#endif ++#if defined(DRV_EM64T) ++ PVOID saved_ih; // saved perfvector to restore ++#endif ++ ++ U64 last_mperf; // previous value of MPERF, for calculating delta MPERF ++ U64 last_aperf; // previous value of APERF, for calculating delta MPERF ++ DRV_BOOL last_p_state_valid; // are the previous values valid? 
++ //(e.g., the first measurement does not have ++ // a previous value for calculating the delta ++ DRV_BOOL p_state_counting; //Flag to mark PMI interrupt from fixed event ++ ++ S64 *em_tables; // holds the data that is saved/restored ++ // during event multiplexing ++ U32 em_table_offset; ++ ++ struct timer_list *em_timer; ++ U32 current_group; ++ S32 trigger_count; ++ S32 trigger_event_num; ++ ++ DISPATCH dispatch; ++ PVOID lbr_area; ++ PVOID old_dts_buffer; ++ PVOID dts_buffer; ++ U32 dts_buffer_size; ++ U32 dts_buffer_offset; ++ U32 initial_mask; ++ U32 accept_interrupt; ++ ++#if defined(BUILD_CHIPSET) ++ // Chipset counter stuff ++ U32 chipset_count_init; //flag to initialize the last MCH and ICH array ++ U64 last_mch_count[8]; ++ U64 last_ich_count[8]; ++ U64 last_gmch_count[MAX_CHIPSET_COUNTERS]; ++ U64 last_mmio_count[32]; // it's 9 now but next generation may have 29 ++ ++#endif ++ ++ U64 *pmu_state; // holds PMU state (e.g., MSRs) that will be ++ // saved before and restored after collection ++ S32 socket_master; ++ S32 core_master; ++ S32 thr_master; ++ U64 num_samples; ++ U64 reset_mask; ++ U64 group_swap; ++ U64 last_visa_count[16]; ++ U16 cpu_module_num; ++ U16 cpu_module_master; ++ S32 system_master; ++ DRV_BOOL offlined; ++ U32 nmi_handled; ++ struct tasklet_struct nmi_tasklet; ++ U32 em_timer_delay; ++ U32 core_type; ++}; ++ ++#define CPU_STATE_apic_id(cpu) ((cpu)->apic_id) ++#define CPU_STATE_apic_linear_addr(cpu) ((cpu)->apic_linear_addr) ++#define CPU_STATE_apic_physical_addr(cpu) ((cpu)->apic_physical_addr) ++#define CPU_STATE_idt_base(cpu) ((cpu)->idt_base) ++#define CPU_STATE_in_interrupt(cpu) ((cpu)->in_interrupt) ++#define CPU_STATE_saved_ih(cpu) ((cpu)->saved_ih) ++#define CPU_STATE_saved_ih_hi(cpu) ((cpu)->saved_ih_hi) ++#define CPU_STATE_dpc(cpu) ((cpu)->dpc) ++#define CPU_STATE_em_tables(cpu) ((cpu)->em_tables) ++#define CPU_STATE_em_table_offset(cpu) ((cpu)->em_table_offset) ++#define CPU_STATE_pmu_state(cpu) ((cpu)->pmu_state) 
++#define CPU_STATE_em_dpc(cpu) ((cpu)->em_dpc) ++#define CPU_STATE_em_timer(cpu) ((cpu)->em_timer) ++#define CPU_STATE_current_group(cpu) ((cpu)->current_group) ++#define CPU_STATE_trigger_count(cpu) ((cpu)->trigger_count) ++#define CPU_STATE_trigger_event_num(cpu) ((cpu)->trigger_event_num) ++#define CPU_STATE_dispatch(cpu) ((cpu)->dispatch) ++#define CPU_STATE_lbr(cpu) ((cpu)->lbr) ++#define CPU_STATE_old_dts_buffer(cpu) ((cpu)->old_dts_buffer) ++#define CPU_STATE_dts_buffer(cpu) ((cpu)->dts_buffer) ++#define CPU_STATE_dts_buffer_size(cpu) ((cpu)->dts_buffer_size) ++#define CPU_STATE_dts_buffer_offset(cpu) ((cpu)->dts_buffer_offset) ++#define CPU_STATE_initial_mask(cpu) ((cpu)->initial_mask) ++#define CPU_STATE_accept_interrupt(cpu) ((cpu)->accept_interrupt) ++#define CPU_STATE_msr_value(cpu) ((cpu)->msr_value) ++#define CPU_STATE_msr_addr(cpu) ((cpu)->msr_addr) ++#define CPU_STATE_socket_master(cpu) ((cpu)->socket_master) ++#define CPU_STATE_core_master(cpu) ((cpu)->core_master) ++#define CPU_STATE_thr_master(cpu) ((cpu)->thr_master) ++#define CPU_STATE_num_samples(cpu) ((cpu)->num_samples) ++#define CPU_STATE_reset_mask(cpu) ((cpu)->reset_mask) ++#define CPU_STATE_group_swap(cpu) ((cpu)->group_swap) ++#define CPU_STATE_last_mperf(cpu) ((cpu)->last_mperf) ++#define CPU_STATE_last_aperf(cpu) ((cpu)->last_aperf) ++#define CPU_STATE_last_p_state_valid(cpu) ((cpu)->last_p_state_valid) ++#define CPU_STATE_cpu_module_num(cpu) ((cpu)->cpu_module_num) ++#define CPU_STATE_cpu_module_master(cpu) ((cpu)->cpu_module_master) ++#define CPU_STATE_p_state_counting(cpu) ((cpu)->p_state_counting) ++#define CPU_STATE_system_master(cpu) ((cpu)->system_master) ++#define CPU_STATE_offlined(cpu) ((cpu)->offlined) ++#define CPU_STATE_nmi_handled(cpu) ((cpu)->nmi_handled) ++#define CPU_STATE_nmi_tasklet(cpu) ((cpu)->nmi_tasklet) ++#define CPU_STATE_em_timer_delay(cpu) ((cpu)->em_timer_delay) ++#define CPU_STATE_core_type(cpu) ((cpu)->core_type) ++ ++/* ++ * For storing data for 
--read/--write-msr command line options ++ */ ++typedef struct MSR_DATA_NODE_S MSR_DATA_NODE; ++typedef MSR_DATA_NODE * MSR_DATA; ++struct MSR_DATA_NODE_S { ++ U64 value; // Used for emon, for read/write-msr value ++ U64 addr; ++}; ++ ++#define MSR_DATA_value(md) ((md)->value) ++#define MSR_DATA_addr(md) ((md)->addr) ++ ++/* ++ * Memory Allocation tracker ++ * ++ * Currently used to track large memory allocations ++ */ ++ ++typedef struct MEM_EL_NODE_S MEM_EL_NODE; ++typedef MEM_EL_NODE * MEM_EL; ++struct MEM_EL_NODE_S { ++ PVOID address; // pointer to piece of memory we're tracking ++ S32 size; // size (bytes) of the piece of memory ++ U32 is_addr_vmalloc; ++ // flag to check if the memory is allocated using vmalloc ++}; ++ ++// accessors for MEM_EL defined in terms of MEM_TRACKER below ++ ++#define MEM_EL_MAX_ARRAY_SIZE 32 // minimum is 1, nominal is 64 ++ ++typedef struct MEM_TRACKER_NODE_S MEM_TRACKER_NODE; ++typedef MEM_TRACKER_NODE * MEM_TRACKER; ++struct MEM_TRACKER_NODE_S { ++ U16 max_size; // MAX number of elements in the array ++ U16 elements; // number of elements available in this array ++ U16 node_vmalloc; ++ // flag to check whether the node struct is allocated using vmalloc ++ U16 array_vmalloc; ++ // flag to check whether the list of mem el is allocated using vmalloc ++ MEM_EL mem; // array of large memory items we're tracking ++ MEM_TRACKER prev, next; // enables bi-directional scanning linked list ++}; ++#define MEM_TRACKER_max_size(mt) ((mt)->max_size) ++#define MEM_TRACKER_node_vmalloc(mt) ((mt)->node_vmalloc) ++#define MEM_TRACKER_array_vmalloc(mt) ((mt)->array_vmalloc) ++#define MEM_TRACKER_elements(mt) ((mt)->elements) ++#define MEM_TRACKER_mem(mt) ((mt)->mem) ++#define MEM_TRACKER_prev(mt) ((mt)->prev) ++#define MEM_TRACKER_next(mt) ((mt)->next) ++#define MEM_TRACKER_mem_address(mt, i) ((MEM_TRACKER_mem(mt)[(i)].address)) ++#define MEM_TRACKER_mem_size(mt, i) ((MEM_TRACKER_mem(mt)[(i)].size)) ++#define MEM_TRACKER_mem_vmalloc(mt, i) \ ++ 
((MEM_TRACKER_mem(mt)[(i)].is_addr_vmalloc)) ++ ++/**************************************************************************** ++ ** Global State variables exported ++ ***************************************************************************/ ++extern CPU_STATE pcb; ++extern U64 *cpu_tsc; ++extern GLOBAL_STATE_NODE driver_state; ++extern MSR_DATA msr_data; ++extern U32 *core_to_package_map; ++extern U32 *core_to_dev_map; ++extern U32 *core_to_phys_core_map; ++extern U32 *core_to_thread_map; ++extern U32 *threads_per_core; ++extern U32 num_packages; ++extern U64 *restore_bl_bypass; ++extern U32 **restore_ha_direct2core; ++extern U32 **restore_qpi_direct2core; ++extern U32 *occupied_core_ids; ++/**************************************************************************** ++ ** Handy Short cuts ++ ***************************************************************************/ ++ ++/* ++ * CONTROL_THIS_CPU() ++ * Parameters ++ * None ++ * Returns ++ * CPU number of the processor being executed on ++ * ++ */ ++#define CONTROL_THIS_CPU() smp_processor_id() ++ ++/* ++ * CONTROL_THIS_RAW_CPU() ++ * Parameters ++ * None ++ * Returns ++ * CPU number of the processor being executed on ++ * ++ */ ++#define CONTROL_THIS_RAW_CPU() (raw_smp_processor_id()) ++/**************************************************************************** ++ ** Interface definitions ++ ***************************************************************************/ ++ ++/* ++ * Execution Control Functions ++ */ ++ ++extern VOID CONTROL_Invoke_Cpu(S32 cpuid, VOID (*func)(PVOID), PVOID ctx); ++ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each func invocation ++ * @param blocking - Wait for invoked function to complete ++ * @param exclude - exclude the current core from executing the code ++ * ++ * @returns none ++ * ++ * @brief Service routine 
to handle all kinds of parallel invoke on all ++ * CPU calls ++ * ++ * Special Notes: ++ * Invoke the function provided in parallel in either a ++ * blocking/non-blocking mode. The current core may be excluded if desired. ++ * NOTE - Do not call this function directly from source code. Use the aliases ++ * CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), ++ * CONTROL_Invoke_Parallel_XS(). ++ * ++ */ ++extern VOID CONTROL_Invoke_Parallel_Service(VOID (*func)(PVOID), PVOID ctx, ++ S32 blocking, S32 exclude); ++ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel(func, ctx) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * ++ * @returns none ++ * ++ * @brief Invoke the named function in parallel. ++ * Wait for all the functions to complete. ++ * ++ * Special Notes: ++ * Invoke the function named in parallel, including the CPU ++ * that the control is being invoked on ++ * Macro built on the service routine ++ * ++ */ ++#define CONTROL_Invoke_Parallel(a, b) \ ++ CONTROL_Invoke_Parallel_Service((a), (b), TRUE, FALSE) ++ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel_NB(func, ctx) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * ++ * @returns none ++ * ++ * @brief Invoke the named function in parallel. ++ * DO NOT Wait for all the functions to complete. 
++ * ++ * Special Notes: ++ * Invoke the function named in parallel, including the CPU ++ * that the control is being invoked on ++ * Macro built on the service routine ++ * ++ */ ++#define CONTROL_Invoke_Parallel_NB(a, b) \ ++ CONTROL_Invoke_Parallel_Service((a), (b), FALSE, FALSE) ++ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel_XS(func, ctx) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * ++ * @returns none ++ * ++ * @brief Invoke the named function in parallel. ++ * Wait for all the functions to complete. ++ * ++ * Special Notes: ++ * Invoke the function named in parallel, excluding the CPU ++ * that the control is being invoked on ++ * Macro built on the service routine ++ * ++ */ ++#define CONTROL_Invoke_Parallel_XS(a, b) \ ++ CONTROL_Invoke_Parallel_Service((a), (b), TRUE, TRUE) ++ ++/* ++ * @fn VOID CONTROL_Memory_Tracker_Init(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Initializes Memory Tracker ++ * ++ * Special Notes: ++ * This should only be called when the ++ * the driver is being loaded. ++ */ ++extern VOID CONTROL_Memory_Tracker_Init(void); ++ ++/* ++ * @fn VOID CONTROL_Memory_Tracker_Free(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Frees memory used by Memory Tracker ++ * ++ * Special Notes: ++ * This should only be called when the ++ * driver is being unloaded. 
++ */ ++extern VOID CONTROL_Memory_Tracker_Free(void); ++ ++/* ++ * @fn VOID CONTROL_Memory_Tracker_Compaction(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Compacts the memory allocator if holes are detected ++ * ++ * Special Notes: ++ * At end of collection (or at other safe sync point), ++ * reclaim/compact space used by mem tracker ++ */ ++extern VOID CONTROL_Memory_Tracker_Compaction(void); ++ ++/* ++ * @fn PVOID CONTROL_Allocate_Memory(size) ++ * ++ * @param IN size - size of the memory to allocate ++ * ++ * @returns char* - pointer to the allocated memory block ++ * ++ * @brief Allocate and zero memory ++ * ++ * Special Notes: ++ * Allocate memory in the GFP_KERNEL pool. ++ * ++ * Use this if memory is to be allocated within a context where ++ * the allocator can block the allocation (e.g., by putting ++ * the caller to sleep) while it tries to free up memory to ++ * satisfy the request. Otherwise, if the allocation must ++ * occur atomically (e.g., caller cannot sleep), then use ++ * CONTROL_Allocate_KMemory instead. ++ */ ++extern PVOID CONTROL_Allocate_Memory(size_t size); ++ ++/* ++ * @fn PVOID CONTROL_Allocate_KMemory(size) ++ * ++ * @param IN size - size of the memory to allocate ++ * ++ * @returns char* - pointer to the allocated memory block ++ * ++ * @brief Allocate and zero memory ++ * ++ * Special Notes: ++ * Allocate memory in the GFP_ATOMIC pool. ++ * ++ * Use this if memory is to be allocated within a context where ++ * the allocator cannot block the allocation (e.g., by putting ++ * the caller to sleep) as it tries to free up memory to ++ * satisfy the request. Examples include interrupt handlers, ++ * process context code holding locks, etc. 
++ */ ++extern PVOID CONTROL_Allocate_KMemory(size_t size); ++ ++/* ++ * @fn PVOID CONTROL_Free_Memory(location) ++ * ++ * @param IN location - size of the memory to allocate ++ * ++ * @returns pointer to the allocated memory block ++ * ++ * @brief Frees the memory block ++ * ++ * Special Notes: ++ * Does not try to free memory if fed with a NULL pointer ++ * Expected usage: ++ * ptr = CONTROL_Free_Memory(ptr); ++ */ ++extern PVOID CONTROL_Free_Memory(PVOID location); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/core2.h b/drivers/platform/x86/sepdk/inc/core2.h +new file mode 100644 +index 000000000000..8a6c0835a623 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/core2.h +@@ -0,0 +1,49 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _CORE2_H_ ++#define _CORE2_H_ ++ ++#include "msrdefs.h" ++ ++extern DISPATCH_NODE core2_dispatch; ++extern DISPATCH_NODE corei7_dispatch; ++extern DISPATCH_NODE corei7_dispatch_nehalem; ++extern DISPATCH_NODE corei7_dispatch_htoff_mode; ++extern DISPATCH_NODE corei7_dispatch_2; ++extern DISPATCH_NODE corei7_dispatch_htoff_mode_2; ++ ++#define CORE2UNC_BLBYPASS_BITMASK 0x00000001 ++#define CORE2UNC_DISABLE_BL_BYPASS_MSR 0x39C ++ ++#if defined(DRV_IA32) ++#define CORE2_LBR_DATA_BITS 32 ++#else ++#define CORE2_LBR_DATA_BITS 48 ++#endif ++ ++#define CORE2_LBR_BITMASK ((1ULL << CORE2_LBR_DATA_BITS) - 1) ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/cpumon.h b/drivers/platform/x86/sepdk/inc/cpumon.h +new file mode 100644 +index 000000000000..0ce584c1c805 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/cpumon.h +@@ -0,0 +1,53 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _CPUMON_H_ ++#define _CPUMON_H_ ++ ++#include ++#include "lwpmudrv_defines.h" ++ ++/* ++ * Defines ++ */ ++ ++/** ++ * Function Declarations ++ */ ++ ++/* ++ * CPUMON control functions ++ */ ++ ++extern VOID CPUMON_Install_Cpuhooks(void); ++extern VOID CPUMON_Remove_Cpuhooks(void); ++#if defined(DRV_CPU_HOTPLUG) ++extern DRV_BOOL CPUMON_is_Online_Allowed(void); ++extern DRV_BOOL CPUMON_is_Offline_Allowed(void); ++extern VOID CPUMON_Online_Cpu(PVOID parm); ++extern VOID CPUMON_Offline_Cpu(PVOID parm); ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/ecb_iterators.h b/drivers/platform/x86/sepdk/inc/ecb_iterators.h +new file mode 100644 +index 000000000000..10527535925f +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/ecb_iterators.h +@@ -0,0 +1,581 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _ECB_ITERATORS_H_ ++#define _ECB_ITERATORS_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* ++ * Loop macros to walk through the event control block ++ * Use for access only in the kernel mode ++ * To Do - Control access from kernel mode by a macro ++ */ ++ ++#define FOR_EACH_CCCR_REG(pecb, idx) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = CONTROL_THIS_CPU(); \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_cccr_start(pecb); \ ++ (idx) < \ ++ (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_CCCR_REG \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_CCCR_REG_CPU(pecb, idx, cpuid) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = cpuid; \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_cccr_start(pecb); \ ++ (idx) < \ 
++ (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_CCCR_REG_CPU \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_CCCR_GP_REG(pecb, idx) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = CONTROL_THIS_CPU(); \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_cccr_start(pecb); \ ++ (idx) < \ ++ (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (idx)++) { \ ++ if (ECB_entries_is_gp_reg_get((pecb), \ ++ (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_CCCR_GP_REG \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_ESCR_REG(pecb, idx) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = CONTROL_THIS_CPU(); \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_escr_start(pecb); \ ++ (idx) < \ ++ (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_ESCR_REG \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_ESCR_REG_CPU(pecb, idx, cpuid) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = cpuid; \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_escr_start(pecb); \ ++ (idx) < \ ++ (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_ESCR_REG_CPU \ ++ } \ ++ } \ 
++ } ++ ++ ++#define FOR_EACH_DATA_REG(pecb, idx) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = CONTROL_THIS_CPU(); \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_data_start(pecb); \ ++ (idx) < \ ++ (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_DATA_REG \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_DATA_REG_CPU(pecb, idx, cpuid) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = cpuid; \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_data_start(pecb); \ ++ (idx) < \ ++ (ECB_data_start(pecb) + ECB_data_pop(pecb)); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_DATA_REG_CPU \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_DATA_REG_UNC(pecb, device_idx, idx) \ ++ { \ ++ U32 idx; \ ++ U32 cpu = CONTROL_THIS_CPU(); \ ++ U32 pkg = core_to_package_map[cpu]; \ ++ U32 cur_grp = \ ++ LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[(device_idx)])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_data_start(pecb); \ ++ (idx) < \ ++ (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_DATA_REG_UNC \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_DATA_REG_UNC_VER2(pecb, i, idx) \ ++ { \ ++ U32 idx; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_data_start(pecb); \ ++ (idx) < \ ++ ECB_data_start(pecb) + ECB_data_pop(pecb); \ ++ 
(idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_DATA_REG_UNC_VER2 \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_DATA_GP_REG(pecb, idx) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = CONTROL_THIS_CPU(); \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_data_start(pecb); \ ++ (idx) < \ ++ ECB_data_start(pecb) + ECB_data_pop(pecb); \ ++ (idx)++) { \ ++ if (ECB_entries_is_gp_reg_get((pecb), \ ++ (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_DATA_GP_REG \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_DATA_GENERIC_REG(pecb, idx) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = CONTROL_THIS_CPU(); \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_data_start(pecb); \ ++ (idx) < \ ++ ECB_data_start(pecb) + ECB_data_pop(pecb); \ ++ (idx)++) { \ ++ if (ECB_entries_is_generic_reg_get( \ ++ (pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_DATA_GENERIC_REG \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_REG_ENTRY(pecb, idx) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = CONTROL_THIS_CPU(); \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = 0; (idx) < ECB_num_entries(pecb); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_REG_ENTRY \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_REG_ENTRY_UNC(pecb, device_idx, idx) \ ++ 
{ \ ++ U32 idx; \ ++ U32 cpu = CONTROL_THIS_CPU(); \ ++ U32 pkg = core_to_package_map[cpu]; \ ++ U32 cur_grp = \ ++ LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[(device_idx)])[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((idx) = 0; (idx) < ECB_num_entries(pecb); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_REG_ENTRY_UNC \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_PCI_DATA_REG(pecb, i, device_idx, offset_delta) \ ++ { \ ++ U32 i = 0; \ ++ U32 cpu = CONTROL_THIS_CPU(); \ ++ U32 pkg = core_to_package_map[cpu]; \ ++ U32 cur_grp = \ ++ LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[(device_idx)])[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((i) = ECB_data_start(pecb); \ ++ (i) < ECB_data_start(pecb) + ECB_data_pop(pecb); \ ++ (i)++) { \ ++ if (ECB_entries_reg_offset((pecb), (i)) == \ ++ 0) { \ ++ continue; \ ++ } \ ++ (offset_delta) = \ ++ (ECB_entries_reg_offset(pecb, i) - \ ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( \ ++ &ECB_pcidev_entry_node( \ ++ pecb))); ++ ++#define END_FOR_EACH_PCI_DATA_REG \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_PCI_DATA_REG_VER2(pecb, i, device_idx, offset_delta) \ ++ { \ ++ U32 i = 0; \ ++ if ((pecb)) { \ ++ for ((i) = ECB_data_start(pecb); \ ++ (i) < ECB_data_start(pecb) + ECB_data_pop(pecb); \ ++ (i)++) { \ ++ if (ECB_entries_reg_offset((pecb), (i)) == 0) {\ ++ continue; \ ++ } \ ++ (offset_delta) = \ ++ ECB_entries_reg_offset(pecb, i) - \ ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( \ ++ &ECB_pcidev_entry_node(pecb)); ++ ++#define END_FOR_EACH_PCI_DATA_REG_VER2 \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_PCI_DATA_REG_RAW(pecb, i, device_idx) \ ++ { \ ++ U32 i = 0; \ ++ U32 cpu = CONTROL_THIS_CPU(); \ ++ U32 pkg = core_to_package_map[cpu]; \ ++ U32 cur_grp = \ ++ LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ ++ ECB pecb = 
LWPMU_DEVICE_PMU_register_data( \ ++ &devices[(device_idx)])[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((i) = ECB_data_start(pecb); \ ++ (i) < ECB_data_start(pecb) + ECB_data_pop(pecb); \ ++ (i)++) { \ ++ if (ECB_entries_reg_offset((pecb), (i)) == \ ++ 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_PCI_DATA_REG_RAW \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_PCI_CCCR_REG_RAW(pecb, i, device_idx) \ ++ { \ ++ U32 i = 0; \ ++ U32 cpu = CONTROL_THIS_CPU(); \ ++ U32 pkg = core_to_package_map[cpu]; \ ++ U32 cur_grp = \ ++ LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[(device_idx)])[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((i) = ECB_cccr_start(pecb); \ ++ (i) < ECB_cccr_start(pecb) + ECB_cccr_pop(pecb); \ ++ (i)++) { \ ++ if (ECB_entries_reg_offset((pecb), (i)) == \ ++ 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_PCI_CCCR_REG_RAW \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_PCI_REG_RAW(pecb, i, device_idx) \ ++ { \ ++ U32 i = 0; \ ++ U32 cpu = CONTROL_THIS_CPU(); \ ++ U32 pkg = core_to_package_map[cpu]; \ ++ U32 cur_grp = \ ++ LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[(device_idx)])[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((i) = 0; (i) < ECB_num_entries(pecb); (i)++) { \ ++ if (ECB_entries_reg_offset((pecb), (i)) == \ ++ 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_PCI_REG_RAW \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_PCI_REG_RAW_GROUP(pecb, i, device_idx, cur_grp) \ ++ { \ ++ U32 i = 0; \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[(device_idx)])[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((i) = 0; (i) < ECB_num_entries(pecb); (i)++) { \ ++ if (ECB_entries_reg_offset((pecb), (i)) == \ ++ 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_PCI_REG_RAW_GROUP \ ++ } \ ++ } \ ++ } ++ ++#define CHECK_SAVE_RESTORE_EVENT_INDEX(prev_ei, cur_ei, evt_index) \ ++ { \ ++ if (prev_ei == -1) { \ ++ prev_ei = 
cur_ei; \ ++ } \ ++ if (prev_ei < cur_ei) { \ ++ prev_ei = cur_ei; \ ++ evt_index++; \ ++ } else { \ ++ evt_index = 0; \ ++ prev_ei = cur_ei; \ ++ } \ ++ } ++ ++#define FOR_EACH_REG_ENTRY_UNC_WRITE_MSR(pecb, device_idx, idx) \ ++ { \ ++ U32 idx; \ ++ U32 cpu = CONTROL_THIS_CPU(); \ ++ U32 pkg = core_to_package_map[cpu]; \ ++ U32 cur_grp = \ ++ LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[(device_idx)])[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((idx) = 0; (idx) < ECB_num_entries(pecb); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_REG_ENTRY_UNC \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_REG_UNC_OPERATION(pecb, device_idx, idx, operation) \ ++ { \ ++ U32 idx; \ ++ U32 cpu = CONTROL_THIS_CPU(); \ ++ U32 pkg = core_to_package_map[cpu]; \ ++ U32 cur_grp = \ ++ LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[(device_idx)])[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_operations_register_start( \ ++ pecb, (operation)); \ ++ (idx) < \ ++ (ECB_operations_register_start(pecb, \ ++ (operation)) + \ ++ ECB_operations_register_len(pecb, (operation))); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_REG_UNC_OPERATION \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_NONEVENT_REG(pecb, idx) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = CONTROL_THIS_CPU(); \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_metric_start(pecb); \ ++ (idx) < \ ++ ECB_metric_start(pecb) + ECB_metric_pop(pecb); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define 
END_FOR_EACH_NONEVENT_REG \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_REG_CORE_OPERATION(pecb, idx, operation) \ ++ { \ ++ U32 idx; \ ++ U32 this_cpu__ = CONTROL_THIS_CPU(); \ ++ CPU_STATE pcpu__ = &pcb[this_cpu__]; \ ++ U32 cur_grp = CPU_STATE_current_group(pcpu__); \ ++ U32 dev_idx = core_to_dev_map[this_cpu__]; \ ++ ECB pecb = LWPMU_DEVICE_PMU_register_data( \ ++ &devices[dev_idx])[cur_grp]; \ ++ if ((pecb)) { \ ++ for ((idx) = ECB_operations_register_start( \ ++ pecb, (operation)); \ ++ (idx) < \ ++ (ECB_operations_register_start(pecb, \ ++ (operation)) + \ ++ ECB_operations_register_len(pecb, (operation))); \ ++ (idx)++) { \ ++ if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_REG_CORE_OPERATION \ ++ } \ ++ } \ ++ } ++ ++#define ECB_SECTION_REG_INDEX(pecb, idx, operation) \ ++ (ECB_operations_register_start(pecb, operation) + (idx)) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/eventmux.h b/drivers/platform/x86/sepdk/inc/eventmux.h +new file mode 100644 +index 000000000000..4a96bb18ae85 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/eventmux.h +@@ -0,0 +1,42 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++/* ++ * cvs_id[] = "$Id$" ++ */ ++ ++#ifndef _EVENTMUX_H_ ++#define _EVENTMUX_H_ ++ ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_types.h" ++ ++extern VOID EVENTMUX_Start(void); ++ ++extern VOID EVENTMUX_Initialize(void); ++ ++extern VOID EVENTMUX_Destroy(void); ++ ++#endif /* _EVENTMUX_H_ */ +diff --git a/drivers/platform/x86/sepdk/inc/gfx.h b/drivers/platform/x86/sepdk/inc/gfx.h +new file mode 100644 +index 000000000000..2bad4d712527 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/gfx.h +@@ -0,0 +1,39 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _GFX_H_ ++#define _GFX_H_ ++ ++#include "lwpmudrv_ioctl.h" ++ ++extern OS_STATUS GFX_Read(S8 * buffer); ++ ++extern OS_STATUS GFX_Set_Event_Code(IOCTL_ARGS arg); ++ ++extern OS_STATUS GFX_Start(void); ++ ++extern OS_STATUS GFX_Stop(void); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/gmch.h b/drivers/platform/x86/sepdk/inc/gmch.h +new file mode 100644 +index 000000000000..baa35728c4bf +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/gmch.h +@@ -0,0 +1,31 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _GMCH_H_ ++#define _GMCH_H_ ++ ++extern CS_DISPATCH_NODE gmch_dispatch; ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/haswellunc_sa.h b/drivers/platform/x86/sepdk/inc/haswellunc_sa.h +new file mode 100644 +index 000000000000..bd4fb6887d0c +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/haswellunc_sa.h +@@ -0,0 +1,57 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. 
This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _HSWUNC_SA_H_INC_ ++#define _HSWUNC_SA_H_INC_ ++ ++/* ++ * Local to this architecture: Haswell uncore SA unit ++ * ++ */ ++#define HSWUNC_SA_DESKTOP_DID 0x000C04 ++#define HSWUNC_SA_NEXT_ADDR_OFFSET 4 ++#define HSWUNC_SA_BAR_ADDR_SHIFT 32 ++#define HSWUNC_SA_BAR_ADDR_MASK 0x0007FFFFFF000LL ++#define HSWUNC_SA_MAX_PCI_DEVICES 16 ++#define HSWUNC_SA_MAX_COUNT 0x00000000FFFFFFFFLL ++#define HSWUNC_SA_MAX_COUNTERS 8 ++ ++#define HSWUNC_SA_MCHBAR_MMIO_PAGE_SIZE (8 * 4096) ++#define HSWUNC_SA_PCIEXBAR_MMIO_PAGE_SIZE (57 * 4096) ++#define HSWUNC_SA_OTHER_BAR_MMIO_PAGE_SIZE 4096 ++#define HSWUNC_SA_GDXCBAR_OFFSET_LO 0x5420 ++#define HSWUNC_SA_GDXCBAR_OFFSET_HI 0x5424 ++#define HSWUNC_SA_GDXCBAR_MASK 0x7FFFFFF000LL ++#define HSWUNC_SA_CHAP_SAMPLE_DATA 0x00020000 ++#define HSWUNC_SA_CHAP_STOP 0x00040000 ++#define HSWUNC_SA_CHAP_CTRL_REG_OFFSET 0x0 ++ ++#define HSWUNC_SA_PAGE_MASK 0xfffffffffffff000 ++#define HSWUNC_SA_PAGE_OFFSET_MASK 0xfff ++#define HSWUNC_SA_PAGE_SIZE 0x1000 ++ ++extern DISPATCH_NODE hswunc_sa_dispatch; ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/jkt_unc_ha.h b/drivers/platform/x86/sepdk/inc/jkt_unc_ha.h +new file mode 100644 +index 000000000000..aa6bf7624075 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/jkt_unc_ha.h +@@ -0,0 +1,37 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. 
++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _JKTUNC_HA_H_INC_ ++#define _JKTUNC_HA_H_INC_ ++ ++#define JKTUNC_HA_DID 0x3C46 ++#define JKTUNC_HA_DEVICE_NO 14 ++#define JKTUNC_HA_FUNC_NO 1 ++#define JKTUNC_HA_D2C_OFFSET 0x84 ++#define JKTUNC_HA_D2C_BITMASK 0x00000002 ++#define JKTUNC_HA_D2C_DID 0x3CA0 ++#define JKTUNC_HA_D2C_FUNC_NO 0 ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/jkt_unc_qpill.h b/drivers/platform/x86/sepdk/inc/jkt_unc_qpill.h +new file mode 100644 +index 000000000000..debde4d48252 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/jkt_unc_qpill.h +@@ -0,0 +1,64 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. 
++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++ ++#ifndef _JKTUNC_QPILL_H_INC_ ++#define _JKTUNC_QPILL_H_INC_ ++ ++/* ++ * Local to this architecture: JKT uncore QPILL unit ++ * ++ */ ++#define JKTUNC_QPILL0_DID 0x3C41 ++ // --- QPILL0 PerfMon DID --- B:D 1:8:2 ++#define JKTUNC_QPILL_MM0_DID 0x3C86 ++ // --- QPILL0 PerfMon MM Config DID --- B:D 1:8:6 ++#define JKTUNC_QPILL1_DID 0x3C42 ++ // --- QPILL1 PerfMon DID --- B:D 1:9:2 ++#define JKTUNC_QPILL2_DID 0x3C44 ++ // --- QPILL0 PerfMon DID --- B:D 1:8:2 ++#define JKTUNC_QPILL3_DID 0x3C45 ++ // --- QPILL0 PerfMon DID --- B:D 1:8:2 ++#define JKTUNC_QPILL_MM1_DID 0x3C96 ++ // --- QPILL1 PerfMon MM Config DID --- B:D 1:9:6 ++#define JKTUNC_QPILL_MCFG_DID 0x3C28 ++ // --- QPILL1 PerfMon MCFG DID --- B:D 0:5:0 ++#define JKTUNC_QPILL0_D2C_DID 0x3C80 ++ // --- D2C QPILL Port 1 config DID B:D:F X:8:0 ++#define JKTUNC_QPILL1_D2C_DID 0x3C90 ++ // --- D2C QPILL Port 2 config DID B:D:F X:9:0 ++ ++#define JKTUNC_QPILL_PERF_GLOBAL_CTRL 0x391 ++ ++#define IA32_DEBUG_CTRL 0x1D9 ++ ++#define JKTUNC_QPILL_D2C_OFFSET 0x80 ++#define JKTUNC_QPILL_D2C_BITMASK 0x00000002 
++#define JKTUNC_QPILL_FUNC_NO 2 ++#define JKTUNC_QPILL_D2C_FUNC_NO 0 ++ ++extern DISPATCH_NODE jktunc_qpill_dispatch; ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/linuxos.h b/drivers/platform/x86/sepdk/inc/linuxos.h +new file mode 100644 +index 000000000000..3e4c2c96476f +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/linuxos.h +@@ -0,0 +1,79 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _LINUXOS_H_ ++#define _LINUXOS_H_ ++ ++// defines for options parameter of samp_load_image_notify_routine() ++#define LOPTS_1ST_MODREC 0x1 ++#define LOPTS_GLOBAL_MODULE 0x2 ++#define LOPTS_EXE 0x4 ++ ++#define FOR_EACH_TASK for_each_process ++#if KERNEL_VERSION(3, 19, 00) <= LINUX_VERSION_CODE ++#define DRV_F_DENTRY f_path.dentry ++#else ++#define DRV_F_DENTRY f_dentry ++#endif ++ ++#if KERNEL_VERSION(2, 6, 25) > LINUX_VERSION_CODE ++#define D_PATH(vm_file, name, maxlen) \ ++ d_path((vm_file)->f_dentry, (vm_file)->f_vfsmnt, (name), (maxlen)) ++#else ++#define D_PATH(vm_file, name, maxlen) \ ++ d_path(&((vm_file)->f_path), (name), (maxlen)) ++#endif ++ ++#if KERNEL_VERSION(3, 7, 0) > LINUX_VERSION_CODE ++#define DRV_VM_MOD_EXECUTABLE(vma) (vma->vm_flags & VM_EXECUTABLE) ++#else ++#define DRV_VM_MOD_EXECUTABLE(vma) (linuxos_Equal_VM_Exe_File(vma)) ++#define DRV_MM_EXE_FILE_PRESENT ++#endif ++ ++#if KERNEL_VERSION(2, 6, 32) <= LINUX_VERSION_CODE ++#define DRV_ALLOW_VDSO ++#endif ++ ++#if defined(DRV_IA32) ++#define FIND_VMA(mm, data) find_vma((mm), (U32)(data)) ++#endif ++#if defined(DRV_EM64T) ++#define FIND_VMA(mm, data) find_vma((mm), (U64)(data)) ++#endif ++ ++extern VOID LINUXOS_Install_Hooks(void); ++ ++extern VOID LINUXOS_Uninstall_Hooks(void); ++ ++extern OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end); ++ ++extern DRV_BOOL LINUXOS_Check_KVM_Guest_Process(void); ++#if defined(DRV_CPU_HOTPLUG) ++extern VOID LINUXOS_Register_Hotplug(void); ++ ++extern VOID LINUXOS_Unregister_Hotplug(void); ++#endif ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/lwpmudrv.h b/drivers/platform/x86/sepdk/inc/lwpmudrv.h +new file mode 100644 +index 000000000000..37c8109a0e8b +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/lwpmudrv.h +@@ -0,0 +1,551 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel 
Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _LWPMUDRV_H_ ++#define _LWPMUDRV_H_ ++ ++#include ++#include ++#include ++#if KERNEL_VERSION(4, 12, 0) > LINUX_VERSION_CODE ++#include ++#else ++#include ++#endif ++#include ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_version.h" ++#include "lwpmudrv_struct.h" ++#include "pebs.h" ++#if defined(BUILD_CHIPSET) ++#include "lwpmudrv_chipset.h" ++#endif ++ ++#if defined(DRV_SEP_ACRN_ON) ++#include ++#include ++#endif ++ ++#if defined(X86_FEATURE_KAISER) || defined(CONFIG_KAISER) || \ ++ defined(KAISER_HEADER_PRESENT) ++#define DRV_USE_KAISER ++#elif defined(X86_FEATURE_PTI) ++#define DRV_USE_PTI ++#endif ++ ++/* ++ * Print macros for driver messages ++ */ ++ ++#if defined(MYDEBUG) ++#define SEP_PRINT_DEBUG(fmt, args...) \ ++ { \ ++ printk(KERN_INFO SEP_MSG_PREFIX " [DEBUG] " fmt, ##args); \ ++ } ++#else ++#define SEP_PRINT_DEBUG(fmt, args...) \ ++ { \ ++ ; \ ++ } ++#endif ++ ++#define SEP_PRINT(fmt, args...) \ ++ { \ ++ printk(KERN_INFO SEP_MSG_PREFIX " " fmt, ##args); \ ++ } ++ ++#define SEP_PRINT_WARNING(fmt, args...) \ ++ { \ ++ printk(KERN_ALERT SEP_MSG_PREFIX " [Warning] " fmt, ##args); \ ++ } ++ ++#define SEP_PRINT_ERROR(fmt, args...) \ ++ { \ ++ printk(KERN_CRIT SEP_MSG_PREFIX " [ERROR] " fmt, ##args); \ ++ } ++ ++// Macro to return the thread group id ++#define GET_CURRENT_TGID() (current->tgid) ++ ++#define OVERFLOW_ARGS U64 *, U64 * ++ ++typedef struct DRV_EVENT_MASK_NODE_S DRV_EVENT_MASK_NODE; ++typedef DRV_EVENT_MASK_NODE * DRV_EVENT_MASK; ++ ++struct DRV_EVENT_MASK_NODE_S { ++ U16 event_idx; // 0 <= index < MAX_EVENTS ++ U16 reserved1; ++ union { ++ U32 bitFields1; ++ struct { ++ U32 precise : 1; ++ U32 lbr_capture : 1; ++ U32 dear_capture : 1; ++ // Indicates which events need to have additional ++ // registers read because they are DEAR events. 
++ U32 iear_capture : 1; ++ // Indicates which events need to have additional ++ // registers read because they are IEAR events. ++ U32 btb_capture : 1; ++ // Indicates which events need to have additional ++ // registers read because they are BTB events. ++ U32 ipear_capture : 1; ++ // Indicates which events need to have additional ++ // registers read because they are IPEAR events. ++ U32 uncore_capture : 1; ++ U32 branch : 1; ++ // Whether event is related to branch operation or not ++ U32 perf_metrics_capture : 1; ++ // Whether the event is related to perf_metrics or not ++ U32 reserved : 23; ++ } s1; ++ } u1; ++}; ++ ++#define DRV_EVENT_MASK_event_idx(d) ((d)->event_idx) ++#define DRV_EVENT_MASK_bitFields1(d) ((d)->u1.bitFields1) ++#define DRV_EVENT_MASK_precise(d) ((d)->u1.s1.precise) ++#define DRV_EVENT_MASK_lbr_capture(d) ((d)->u1.s1.lbr_capture) ++#define DRV_EVENT_MASK_dear_capture(d) ((d)->u1.s1.dear_capture) ++#define DRV_EVENT_MASK_iear_capture(d) ((d)->u1.s1.iear_capture) ++#define DRV_EVENT_MASK_btb_capture(d) ((d)->u1.s1.btb_capture) ++#define DRV_EVENT_MASK_ipear_capture(d) ((d)->u1.s1.ipear_capture) ++#define DRV_EVENT_MASK_uncore_capture(d) ((d)->u1.s1.uncore_capture) ++#define DRV_EVENT_MASK_branch(d) ((d)->u1.s1.branch) ++#define DRV_EVENT_MASK_perf_metrics_capture(d) \ ++ ((d)->u1.s1.perf_metrics_capture) ++ ++#define MAX_OVERFLOW_EVENTS 16 ++/* This defines the maximum number of overflow events per interrupt. \ ++ * In order to reduce memory footprint, the value should be at least \ ++ * the number of fixed and general PMU registers. \ ++ * Sandybridge with HT off has 11 PMUs(3 fixed and 8 generic) ++ */ ++ ++typedef struct DRV_MASKS_NODE_S DRV_MASKS_NODE; ++typedef DRV_MASKS_NODE * DRV_MASKS; ++ ++/* ++ * @macro DRV_EVENT_MASK_NODE_S ++ * @brief ++ * The structure is used to store overflow events when handling PMU interrupt. 
++ * This approach should be more efficient than checking all event masks ++ * if there are many events to be monitored ++ * and only a few events among them have overflow per interrupt. ++ */ ++struct DRV_MASKS_NODE_S { ++ DRV_EVENT_MASK_NODE eventmasks[MAX_OVERFLOW_EVENTS]; ++ U8 masks_num; // 0 <= mask_num <= MAX_OVERFLOW_EVENTS ++}; ++ ++#define DRV_MASKS_masks_num(d) ((d)->masks_num) ++#define DRV_MASKS_eventmasks(d) ((d)->eventmasks) ++ ++/* ++ * Dispatch table for virtualized functions. ++ * Used to enable common functionality for different ++ * processor microarchitectures ++ */ ++typedef struct DISPATCH_NODE_S DISPATCH_NODE; ++typedef DISPATCH_NODE *DISPATCH; ++ ++struct DISPATCH_NODE_S { ++ VOID (*init)(PVOID); ++ VOID (*fini)(PVOID); ++ VOID (*write)(PVOID); ++ VOID (*freeze)(PVOID); ++ VOID (*restart)(PVOID); ++ VOID (*read_data)(PVOID); ++ VOID (*check_overflow)(DRV_MASKS); ++ VOID (*swap_group)(DRV_BOOL); ++ U64 (*read_lbrs)(PVOID, PVOID); ++ VOID (*cleanup)(PVOID); ++ VOID (*hw_errata)(void); ++ VOID (*read_power)(PVOID); ++ U64 (*check_overflow_errata)(ECB, U32, U64); ++ VOID (*read_counts)(PVOID, U32); ++ U64 (*check_overflow_gp_errata)(ECB, U64 *); ++ VOID (*read_ro)(PVOID, U32, U32); ++ VOID (*platform_info)(PVOID); ++ VOID (*trigger_read)(PVOID, U32); ++ // Counter reads triggered/initiated by User mode timer ++ VOID (*scan_for_uncore)(PVOID); ++ VOID (*read_metrics)(PVOID); ++}; ++ ++#if defined(BUILD_CHIPSET) ++/* ++ * Dispatch table for virtualized functions. ++ * Used to enable common functionality for different ++ * chipset types ++ */ ++typedef struct CS_DISPATCH_NODE_S CS_DISPATCH_NODE; ++typedef CS_DISPATCH_NODE *CS_DISPATCH; ++struct CS_DISPATCH_NODE_S { ++ U32 (*init_chipset)(void); ++ // initialize chipset (must be called before the others!) 
++ VOID (*start_chipset)(void); // start the chipset counters ++ VOID (*read_counters)(PVOID); ++ // at interrupt time, read out the chipset counters ++ VOID (*stop_chipset)(void); // stop the chipset counters ++ VOID (*fini_chipset)(void); ++ // clean up resources and reset chipset state (called last) ++ VOID (*Trigger_Read)(void); ++ // GMCH counter reads triggered/initiated by User mode timer ++}; ++extern CS_DISPATCH cs_dispatch; ++#endif ++ ++/* ++ * global declarations ++ */ ++ ++extern VOID **PMU_register_data; ++extern VOID **desc_data; ++extern U64 *prev_counter_data; ++extern U64 *read_counter_info; ++extern U64 total_ram; ++extern U32 output_buffer_size; ++extern U32 saved_buffer_size; ++extern uid_t uid; ++extern DRV_CONFIG drv_cfg; ++extern volatile pid_t control_pid; ++extern U64 *interrupt_counts; ++extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++ ++extern DRV_BOOL multi_pebs_enabled; ++extern DRV_BOOL unc_buf_init; ++ ++extern DRV_SETUP_INFO_NODE req_drv_setup_info; ++ ++ ++/* needed for target agent support */ ++extern U32 osid; ++extern DRV_BOOL sched_switch_enabled; ++ ++#if defined(BUILD_CHIPSET) ++extern CHIPSET_CONFIG pma; ++#endif ++ ++extern UNCORE_TOPOLOGY_INFO_NODE uncore_topology; ++extern PLATFORM_TOPOLOGY_PROG_NODE platform_topology_prog_node; ++extern wait_queue_head_t wait_exit; ++/* ++ * end of declarations ++ */ ++ ++/*! 
++ * @struct LWPMU_DEVICE_NODE_S ++ * @brief Struct to hold fields per device ++ * PMU_register_data_unc - MSR info ++ * dispatch_unc - dispatch table ++ * em_groups_counts_unc - # groups ++ * pcfg_unc - config struct ++ */ ++typedef struct LWPMU_DEVICE_NODE_S LWPMU_DEVICE_NODE; ++typedef LWPMU_DEVICE_NODE * LWPMU_DEVICE; ++ ++struct LWPMU_DEVICE_NODE_S { ++ VOID **PMU_register_data; ++ DISPATCH dispatch; ++ S32 em_groups_count; ++ VOID *pcfg; ++ U64 **unc_prev_value; ++ U64 ***unc_acc_value; ++ U64 counter_mask; ++ U64 num_events; ++ U32 num_units; ++ VOID *ec; ++ S32 *cur_group; ++ S32 pci_dev_node_index; ++ U32 device_type; ++ LBR lbr; ++ PWR pwr; ++ PEBS_INFO_NODE pebs_info_node; ++}; ++ ++#define LWPMU_DEVICE_PMU_register_data(dev) ((dev)->PMU_register_data) ++#define LWPMU_DEVICE_dispatch(dev) ((dev)->dispatch) ++#define LWPMU_DEVICE_em_groups_count(dev) ((dev)->em_groups_count) ++#define LWPMU_DEVICE_pcfg(dev) ((dev)->pcfg) ++#define LWPMU_DEVICE_prev_value(dev) ((dev)->unc_prev_value) ++#define LWPMU_DEVICE_acc_value(dev) ((dev)->unc_acc_value) ++#define LWPMU_DEVICE_counter_mask(dev) ((dev)->counter_mask) ++#define LWPMU_DEVICE_num_events(dev) ((dev)->num_events) ++#define LWPMU_DEVICE_num_units(dev) ((dev)->num_units) ++#define LWPMU_DEVICE_ec(dev) ((dev)->ec) ++#define LWPMU_DEVICE_cur_group(dev) ((dev)->cur_group) ++#define LWPMU_DEVICE_pci_dev_node_index(dev) ((dev)->pci_dev_node_index) ++#define LWPMU_DEVICE_device_type(dev) ((dev)->device_type) ++#define LWPMU_DEVICE_lbr(dev) ((dev)->lbr) ++#define LWPMU_DEVICE_pwr(dev) ((dev)->pwr) ++#define LWPMU_DEVICE_pebs_dispatch(dev) ((dev)->pebs_info_node.pebs_dispatch) ++ ++#define LWPMU_DEVICE_pebs_record_size(dev) \ ++ ((dev)->pebs_info_node.pebs_record_size) ++#define LWPMU_DEVICE_apebs_basic_offset(dev) \ ++ ((dev)->pebs_info_node.apebs_basic_offset) ++#define LWPMU_DEVICE_apebs_mem_offset(dev) \ ++ ((dev)->pebs_info_node.apebs_mem_offset) ++#define LWPMU_DEVICE_apebs_gpr_offset(dev) \ ++ 
((dev)->pebs_info_node.apebs_gpr_offset) ++#define LWPMU_DEVICE_apebs_xmm_offset(dev) \ ++ ((dev)->pebs_info_node.apebs_xmm_offset) ++#define LWPMU_DEVICE_apebs_lbr_offset(dev) \ ++ ((dev)->pebs_info_node.apebs_lbr_offset) ++ ++extern U32 num_devices; ++extern U32 cur_device; ++extern LWPMU_DEVICE devices; ++extern U64 *pmu_state; ++ ++// Handy macro ++#define TSC_SKEW(this_cpu) (cpu_tsc[this_cpu] - cpu_tsc[0]) ++ ++/* ++ * The IDT / GDT descriptor for use in identifying code segments ++ */ ++#if defined(DRV_EM64T) ++#pragma pack(push, 1) ++typedef struct _idtgdtDesc { ++ U16 idtgdt_limit; ++ PVOID idtgdt_base; ++} IDTGDT_DESC; ++#pragma pack(pop) ++ ++extern IDTGDT_DESC gdt_desc; ++#endif ++ ++extern DRV_BOOL NMI_mode; ++extern DRV_BOOL KVM_guest_mode; ++ ++#if defined(DRV_SEP_ACRN_ON) ++#define SBUF_MAX_SIZE (1ULL << 22) ++#define SBUF_HEAD_SIZE 64 ++ ++#define TRACE_SBUF_SIZE (4 * 1024 * 1024) ++#define TRACE_ELEMENT_SIZE 32 /* byte */ ++#define TRACE_ELEMENT_NUM \ ++ ((TRACE_SBUF_SIZE - SBUF_HEAD_SIZE) / TRACE_ELEMENT_SIZE) ++ ++#define COLLECTOR_SEP 0 ++#define COLLECTOR_SOCWATCH 1 ++ ++enum PROFILING_FEATURE { ++ CORE_PMU_SAMPLING = 0, ++ CORE_PMU_COUNTING, ++ PEBS_PMU_SAMPLING, ++ LBR_PMU_SAMPLING, ++ UNCORE_PMU_SAMPLING, ++ VM_SWITCH_TRACING, ++ // Add socwatch feature ++}; ++ ++enum sbuf_type { ++ ACRN_TRACE, ++ ACRN_HVLOG, ++ ACRN_SEP, ++ ACRN_SOCWATCH, ++ ACRN_SBUF_TYPE_MAX, ++}; ++ ++struct data_header { ++ int32_t collector_id; ++ uint16_t cpu_id; ++ uint16_t data_type; ++ uint64_t tsc; /* TSC */ ++ uint64_t payload_size; ++ uint64_t reserved; ++} __aligned(32); ++ ++#define PROFILING_DATA_HEADER_SIZE (sizeof(struct data_header)) ++ ++struct core_pmu_sample { ++ /** context where PMI is triggered */ ++ uint32_t os_id; ++ /** the task id */ ++ uint32_t task_id; ++ /** instruction pointer */ ++ uint64_t rip; ++ /** the task name */ ++ char task[16]; ++ /** physical core ID */ ++ uint32_t cpu_id; ++ /** the process id */ ++ uint32_t process_id; ++ /** 
perf global status msr value (for overflow status) */ ++ uint64_t overflow_status; ++ /** rflags */ ++ uint32_t rflags; ++ /** code segment */ ++ uint32_t cs; ++} __aligned(32); ++ ++#define CORE_PMU_SAMPLE_SIZE (sizeof(struct core_pmu_sample)) ++ ++#define NUM_LBR_ENTRY 32 ++ ++struct lbr_pmu_sample { ++ /* LBR TOS */ ++ uint64_t lbr_tos; ++ /* LBR FROM IP */ ++ uint64_t lbr_from_ip[NUM_LBR_ENTRY]; ++ /* LBR TO IP */ ++ uint64_t lbr_to_ip[NUM_LBR_ENTRY]; ++ /* LBR info */ ++ uint64_t lbr_info[NUM_LBR_ENTRY]; ++} __aligned(32); ++ ++#define LBR_PMU_SAMPLE_SIZE (sizeof(struct lbr_pmu_sample)) ++ ++struct pmu_sample { ++ /* core pmu sample */ ++ struct core_pmu_sample csample; ++ /* lbr pmu sample */ ++ struct lbr_pmu_sample lsample; ++} __aligned(32); ++ ++#define PMU_SAMPLE_SIZE (sizeof(struct pmu_sample)) ++ ++struct vm_switch_trace { ++ uint64_t vmenter_tsc; ++ uint64_t vmexit_tsc; ++ uint64_t vmexit_reason; ++ int32_t os_id; ++} __aligned(32); ++ ++#define VM_SWITCH_TRACE_SIZE (sizeof(struct vm_switch_trace)) ++ ++typedef struct shared_buf shared_buf_t; ++typedef struct profiling_control profiling_control_t; ++typedef struct data_header data_header_t; ++typedef struct core_pmu_sample core_pmu_sample_t; ++typedef struct vm_switch_trace vm_switch_trace_t; ++ ++shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size); ++void sbuf_free(shared_buf_t *sbuf); ++int sbuf_get(shared_buf_t *sbuf, uint8_t *data); ++int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf); ++ ++extern shared_buf_t **samp_buf_per_cpu; ++ ++#define MAX_NR_PCPUS 8 ++#define MAX_NR_VCPUS 8 ++#define MAX_NR_VMS 6 ++#define MAX_MSR_LIST_NUM 15 ++#define MAX_GROUP_NUM 1 ++ ++enum MSR_OP_STATUS { MSR_OP_READY = 0, MSR_OP_REQUESTED, MSR_OP_HANDLED }; ++ ++enum MSR_OP_TYPE { ++ MSR_OP_NONE = 0, ++ MSR_OP_READ, ++ MSR_OP_WRITE, ++ MSR_OP_READ_CLEAR ++}; ++ ++enum PMU_MSR_TYPE { PMU_MSR_CCCR = 0, PMU_MSR_ESCR, PMU_MSR_DATA }; ++ ++struct profiling_msr_op { ++ /* value to 
write or location to write into */ ++ uint64_t value; ++ /* MSR address to read/write; last entry will have value of -1 */ ++ uint32_t msr_id; ++ /* parameter; usage depends on operation */ ++ uint16_t param; ++ uint8_t op_type; ++ uint8_t reg_type; ++}; ++ ++struct profiling_msr_ops_list { ++ int32_t collector_id; ++ uint32_t num_entries; ++ int32_t msr_op_state; ++ struct profiling_msr_op entries[MAX_MSR_LIST_NUM]; ++}; ++ ++struct profiling_vcpu_pcpu_map { ++ int32_t vcpu_id; ++ int32_t pcpu_id; ++ int32_t apic_id; ++}; ++ ++struct profiling_vm_info { ++ int32_t vm_id; ++ u_char guid[16]; ++ char vm_name[16]; ++ int32_t num_vcpus; ++ struct profiling_vcpu_pcpu_map cpu_map[MAX_NR_VCPUS]; ++}; ++ ++struct profiling_vm_info_list { ++ int32_t num_vms; ++ struct profiling_vm_info vm_list[MAX_NR_VMS]; ++}; ++ ++struct profiling_version_info { ++ int32_t major; ++ int32_t minor; ++ int64_t supported_features; ++ int64_t reserved; ++}; ++ ++struct profiling_control { ++ int32_t collector_id; ++ int32_t reserved; ++ uint64_t switches; ++}; ++ ++struct profiling_pmi_config { ++ uint32_t num_groups; ++ uint32_t trigger_count; ++ struct profiling_msr_op initial_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; ++ struct profiling_msr_op start_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; ++ struct profiling_msr_op stop_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; ++ struct profiling_msr_op entry_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; ++ struct profiling_msr_op exit_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM]; ++}; ++ ++struct profiling_vmsw_config { ++ int32_t collector_id; ++ struct profiling_msr_op initial_list[MAX_MSR_LIST_NUM]; ++ struct profiling_msr_op entry_list[MAX_MSR_LIST_NUM]; ++ struct profiling_msr_op exit_list[MAX_MSR_LIST_NUM]; ++}; ++ ++struct profiling_pcpuid { ++ uint32_t leaf; ++ uint32_t subleaf; ++ uint32_t eax; ++ uint32_t ebx; ++ uint32_t ecx; ++ uint32_t edx; ++}; ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/msrdefs.h 
b/drivers/platform/x86/sepdk/inc/msrdefs.h +new file mode 100644 +index 000000000000..40986ea111bb +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/msrdefs.h +@@ -0,0 +1,81 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _MSRDEFS_H_ ++#define _MSRDEFS_H_ ++ ++/* ++ * Arch Perf monitoring version 3 ++ */ ++#define IA32_PMC0 0x0C1 ++#define IA32_PMC1 0x0C2 ++#define IA32_PMC2 0x0C3 ++#define IA32_PMC3 0x0C4 ++#define IA32_PMC4 0x0C5 ++#define IA32_PMC5 0x0C6 ++#define IA32_PMC6 0x0C7 ++#define IA32_PMC7 0x0C8 ++#define IA32_FULL_PMC0 0x4C1 ++#define IA32_FULL_PMC1 0x4C2 ++#define IA32_PERFEVTSEL0 0x186 ++#define IA32_PERFEVTSEL1 0x187 ++#define IA32_FIXED_CTR0 0x309 ++#define IA32_FIXED_CTR1 0x30A ++#define IA32_FIXED_CTR2 0x30B ++#define IA32_FIXED_CTR3 0x30C ++#define IA32_PERF_CAPABILITIES 0x345 ++#define IA32_FIXED_CTRL 0x38D ++#define IA32_PERF_GLOBAL_STATUS 0x38E ++#define IA32_PERF_GLOBAL_CTRL 0x38F ++#define IA32_PERF_GLOBAL_OVF_CTRL 0x390 ++#define IA32_PEBS_ENABLE 0x3F1 ++#define IA32_MISC_ENABLE 0x1A0 ++#define IA32_DS_AREA 0x600 ++#define IA32_DEBUG_CTRL 0x1D9 ++#undef IA32_LBR_FILTER_SELECT ++#define IA32_LBR_FILTER_SELECT 0x1c8 ++#define IA32_PEBS_FRONTEND 0x3F7 ++#define IA32_PERF_METRICS 0x329 ++ ++#define COMPOUND_CTR_CTL 0x306 ++#define COMPOUND_PERF_CTR 0x307 ++#define COMPOUND_CTR_OVF_BIT 0x800 ++#define COMPOUND_CTR_OVF_SHIFT 12 ++ ++#define FIXED_CORE_CYCLE_GLOBAL_CTRL_MASK 0x200000000 ++#define FIXED_CORE_CYCLE_FIXED_CTRL_MASK 0xF0 ++ ++// REG INDEX inside GLOBAL CTRL SECTION ++enum { GLOBAL_CTRL_REG_INDEX = 0, ++ GLOBAL_OVF_CTRL_REG_INDEX, ++ PEBS_ENABLE_REG_INDEX, ++ DEBUG_CTRL_REG_INDEX, ++ FIXED_CTRL_REG_INDEX, ++}; ++ ++// REG INDEX inside GLOBAL STATUS SECTION ++enum { GLOBAL_STATUS_REG_INDEX = 0,}; ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/output.h b/drivers/platform/x86/sepdk/inc/output.h +new file mode 100644 +index 000000000000..483e0b5fb5d5 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/output.h +@@ -0,0 +1,120 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel 
Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _OUTPUT_H_ ++#define _OUTPUT_H_ ++ ++#include ++#include ++ ++/* ++ * Initial allocation ++ * Size of buffer = 512KB (2^19) ++ * number of buffers = 2 ++ * The max size of the buffer cannot exceed 1<<22 i.e. 
4MB ++ */ ++#define OUTPUT_SMALL_BUFFER (1 << 15) ++#define OUTPUT_LARGE_BUFFER (1 << 19) ++#define OUTPUT_CP_BUFFER (1 << 22) ++#define OUTPUT_MEMORY_THRESHOLD 0x8000000 ++ ++extern U32 output_buffer_size; ++extern U32 saved_buffer_size; ++#define OUTPUT_BUFFER_SIZE output_buffer_size ++#define OUTPUT_NUM_BUFFERS 2 ++#if defined(DRV_ANDROID) ++#define MODULE_BUFF_SIZE 1 ++#else ++#define MODULE_BUFF_SIZE 2 ++#endif ++ ++/* ++ * Data type declarations and accessors macros ++ */ ++typedef struct { ++ spinlock_t buffer_lock; ++ U32 remaining_buffer_size; ++ U32 current_buffer; ++ U32 total_buffer_size; ++ U32 next_buffer[OUTPUT_NUM_BUFFERS]; ++ U32 buffer_full[OUTPUT_NUM_BUFFERS]; ++ U8 *buffer[OUTPUT_NUM_BUFFERS]; ++ U32 signal_full; ++ DRV_BOOL tasklet_queued; ++} OUTPUT_NODE, *OUTPUT; ++ ++#define OUTPUT_buffer_lock(x) ((x)->buffer_lock) ++#define OUTPUT_remaining_buffer_size(x) ((x)->remaining_buffer_size) ++#define OUTPUT_total_buffer_size(x) ((x)->total_buffer_size) ++#define OUTPUT_buffer(x, y) ((x)->buffer[(y)]) ++#define OUTPUT_buffer_full(x, y) ((x)->buffer_full[(y)]) ++#define OUTPUT_current_buffer(x) ((x)->current_buffer) ++#define OUTPUT_signal_full(x) ((x)->signal_full) ++#define OUTPUT_tasklet_queued(x) ((x)->tasklet_queued) ++/* ++ * Add an array of control buffer for per-cpu ++ */ ++typedef struct { ++ wait_queue_head_t queue; ++ OUTPUT_NODE outbuf; ++ U32 sample_count; ++} BUFFER_DESC_NODE, *BUFFER_DESC; ++ ++#define BUFFER_DESC_queue(a) ((a)->queue) ++#define BUFFER_DESC_outbuf(a) ((a)->outbuf) ++#define BUFFER_DESC_sample_count(a) ((a)->sample_count) ++ ++extern BUFFER_DESC cpu_buf; // actually an array of BUFFER_DESC_NODE ++extern BUFFER_DESC unc_buf; ++extern BUFFER_DESC module_buf; ++extern BUFFER_DESC cpu_sideband_buf; ++/* ++ * Interface Functions ++ */ ++ ++extern int OUTPUT_Module_Fill(PVOID data, U16 size, U8 in_notification); ++extern OS_STATUS OUTPUT_Initialize(void); ++extern OS_STATUS OUTPUT_Initialize_UNC(void); ++extern void 
OUTPUT_Cleanup(void); ++extern void OUTPUT_Cleanup(void); ++extern int OUTPUT_Destroy(void); ++extern int OUTPUT_Flush(void); ++ ++extern ssize_t OUTPUT_Module_Read(struct file *filp, char __user *buf, ++ size_t count, loff_t *f_pos); ++ ++extern ssize_t OUTPUT_Sample_Read(struct file *filp, char __user *buf, ++ size_t count, loff_t *f_pos); ++ ++extern ssize_t OUTPUT_UncSample_Read(struct file *filp, char __user *buf, ++ size_t count, loff_t *f_pos); ++ ++extern ssize_t OUTPUT_SidebandInfo_Read(struct file *filp, char __user *buf, ++ size_t count, loff_t *f_pos); ++ ++extern void *OUTPUT_Reserve_Buffer_Space(BUFFER_DESC bd, U32 size, ++ DRV_BOOL defer, U8 in_notification, S32 cpu_idx); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/pci.h b/drivers/platform/x86/sepdk/inc/pci.h +new file mode 100644 +index 000000000000..44d5304d86a5 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/pci.h +@@ -0,0 +1,133 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _PCI_H_ ++#define _PCI_H_ ++ ++#include "lwpmudrv_defines.h" ++ ++/* ++ * PCI Config Address macros ++ */ ++#define PCI_ENABLE 0x80000000 ++ ++#define PCI_ADDR_IO 0xCF8 ++#define PCI_DATA_IO 0xCFC ++ ++#define BIT0 0x1 ++#define BIT1 0x2 ++ ++/* ++ * Macro for forming a PCI configuration address ++ */ ++#define FORM_PCI_ADDR(bus, dev, fun, off) \ ++ (((PCI_ENABLE)) | ((bus & 0xFF) << 16) | ((dev & 0x1F) << 11) | \ ++ ((fun & 0x07) << 8) | ((off & 0xFF) << 0)) ++ ++#define VENDOR_ID_MASK 0x0000FFFF ++#define DEVICE_ID_MASK 0xFFFF0000 ++#define DEVICE_ID_BITSHIFT 16 ++#define LOWER_4_BYTES_MASK 0x00000000FFFFFFFF ++#define MAX_BUSNO 256 ++#define NEXT_ADDR_OFFSET 4 ++#define NEXT_ADDR_SHIFT 32 ++#define DRV_IS_PCI_VENDOR_ID_INTEL 0x8086 ++#define MAX_PCI_DEVS 32 ++ ++#define CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id, device_id) \ ++ { \ ++ vendor_id = value & VENDOR_ID_MASK; \ ++ device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; \ ++ if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { \ ++ continue; \ ++ } \ ++ } ++ ++#define CHECK_IF_GENUINE_INTEL_DEVICE(value, vendor_id, device_id, valid) \ ++ { \ ++ vendor_id = value & VENDOR_ID_MASK; \ ++ device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; \ ++ valid = 1; \ ++ if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { \ ++ valid = 0; \ ++ } \ ++ } ++ ++typedef struct SEP_MMIO_NODE_S SEP_MMIO_NODE; ++ ++struct SEP_MMIO_NODE_S { ++ U64 physical_address; ++ 
U64 virtual_address; ++ U64 map_token; ++ U32 size; ++}; ++ ++#define SEP_MMIO_NODE_physical_address(x) ((x)->physical_address) ++#define SEP_MMIO_NODE_virtual_address(x) ((x)->virtual_address) ++#define SEP_MMIO_NODE_map_token(x) ((x)->map_token) ++#define SEP_MMIO_NODE_size(x) ((x)->size) ++ ++extern OS_STATUS PCI_Map_Memory(SEP_MMIO_NODE *node, U64 phy_address, ++ U32 map_size); ++ ++extern void PCI_Unmap_Memory(SEP_MMIO_NODE *node); ++ ++extern int PCI_Read_From_Memory_Address(U32 addr, U32 *val); ++ ++extern int PCI_Write_To_Memory_Address(U32 addr, U32 val); ++ ++/*** UNIVERSAL PCI ACCESSORS ***/ ++ ++extern VOID PCI_Initialize(void); ++ ++extern U32 PCI_Read_U32(U32 bus, U32 device, U32 function, U32 offset); ++ ++extern U32 PCI_Read_U32_Valid(U32 bus, U32 device, U32 function, U32 offset, ++ U32 invalid_value); ++ ++extern U64 PCI_Read_U64(U32 bus, U32 device, U32 function, U32 offset); ++ ++extern U64 PCI_Read_U64_Valid(U32 bus, U32 device, U32 function, U32 offset, ++ U64 invalid_value); ++ ++extern U32 PCI_Write_U32(U32 bus, U32 device, U32 function, U32 offset, ++ U32 value); ++ ++extern U32 PCI_Write_U64(U32 bus, U32 device, U32 function, U32 offset, ++ U64 value); ++ ++/*** UNIVERSAL MMIO ACCESSORS ***/ ++ ++extern U32 PCI_MMIO_Read_U32(U64 virtual_address_base, U32 offset); ++ ++extern U64 PCI_MMIO_Read_U64(U64 virtual_address_base, U32 offset); ++ ++extern void PCI_MMIO_Write_U32(U64 virtual_address_base, U32 offset, ++ U32 value); ++ ++extern void PCI_MMIO_Write_U64(U64 virtual_address_base, U32 offset, ++ U64 value); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/pebs.h b/drivers/platform/x86/sepdk/inc/pebs.h +new file mode 100644 +index 000000000000..7a7bbe10e2ba +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/pebs.h +@@ -0,0 +1,494 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _PEBS_H_ ++#define _PEBS_H_ ++ ++ ++typedef struct PEBS_REC_NODE_S PEBS_REC_NODE; ++ ++struct PEBS_REC_NODE_S { ++ U64 r_flags; // Offset 0x00 ++ U64 linear_ip; // Offset 0x08 ++ U64 rax; // Offset 0x10 ++ U64 rbx; // Offset 0x18 ++ U64 rcx; // Offset 0x20 ++ U64 rdx; // Offset 0x28 ++ U64 rsi; // Offset 0x30 ++ U64 rdi; // Offset 0x38 ++ U64 rbp; // Offset 0x40 ++ U64 rsp; // Offset 0x48 ++ U64 r8; // Offset 0x50 ++ U64 r9; // Offset 0x58 ++ U64 r10; // Offset 0x60 ++ U64 r11; // Offset 0x68 ++ U64 r12; // Offset 0x70 ++ U64 r13; // Offset 0x78 ++ U64 r14; // Offset 0x80 ++ U64 r15; // Offset 0x88 ++}; ++ ++typedef struct PEBS_REC_EXT_NODE_S PEBS_REC_EXT_NODE; ++typedef PEBS_REC_EXT_NODE * PEBS_REC_EXT; ++struct PEBS_REC_EXT_NODE_S { ++ PEBS_REC_NODE pebs_basic; // Offset 0x00 to 0x88 ++ U64 glob_perf_overflow; // Offset 0x90 ++ U64 data_linear_address; // Offset 0x98 ++ U64 data_source; // Offset 0xA0 ++ U64 latency; // Offset 0xA8 ++}; ++ ++#define PEBS_REC_EXT_r_flags(x) ((x)->pebs_basic.r_flags) ++#define PEBS_REC_EXT_linear_ip(x) ((x)->pebs_basic.linear_ip) ++#define PEBS_REC_EXT_rax(x) ((x)->pebs_basic.rax) ++#define PEBS_REC_EXT_rbx(x) ((x)->pebs_basic.rbx) ++#define PEBS_REC_EXT_rcx(x) ((x)->pebs_basic.rcx) ++#define PEBS_REC_EXT_rdx(x) ((x)->pebs_basic.rdx) ++#define PEBS_REC_EXT_rsi(x) ((x)->pebs_basic.rsi) ++#define PEBS_REC_EXT_rdi(x) ((x)->pebs_basic.rdi) ++#define PEBS_REC_EXT_rbp(x) ((x)->pebs_basic.rbp) ++#define PEBS_REC_EXT_rsp(x) ((x)->pebs_basic.rsp) ++#define PEBS_REC_EXT_r8(x) ((x)->pebs_basic.r8) ++#define PEBS_REC_EXT_r9(x) ((x)->pebs_basic.r9) ++#define PEBS_REC_EXT_r10(x) ((x)->pebs_basic.r10) ++#define PEBS_REC_EXT_r11(x) ((x)->pebs_basic.r11) ++#define PEBS_REC_EXT_r12(x) ((x)->pebs_basic.r12) ++#define PEBS_REC_EXT_r13(x) ((x)->pebs_basic.r13) ++#define PEBS_REC_EXT_r14(x) ((x)->pebs_basic.r14) ++#define PEBS_REC_EXT_r15(x) 
((x)->pebs_basic.r15) ++#define PEBS_REC_EXT_glob_perf_overflow(x) ((x)->glob_perf_overflow) ++#define PEBS_REC_EXT_data_linear_address(x) ((x)->data_linear_address) ++#define PEBS_REC_EXT_data_source(x) ((x)->data_source) ++#define PEBS_REC_EXT_latency(x) ((x)->latency) ++ ++typedef struct PEBS_REC_EXT1_NODE_S PEBS_REC_EXT1_NODE; ++typedef PEBS_REC_EXT1_NODE * PEBS_REC_EXT1; ++struct PEBS_REC_EXT1_NODE_S { ++ PEBS_REC_EXT_NODE pebs_ext; ++ U64 eventing_ip; //Offset 0xB0 ++ U64 hle_info; //Offset 0xB8 ++}; ++ ++#define PEBS_REC_EXT1_r_flags(x) ((x)->pebs_ext.pebs_basic.r_flags) ++#define PEBS_REC_EXT1_linear_ip(x) ((x)->pebs_ext.pebs_basic.linear_ip) ++#define PEBS_REC_EXT1_rax(x) ((x)->pebs_ext.pebs_basic.rax) ++#define PEBS_REC_EXT1_rbx(x) ((x)->pebs_ext.pebs_basic.rbx) ++#define PEBS_REC_EXT1_rcx(x) ((x)->pebs_ext.pebs_basic.rcx) ++#define PEBS_REC_EXT1_rdx(x) ((x)->pebs_ext.pebs_basic.rdx) ++#define PEBS_REC_EXT1_rsi(x) ((x)->pebs_ext.pebs_basic.rsi) ++#define PEBS_REC_EXT1_rdi(x) ((x)->pebs_ext.pebs_basic.rdi) ++#define PEBS_REC_EXT1_rbp(x) ((x)->pebs_ext.pebs_basic.rbp) ++#define PEBS_REC_EXT1_rsp(x) ((x)->pebs_ext.pebs_basic.rsp) ++#define PEBS_REC_EXT1_r8(x) ((x)->pebs_ext.pebs_basic.r8) ++#define PEBS_REC_EXT1_r9(x) ((x)->pebs_ext.pebs_basic.r9) ++#define PEBS_REC_EXT1_r10(x) ((x)->pebs_ext.pebs_basic.r10) ++#define PEBS_REC_EXT1_r11(x) ((x)->pebs_ext.pebs_basic.r11) ++#define PEBS_REC_EXT1_r12(x) ((x)->pebs_ext.pebs_basic.r12) ++#define PEBS_REC_EXT1_r13(x) ((x)->pebs_ext.pebs_basic.r13) ++#define PEBS_REC_EXT1_r14(x) ((x)->pebs_ext.pebs_basic.r14) ++#define PEBS_REC_EXT1_r15(x) ((x)->pebs_ext.pebs_basic.r15) ++#define PEBS_REC_EXT1_glob_perf_overflow(x) ((x)->pebs_ext.glob_perf_overflow) ++#define PEBS_REC_EXT1_data_linear_address(x) \ ++ ((x)->pebs_ext.data_linear_address) ++#define PEBS_REC_EXT1_data_source(x) ((x)->pebs_ext.data_source) ++#define PEBS_REC_EXT1_latency(x) ((x)->pebs_ext.latency) ++#define PEBS_REC_EXT1_eventing_ip(x) ((x)->eventing_ip) 
++#define PEBS_REC_EXT1_hle_info(x) ((x)->hle_info) ++ ++typedef struct PEBS_REC_EXT2_NODE_S PEBS_REC_EXT2_NODE; ++typedef PEBS_REC_EXT2_NODE * PEBS_REC_EXT2; ++struct PEBS_REC_EXT2_NODE_S { ++ PEBS_REC_EXT1_NODE pebs_ext1; ++ U64 tsc; //Offset 0xC0 ++}; ++ ++#define PEBS_REC_EXT2_r_flags(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.r_flags) ++#define PEBS_REC_EXT2_linear_ip(x) \ ++ ((x)->pebs_ext1->pebs_ext.pebs_basic.linear_ip) ++#define PEBS_REC_EXT2_rax(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.rax) ++#define PEBS_REC_EXT2_rbx(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.rbx) ++#define PEBS_REC_EXT2_rcx(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.rcx) ++#define PEBS_REC_EXT2_rdx(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.rdx) ++#define PEBS_REC_EXT2_rsi(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.rsi) ++#define PEBS_REC_EXT2_rdi(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.rdi) ++#define PEBS_REC_EXT2_rbp(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.rbp) ++#define PEBS_REC_EXT2_rsp(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.rsp) ++#define PEBS_REC_EXT2_r8(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.r8) ++#define PEBS_REC_EXT2_r9(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.r9) ++#define PEBS_REC_EXT2_r10(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.r10) ++#define PEBS_REC_EXT2_r11(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.r11) ++#define PEBS_REC_EXT2_r12(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.r12) ++#define PEBS_REC_EXT2_r13(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.r13) ++#define PEBS_REC_EXT2_r14(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.r14) ++#define PEBS_REC_EXT2_r15(x) ((x)->pebs_ext1->pebs_ext.pebs_basic.r15) ++#define PEBS_REC_EXT2_glob_perf_overflow(x) \ ++ ((x)->pebs_ext1->pebs_ext.glob_perf_overflow) ++#define PEBS_REC_EXT2_data_linear_address(x) \ ++ ((x)->pebs_ext1->pebs_ext.data_linear_address) ++#define PEBS_REC_EXT2_data_source(x) ((x)->pebs_ext1->pebs_ext.data_source) ++#define PEBS_REC_EXT2_latency(x) ((x)->pebs_ext1->pebs_ext.latency) ++#define PEBS_REC_EXT2_eventing_ip(x) 
((x)->pebs_ext1->eventing_ip) ++#define PEBS_REC_EXT2_hle_info(x) ((x)->pebs_ext1->hle_info) ++#define PEBS_REC_EXT2_tsc(x) ((x)->tsc) ++ ++typedef struct APEBS_CONFIG_NODE_S APEBS_CONFIG_NODE; ++typedef APEBS_CONFIG_NODE * APEBS_CONFIG; ++ ++struct APEBS_CONFIG_NODE_S { ++ U8 apebs_enabled; ++ U8 collect_mem; ++ U8 collect_gpr; ++ U8 collect_xmm; ++ U8 collect_lbrs; ++ U8 precise_ip_lbrs; ++ U8 num_lbr_entries; ++ U16 basic_offset; ++ U16 mem_offset; ++ U16 gpr_offset; ++ U16 xmm_offset; ++ U16 lbr_offset; ++}; ++ ++#define APEBS_CONFIG_apebs_enabled(x) ((x)->apebs_enabled) ++#define APEBS_CONFIG_collect_mem(x) ((x)->collect_mem) ++#define APEBS_CONFIG_collect_gpr(x) ((x)->collect_gpr) ++#define APEBS_CONFIG_collect_xmm(x) ((x)->collect_xmm) ++#define APEBS_CONFIG_collect_lbrs(x) ((x)->collect_lbrs) ++#define APEBS_CONFIG_precise_ip_lbrs(x) ((x)->precise_ip_lbrs) ++#define APEBS_CONFIG_num_lbr_entries(x) ((x)->num_lbr_entries) ++#define APEBS_CONFIG_basic_offset(x) ((x)->basic_offset) ++#define APEBS_CONFIG_mem_offset(x) ((x)->mem_offset) ++#define APEBS_CONFIG_gpr_offset(x) ((x)->gpr_offset) ++#define APEBS_CONFIG_xmm_offset(x) ((x)->xmm_offset) ++#define APEBS_CONFIG_lbr_offset(x) ((x)->lbr_offset) ++ ++typedef struct ADAPTIVE_PEBS_BASIC_INFO_NODE_S ADAPTIVE_PEBS_BASIC_INFO_NODE; ++typedef ADAPTIVE_PEBS_BASIC_INFO_NODE * ADAPTIVE_PEBS_BASIC_INFO; ++ ++struct ADAPTIVE_PEBS_BASIC_INFO_NODE_S { ++ U64 record_info; // Offset 0x0 ++ // [47:0] - record format, [63:48] - record size ++ U64 eventing_ip; // Offset 0x8 ++ U64 applicable_counters; // Offset 0x10 ++ U64 tsc; // Offset 0x18 ++}; ++ ++#define ADAPTIVE_PEBS_BASIC_INFO_record_info(x) ((x)->record_info) ++#define ADAPTIVE_PEBS_BASIC_INFO_eventing_ip(x) ((x)->eventing_ip) ++#define ADAPTIVE_PEBS_BASIC_INFO_tsc(x) ((x)->tsc) ++#define ADAPTIVE_PEBS_BASIC_INFO_applicable_counters(x) \ ++ ((x)->applicable_counters) ++ ++typedef struct ADAPTIVE_PEBS_MEM_INFO_NODE_S ADAPTIVE_PEBS_MEM_INFO_NODE; ++typedef 
ADAPTIVE_PEBS_MEM_INFO_NODE * ADAPTIVE_PEBS_MEM_INFO; ++ ++struct ADAPTIVE_PEBS_MEM_INFO_NODE_S { ++ U64 data_linear_address; // Offset 0x20 ++ U64 data_source; // Offset 0x28 ++ U64 latency; // Offset 0x30 ++ U64 hle_info; // Offset 0x38 ++}; ++ ++#define ADAPTIVE_PEBS_MEM_INFO_data_linear_address(x) ((x)->data_linear_address) ++#define ADAPTIVE_PEBS_MEM_INFO_data_source(x) ((x)->data_source) ++#define ADAPTIVE_PEBS_MEM_INFO_latency(x) ((x)->latency) ++#define ADAPTIVE_PEBS_MEM_INFO_hle_info(x) ((x)->hle_info) ++ ++typedef struct ADAPTIVE_PEBS_GPR_INFO_NODE_S ADAPTIVE_PEBS_GPR_INFO_NODE; ++typedef ADAPTIVE_PEBS_GPR_INFO_NODE * ADAPTIVE_PEBS_GPR_INFO; ++ ++struct ADAPTIVE_PEBS_GPR_INFO_NODE_S { ++ U64 rflags; // Offset 0x40 ++ U64 rip; // Offset 0x48 ++ U64 rax; // Offset 0x50 ++ U64 rcx; // Offset 0x58 ++ U64 rdx; // Offset 0x60 ++ U64 rbx; // Offset 0x68 ++ U64 rsp; // Offset 0x70 ++ U64 rbp; // Offset 0x78 ++ U64 rsi; // Offset 0x80 ++ U64 rdi; // Offset 0x88 ++ U64 r8; // Offset 0x90 ++ U64 r9; // Offset 0x98 ++ U64 r10; // Offset 0xA0 ++ U64 r11; // Offset 0xA8 ++ U64 r12; // Offset 0xB0 ++ U64 r13; // Offset 0xB8 ++ U64 r14; // Offset 0xC0 ++ U64 r15; // Offset 0xC8 ++}; ++ ++#define ADAPTIVE_PEBS_GPR_INFO_rflags(x) ((x)->rflags) ++#define ADAPTIVE_PEBS_GPR_INFO_rip(x) ((x)->rip) ++#define ADAPTIVE_PEBS_GPR_INFO_rax(x) ((x)->rax) ++#define ADAPTIVE_PEBS_GPR_INFO_rcx(x) ((x)->rcx) ++#define ADAPTIVE_PEBS_GPR_INFO_rdx(x) ((x)->rdx) ++#define ADAPTIVE_PEBS_GPR_INFO_rbx(x) ((x)->rbx) ++#define ADAPTIVE_PEBS_GPR_INFO_rsp(x) ((x)->rsp) ++#define ADAPTIVE_PEBS_GPR_INFO_rbp(x) ((x)->rbp) ++#define ADAPTIVE_PEBS_GPR_INFO_rsi(x) ((x)->rsi) ++#define ADAPTIVE_PEBS_GPR_INFO_rdi(x) ((x)->rdi) ++#define ADAPTIVE_PEBS_GPR_INFO_r8(x) ((x)->r8) ++#define ADAPTIVE_PEBS_GPR_INFO_r9(x) ((x)->r9) ++#define ADAPTIVE_PEBS_GPR_INFO_r10(x) ((x)->r10) ++#define ADAPTIVE_PEBS_GPR_INFO_r11(x) ((x)->r11) ++#define ADAPTIVE_PEBS_GPR_INFO_r12(x) ((x)->r12) ++#define 
ADAPTIVE_PEBS_GPR_INFO_r13(x) ((x)->r13) ++#define ADAPTIVE_PEBS_GPR_INFO_r14(x) ((x)->r14) ++#define ADAPTIVE_PEBS_GPR_INFO_r15(x) ((x)->r15) ++ ++typedef struct ADAPTIVE_PEBS_XMM_INFO_NODE_S ADAPTIVE_PEBS_XMM_INFO_NODE; ++typedef ADAPTIVE_PEBS_XMM_INFO_NODE * ADAPTIVE_PEBS_XMM_INFO; ++ ++struct ADAPTIVE_PEBS_XMM_INFO_NODE_S { ++ U64 xmm0_l; // Offset 0xD0 ++ U64 xmm0_h; // Offset 0xD8 ++ U64 xmm1_l; // Offset 0xE0 ++ U64 xmm1_h; // Offset 0xE8 ++ U64 xmm2_l; // Offset 0xF0 ++ U64 xmm2_h; // Offset 0xF8 ++ U64 xmm3_l; // Offset 0x100 ++ U64 xmm3_h; // Offset 0x108 ++ U64 xmm4_l; // Offset 0x110 ++ U64 xmm4_h; // Offset 0x118 ++ U64 xmm5_l; // Offset 0x120 ++ U64 xmm5_h; // Offset 0x128 ++ U64 xmm6_l; // Offset 0x130 ++ U64 xmm6_h; // Offset 0x138 ++ U64 xmm7_l; // Offset 0x140 ++ U64 xmm7_h; // Offset 0x148 ++ U64 xmm8_l; // Offset 0x150 ++ U64 xmm8_h; // Offset 0x158 ++ U64 xmm9_l; // Offset 0x160 ++ U64 xmm9_h; // Offset 0x168 ++ U64 xmm10_l; // Offset 0x170 ++ U64 xmm10_h; // Offset 0x178 ++ U64 xmm11_l; // Offset 0x180 ++ U64 xmm11_h; // Offset 0x188 ++ U64 xmm12_l; // Offset 0x190 ++ U64 xmm12_h; // Offset 0x198 ++ U64 xmm13_l; // Offset 0x1A0 ++ U64 xmm13_h; // Offset 0x1A8 ++ U64 xmm14_l; // Offset 0x1B0 ++ U64 xmm14_h; // Offset 0x1B8 ++ U64 xmm15_l; // Offset 0x1C0 ++ U64 xmm15_h; // Offset 0x1C8 ++}; ++ ++#define ADAPTIVE_PEBS_XMM_INFO_xmm0_l(x) ((x)->xmm0_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm0_h(x) ((x)->xmm0_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm1_l(x) ((x)->xmm1_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm1_h(x) ((x)->xmm1_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm2_l(x) ((x)->xmm2_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm2_h(x) ((x)->xmm2_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm3_l(x) ((x)->xmm3_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm3_h(x) ((x)->xmm3_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm4_l(x) ((x)->xmm4_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm4_h(x) ((x)->xmm4_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm5_l(x) ((x)->xmm5_l) ++#define 
ADAPTIVE_PEBS_XMM_INFO_xmm5_h(x) ((x)->xmm5_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm6_l(x) ((x)->xmm6_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm6_h(x) ((x)->xmm6_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm7_l(x) ((x)->xmm7_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm7_h(x) ((x)->xmm7_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm8_l(x) ((x)->xmm8_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm8_h(x) ((x)->xmm8_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm9_l(x) ((x)->xmm9_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm9_h(x) ((x)->xmm9_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm10_l(x) ((x)->xmm10_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm10_h(x) ((x)->xmm10_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm11_l(x) ((x)->xmm11_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm11_h(x) ((x)->xmm11_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm12_l(x) ((x)->xmm12_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm12_h(x) ((x)->xmm12_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm13_l(x) ((x)->xmm13_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm13_h(x) ((x)->xmm13_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm14_l(x) ((x)->xmm14_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm14_h(x) ((x)->xmm14_h) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm15_l(x) ((x)->xmm15_l) ++#define ADAPTIVE_PEBS_XMM_INFO_xmm15_h(x) ((x)->xmm15_h) ++ ++typedef struct ADAPTIVE_PEBS_LBR_INFO_NODE_S ADAPTIVE_PEBS_LBR_INFO_NODE; ++typedef ADAPTIVE_PEBS_LBR_INFO_NODE * ADAPTIVE_PEBS_LBR_INFO; ++ ++struct ADAPTIVE_PEBS_LBR_INFO_NODE_S { ++ U64 lbr_from; // Offset 0x1D0 ++ U64 lbr_to; // Offset 0x1D8 ++ U64 lbr_info; // Offset 0x1E0 ++}; ++ ++#define ADAPTIVE_PEBS_LBR_INFO_lbr_from(x) ((x)->lbr_from) ++#define ADAPTIVE_PEBS_LBR_INFO_lbr_to(x) ((x)->lbr_to) ++#define ADAPTIVE_PEBS_LBR_INFO_lbr_info(x) ((x)->lbr_info) ++ ++typedef struct LATENCY_INFO_NODE_S LATENCY_INFO_NODE; ++typedef LATENCY_INFO_NODE * LATENCY_INFO; ++ ++struct LATENCY_INFO_NODE_S { ++ U64 linear_address; ++ U64 data_source; ++ U64 latency; ++ U64 stack_pointer; ++ U64 phys_addr; ++}; ++ ++#define LATENCY_INFO_linear_address(x) ((x)->linear_address) ++#define 
LATENCY_INFO_data_source(x) ((x)->data_source) ++#define LATENCY_INFO_latency(x) ((x)->latency) ++#define LATENCY_INFO_stack_pointer(x) ((x)->stack_pointer) ++#define LATENCY_INFO_phys_addr(x) ((x)->phys_addr) ++ ++typedef struct DTS_BUFFER_EXT_NODE_S DTS_BUFFER_EXT_NODE; ++typedef DTS_BUFFER_EXT_NODE * DTS_BUFFER_EXT; ++struct DTS_BUFFER_EXT_NODE_S { ++ U64 base; // Offset 0x00 ++ U64 index; // Offset 0x08 ++ U64 max; // Offset 0x10 ++ U64 threshold; // Offset 0x18 ++ U64 pebs_base; // Offset 0x20 ++ U64 pebs_index; // Offset 0x28 ++ U64 pebs_max; // Offset 0x30 ++ U64 pebs_threshold; // Offset 0x38 ++ U64 counter_reset0; // Offset 0x40 ++ U64 counter_reset1; // Offset 0x48 ++ U64 counter_reset2; // Offset 0x50 ++ U64 counter_reset3; ++}; ++ ++#define DTS_BUFFER_EXT_base(x) ((x)->base) ++#define DTS_BUFFER_EXT_index(x) ((x)->index) ++#define DTS_BUFFER_EXT_max(x) ((x)->max) ++#define DTS_BUFFER_EXT_threshold(x) ((x)->threshold) ++#define DTS_BUFFER_EXT_pebs_base(x) ((x)->pebs_base) ++#define DTS_BUFFER_EXT_pebs_index(x) ((x)->pebs_index) ++#define DTS_BUFFER_EXT_pebs_max(x) ((x)->pebs_max) ++#define DTS_BUFFER_EXT_pebs_threshold(x) ((x)->pebs_threshold) ++#define DTS_BUFFER_EXT_counter_reset0(x) ((x)->counter_reset0) ++#define DTS_BUFFER_EXT_counter_reset1(x) ((x)->counter_reset1) ++#define DTS_BUFFER_EXT_counter_reset2(x) ((x)->counter_reset2) ++#define DTS_BUFFER_EXT_counter_reset3(x) ((x)->counter_reset3) ++ ++typedef struct DTS_BUFFER_EXT1_NODE_S DTS_BUFFER_EXT1_NODE; ++typedef DTS_BUFFER_EXT1_NODE * DTS_BUFFER_EXT1; ++struct DTS_BUFFER_EXT1_NODE_S { ++ DTS_BUFFER_EXT_NODE dts_buffer; ++ U64 counter_reset4; // Offset 0x60 ++ U64 counter_reset5; // Offset 0x68 ++ U64 counter_reset6; // Offset 0x70 ++ U64 counter_reset7; // Offset 0x78 ++ U64 fixed_counter_reset0; // Offset 0x80 ++ U64 fixed_counter_reset1; // Offset 0x88 ++ U64 fixed_counter_reset2; // Offset 0x90 ++ U64 fixed_counter_reset3; // Offset 0x98 ++}; ++ ++#define DTS_BUFFER_EXT1_base(x) 
((x)->dts_buffer.base) ++#define DTS_BUFFER_EXT1_index(x) ((x)->dts_buffer.index) ++#define DTS_BUFFER_EXT1_max(x) ((x)->dts_buffer.max) ++#define DTS_BUFFER_EXT1_threshold(x) ((x)->dts_buffer.threshold) ++#define DTS_BUFFER_EXT1_pebs_base(x) ((x)->dts_buffer.pebs_base) ++#define DTS_BUFFER_EXT1_pebs_index(x) ((x)->dts_buffer.pebs_index) ++#define DTS_BUFFER_EXT1_pebs_max(x) ((x)->dts_buffer.pebs_max) ++#define DTS_BUFFER_EXT1_pebs_threshold(x) ((x)->dts_buffer.pebs_threshold) ++#define DTS_BUFFER_EXT1_counter_reset0(x) ((x)->dts_buffer.counter_reset0) ++#define DTS_BUFFER_EXT1_counter_reset1(x) ((x)->dts_buffer.counter_reset1) ++#define DTS_BUFFER_EXT1_counter_reset2(x) ((x)->dts_buffer.counter_reset2) ++#define DTS_BUFFER_EXT1_counter_reset3(x) ((x)->dts_buffer.counter_reset3) ++#define DTS_BUFFER_EXT1_counter_reset4(x) ((x)->counter_reset4) ++#define DTS_BUFFER_EXT1_counter_reset5(x) ((x)->counter_reset5) ++#define DTS_BUFFER_EXT1_counter_reset6(x) ((x)->counter_reset6) ++#define DTS_BUFFER_EXT1_counter_reset7(x) ((x)->counter_reset7) ++#define DTS_BUFFER_EXT1_fixed_counter_reset0(x) ((x)->fixed_counter_reset0) ++#define DTS_BUFFER_EXT1_fixed_counter_reset1(x) ((x)->fixed_counter_reset1) ++#define DTS_BUFFER_EXT1_fixed_counter_reset2(x) ((x)->fixed_counter_reset2) ++#define DTS_BUFFER_EXT1_fixed_counter_reset3(x) ((x)->fixed_counter_reset3) ++ ++extern OS_STATUS PEBS_Initialize(U32 dev_idx); ++ ++extern OS_STATUS PEBS_Allocate(void); ++ ++extern VOID PEBS_Destroy(void); ++ ++extern VOID PEBS_Flush_Buffer(void *); ++ ++extern VOID PEBS_Reset_Counter(S32 this_cpu, U32 index, U64 value); ++ ++extern VOID PEBS_Reset_Index(S32 this_cpu); ++ ++extern VOID PEBS_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index); ++ ++extern VOID PEBS_Modify_TSC(void *sample, U32 rec_index); ++ ++extern U32 PEBS_Get_Num_Records_Filled(void); ++ ++extern U64 PEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index); ++ ++extern U64 APEBS_Fill_Buffer(S8 *buffer, 
EVENT_DESC evt_desc, U32 rec_index); ++ ++extern U64 PEBS_Overflowed(S32 this_cpu, U64 overflow_status, U32 rec_index); ++ ++/* ++ * Dispatch table for virtualized functions. ++ * Used to enable common functionality for different ++ * processor microarchitectures ++ */ ++typedef struct PEBS_DISPATCH_NODE_S PEBS_DISPATCH_NODE; ++typedef PEBS_DISPATCH_NODE * PEBS_DISPATCH; ++struct PEBS_DISPATCH_NODE_S { ++ VOID (*initialize_threshold)(DTS_BUFFER_EXT); ++ U64 (*overflow)(S32, U64, U32); ++ VOID (*modify_ip)(void *, DRV_BOOL, U32); ++ VOID (*modify_tsc)(void *, U32); ++ U32 (*get_num_records_filled)(void); ++}; ++ ++typedef struct PEBS_INFO_NODE_S PEBS_INFO_NODE; ++typedef PEBS_INFO_NODE *PEBS_INFO; ++struct PEBS_INFO_NODE_S { ++ PEBS_DISPATCH pebs_dispatch; ++ U32 pebs_record_size; ++ U16 apebs_basic_offset; ++ U16 apebs_mem_offset; ++ U16 apebs_gpr_offset; ++ U16 apebs_xmm_offset; ++ U16 apebs_lbr_offset; ++}; ++ ++#define APEBS_RECORD_SIZE_MASK 0xFFFF000000000000ULL //[63:48] ++#define APEBS_RECORD_FORMAT_MASK 0xFFFFFFFFFFFFULL //[47:0] ++#define APEBS_MEM_RECORD_FORMAT_MASK 0x1ULL ++#define APEBS_GPR_RECORD_FORMAT_MASK 0x2ULL ++#define APEBS_XMM_RECORD_FORMAT_MASK 0x4ULL ++#define APEBS_LBR_RECORD_FORMAT_MASK 0x8ULL ++ ++ ++extern PEBS_DISPATCH_NODE core2_pebs; ++extern PEBS_DISPATCH_NODE core2p_pebs; ++extern PEBS_DISPATCH_NODE corei7_pebs; ++extern PEBS_DISPATCH_NODE haswell_pebs; ++extern PEBS_DISPATCH_NODE perfver4_pebs; ++extern PEBS_DISPATCH_NODE perfver4_apebs; ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/perfver4.h b/drivers/platform/x86/sepdk/inc/perfver4.h +new file mode 100644 +index 000000000000..74ecf54179df +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/perfver4.h +@@ -0,0 +1,51 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _PERFVER4_H_ ++#define _PERFVER4_H_ ++ ++#include "msrdefs.h" ++ ++extern DISPATCH_NODE perfver4_dispatch; ++extern DISPATCH_NODE perfver4_dispatch_htoff_mode; ++extern DISPATCH_NODE perfver4_dispatch_nonht_mode; ++ ++#define PERFVER4_UNC_BLBYPASS_BITMASK 0x00000001 ++#define PERFVER4_UNC_DISABLE_BL_BYPASS_MSR 0x39C ++ ++#if defined(DRV_IA32) ++#define PERFVER4_LBR_DATA_BITS 32 ++#else ++#define PERFVER4_LBR_DATA_BITS 57 ++#endif ++ ++#define PERFVER4_LBR_BITMASK ((1ULL << PERFVER4_LBR_DATA_BITS) - 1) ++ ++#define PERFVER4_FROZEN_BIT_MASK 0xc00000000000000ULL ++#define PERFVER4_OVERFLOW_BIT_MASK_HT_ON 0x600000070000000FULL ++#define PERFVER4_OVERFLOW_BIT_MASK_HT_OFF 0x60000007000000FFULL ++#define PERFVER4_OVERFLOW_BIT_MASK_NON_HT 0x6000000F000000FFULL ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/pmi.h b/drivers/platform/x86/sepdk/inc/pmi.h +new file mode 100644 +index 000000000000..4fd71f74ceb9 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/pmi.h +@@ -0,0 +1,65 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _PMI_H_ ++#define _PMI_H_ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++ ++#if defined(DRV_IA32) ++#if KERNEL_VERSION(2, 6, 25) > LINUX_VERSION_CODE ++#define REGS_xcs(regs) (regs->xcs) ++#define REGS_eip(regs) (regs->eip) ++#define REGS_eflags(regs) (regs->eflags) ++#else ++#define REGS_xcs(regs) (regs->cs) ++#define REGS_eip(regs) (regs->ip) ++#define REGS_eflags(regs) (regs->flags) ++#endif ++#endif ++ ++#if defined(DRV_EM64T) ++#define REGS_cs(regs) (regs->cs) ++ ++#if KERNEL_VERSION(2, 6, 25) > LINUX_VERSION_CODE ++#define REGS_rip(regs) (regs->rip) ++#define REGS_eflags(regs) (regs->eflags) ++#else ++#define REGS_rip(regs) (regs->ip) ++#define REGS_eflags(regs) (regs->flags) ++#endif ++#endif ++ ++asmlinkage VOID PMI_Interrupt_Handler(struct pt_regs *regs); ++ ++#if defined(DRV_SEP_ACRN_ON) ++S32 PMI_Buffer_Handler(PVOID data); ++#endif ++ ++extern U32 pmi_Get_CSD(U32, U32 *, U32 *); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/sepdrv_p_state.h b/drivers/platform/x86/sepdk/inc/sepdrv_p_state.h +new file mode 100644 +index 000000000000..2a20394c393f +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/sepdrv_p_state.h +@@ -0,0 +1,34 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _SEPDRV_P_STATE_H_ ++#define _SEPDRV_P_STATE_H_ ++ ++#define DRV_APERF_MSR 0xE8 ++#define DRV_MPERF_MSR 0xE7 ++ ++extern OS_STATUS SEPDRV_P_STATE_Read(S8 *buffer, CPU_STATE pcpu); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/silvermont.h b/drivers/platform/x86/sepdk/inc/silvermont.h +new file mode 100644 +index 000000000000..4a35b1db5047 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/silvermont.h +@@ -0,0 +1,41 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _SILVERMONT_H_ ++#define _SILVERMONT_H_ ++ ++#include "msrdefs.h" ++extern DISPATCH_NODE silvermont_dispatch; ++extern DISPATCH_NODE knights_dispatch; ++ ++#if defined(DRV_IA32) ++#define SILVERMONT_LBR_DATA_BITS 32 ++#else ++#define SILVERMONT_LBR_DATA_BITS 48 ++#endif ++ ++#define SILVERMONT_LBR_BITMASK ((1ULL << SILVERMONT_LBR_DATA_BITS) - 1) ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/sys_info.h b/drivers/platform/x86/sepdk/inc/sys_info.h +new file mode 100644 +index 000000000000..c5dd5621a58b +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/sys_info.h +@@ -0,0 +1,71 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _SYS_INFO_H_ ++#define _SYS_INFO_H_ ++ ++#include "lwpmudrv_defines.h" ++ ++#define KNIGHTS_FAMILY 0x06 ++#define KNL_MODEL 0x57 ++#define KNM_MODEL 0x85 ++ ++#define is_Knights_family(family, model) \ ++ ((family == KNIGHTS_FAMILY) && \ ++ ((model == KNL_MODEL) || (model == KNM_MODEL))) ++ ++typedef struct __generic_ioctl { ++ U32 size; ++ S32 ret; ++ U64 rsv[3]; ++} GENERIC_IOCTL; ++ ++#define GENERIC_IOCTL_size(gio) ((gio)->size) ++#define GENERIC_IOCTL_ret(gio) ((gio)->ret) ++ ++// ++// This one is unusual in that it's really a variable ++// size. The system_info field is just a easy way ++// to access the base information, but the actual size ++// when used tends to be much larger that what is ++// shown here. 
++// ++typedef struct __system_info { ++ GENERIC_IOCTL gen; ++ VTSA_SYS_INFO sys_info; ++} IOCTL_SYS_INFO; ++ ++extern U32 *cpu_built_sysinfo; ++ ++#define IOCTL_SYS_INFO_gen(isi) ((isi)->gen) ++#define IOCTL_SYS_INFO_sys_info(isi) ((isi)->sys_info) ++ ++extern U32 SYS_INFO_Build(void); ++extern void SYS_INFO_Transfer(PVOID buf_usr_to_drv, ++ unsigned long len_usr_to_drv); ++extern void SYS_INFO_Destroy(void); ++extern void SYS_INFO_Build_Cpu(PVOID param); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/unc_common.h b/drivers/platform/x86/sepdk/inc/unc_common.h +new file mode 100644 +index 000000000000..d1cc228982f0 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/unc_common.h +@@ -0,0 +1,161 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _UNC_COMMON_H_INC_ ++#define _UNC_COMMON_H_INC_ ++ ++#include "pci.h" ++ ++#define DRV_IS_PCI_VENDOR_ID_INTEL 0x8086 ++#define VENDOR_ID_MASK 0x0000FFFF ++#define DEVICE_ID_MASK 0xFFFF0000 ++#define DEVICE_ID_BITSHIFT 16 ++ ++#define UNCORE_SOCKETID_UBOX_LNID_OFFSET 0x40 ++#define UNCORE_SOCKETID_UBOX_GID_OFFSET 0x54 ++ ++#define INVALID_BUS_NUMBER -1 ++#define PCI_INVALID_VALUE 0xFFFFFFFF ++ ++typedef struct DEVICE_CALLBACK_NODE_S DEVICE_CALLBACK_NODE; ++typedef DEVICE_CALLBACK_NODE * DEVICE_CALLBACK; ++ ++struct DEVICE_CALLBACK_NODE_S { ++ DRV_BOOL (*is_Valid_Device)(U32); ++ DRV_BOOL (*is_Valid_For_Write)(U32, U32); ++ DRV_BOOL (*is_Unit_Ctl)(U32); ++ DRV_BOOL (*is_PMON_Ctl)(U32); ++}; ++ ++#define MAX_PCIDEV_UNITS 16 ++#define GET_MAX_PCIDEV_ENTRIES(num_pkg) \ ++ ((num_pkg > MAX_PCIDEV_UNITS) ? num_pkg : MAX_PCIDEV_UNITS) ++ ++typedef struct UNC_PCIDEV_NODE_S UNC_PCIDEV_NODE; ++ ++struct UNC_PCIDEV_NODE_S { ++ U32 num_entries; ++ U32 max_entries; ++ S32 *busno_list; // array for pcibus mapping ++ SEP_MMIO_NODE *mmio_map; // virtual memory mapping entries ++}; ++ ++#define UNC_PCIDEV_max_entries(x) ((x)->max_entries) ++#define UNC_PCIDEV_num_entries(x) ((x)->num_entries) ++#define UNC_PCIDEV_busno_list(x) ((x)->busno_list) ++#define UNC_PCIDEV_busno_entry(x, entry) ((x)->busno_list[entry]) ++#define UNC_PCIDEV_mmio_map(x) ((x)->mmio_map) ++#define UNC_PCIDEV_mmio_map_entry(x, entry) ((x)->mmio_map[entry]) ++#define UNC_PCIDEV_virtual_addr_entry(x, entry) \ ++ (SEP_MMIO_NODE_virtual_address(&UNC_PCIDEV_mmio_map_entry(x, entry))) ++ ++#define UNC_PCIDEV_is_busno_valid(x, entry) \ ++ (((x)->busno_list) && ((x)->num_entries > (entry)) && \ ++ ((x)->busno_list[(entry)] != INVALID_BUS_NUMBER)) ++#define UNC_PCIDEV_is_vaddr_valid(x, entry) \ ++ (((x)->mmio_map) && ((x)->num_entries > (entry)) && \ ++ ((x)->mmio_map[(entry)].virtual_address)) ++ ++extern UNC_PCIDEV_NODE 
unc_pcidev_map[]; ++ ++#define GET_BUS_MAP(dev_node, entry) \ ++ (UNC_PCIDEV_busno_entry((&(unc_pcidev_map[dev_node])), entry)) ++#define GET_NUM_MAP_ENTRIES(dev_node) \ ++ (UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]))) ++#define IS_MMIO_MAP_VALID(dev_node, entry) \ ++ (UNC_PCIDEV_is_vaddr_valid((&(unc_pcidev_map[dev_node])), entry)) ++#define IS_BUS_MAP_VALID(dev_node, entry) \ ++ (UNC_PCIDEV_is_busno_valid((&(unc_pcidev_map[dev_node])), entry)) ++#define virtual_address_table(dev_node, entry) \ ++ (UNC_PCIDEV_virtual_addr_entry(&(unc_pcidev_map[dev_node]), entry)) ++ ++extern OS_STATUS UNC_COMMON_Do_Bus_to_Socket_Map(U32 uncore_did, U32 dev_node, ++ U32 bus_no, U32 device_no, ++ U32 function_no); ++ ++extern VOID UNC_COMMON_Dummy_Func(PVOID param); ++ ++extern VOID UNC_COMMON_Read_Counts(PVOID param, U32 id); ++ ++/************************************************************/ ++/* ++ * UNC common PCI based API ++ * ++ ************************************************************/ ++ ++extern VOID UNC_COMMON_PCI_Write_PMU(PVOID param, U32 ubox_did, U32 control_msr, ++ U32 ctl_val, U32 pci_dev_index, ++ DEVICE_CALLBACK callback); ++ ++extern VOID UNC_COMMON_PCI_Enable_PMU(PVOID param, U32 control_msr, ++ U32 enable_val, U32 disable_val, ++ DEVICE_CALLBACK callback); ++ ++extern VOID UNC_COMMON_PCI_Disable_PMU(PVOID param, U32 control_msr, ++ U32 enable_val, U32 disable_val, ++ DEVICE_CALLBACK callback); ++ ++extern OS_STATUS UNC_COMMON_Add_Bus_Map(U32 uncore_did, U32 dev_node, ++ U32 bus_no); ++ ++extern OS_STATUS UNC_COMMON_Init(void); ++ ++extern VOID UNC_COMMON_Clean_Up(void); ++ ++extern VOID UNC_COMMON_PCI_Trigger_Read(U32 id); ++ ++extern VOID UNC_COMMON_PCI_Read_PMU_Data(PVOID param); ++ ++extern VOID UNC_COMMON_PCI_Scan_For_Uncore(PVOID param, U32 dev_info_node, ++ DEVICE_CALLBACK callback); ++ ++extern VOID UNC_COMMON_Get_Platform_Topology(U32 dev_info_node); ++ ++/************************************************************/ ++/* ++ * UNC common MSR 
based API ++ * ++ ************************************************************/ ++ ++extern VOID UNC_COMMON_MSR_Write_PMU(PVOID param, U32 control_msr, ++ U64 control_val, U64 reset_val, ++ DEVICE_CALLBACK callback); ++ ++extern VOID UNC_COMMON_MSR_Enable_PMU(PVOID param, U32 control_msr, ++ U64 control_val, U64 unit_ctl_val, ++ U64 pmon_ctl_val, ++ DEVICE_CALLBACK callback); ++ ++extern VOID UNC_COMMON_MSR_Disable_PMU(PVOID param, U32 control_msr, ++ U64 unit_ctl_val, U64 pmon_ctl_val, ++ DEVICE_CALLBACK callback); ++ ++extern VOID UNC_COMMON_MSR_Trigger_Read(U32 id); ++ ++extern VOID UNC_COMMON_MSR_Read_PMU_Data(PVOID param); ++ ++extern VOID UNC_COMMON_MSR_Clean_Up(PVOID param); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/unc_gt.h b/drivers/platform/x86/sepdk/inc/unc_gt.h +new file mode 100644 +index 000000000000..3e95db32cfa8 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/unc_gt.h +@@ -0,0 +1,86 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _UNC_GT_H_INC_ ++#define _UNC_GT_H_INC_ ++ ++/* ++ * Local to this architecture: SNB uncore GT unit ++ * ++ */ ++#define GT_MMIO_SIZE 0x200000 ++#define NEXT_ADDR_OFFSET 4 ++#define UNC_GT_BAR_MASK 0xFFF00000 ++#define PERF_GLOBAL_CTRL 0x391 ++#define GT_CLEAR_COUNTERS 0xFFFF0000 ++ ++#define IA32_DEBUG_CTRL 0x1D9 ++#define MAX_FREE_RUNNING_EVENTS 6 ++#define GT_DID_1 0x102 ++#define INTEL_VENDOR_ID 0x8086 ++#define DRV_GET_PCI_VENDOR_ID(value) (value & 0x0000FFFF) ++#define DRV_GET_PCI_DEVICE_ID(value) ((value & 0xFFFF0000) >> 16) ++#define DRV_IS_INTEL_VENDOR_ID(value) (value == INTEL_VENDOR_ID) ++#define DRV_IS_GT_DEVICE_ID(value) (value == GT_DID_1) ++ ++//clock gating disable values ++#define UNC_GT_GCPUNIT_REG1 0x9400 ++#define UNC_GT_GCPUNIT_REG2 0x9404 ++#define UNC_GT_GCPUNIT_REG3 0x9408 ++#define UNC_GT_GCPUNIT_REG4 0x940c ++#define UNC_GT_GCPUNIT_REG1_VALUE 0xffffffff ++#define UNC_GT_GCPUNIT_REG2_VALUE 0xffffffff ++#define UNC_GT_GCPUNIT_REG3_VALUE 0xffe3ffff ++#define UNC_GT_GCPUNIT_REG4_VALUE 0x00000003 ++//RC6 disable ++#define UNC_GT_RC6_REG1 0xa090 ++#define UNC_GT_RC6_REG2 0xa094 ++#define UNC_GT_RC6_REG1_OR_VALUE 0x80000000 ++#define UNC_GT_RC6_REG2_VALUE 0x00000000 ++extern DISPATCH_NODE unc_gt_dispatch; ++ ++typedef struct GT_CTR_NODE_S GT_CTR_NODE; ++typedef GT_CTR_NODE * GT_CTR; ++struct GT_CTR_NODE_S { ++ union { ++ struct { ++ U32 low : 32; ++ U32 high : 12; ++ } bits; ++ U64 value; ++ } u; 
++}; ++ ++#define GT_CTR_NODE_value(x) (x.u.value) ++#define GT_CTR_NODE_low(x) (x.u.bits.low) ++#define GT_CTR_NODE_high(x) (x.u.bits.high) ++#define GT_CTR_NODE_value_reset(x) x.u.value = 0 ++ ++#define DRV_WRITE_PCI_REG_ULONG(va, offset_delta, value) \ ++ writel(value, (void __iomem *)((char *)(va + offset_delta))) ++#define DRV_READ_PCI_REG_ULONG(va, offset_delta) \ ++ readl((void __iomem *)(char *)(va + offset_delta)) ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/utility.h b/drivers/platform/x86/sepdk/inc/utility.h +new file mode 100644 +index 000000000000..c5eca9612b00 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/utility.h +@@ -0,0 +1,637 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#ifndef _UTILITY_H_ ++#define _UTILITY_H_ ++ ++/** ++// Data Types and Macros ++*/ ++#pragma pack(push, 1) ++ ++#pragma pack(pop) ++ ++/* ++ * Declarations ++ */ ++extern DISPATCH_NODE unc_msr_dispatch; ++extern DISPATCH_NODE unc_pci_dispatch; ++extern DISPATCH_NODE unc_mmio_dispatch; ++extern DISPATCH_NODE unc_mmio_fpga_dispatch; ++extern DISPATCH_NODE unc_power_dispatch; ++ ++/* ++ * These routines have macros defined in asm/system.h ++ */ ++#define SYS_Local_Irq_Enable() local_irq_enable() ++#define SYS_Local_Irq_Disable() local_irq_disable() ++#define SYS_Local_Irq_Save(flags) local_irq_save(flags) ++#define SYS_Local_Irq_Restore(flags) local_irq_restore(flags) ++ ++#include ++ ++#define SYS_MMIO_Read32(base, offset) \ ++ ((base) ? readl((void __iomem *)((UIOP)(base) + (offset))) : 0) ++extern U64 SYS_MMIO_Read64(U64 baseAddress, U64 offset); ++ ++extern U64 SYS_Read_MSR(U32 msr); ++ ++extern void SYS_Write_MSR(U32 msr, U64 val); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) || \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && \ ++ defined(CONFIG_UIDGID_STRICT_TYPE_CHECKS)) ++#define DRV_GET_UID(p) (p->cred->uid.val) ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) ++#define DRV_GET_UID(p) (p->cred->uid) ++#else ++#define DRV_GET_UID(p) (p->uid) ++#endif ++ ++extern void SYS_Perfvec_Handler(void); ++ ++extern void *SYS_get_stack_ptr0(void); ++extern void *SYS_get_stack_ptr3(void); ++extern void *SYS_get_user_fp(void); ++extern short SYS_Get_cs(void); ++ ++#if defined(DRV_IA32) ++extern void *SYS_Get_IDT_Base_HWR(void); /// IDT base from hardware IDTR ++extern void *SYS_Get_GDT_Base_HWR(void); /// GDT base from hardware GDTR ++extern U64 SYS_Get_TSC(void); ++ ++#define SYS_Get_IDT_Base SYS_Get_IDT_Base_HWR ++#define SYS_Get_GDT_Base SYS_Get_GDT_Base_HWR ++#endif ++ ++#if defined(DRV_EM64T) ++extern unsigned short SYS_Get_Code_Selector0(void); 
++extern void SYS_Get_IDT_Base(void **); ++extern void SYS_Get_GDT_Base(void **); ++#endif ++ ++extern void SYS_IO_Delay(void); ++#define SYS_Inb(port) inb(port) ++#define SYS_Outb(byte, port) outb(byte, port) ++ ++/* typedef int OSSTATUS; */ ++ ++/* ++ * Lock implementations ++ */ ++#define SYS_Locked_Inc(var) atomic_inc((var)) ++#define SYS_Locked_Dec(var) atomic_dec((var)) ++ ++extern void UTILITY_Read_TSC(U64 *pTsc); ++ ++extern void UTILITY_down_read_mm(struct mm_struct *mm); ++ ++extern void UTILITY_up_read_mm(struct mm_struct *mm); ++ ++extern void UTILITY_Read_Cpuid(U64 cpuid_function, U64 *rax_value, ++ U64 *rbx_value, U64 *rcx_value, U64 *rdx_value); ++ ++extern DISPATCH UTILITY_Configure_CPU(U32); ++ ++#if defined(DRV_IA32) ++asmlinkage void SYS_Get_CSD(U32, U32 *, U32 *); ++#endif ++ ++#if defined(BUILD_CHIPSET) ++extern CS_DISPATCH UTILITY_Configure_Chipset(void); ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern unsigned long UTILITY_Find_Symbol (const char* name) ++ * ++ * @brief Finds the address of the specified kernel symbol. ++ * ++ * @param const char* name - name of the symbol to look for ++ * ++ * @return Symbol address (0 if could not find) ++ * ++ * Special Notes: ++ * This wrapper is needed due to kallsyms_lookup_name not being exported ++ * in kernel version 2.6.32.*. ++ * Careful! This code is *NOT* multithread-safe or reentrant! Should only ++ * be called from 1 context at a time! 
++ */ ++extern unsigned long UTILITY_Find_Symbol(const char *name); ++ ++/************************************************************************/ ++/*********************** DRIVER LOG DECLARATIONS ************************/ ++/************************************************************************/ ++ ++#define DRV_LOG_COMPILER_MEM_BARRIER() { asm volatile("" : : : "memory"); } ++ ++#define DRV_LOG_DEFAULT_LOAD_VERBOSITY (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL) ++#define DRV_LOG_DEFAULT_INIT_VERBOSITY \ ++ (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CONTEXT_ALL) ++#define DRV_LOG_DEFAULT_DETECTION_VERBOSITY (DRV_LOG_DEFAULT_INIT_VERBOSITY) ++#define DRV_LOG_DEFAULT_ERROR_VERBOSITY \ ++ (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL) ++#define DRV_LOG_DEFAULT_STATE_CHANGE_VERBOSITY (DRV_LOG_DEFAULT_INIT_VERBOSITY) ++#define DRV_LOG_DEFAULT_MARK_VERBOSITY (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL) ++#define DRV_LOG_DEFAULT_DEBUG_VERBOSITY \ ++ (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_PRINTK | LOG_CONTEXT_ALL) ++#define DRV_LOG_DEFAULT_FLOW_VERBOSITY \ ++ (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CONTEXT_ALL) ++#define DRV_LOG_DEFAULT_ALLOC_VERBOSITY (LOG_VERBOSITY_NONE) ++#define DRV_LOG_DEFAULT_INTERRUPT_VERBOSITY \ ++ (LOG_CHANNEL_MEMLOG | LOG_CONTEXT_ALL) ++#define DRV_LOG_DEFAULT_TRACE_VERBOSITY (LOG_VERBOSITY_NONE) ++#define DRV_LOG_DEFAULT_REGISTER_VERBOSITY (LOG_VERBOSITY_NONE) ++#define DRV_LOG_DEFAULT_NOTIFICATION_VERBOSITY \ ++ (LOG_CHANNEL_MEMLOG | LOG_CONTEXT_ALL) ++#define DRV_LOG_DEFAULT_WARNING_VERBOSITY \ ++ (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL) ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern void UTILITY_Log (U8 category, U8 in_notification, U8 secondary, ++ * const char* function_name, U32 func_name_len, ++ * U32 line_number, const char* format_string, ...) ++ * ++ * @brief Checks whether and where the message should be logged, ++ * and logs it as appropriate. 
++ * ++ * @param U8 category - message category ++ * U8 in_notification - whether or not we are in a notification/OS ++ * callback context (information cannot be reliably obtained without passing ++ * it through the stack) ++ * U8 secondary - secondary information field for the message ++ * const char* function_name - name of the calling function ++ * U32 func_name_len - length of the name of the calling function ++ * (more efficient to pass it as parameter than finding it back at runtime) ++ * U32 line_number - line number of the call site ++ * const char* format_string - classical format string for ++ * printf-like functions ++ * ... - elements to print ++ * ++ * @return none ++ * ++ * Special Notes: ++ * Used to keep track of the IOCTL operation currently being processed. ++ * This information is saved in the log buffer (globally), as well as ++ * in every log entry. ++ * NB: only IOCTLs for which grabbing the ioctl mutex is necessary ++ * should be kept track of this way. ++ */ ++extern VOID UTILITY_Log(U8 category, U8 in_notification, U8 secondary, ++ const char *function_name, U32 func_name_len, ++ U32 line_number, const char *format_string, ...); ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern DRV_STATUS UTILITY_Driver_Log_Init (void) ++ * ++ * @brief Allocates and initializes the driver log buffer. ++ * ++ * @param none ++ * ++ * @return OS_SUCCESS on success, OS_NO_MEM on error. ++ * ++ * Special Notes: ++ * Should be (successfully) run before any non-LOAD log calls. ++ * Allocates memory without going through CONTROL_Allocate (to avoid ++ * complicating the instrumentation of CONTROL_* functions): calling ++ * UTILITY_Driver_Log_Free is necessary to free the log structure. ++ * Falls back to vmalloc when contiguous physical memory cannot be ++ * allocated. This does not impact runtime behavior, but may impact ++ * the easiness of retrieving the log from a core dump if the system ++ * crashes. 
++ */ ++extern DRV_STATUS UTILITY_Driver_Log_Init(void); ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern DRV_STATUS UTILITY_Driver_Log_Free (void) ++ * ++ * @brief Frees the driver log buffer. ++ * ++ * @param none ++ * ++ * @return OS_SUCCESS on success, OS_NO_MEM on error. ++ * ++ * Special Notes: ++ * Should be done before unloading the driver. ++ * See UTILITY_Driver_Log_Init for details. ++ */ ++extern void UTILITY_Driver_Log_Free(void); ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern void UTILITY_Driver_Set_Active_Ioctl (U32 ioctl) ++ * ++ * @brief Sets the 'active_ioctl' global to the specified value. ++ * ++ * @param U32 ioctl - ioctl/drvop code to use ++ * ++ * @return none ++ * ++ * Special Notes: ++ * Used to keep track of the IOCTL operation currently being processed. ++ * This information is saved in the log buffer (globally), as well as ++ * in every log entry. ++ * NB: only IOCTLs for which grabbing the ioctl mutex is necessary ++ * should be kept track of this way. ++ */ ++extern void UTILITY_Driver_Set_Active_Ioctl(U32); ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern const char** UTILITY_Log_Category_Strings (void) ++ * ++ * @brief Accessor function for the log category string array ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * Special Notes: ++ * Only needed for cosmetic purposes when adjusting category verbosities. ++ */ ++extern const char **UTILITY_Log_Category_Strings(void); ++ ++extern DRV_LOG_BUFFER driver_log_buffer; ++extern volatile U8 active_ioctl; ++ ++#define DRV_LOG() driver_log_buffer ++#define DRV_LOG_VERBOSITY(category) \ ++ ((DRV_LOG_BUFFER_verbosities(DRV_LOG()))[category]) ++#define SEP_IN_NOTIFICATION 1 ++ ++#define SEP_DRV_RAW_LOG(category, in_notification, second, message, ...) 
\ ++ UTILITY_Log(category, in_notification, second, __func__, \ ++ sizeof(__func__), __LINE__, message, ##__VA_ARGS__) ++#define SEP_DRV_ULK_LOG(category, in_notification, second, message, ...) \ ++ UTILITY_Log(category, in_notification, second, __func__, \ ++ sizeof(__func__), __LINE__, message, ##__VA_ARGS__) ++ ++#define SEP_DRV_LOG_INCREMENT_NB_ACTIVE_INTERRUPTS() \ ++ do { \ ++ __sync_fetch_and_add( \ ++ &DRV_LOG_BUFFER_nb_active_interrupts(DRV_LOG()), 1); \ ++ __sync_fetch_and_add( \ ++ &DRV_LOG_BUFFER_nb_interrupts(DRV_LOG()), 1); \ ++ } while (0) ++ ++#define SEP_DRV_LOG_DECREMENT_NB_ACTIVE_INTERRUPTS() \ ++ do { \ ++ __sync_fetch_and_add( \ ++ &DRV_LOG_BUFFER_nb_active_interrupts(DRV_LOG()), -1); \ ++ } while (0) ++ ++#define SEP_DRV_LOG_INCREMENT_NB_ACTIVE_NOTIFICATIONS() \ ++ do { \ ++ __sync_fetch_and_add( \ ++ &DRV_LOG_BUFFER_nb_active_notifications(DRV_LOG()), 1); \ ++ __sync_fetch_and_add( \ ++ &DRV_LOG_BUFFER_nb_notifications(DRV_LOG()), 1); \ ++ } while (0) ++ ++#define SEP_DRV_LOG_DECREMENT_NB_ACTIVE_NOTIFICATIONS() \ ++ do { \ ++ __sync_fetch_and_add( \ ++ &DRV_LOG_BUFFER_nb_active_notifications(DRV_LOG()), -1);\ ++ } while (0) ++ ++#define SEP_DRV_LOG_INCREMENT_NB_STATE_TRANSITIONS() \ ++ do { \ ++ __sync_fetch_and_add( \ ++ &DRV_LOG_BUFFER_nb_driver_state_transitions(DRV_LOG()), 1); \ ++ } while (0) ++ ++#define SEP_DRV_LOG_DISAMBIGUATE() \ ++ do { \ ++ __sync_fetch_and_add( \ ++ &DRV_LOG_BUFFER_disambiguator(DRV_LOG()), 1); \ ++ } while (0) ++ ++/************************************************************************/ ++/************************** CATEGORY LOG APIs ***************************/ ++/************************************************************************/ ++ ++// ERROR, WARNING and LOAD are always compiled in... ++#define SEP_DRV_LOG_ERROR(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_ERROR, 0, DRV_LOG_NOTHING, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_WARNING(message, ...) 
\ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_WARNING, 0, DRV_LOG_NOTHING, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_NOTIFICATION_ERROR(in_notif, message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_ERROR, in_notif, DRV_LOG_NOTHING, \ ++ message, ##__VA_ARGS__) ++#define SEP_DRV_LOG_NOTIFICATION_WARNING(in_notif, message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_WARNING, in_notif, DRV_LOG_NOTHING, \ ++ message, ##__VA_ARGS__) ++#define SEP_DRV_LOG_LOAD(message, ...) \ ++ do { \ ++ if (DRV_LOG()) { \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_LOAD, 0, \ ++ DRV_LOG_NOTHING, message, \ ++ ##__VA_ARGS__); \ ++ } else if (DRV_LOG_DEFAULT_LOAD_VERBOSITY & \ ++ LOG_CHANNEL_PRINTK) { \ ++ printk(KERN_ERR SEP_MSG_PREFIX " " message "\n", \ ++ ##__VA_ARGS__); \ ++ } \ ++ } while (0) ++ ++#if defined(DRV_MINIMAL_LOGGING) // MINIMAL LOGGING MODE ++#define SEP_DRV_LOG_INIT(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_INIT_IN(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_INIT_OUT(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_DETECTION(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_MARK(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_DEBUG(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_DEBUG_IN(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_DEBUG_OUT(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_FLOW_IN(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_FLOW_OUT(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_ALLOC(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_ALLOC_IN(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_ALLOC_OUT(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_INTERRUPT_IN(message, ...) \ ++ SEP_DRV_LOG_INCREMENT_NB_ACTIVE_INTERRUPTS(); ++#define SEP_DRV_LOG_INTERRUPT_OUT(message, ...) \ ++ SEP_DRV_LOG_DECREMENT_NB_ACTIVE_INTERRUPTS(); ++#define SEP_DRV_LOG_NOTIFICATION_IN(message, ...) \ ++ SEP_DRV_LOG_INCREMENT_NB_ACTIVE_NOTIFICATIONS(); ++#define SEP_DRV_LOG_NOTIFICATION_OUT(message, ...) 
\ ++ SEP_DRV_LOG_DECREMENT_NB_ACTIVE_NOTIFICATIONS(); ++#define SEP_DRV_LOG_STATE_TRANSITION(former_state, new_state, message, ...) \ ++ { \ ++ (void)former_state; \ ++ SEP_DRV_LOG_INCREMENT_NB_STATE_TRANSITIONS(); \ ++ DRV_LOG_BUFFER_driver_state(DRV_LOG()) = new_state; \ ++ } ++#else // REGULAR LOGGING MODE (PART 1 / 2) ++#define SEP_DRV_LOG_INIT(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INIT, 0, DRV_LOG_NOTHING, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_INIT_IN(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INIT, 0, DRV_LOG_FLOW_IN, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_INIT_OUT(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INIT, 0, DRV_LOG_FLOW_OUT, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_DETECTION(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DETECTION, 0, DRV_LOG_NOTHING, \ ++ message, ##__VA_ARGS__) ++#define SEP_DRV_LOG_MARK(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_MARK, 0, DRV_LOG_NOTHING, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_DEBUG(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DEBUG, 0, DRV_LOG_NOTHING, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_DEBUG_IN(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DEBUG, 0, DRV_LOG_FLOW_IN, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_DEBUG_OUT(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DEBUG, 0, DRV_LOG_FLOW_OUT, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_FLOW_IN(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_FLOW, 0, DRV_LOG_FLOW_IN, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_FLOW_OUT(message, ...) \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_FLOW, 0, DRV_LOG_FLOW_OUT, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_ALLOC(message, ...) \ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_ALLOC, 0, DRV_LOG_NOTHING, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_ALLOC_IN(message, ...) 
\ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_ALLOC, 0, DRV_LOG_FLOW_IN, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_ALLOC_OUT(message, ...) \ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_ALLOC, 0, DRV_LOG_FLOW_OUT, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_INTERRUPT_IN(message, ...) \ ++ { \ ++ SEP_DRV_LOG_INCREMENT_NB_ACTIVE_INTERRUPTS(); \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INTERRUPT, 0, \ ++ DRV_LOG_FLOW_IN, message, ##__VA_ARGS__); \ ++ } ++ ++#define SEP_DRV_LOG_INTERRUPT_OUT(message, ...) \ ++ { \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INTERRUPT, 0, \ ++ DRV_LOG_FLOW_OUT, message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_DECREMENT_NB_ACTIVE_INTERRUPTS(); \ ++ } ++ ++#define SEP_DRV_LOG_NOTIFICATION_IN(message, ...) \ ++ { \ ++ SEP_DRV_LOG_INCREMENT_NB_ACTIVE_NOTIFICATIONS(); \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_NOTIFICATION, 1, \ ++ DRV_LOG_FLOW_IN, message, ##__VA_ARGS__); \ ++ } ++ ++#define SEP_DRV_LOG_NOTIFICATION_OUT(message, ...) \ ++ { \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_NOTIFICATION, 1, \ ++ DRV_LOG_FLOW_OUT, message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_DECREMENT_NB_ACTIVE_NOTIFICATIONS(); \ ++ } ++ ++#define SEP_DRV_LOG_STATE_TRANSITION(former_state, new_state, message, ...) \ ++ { \ ++ SEP_DRV_LOG_INCREMENT_NB_STATE_TRANSITIONS(); \ ++ DRV_LOG_BUFFER_driver_state(DRV_LOG()) = new_state; \ ++ SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_STATE_CHANGE, 0, \ ++ ((U8)former_state << 4) | ((U8)new_state & 0xF), \ ++ message, ##__VA_ARGS__); \ ++ } ++ ++#endif ++ ++#if defined(DRV_MAXIMAL_LOGGING) // MAXIMAL LOGGING MODE ++#define SEP_DRV_LOG_TRACE(message, ...) \ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_NOTHING, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_TRACE_IN(message, ...) \ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_FLOW_IN, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_TRACE_OUT(message, ...) \ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_FLOW_OUT, message, \ ++ ##__VA_ARGS__) ++#define SEP_DRV_LOG_REGISTER_IN(message, ...) 
\ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_REGISTER, 0, DRV_LOG_FLOW_IN, \ ++ message, ##__VA_ARGS__) ++#define SEP_DRV_LOG_REGISTER_OUT(message, ...) \ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_REGISTER, 0, DRV_LOG_FLOW_OUT, \ ++ message, ##__VA_ARGS__) ++#define SEP_DRV_LOG_NOTIFICATION_TRACE(in_notif, message, ...) \ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, in_notif, DRV_LOG_NOTHING, \ ++ message, ##__VA_ARGS__) ++#define SEP_DRV_LOG_NOTIFICATION_TRACE_IN(in_notif, message, ...) \ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, in_notif, DRV_LOG_FLOW_IN, \ ++ message, ##__VA_ARGS__) ++#define SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notif, message, ...) \ ++ SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, in_notif, DRV_LOG_FLOW_OUT, \ ++ message, ##__VA_ARGS__) ++#else // REGULAR LOGGING MODE (PART 2 / 2) ++#define SEP_DRV_LOG_TRACE(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_TRACE_IN(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_TRACE_OUT(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_REGISTER_IN(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_REGISTER_OUT(message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_NOTIFICATION_TRACE(in_notif, message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_NOTIFICATION_TRACE_IN(in_notif, message, ...) \ ++ { \ ++ } ++#define SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notif, message, ...) \ ++ { \ ++ } ++#endif ++ ++/************************************************************************/ ++/************************* FACILITATOR MACROS ***************************/ ++/************************************************************************/ ++ ++#define SEP_DRV_LOG_ERROR_INIT_OUT(message, ...) \ ++ { \ ++ SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_INIT_OUT(message, ##__VA_ARGS__); \ ++ } ++ ++#define SEP_DRV_LOG_ERROR_FLOW_OUT(message, ...) \ ++ { \ ++ SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_FLOW_OUT(message, ##__VA_ARGS__); \ ++ } ++#define SEP_DRV_LOG_ERROR_TRACE_OUT(message, ...) 
\ ++ { \ ++ SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_TRACE_OUT(message, ##__VA_ARGS__); \ ++ } ++#define SEP_DRV_LOG_ERROR_ALLOC_OUT(message, ...) \ ++ { \ ++ SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_ALLOC_OUT(message, ##__VA_ARGS__); \ ++ } ++ ++#define SEP_DRV_LOG_WARNING_FLOW_OUT(message, ...) \ ++ { \ ++ SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_FLOW_OUT(message, ##__VA_ARGS__); \ ++ } ++ ++#define SEP_DRV_LOG_WARNING_TRACE_OUT(message, ...) \ ++ { \ ++ SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_TRACE_OUT(message, ##__VA_ARGS__); \ ++ } ++#define SEP_DRV_LOG_WARNING_ALLOC_OUT(message, ...) \ ++ { \ ++ SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_ALLOC_OUT(message, ##__VA_ARGS__); \ ++ } ++ ++#define SEP_DRV_LOG_INIT_TRACE_OUT(message, ...) \ ++ { \ ++ SEP_DRV_LOG_INIT(message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_TRACE_OUT(message, ##__VA_ARGS__); \ ++ } ++ ++#define SEP_DRV_LOG_WARNING_NOTIFICATION_OUT(message, ...) \ ++ { \ ++ SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ ++ SEP_DRV_LOG_NOTIFICATION_OUT(message, ##__VA_ARGS__); \ ++ } ++ ++ ++/************************************************************************/ ++/************************* DRIVER STATE MACROS **************************/ ++/************************************************************************/ ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern U32 UTILITY_Change_Driver_State (U32 allowed_prior_states, ++ * U32 state, const char* func, U32 line_number) ++ * ++ * @brief Updates the driver state (if the transition is legal). 
++ * ++ * @param U32 allowed_prior_states - the bitmask representing the states ++ * from which the transition is allowed to occur ++ * U32 state - the destination state ++ * const char* func - the callsite's function's name ++ * U32 line_number - the callsite's line number ++ * ++ * @return 1 in case of success, 0 otherwise ++ * ++ * Special Notes: ++ * ++ */ ++extern U32 UTILITY_Change_Driver_State(U32 allowed_prior_states, U32 state, ++ const char *func, U32 line_number); ++ ++#define GET_DRIVER_STATE() GLOBAL_STATE_current_phase(driver_state) ++#define CHANGE_DRIVER_STATE(allowed_prior_states, state) \ ++ UTILITY_Change_Driver_State(allowed_prior_states, state, __func__, \ ++ __LINE__) ++#define DRIVER_STATE_IN(state, states) \ ++ (!!(MATCHING_STATE_BIT(state) & (states))) ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/inc/valleyview_sochap.h b/drivers/platform/x86/sepdk/inc/valleyview_sochap.h +new file mode 100644 +index 000000000000..18214ea3ca76 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/inc/valleyview_sochap.h +@@ -0,0 +1,60 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _VALLEYVIEW_SOCHAP_H_INC_ ++#define _VALLEYVIEW_SOCHAP_H_INC_ ++ ++/* ++ * Local to this architecture: Valleyview uncore SA unit ++ * ++ */ ++#define VLV_VISA_DESKTOP_DID 0x000C04 ++#define VLV_VISA_NEXT_ADDR_OFFSET 4 ++#define VLV_VISA_BAR_ADDR_SHIFT 32 ++#define VLV_VISA_BAR_ADDR_MASK 0x000FFFC00000LL ++#define VLV_VISA_MAX_PCI_DEVICES 16 ++#define VLV_VISA_MCR_REG_OFFSET 0xD0 ++#define VLV_VISA_MDR_REG_OFFSET 0xD4 ++#define VLV_VISA_MCRX_REG_OFFSET 0xD8 ++#define VLV_VISA_BYTE_ENABLES 0xF ++#define VLV_VISA_OP_CODE_SHIFT 24 ++#define VLV_VISA_PORT_ID_SHIFT 16 ++#define VLV_VISA_OFFSET_HI_MASK 0xFF ++#define VLV_VISA_OFFSET_LO_MASK 0xFF ++#define VLV_CHAP_SIDEBAND_PORT_ID 23 ++#define VLV_CHAP_SIDEBAND_WRITE_OP_CODE 1 ++#define VLV_CHAP_SIDEBAND_READ_OP_CODE 0 ++#define VLV_CHAP_MAX_COUNTERS 8 ++#define VLV_CHAP_MAX_COUNT 0x00000000FFFFFFFFLL ++ ++#define VLV_VISA_OTHER_BAR_MMIO_PAGE_SIZE 4096 ++#define VLV_VISA_CHAP_SAMPLE_DATA 0x00020000 ++#define VLV_VISA_CHAP_STOP 0x00040000 ++#define VLV_VISA_CHAP_START 0x00110000 ++#define VLV_VISA_CHAP_CTRL_REG_OFFSET 0x0 ++ ++extern DISPATCH_NODE valleyview_visa_dispatch; ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/error_reporting_utils.h b/drivers/platform/x86/sepdk/include/error_reporting_utils.h +new file mode 100644 +index 000000000000..c1e90c441cc1 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/error_reporting_utils.h +@@ -0,0 +1,167 @@ ++ 
++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2002-2018 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++ ***/ ++ ++#ifndef __ERROR_REPORTING_UTILS_H__ ++#define __ERROR_REPORTING_UTILS_H__ ++ ++#define DRV_ASSERT_N_RET_VAL(ret_val) \ ++ { \ ++ DRV_ASSERT((ret_val) == VT_SUCCESS); \ ++ DRV_CHECK_N_RETURN_N_FAIL(ret_val); \ ++ } ++ ++#define DRV_ASSERT_N_CONTINUE(ret_val) \ ++ { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ LOG_ERR1(VTSA_T("Operation failed with error code "), \ ++ (ret_val)); \ ++ } \ ++ } ++ ++#define DRV_CHECK_N_RETURN_N_FAIL(ret_val) \ ++ { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ LOG_ERR1(VTSA_T("Operation failed with error code "), \ ++ (ret_val)); \ ++ return ret_val; \ ++ } \ ++ } ++ ++#define DRV_CHECK_N_RETURN_NO_RETVAL(ret_val) \ ++ { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ LOG_ERR1(VTSA_T("Operation failed with error code "), \ ++ (ret_val)); \ ++ return; \ ++ } \ ++ } ++ ++#define DRV_CHECK_PTR_N_RET_VAL(ptr) \ ++ { \ ++ if ((ptr) == NULL) { \ ++ LOG_ERR0(VTSA_T("Encountered null pointer")); \ ++ return VT_SAM_ERROR; \ ++ } \ ++ } ++ ++#define DRV_CHECK_PTR_N_RET_NULL(ptr) \ ++ { \ ++ if ((ptr) == NULL) { \ ++ LOG_ERR0(VTSA_T("Encountered null pointer")); \ ++ return NULL; \ ++ } \ ++ } ++ ++#define DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr) \ ++ { \ ++ if ((ptr) == NULL) { \ ++ LOG_ERR0(VTSA_T("Encountered null pointer")); \ ++ } \ ++ } ++ ++#define DRV_CHECK_N_LOG_NO_RETURN(ret_val) \ ++ { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ LOG_ERR1(VTSA_T("Operation failed with error code "), \ ++ (ret_val)); \ ++ } \ ++ } ++ ++#define 
DRV_CHECK_N_RET_NEG_ONE(ret_val) \ ++ { \ ++ if ((ret_val) == -1) { \ ++ LOG_ERR0(VTSA_T( \ ++ "Operation failed with error code = -1")); \ ++ return VT_SAM_ERROR; \ ++ } \ ++ } ++ ++#define DRV_REQUIRES_TRUE_COND_RET_N_FAIL(cond) \ ++ { \ ++ if (!(cond)) { \ ++ LOG_ERR0(VTSA_T("Condition check failed")); \ ++ return VT_SAM_ERROR; \ ++ } \ ++ } ++ ++#define DRV_REQUIRES_TRUE_COND_RET_ASSIGNED_VAL(cond, ret_val) \ ++ { \ ++ if (!(cond)) { \ ++ LOG_ERR0(VTSA_T("Condition check failed")); \ ++ return ret_val; \ ++ } \ ++ } ++ ++#define DRV_CHECK_N_ERR_LOG_ERR_STRNG_N_RET(rise_err) \ ++ { \ ++ if (rise_err != VT_SUCCESS) { \ ++ PVOID rise_ptr = NULL; \ ++ const VTSA_CHAR *error_str = NULL; \ ++ RISE_open(&rise_ptr); \ ++ RISE_translate_err_code(rise_ptr, rise_err, \ ++ &error_str); \ ++ LogItW(LOG_LEVEL_ERROR | LOG_AREA_GENERAL, \ ++ L"Operation failed with error [ %d ] = %s\n", \ ++ rise_err, error_str); \ ++ RISE_close(rise_ptr); \ ++ return rise_err; \ ++ } \ ++ } ++ ++#define DRV_CHECK_PTR_N_CLEANUP(ptr, gotolabel, ret_val) \ ++ { \ ++ if ((ptr) == NULL) { \ ++ LOG_ERR0(VTSA_T("Encountered null pointer")); \ ++ ret_val = VT_SAM_ERROR; \ ++ goto gotolabel; \ ++ } \ ++ } ++ ++#define DRV_CHECK_ON_FAIL_CLEANUP_N_RETURN(ret_val, gotolabel) \ ++ { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ DRV_CHECK_N_LOG_NO_RETURN(ret_val); \ ++ goto gotolabel; \ ++ } \ ++ } ++ ++#define DRV_CHECK_N_CLEANUP_N_RETURN_RET_NEG_ONE(ret_val, gotolabel) \ ++ { \ ++ if ((ret_val) == -1) { \ ++ DRV_CHECK_N_LOG_NO_RETURN(ret_val); \ ++ goto gotolabel; \ ++ } \ ++ } ++ ++#define DRV_CHECK_PTR_ON_NULL_CLEANUP_N_RETURN(ptr, gotolabel) \ ++ { \ ++ if ((ptr) == NULL) { \ ++ DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr); \ ++ goto gotolabel; \ ++ } \ ++ } ++ ++#define FREE_N_SET_NULL(ptr) \ ++ { \ ++ if (ptr != NULL) { \ ++ free(ptr); \ ++ ptr = NULL; \ ++ } \ ++ } ++ ++#define DELETE_N_SET_NULL(ptr) \ ++ { \ ++ delete ptr; \ ++ ptr = NULL; \ ++ } ++ ++#endif +diff --git 
a/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h b/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h +new file mode 100644 +index 000000000000..82531312af75 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h +@@ -0,0 +1,274 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++ ***/ ++ ++#ifndef _LWPMUDRV_CHIPSET_UTILS_H_ ++#define _LWPMUDRV_CHIPSET_UTILS_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define MAX_CHIPSET_EVENT_NAME 64 ++#define MAX_CHIPSET_COUNTERS 5 ++ /* TODO: this covers 1 fixed counter \ ++ * plus 4 general counters on GMCH; \ ++ * for other chipset devices, this \ ++ * can vary from 8 to 32; might consider \ ++ * making this per-chipset-type since \ ++ * event-multiplexing is currently not \ ++ * supported for chipset collections ++ */ ++ ++#if defined(_NTDDK_) ++#define CHIPSET_PHYS_ADDRESS PHYSICAL_ADDRESS ++#else ++#define CHIPSET_PHYS_ADDRESS U64 ++#endif ++ ++// possible values for whether chipset data is valid or not ++enum { DATA_IS_VALID, DATA_IS_INVALID, DATA_OUT_OF_RANGE }; ++ ++typedef struct CHIPSET_PCI_ARG_NODE_S CHIPSET_PCI_ARG_NODE; ++typedef CHIPSET_PCI_ARG_NODE * CHIPSET_PCI_ARG; ++ ++struct CHIPSET_PCI_ARG_NODE_S { ++ U32 address; ++ U32 value; ++}; ++ ++#define CHIPSET_PCI_ARG_address(chipset_pci) ((chipset_pci)->address) ++#define CHIPSET_PCI_ARG_value(chipset_pci) ((chipset_pci)->value) ++ ++typedef struct CHIPSET_PCI_SEARCH_ADDR_NODE_S CHIPSET_PCI_SEARCH_ADDR_NODE; ++typedef CHIPSET_PCI_SEARCH_ADDR_NODE * 
CHIPSET_PCI_SEARCH_ADDR; ++ ++struct CHIPSET_PCI_SEARCH_ADDR_NODE_S { ++ U32 start; ++ U32 stop; ++ U32 increment; ++ U32 addr; ++}; ++ ++#define CHIPSET_PCI_SEARCH_ADDR_start(pci_search_addr) \ ++ ((pci_search_addr)->start) ++#define CHIPSET_PCI_SEARCH_ADDR_stop(pci_search_addr) ((pci_search_addr)->stop) ++#define CHIPSET_PCI_SEARCH_ADDR_increment(pci_search_addr) \ ++ ((pci_search_addr)->increment) ++#define CHIPSET_PCI_SEARCH_ADDR_address(pci_search_addr) \ ++ ((pci_search_addr)->addr) ++ ++typedef struct CHIPSET_PCI_CONFIG_NODE_S CHIPSET_PCI_CONFIG_NODE; ++typedef CHIPSET_PCI_CONFIG_NODE * CHIPSET_PCI_CONFIG; ++ ++struct CHIPSET_PCI_CONFIG_NODE_S { ++ U32 bus; ++ U32 device; ++ U32 function; ++ U32 offset; ++ U32 value; ++}; ++ ++#define CHIPSET_PCI_CONFIG_bus(pci_config) ((pci_config)->bus) ++#define CHIPSET_PCI_CONFIG_device(pci_config) ((pci_config)->device) ++#define CHIPSET_PCI_CONFIG_function(pci_config) ((pci_config)->function) ++#define CHIPSET_PCI_CONFIG_offset(pci_config) ((pci_config)->offset) ++#define CHIPSET_PCI_CONFIG_value(pci_config) ((pci_config)->value) ++ ++typedef struct CHIPSET_MARKER_NODE_S CHIPSET_MARKER_NODE; ++typedef CHIPSET_MARKER_NODE * CHIPSET_MARKER; ++ ++struct CHIPSET_MARKER_NODE_S { ++ U32 processor_number; ++ U32 rsvd; ++ U64 tsc; ++}; ++ ++#define CHIPSET_MARKER_processor_number(chipset_marker) \ ++ ((chipset_marker)->processor_number) ++#define CHIPSET_MARKER_tsc(chipset_marker) ((chipset_marker)->tsc) ++ ++typedef struct CHAP_INTERFACE_NODE_S CHAP_INTERFACE_NODE; ++typedef CHAP_INTERFACE_NODE * CHAP_INTERFACE; ++ ++// CHAP chipset registers ++// Offsets for registers are command-0x00, event-0x04, status-0x08, data-0x0C ++struct CHAP_INTERFACE_NODE_S { ++ U32 command_register; ++ U32 event_register; ++ U32 status_register; ++ U32 data_register; ++}; ++ ++#define CHAP_INTERFACE_command_register(chap) ((chap)->command_register) ++#define CHAP_INTERFACE_event_register(chap) ((chap)->event_register) ++#define 
CHAP_INTERFACE_status_register(chap) ((chap)->status_register) ++#define CHAP_INTERFACE_data_register(chap) ((chap)->data_register) ++ ++/************************************************************************** ++ * GMCH Registers and Offsets ++ ************************************************************************** ++ */ ++ ++// Counter registers - each counter has 4 registers ++#define GMCH_MSG_CTRL_REG 0xD0 // message control register (MCR) 0xD0-0xD3 ++#define GMCH_MSG_DATA_REG 0xD4 // message data register (MDR) 0xD4-0xD7 ++ ++// Counter register offsets ++#define GMCH_PMON_CAPABILITIES \ ++ 0x0005F0F0 // when read, bit 0 enabled means GMCH counters are available ++#define GMCH_PMON_GLOBAL_CTRL \ ++ 0x0005F1F0 // simultaneously enables/disables fixed and general counters ++ ++// Fixed counters (32-bit) ++#define GMCH_PMON_FIXED_CTR_CTRL \ ++ 0x0005F4F0 // enables and filters the fixed counters ++#define GMCH_PMON_FIXED_CTR0 \ ++ 0x0005E8F0 // 32-bit fixed counter for GMCH_CORE_CLKS event ++#define GMCH_PMON_FIXED_CTR_OVF_VAL \ ++ 0xFFFFFFFFLL // overflow value for GMCH fixed counters ++ ++// General counters (38-bit) ++// NOTE: lower order bits on GP counters must be read before the higher bits! 
++#define GMCH_PMON_GP_CTR0_L 0x0005F8F0 // GMCH GP counter 0, low bits ++#define GMCH_PMON_GP_CTR0_H 0x0005FCF0 // GMCH GP counter 0, high bits ++#define GMCH_PMON_GP_CTR1_L 0x0005F9F0 ++#define GMCH_PMON_GP_CTR1_H 0x0005FDF0 ++#define GMCH_PMON_GP_CTR2_L 0x0005FAF0 ++#define GMCH_PMON_GP_CTR2_H 0x0005FEF0 ++#define GMCH_PMON_GP_CTR3_L 0x0005FBF0 ++#define GMCH_PMON_GP_CTR3_H 0x0005FFF0 ++#define GMCH_PMON_GP_CTR_OVF_VAL 0x3FFFFFFFFFLL ++ // overflow value for GMCH general counters ++ ++// Register offsets for LNC ++#define LNC_GMCH_REGISTER_READ 0xD0000000 ++#define LNC_GMCH_REGISTER_WRITE 0xE0000000 ++ ++// Register offsets for SLT ++#define SLT_GMCH_REGISTER_READ 0x10000000 ++#define SLT_GMCH_REGISTER_WRITE 0x11000000 ++ ++// Register offsets for CDV ++#define CDV_GMCH_REGISTER_READ 0x10000000 ++#define CDV_GMCH_REGISTER_WRITE 0x11000000 ++ ++ ++typedef struct CHIPSET_EVENT_NODE_S CHIPSET_EVENT_NODE; ++typedef CHIPSET_EVENT_NODE * CHIPSET_EVENT; ++ ++//chipset event ++struct CHIPSET_EVENT_NODE_S { ++ U32 event_id; ++ U32 group_id; ++ char name[MAX_CHIPSET_EVENT_NAME]; ++ U32 pm; ++ U32 counter; ++}; ++ ++#define CHIPSET_EVENT_event_id(chipset_event) ((chipset_event)->event_id) ++#define CHIPSET_EVENT_group_id(chipset_event) ((chipset_event)->group_id) ++#define CHIPSET_EVENT_name(chipset_event) ((chipset_event)->name) ++#define CHIPSET_EVENT_pm(chipset_event) ((chipset_event)->pm) ++#define CHIPSET_EVENT_counter(chipset_event) ((chipset_event)->counter) ++ ++typedef struct CHIPSET_SEGMENT_NODE_S CHIPSET_SEGMENT_NODE; ++typedef CHIPSET_SEGMENT_NODE * CHIPSET_SEGMENT; ++ ++//chipset segment data ++struct CHIPSET_SEGMENT_NODE_S { ++ CHIPSET_PHYS_ADDRESS physical_address; ++ U64 virtual_address; ++ U16 size; ++ U16 number_of_counters; ++ U16 total_events; ++ U16 start_register; // (see driver for details) ++ U32 read_register; // read register offset (model dependent) ++ U32 write_register; // write register offset (model dependent) ++ CHIPSET_EVENT_NODE 
events[MAX_CHIPSET_COUNTERS]; ++}; ++ ++#define CHIPSET_SEGMENT_physical_address(chipset_segment) \ ++ ((chipset_segment)->physical_address) ++#define CHIPSET_SEGMENT_virtual_address(chipset_segment) \ ++ ((chipset_segment)->virtual_address) ++#define CHIPSET_SEGMENT_size(chipset_segment) ((chipset_segment)->size) ++#define CHIPSET_SEGMENT_num_counters(chipset_segment) \ ++ ((chipset_segment)->number_of_counters) ++#define CHIPSET_SEGMENT_total_events(chipset_segment) \ ++ ((chipset_segment)->total_events) ++#define CHIPSET_SEGMENT_start_register(chipset_segment) \ ++ ((chipset_segment)->start_register) ++#define CHIPSET_SEGMENT_read_register(chipset_segment) \ ++ ((chipset_segment)->read_register) ++#define CHIPSET_SEGMENT_write_register(chipset_segment) \ ++ ((chipset_segment)->write_register) ++#define CHIPSET_SEGMENT_events(chipset_segment) ((chipset_segment)->events) ++ ++typedef struct CHIPSET_CONFIG_NODE_S CHIPSET_CONFIG_NODE; ++typedef CHIPSET_CONFIG_NODE * CHIPSET_CONFIG; ++ ++//chipset struct used for communication between user mode and kernel ++struct CHIPSET_CONFIG_NODE_S { ++ U32 length; // length of this entire area ++ U32 major_version; ++ U32 minor_version; ++ U32 rsvd; ++ U64 cpu_counter_mask; ++ struct { ++ U64 processor : 1; // Processor PMU ++ U64 mch_chipset : 1; // MCH Chipset ++ U64 ich_chipset : 1; // ICH Chipset ++ U64 motherboard_time_flag : 1; // Motherboard_Time requested. ++ U64 host_processor_run : 1; ++ // Each processor should manage the MCH counts they see. ++ // Turn off for Gen 4 (NOA) runs. 
++ U64 mmio_noa_registers : 1; // NOA ++ U64 bnb_chipset : 1; // BNB Chipset ++ U64 gmch_chipset : 1; // GMCH Chipset ++ U64 rsvd : 56; ++ } config_flags; ++ CHIPSET_SEGMENT_NODE mch; ++ CHIPSET_SEGMENT_NODE ich; ++ CHIPSET_SEGMENT_NODE mmio; ++ CHIPSET_SEGMENT_NODE bnb; ++ CHIPSET_SEGMENT_NODE gmch; ++}; ++ ++#define CHIPSET_CONFIG_length(chipset) ((chipset)->length) ++#define CHIPSET_CONFIG_major_version(chipset) ((chipset)->major_version) ++#define CHIPSET_CONFIG_minor_version(chipset) ((chipset)->minor_version) ++#define CHIPSET_CONFIG_cpu_counter_mask(chipset) ((chipset)->cpu_counter_mask) ++#define CHIPSET_CONFIG_processor(chipset) ((chipset)->config_flags.processor) ++#define CHIPSET_CONFIG_mch_chipset(chipset) \ ++ ((chipset)->config_flags.mch_chipset) ++#define CHIPSET_CONFIG_ich_chipset(chipset) \ ++ ((chipset)->config_flags.ich_chipset) ++#define CHIPSET_CONFIG_motherboard_time(chipset) \ ++ ((chipset)->config_flags.motherboard_time_flag) ++#define CHIPSET_CONFIG_host_proc_run(chipset) \ ++ ((chipset)->config_flags.host_processor_run) ++#define CHIPSET_CONFIG_noa_chipset(chipset) \ ++ ((chipset)->config_flags.mmio_noa_registers) ++#define CHIPSET_CONFIG_bnb_chipset(chipset) \ ++ ((chipset)->config_flags.bnb_chipset) ++#define CHIPSET_CONFIG_gmch_chipset(chipset) \ ++ ((chipset)->config_flags.gmch_chipset) ++#define CHIPSET_CONFIG_mch(chipset) ((chipset)->mch) ++#define CHIPSET_CONFIG_ich(chipset) ((chipset)->ich) ++#define CHIPSET_CONFIG_noa(chipset) ((chipset)->mmio) ++#define CHIPSET_CONFIG_bnb(chipset) ((chipset)->bnb) ++#define CHIPSET_CONFIG_gmch(chipset) ((chipset)->gmch) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h +new file mode 100644 +index 000000000000..d6889982ada0 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h +@@ -0,0 +1,507 @@ ++/*** ++ * 
------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++ ***/ ++ ++#ifndef _LWPMUDRV_DEFINES_H_ ++#define _LWPMUDRV_DEFINES_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++// ++// Start off with none of the OS'es are defined ++// ++#undef DRV_OS_WINDOWS ++#undef DRV_OS_LINUX ++#undef DRV_OS_SOLARIS ++#undef DRV_OS_MAC ++#undef DRV_OS_ANDROID ++#undef DRV_OS_UNIX ++ ++// ++// Make sure none of the architectures is defined here ++// ++#undef DRV_IA32 ++#undef DRV_EM64T ++ ++// ++// Make sure one (and only one) of the OS'es gets defined here ++// ++// Unfortunately entirex defines _WIN32 so we need to check for linux ++// first. The definition of these flags is one and only one ++// _OS_xxx is allowed to be defined. 
++// ++#if defined(__ANDROID__) ++#define DRV_OS_ANDROID ++#define DRV_OS_UNIX ++#elif defined(__linux__) ++#define DRV_OS_LINUX ++#define DRV_OS_UNIX ++#elif defined(sun) ++#define DRV_OS_SOLARIS ++#define DRV_OS_UNIX ++#elif defined(_WIN32) ++#define DRV_OS_WINDOWS ++#elif defined(__APPLE__) ++#define DRV_OS_MAC ++#define DRV_OS_UNIX ++#elif defined(__FreeBSD__) ++#define DRV_OS_FREEBSD ++#define DRV_OS_UNIX ++#else ++#error "Compiling for an unknown OS" ++#endif ++ ++// ++// Make sure one (and only one) architecture is defined here ++// as well as one (and only one) pointer__ size ++// ++#if defined(_M_IX86) || defined(__i386__) ++#define DRV_IA32 ++#elif defined(_M_AMD64) || defined(__x86_64__) ++#define DRV_EM64T ++#else ++#error "Unknown architecture for compilation" ++#endif ++ ++// ++// Add a well defined definition of compiling for release (free) vs. ++// debug (checked). Once again, don't assume these are the only two values, ++// always have an else clause in case we want to expand this. ++// ++#if defined(DRV_OS_UNIX) ++#define WINAPI ++#endif ++ ++/* ++ * Add OS neutral defines for file processing. 
This is needed in both ++ * the user code and the kernel code for cleanliness ++ */ ++#undef DRV_FILE_DESC ++#undef DRV_INVALID_FILE_DESC_VALUE ++#define DRV_ASSERT assert ++ ++#if defined(DRV_OS_WINDOWS) ++ ++#define DRV_FILE_DESC HANDLE ++#define DRV_INVALID_FILE_DESC_VALUE INVALID_HANDLE_VALUE ++ ++#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ ++ defined(DRV_OS_ANDROID) ++ ++#define DRV_IOCTL_FILE_DESC SIOP ++#define DRV_FILE_DESC SIOP ++#define DRV_INVALID_FILE_DESC_VALUE -1 ++ ++#elif defined(DRV_OS_FREEBSD) ++ ++#define DRV_IOCTL_FILE_DESC S64 ++#define DRV_FILE_DESC S64 ++#define DRV_INVALID_FILE_DESC_VALUE -1 ++ ++#elif defined(DRV_OS_MAC) ++#if defined __LP64__ ++#define DRV_IOCTL_FILE_DESC S64 ++#define DRV_FILE_DESC S64 ++#define DRV_INVALID_FILE_DESC_VALUE (S64)(-1) ++#else ++#define DRV_IOCTL_FILE_DESC S32 ++#define DRV_FILE_DESC S32 ++#define DRV_INVALID_FILE_DESC_VALUE (S32)(-1) ++#endif ++ ++#else ++ ++#error "Compiling for an unknown OS" ++ ++#endif ++ ++#define OUT ++#define IN ++#define INOUT ++ ++// ++// VERIFY_SIZEOF let's you insert a compile-time check that the size of a data ++// type (e.g. a struct) is what you think it should be. Usually it is ++// important to know what the actual size of your struct is, and to make sure ++// it is the same across all platforms. So this will prevent the code from ++// compiling if something happens that you didn't expect, whether it's because ++// you counted wring, or more often because the compiler inserted padding that ++// you don't want. ++// ++// NOTE: 'elem' and 'size' must both be identifier safe, e.g. matching the ++// regular expression /^[0-9a-zA-Z_]$/. ++// ++// Example: ++// typedef struct { void *ptr; int data; } mytype; ++// VERIFY_SIZEOF(mytype, 8); ++// ^-- this is correct on 32-bit platforms, but fails ++// on 64-bit platforms, indicating a possible ++// portability issue. 
++// ++#define VERIFY_SIZEOF(type, size) \ ++ { \ ++ enum { \ ++ sizeof_##type##_eq_##size = 1 / (int)(sizeof(type) == size) \ ++ } \ ++ } ++ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_DLLIMPORT __declspec(dllimport) ++#define DRV_DLLEXPORT __declspec(dllexport) ++#endif ++#if defined(DRV_OS_UNIX) ++#define DRV_DLLIMPORT ++#define DRV_DLLEXPORT ++#endif ++ ++#if defined(DRV_OS_WINDOWS) ++#define FSI64RAW "I64" ++#define DRV_PATH_SEPARATOR "\\" ++#define L_DRV_PATH_SEPARATOR L"\\" ++#endif ++ ++#if defined(DRV_OS_UNIX) ++#define FSI64RAW "ll" ++#define DRV_PATH_SEPARATOR "/" ++#define L_DRV_PATH_SEPARATOR L"/" ++#endif ++ ++#define FSS64 "%" FSI64RAW "d" ++#define FSU64 "%" FSI64RAW "u" ++#define FSX64 "%" FSI64RAW "x" ++ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_RTLD_NOW 0 ++#endif ++#if defined(DRV_OS_UNIX) ++#if defined(DRV_OS_FREEBSD) ++#define DRV_RTLD_NOW 0 ++#else ++#define DRV_RTLD_NOW RTLD_NOW ++#endif ++#endif ++ ++#define DRV_STRLEN (U32)(strlen) ++#define DRV_WCSLEN (U32)(wcslen) ++#define DRV_STRCSPN strcspn ++#define DRV_STRCHR strchr ++#define DRV_STRRCHR strrchr ++#define DRV_WCSRCHR wcsrchr ++ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_STCHARLEN DRV_WCSLEN ++#else ++#define DRV_STCHARLEN DRV_STRLEN ++#endif ++ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_STRCPY strcpy_s ++#define DRV_STRNCPY strncpy_s ++#define DRV_STRICMP _stricmp ++#define DRV_STRNCMP strncmp ++#define DRV_STRNICMP _strnicmp ++#define DRV_STRDUP _strdup ++#define DRV_WCSDUP _wcsdup ++#define DRV_STRCMP strcmp ++#define DRV_WCSCMP wcscmp ++#define DRV_SNPRINTF _snprintf_s ++#define DRV_SNWPRINTF _snwprintf_s ++#define DRV_VSNPRINTF _vsnprintf_s ++#define DRV_SSCANF sscanf_s ++#define DRV_STRCAT strcat_s ++#define DRV_STRNCAT strncat_s ++#define DRV_MEMCPY memcpy_s ++#define DRV_WMEMCPY wmemcpy_s ++#define DRV_STRTOK strtok_s ++#define DRV_STRTOUL strtoul ++#define DRV_STRTOULL _strtoui64 ++#define DRV_STRTOQ _strtoui64 ++#define DRV_FOPEN(fp, name, mode) fopen_s(&(fp), (name), 
(mode)) ++#define DRV_WFOPEN(fp, name, mode) _wfopen_s(&(fp), (name), (mode)) ++#define DRV_FCLOSE(fp) \ ++ { \ ++ if ((fp) != NULL) { \ ++ fclose((fp)); \ ++ } \ ++ } ++#define DRV_WCSCPY wcscpy_s ++#define DRV_WCSNCPY wcsncpy_s ++#define DRV_WCSCAT wcscat_s ++#define DRV_WCSNCAT wcsncat_s ++#define DRV_WCSTOK wcstok_s ++#define DRV_WCSSTR wcsstr ++#define DRV_STRERROR strerror_s ++#define DRV_SPRINTF sprintf_s ++#define DRV_VSPRINTF vsprintf_s ++#define DRV_VSWPRINTF vswprintf_s ++#define DRV_GETENV_S getenv_s ++#define DRV_WGETENV_S wgetenv_s ++#define DRV_PUTENV(name) _putenv(name) ++#define DRV_USTRCMP(X, Y) DRV_WCSCMP(X, Y) ++#define DRV_USTRDUP(X) DRV_WCSDUP(X) ++#define DRV_ACCESS(X) _access_s(X, 4) ++#define DRV_STRSTR strstr ++ ++#define DRV_STCHAR_COPY DRV_WCSNCPY ++ ++#define DRV_GETENV(buf, buf_size, name) _dupenv_s(&(buf), &(buf_size), (name)) ++#define DRV_WGETENV(buf, buf_size, name) _wdupenv_s(&(buf), &(buf_size), (name)) ++#define DRV_SCLOSE(fp) _close(fp) ++#define DRV_WRITE(fp, buf, buf_size) _write(fp, buf, buf_size); ++#define DRV_SOPEN_S(fp, name, oflag, shflag, pmode) \ ++ _sopen_s((fp), (name), (oflag), (shflag), (pmode)) ++#endif ++ ++#if defined(DRV_OS_UNIX) ++/* ++ Note: Many of the following macros have a "size" as the second argument. ++ Generally speaking, this is for compatibility with the _s versions ++ available on Windows. On Linux/Solaris/Mac, it is ignored. ++ On Windows, it is the size of the destination buffer and is used wrt ++ memory checking features available in the C runtime in debug mode. ++ Do not confuse it with the number of bytes to be copied, or such. ++ ++ On Windows, this size should correspond to the number of allocated characters ++ (char or wchar_t) pointed to by the first argument. See MSDN or more details. 
++*/ ++#define DRV_STRICMP strcasecmp ++#define DRV_STRDUP strdup ++#define DRV_STRNDUP strndup ++#define DRV_STRCMP strcmp ++#define DRV_STRNCMP strncmp ++#define DRV_STRSTR strstr ++#define DRV_SNPRINTF(buf, buf_size, length, args...) \ ++ snprintf((buf), (length), ##args) ++#define DRV_SNWPRINTF(buf, buf_size, length, args...) \ ++ snwprintf((buf), (length), ##args) ++#define DRV_VSNPRINTF(buf, buf_size, length, args...) \ ++ vsnprintf((buf), (length), ##args) ++#define DRV_SSCANF sscanf ++#define DRV_STRCPY(dst, dst_size, src) strcpy((dst), (src)) ++#define DRV_STRNCPY(dst, dst_size, src, n) strncpy((dst), (src), (n)) ++#define DRV_STRCAT(dst, dst_size, src) strcat((dst), (src)) ++#define DRV_STRNCAT(dst, dst_size, src, n) strncat((dst), (src), (n)) ++#define DRV_MEMCPY(dst, dst_size, src, n) memcpy((dst), (src), (n)) ++#define DRV_STRTOK(tok, delim, context) strtok((tok), (delim)) ++#define DRV_STRTOUL strtoul ++#define DRV_STRTOULL strtoull ++#define DRV_STRTOL strtol ++#define DRV_FOPEN(fp, name, mode) { (fp) = fopen((name), (mode)); } ++#define DRV_FCLOSE(fp) \ ++ { \ ++ if ((fp) != NULL) { \ ++ fclose((fp)); \ ++ } \ ++ } ++ ++#define DRV_WCSCPY(dst, dst_size, src) wcscpy((dst), (const wchar_t *)(src)) ++#define DRV_WCSNCPY(dst, dst_size, src, count) \ ++ wcsncpy((dst), (const wchar_t *)(src), (count)) ++#define DRV_WCSCAT(dst, dst_size, src) wcscat((dst), (const wchar_t *)(src)) ++#define DRV_WCSTOK(tok, delim, context) \ ++ wcstok((tok), (const wchar_t *)(delim), (context)) ++#define DRV_STRERROR strerror ++#define DRV_SPRINTF(dst, dst_size, args...) sprintf((dst), ##args) ++#define DRV_VSPRINTF(dst, dst_size, length, args...) \ ++ vsprintf((dst), (length), ##args) ++#define DRV_VSWPRINTF(dst, dst_size, length, args...) 
\ ++ vswprintf((dst), (length), ##args) ++#define DRV_GETENV_S(dst, dst_size) getenv(dst) ++#define DRV_WGETENV_S(dst, dst_size) wgetenv(dst) ++#define DRV_PUTENV(name) putenv(name) ++#define DRV_GETENV(buf, buf_size, name) ((buf) = getenv((name))) ++#define DRV_USTRCMP(X, Y) DRV_STRCMP(X, Y) ++#define DRV_USTRDUP(X) DRV_STRDUP(X) ++#define DRV_ACCESS(X) access(X, X_OK) ++ ++#define DRV_STCHAR_COPY DRV_STRNCPY ++#endif ++ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_STRTOK_R(tok, delim, context) strtok_s((tok), (delim), (context)) ++#else ++#define DRV_STRTOK_R(tok, delim, context) strtok_r((tok), (delim), (context)) ++#endif ++ ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) ++#define DRV_STRTOQ strtoq ++#endif ++ ++#if defined(DRV_OS_ANDROID) ++#define DRV_STRTOQ strtol ++#endif ++ ++#if defined(DRV_OS_SOLARIS) ++#define DRV_STRTOQ strtoll ++#endif ++ ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC) ++#define DRV_WCSDUP wcsdup ++#endif ++ ++#if defined(DRV_OS_SOLARIS) ++#define DRV_WCSDUP solaris_wcsdup ++#endif ++ ++#if defined(DRV_OS_ANDROID) ++#define DRV_WCSDUP android_wcsdup ++#endif ++ ++/* ++ * Windows uses wchar_t and linux uses char for strings. ++ * Need an extra level of abstraction to standardize it. ++ */ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_STDUP DRV_WCSDUP ++#define DRV_FORMAT_STRING(x) L##x ++#define DRV_PRINT_STRING(stream, format, ...) \ ++ fwprintf((stream), (format), __VA_ARGS__) ++#else ++#define DRV_STDUP DRV_STRDUP ++#define DRV_FORMAT_STRING(x) x ++#define DRV_PRINT_STRING(stream, format, ...) 
\ ++ fprintf((stream), (format), __VA_ARGS__) ++#endif ++ ++/* ++ * OS return types ++ */ ++#if defined(DRV_OS_UNIX) ++#define OS_STATUS int ++#define OS_SUCCESS 0 ++#if defined(BUILD_DRV_ESX) ++#define OS_ILLEGAL_IOCTL -1 ++#define OS_NO_MEM -2 ++#define OS_FAULT -3 ++#define OS_INVALID -4 ++#define OS_NO_SYSCALL -5 ++#define OS_RESTART_SYSCALL -6 ++#define OS_IN_PROGRESS -7 ++#else ++#define OS_ILLEGAL_IOCTL -ENOTTY ++#define OS_NO_MEM -ENOMEM ++#define OS_FAULT -EFAULT ++#define OS_INVALID -EINVAL ++#define OS_NO_SYSCALL -ENOSYS ++#define OS_RESTART_SYSCALL -ERESTARTSYS ++#define OS_IN_PROGRESS -EALREADY ++#endif ++#endif ++#if defined(DRV_OS_WINDOWS) ++#define OS_STATUS NTSTATUS ++#define OS_SUCCESS STATUS_SUCCESS ++#define OS_ILLEGAL_IOCTL STATUS_UNSUCCESSFUL ++#define OS_NO_MEM STATUS_UNSUCCESSFUL ++#define OS_FAULT STATUS_UNSUCCESSFUL ++#define OS_INVALID STATUS_UNSUCCESSFUL ++#define OS_NO_SYSCALL STATUS_UNSUCCESSFUL ++#define OS_RESTART_SYSCALL STATUS_UNSUCCESSFUL ++#define OS_IN_PROGRESS STATUS_UNSUCCESSFUL ++#endif ++ ++/**************************************************************************** ++ ** Driver State defintions ++ ***************************************************************************/ ++#define DRV_STATE_UNINITIALIZED 0 ++#define DRV_STATE_RESERVED 1 ++#define DRV_STATE_IDLE 2 ++#define DRV_STATE_PAUSED 3 ++#define DRV_STATE_STOPPED 4 ++#define DRV_STATE_RUNNING 5 ++#define DRV_STATE_PAUSING 6 ++#define DRV_STATE_PREPARE_STOP 7 ++#define DRV_STATE_TERMINATING 8 ++ ++#define MATCHING_STATE_BIT(state) ((U32)1 << state) ++#define STATE_BIT_UNINITIALIZED MATCHING_STATE_BIT(DRV_STATE_UNINITIALIZED) ++#define STATE_BIT_RESERVED MATCHING_STATE_BIT(DRV_STATE_RESERVED) ++#define STATE_BIT_IDLE MATCHING_STATE_BIT(DRV_STATE_IDLE) ++#define STATE_BIT_PAUSED MATCHING_STATE_BIT(DRV_STATE_PAUSED) ++#define STATE_BIT_STOPPED MATCHING_STATE_BIT(DRV_STATE_STOPPED) ++#define STATE_BIT_RUNNING MATCHING_STATE_BIT(DRV_STATE_RUNNING) ++#define 
STATE_BIT_PAUSING MATCHING_STATE_BIT(DRV_STATE_PAUSING) ++#define STATE_BIT_PREPARE_STOP MATCHING_STATE_BIT(DRV_STATE_PREPARE_STOP) ++#define STATE_BIT_TERMINATING MATCHING_STATE_BIT(DRV_STATE_TERMINATING) ++#define STATE_BIT_ANY ((U32)-1) ++ ++#define IS_COLLECTING_STATE(state) \ ++ (!!(MATCHING_STATE_BIT(state) & \ ++ (STATE_BIT_RUNNING | STATE_BIT_PAUSING | STATE_BIT_PAUSED))) ++ ++/* ++ * Stop codes ++ */ ++#define DRV_STOP_BASE 0 ++#define DRV_STOP_NORMAL 1 ++#define DRV_STOP_ASYNC 2 ++#define DRV_STOP_CANCEL 3 ++#define SEP_FREE(loc) \ ++ { \ ++ if ((loc)) { \ ++ free(loc); \ ++ loc = NULL; \ ++ } \ ++ } ++ ++#define MAX_EVENTS 256 // Limiting maximum multiplexing events to 256. ++#if defined(DRV_OS_UNIX) ++#define UNREFERENCED_PARAMETER(p) ((p) = (p)) ++#endif ++ ++/* ++ * Global marker names ++ */ ++#define START_MARKER_NAME "SEP_START_MARKER" ++#define PAUSE_MARKER_NAME "SEP_PAUSE_MARKER" ++#define RESUME_MARKER_NAME "SEP_RESUME_MARKER" ++ ++#define DRV_SOC_STRING_LEN (100 + MAX_MARKER_LENGTH) ++ ++/* ++ * Temp path ++ */ ++#define SEP_TMPDIR "SEP_TMP_DIR" ++#if defined(DRV_OS_WINDOWS) ++#define OS_TMPDIR "TEMP" ++#define GET_DEFAULT_TMPDIR(dir, size) \ ++ { \ ++ GetTempPath((U32)size, dir); \ ++ } ++#else ++#define OS_TMPDIR "TMPDIR" ++/* ++ * Unix has default tmp dir ++ */ ++#if defined(DRV_OS_ANDROID) ++#define TEMP_PATH "/data" ++#else ++#define TEMP_PATH "/tmp" ++#endif ++#define GET_DEFAULT_TMPDIR(dir, size) \ ++ { \ ++ DRV_STRCPY((STCHAR *)dir, (U32)size, (STCHAR *)TEMP_PATH); \ ++ } ++#endif ++ ++#define OS_ID_UNKNOWN -1 ++#define OS_ID_NATIVE 0 ++#define OS_ID_VMM 0 ++#define OS_ID_MODEM 1 ++#define OS_ID_ANDROID 2 ++#define OS_ID_SECVM 3 ++#define OS_ID_ACORN (U32)(-1) ++ ++#define PERF_HW_VER4 (5) ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h b/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h +new file mode 100644 +index 000000000000..792ae65191b3 +--- /dev/null ++++ 
b/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h +@@ -0,0 +1,1116 @@ ++ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++ ***/ ++ ++#ifndef _LWPMUDRV_ECB_UTILS_H_ ++#define _LWPMUDRV_ECB_UTILS_H_ ++ ++#if defined(DRV_OS_WINDOWS) ++#pragma warning(disable : 4200) ++#endif ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++// control register types ++#define CCCR 1 // counter configuration control register ++#define ESCR 2 // event selection control register ++#define DATA 4 // collected as snapshot of current value ++#define DATA_RO_DELTA 8 // read-only counter collected as current-previous ++#define DATA_RO_SS 16 ++// read-only counter collected as snapshot of current value ++#define METRICS 32 // hardware metrics ++ ++// event multiplexing modes ++#define EM_DISABLED -1 ++#define EM_TIMER_BASED 0 ++#define EM_EVENT_BASED_PROFILING 1 ++#define EM_TRIGGER_BASED 2 ++ ++// *************************************************************************** ++ ++/*!\struct EVENT_DESC_NODE ++ * \var sample_size - size of buffer in bytes to hold the sample + extras ++ * \var max_gp_events - max number of General Purpose events per EM group ++ * \var pebs_offset - offset in the sample to locate the pebs capture info ++ * \var lbr_offset - offset in the sample to locate the lbr information ++ * \var lbr_num_regs - offset in the sample to locate the number of ++ * lbr register information ++ * \var latency_offset_in_sample - offset in the sample to locate the ++ * latency information ++ * \var 
latency_size_in_sample - size of latency records in the sample ++ * \var latency_size_from_pebs_record - size of the latency data from ++ * pebs record in the sample ++ * \var latency_offset_in_pebs_record - offset in the sample to locate the ++ * latency information in pebs record ++ * \var power_offset_in_sample - offset in the sample to locate the ++ * power information ++ * \var ebc_offset - offset in the sample to locate the ebc count informatio ++ * \var uncore_ebc_offset - offset in the sample to locate the uncore ++ * ebc count information ++ * ++ * \var ro_offset - offset of RO data in the sample ++ * \var ro_count - total number of RO entries (including all of ++ * IEAR/DEAR/BTB/IPEAR) ++ * \var iear_offset - offset into RO data at which IEAR entries begin ++ * \var dear_offset - offset into RO data at which DEAR entries begin ++ * \var btb_offset - offset into RO data at which BTB entries begin ++ * (these use the same PMDs) ++ * \var ipear_offset - offset into RO data at which IPEAR entries begin ++ * (these use the same PMDs) ++ * \var iear_count - number of IEAR entries ++ * \var dear_count - number of DEAR entries ++ * \var btb_count - number of BTB entries ++ * \var ipear_count - number of IPEAR entries ++ * ++ * \var pwr_offset - offset in the sample to locate the pwr count info ++ * \var p_state_offset - offset in the sample to locate the p_state ++ * information (APERF/MPERF) ++ * ++ * \brief Data structure to describe the events and the mode ++ * ++ */ ++ ++typedef struct EVENT_DESC_NODE_S EVENT_DESC_NODE; ++typedef EVENT_DESC_NODE * EVENT_DESC; ++ ++struct EVENT_DESC_NODE_S { ++ U32 sample_size; ++ U32 pebs_offset; ++ U32 pebs_size; ++ U32 lbr_offset; ++ U32 lbr_num_regs; ++ U32 latency_offset_in_sample; ++ U32 latency_size_in_sample; ++ U32 latency_size_from_pebs_record; ++ U32 latency_offset_in_pebs_record; ++ U32 power_offset_in_sample; ++ U32 ebc_offset; ++ U32 uncore_ebc_offset; ++ U32 eventing_ip_offset; ++ U32 hle_offset; ++ U32 
pwr_offset; ++ U32 callstack_offset; ++ U32 callstack_size; ++ U32 p_state_offset; ++ U32 pebs_tsc_offset; ++ U32 perfmetrics_offset; ++ U32 perfmetrics_size; ++ /* ----------ADAPTIVE PEBS FIELDS --------- */ ++ U16 applicable_counters_offset; ++ U16 gpr_info_offset; ++ U16 gpr_info_size; ++ U16 xmm_info_offset; ++ U16 xmm_info_size; ++ U16 lbr_info_size; ++ /*------------------------------------------*/ ++ U32 reserved2; ++ U64 reserved3; ++}; ++ ++// ++// Accessor macros for EVENT_DESC node ++// ++#define EVENT_DESC_sample_size(ec) ((ec)->sample_size) ++#define EVENT_DESC_pebs_offset(ec) ((ec)->pebs_offset) ++#define EVENT_DESC_pebs_size(ec) ((ec)->pebs_size) ++#define EVENT_DESC_lbr_offset(ec) ((ec)->lbr_offset) ++#define EVENT_DESC_lbr_num_regs(ec) ((ec)->lbr_num_regs) ++#define EVENT_DESC_latency_offset_in_sample(ec) ((ec)->latency_offset_in_sample) ++#define EVENT_DESC_latency_size_from_pebs_record(ec) \ ++ ((ec)->latency_size_from_pebs_record) ++#define EVENT_DESC_latency_offset_in_pebs_record(ec) \ ++ ((ec)->latency_offset_in_pebs_record) ++#define EVENT_DESC_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) ++#define EVENT_DESC_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) ++#define EVENT_DESC_ebc_offset(ec) ((ec)->ebc_offset) ++#define EVENT_DESC_uncore_ebc_offset(ec) ((ec)->uncore_ebc_offset) ++#define EVENT_DESC_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) ++#define EVENT_DESC_hle_offset(ec) ((ec)->hle_offset) ++#define EVENT_DESC_pwr_offset(ec) ((ec)->pwr_offset) ++#define EVENT_DESC_callstack_offset(ec) ((ec)->callstack_offset) ++#define EVENT_DESC_callstack_size(ec) ((ec)->callstack_size) ++#define EVENT_DESC_perfmetrics_offset(ec) ((ec)->perfmetrics_offset) ++#define EVENT_DESC_perfmetrics_size(ec) ((ec)->perfmetrics_size) ++#define EVENT_DESC_p_state_offset(ec) ((ec)->p_state_offset) ++#define EVENT_DESC_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) ++#define EVENT_DESC_applicable_counters_offset(ec) \ ++ 
((ec)->applicable_counters_offset) ++#define EVENT_DESC_gpr_info_offset(ec) ((ec)->gpr_info_offset) ++#define EVENT_DESC_gpr_info_size(ec) ((ec)->gpr_info_size) ++#define EVENT_DESC_xmm_info_offset(ec) ((ec)->xmm_info_offset) ++#define EVENT_DESC_xmm_info_size(ec) ((ec)->xmm_info_size) ++#define EVENT_DESC_lbr_info_size(ec) ((ec)->lbr_info_size) ++ ++// *************************************************************************** ++ ++/*!\struct EVENT_CONFIG_NODE ++ * \var num_groups - The number of groups being programmed ++ * \var em_mode - Is EM valid? If so how? ++ * \var em_time_slice - EM valid? time slice in milliseconds ++ * \var sample_size - size of buffer in bytes to hold the sample + extra ++ * \var max_gp_events - Max number of General Purpose events per EM group ++ * \var pebs_offset - offset in the sample to locate the pebs capture ++ * information ++ * \var lbr_offset - offset in the sample to locate the lbr information ++ * \var lbr_num_regs - offset in the sample to locate the lbr information ++ * \var latency_offset_in_sample - offset in the sample to locate the ++ * latency information ++ * \var latency_size_in_sample - size of latency records in sample ++ * \var latency_size_from_pebs_record - offset in sample to locate latency ++ * size from pebs record ++ * \var latency_offset_in_pebs_record - offset in the sample to locate the ++ * latency information in pebs record ++ * \var power_offset_in_sample - offset in the sample to locate the ++ * power information ++ * \var ebc_offset - offset in the sample to locate the ++ * ebc count information ++ * ++ * \var pwr_offset - offset in the sample to locate the pwr count information ++ * \var p_state_offset - offset in the sample to locate the p_state ++ * information (APERF/MPERF) ++ * ++ * \brief Data structure to describe the events and the mode ++ * ++ */ ++ ++typedef struct EVENT_CONFIG_NODE_S EVENT_CONFIG_NODE; ++typedef EVENT_CONFIG_NODE * EVENT_CONFIG; ++ ++struct EVENT_CONFIG_NODE_S { ++ U32 
num_groups; ++ S32 em_mode; ++ S32 em_factor; ++ S32 em_event_num; ++ U32 sample_size; ++ U32 max_gp_events; ++ U32 max_fixed_counters; ++ U32 max_ro_counters; // maximum read-only counters ++ U32 pebs_offset; ++ U32 pebs_size; ++ U32 lbr_offset; ++ U32 lbr_num_regs; ++ U32 latency_offset_in_sample; ++ U32 latency_size_in_sample; ++ U32 latency_size_from_pebs_record; ++ U32 latency_offset_in_pebs_record; ++ U32 power_offset_in_sample; ++ U32 ebc_offset; ++ U32 num_groups_unc; ++ U32 ebc_offset_unc; ++ U32 sample_size_unc; ++ U32 eventing_ip_offset; ++ U32 hle_offset; ++ U32 pwr_offset; ++ U32 callstack_offset; ++ U32 callstack_size; ++ U32 p_state_offset; ++ U32 pebs_tsc_offset; ++ U64 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++// ++// Accessor macros for EVENT_CONFIG node ++// ++#define EVENT_CONFIG_num_groups(ec) ((ec)->num_groups) ++#define EVENT_CONFIG_mode(ec) ((ec)->em_mode) ++#define EVENT_CONFIG_em_factor(ec) ((ec)->em_factor) ++#define EVENT_CONFIG_em_event_num(ec) ((ec)->em_event_num) ++#define EVENT_CONFIG_sample_size(ec) ((ec)->sample_size) ++#define EVENT_CONFIG_max_gp_events(ec) ((ec)->max_gp_events) ++#define EVENT_CONFIG_max_fixed_counters(ec) ((ec)->max_fixed_counters) ++#define EVENT_CONFIG_max_ro_counters(ec) ((ec)->max_ro_counters) ++#define EVENT_CONFIG_pebs_offset(ec) ((ec)->pebs_offset) ++#define EVENT_CONFIG_pebs_size(ec) ((ec)->pebs_size) ++#define EVENT_CONFIG_lbr_offset(ec) ((ec)->lbr_offset) ++#define EVENT_CONFIG_lbr_num_regs(ec) ((ec)->lbr_num_regs) ++#define EVENT_CONFIG_latency_offset_in_sample(ec) \ ++ ((ec)->latency_offset_in_sample) ++#define EVENT_CONFIG_latency_size_from_pebs_record(ec) \ ++ ((ec)->latency_size_from_pebs_record) ++#define EVENT_CONFIG_latency_offset_in_pebs_record(ec) \ ++ ((ec)->latency_offset_in_pebs_record) ++#define EVENT_CONFIG_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) ++#define EVENT_CONFIG_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) ++#define 
EVENT_CONFIG_ebc_offset(ec) ((ec)->ebc_offset) ++#define EVENT_CONFIG_num_groups_unc(ec) ((ec)->num_groups_unc) ++#define EVENT_CONFIG_ebc_offset_unc(ec) ((ec)->ebc_offset_unc) ++#define EVENT_CONFIG_sample_size_unc(ec) ((ec)->sample_size_unc) ++#define EVENT_CONFIG_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) ++#define EVENT_CONFIG_hle_offset(ec) ((ec)->hle_offset) ++#define EVENT_CONFIG_pwr_offset(ec) ((ec)->pwr_offset) ++#define EVENT_CONFIG_callstack_offset(ec) ((ec)->callstack_offset) ++#define EVENT_CONFIG_callstack_size(ec) ((ec)->callstack_size) ++#define EVENT_CONFIG_p_state_offset(ec) ((ec)->p_state_offset) ++#define EVENT_CONFIG_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) ++ ++typedef enum { UNC_MUX = 1, UNC_COUNTER } UNC_SA_PROG_TYPE; ++ ++typedef enum { ++ UNC_PCICFG = 1, ++ UNC_MMIO, ++ UNC_STOP, ++ UNC_MEMORY, ++ UNC_STATUS ++} UNC_SA_CONFIG_TYPE; ++ ++typedef enum { ++ UNC_MCHBAR = 1, ++ UNC_DMIBAR, ++ UNC_PCIEXBAR, ++ UNC_GTTMMADR, ++ UNC_GDXCBAR, ++ UNC_CHAPADR, ++ UNC_SOCPCI, ++ UNC_NPKBAR ++} UNC_SA_BAR_TYPE; ++ ++typedef enum { UNC_OP_READ = 1, UNC_OP_WRITE, UNC_OP_RMW } UNC_SA_OPERATION; ++ ++typedef enum { ++ STATIC_COUNTER = 1, ++ FREERUN_COUNTER, ++ PROG_FREERUN_COUNTER ++} COUNTER_TYPES; ++ ++typedef enum { ++ PACKAGE_EVENT = 1, ++ MODULE_EVENT, ++ THREAD_EVENT, ++ SYSTEM_EVENT ++} EVENT_SCOPE_TYPES; ++ ++typedef enum { ++ DEVICE_CORE = 1, // CORE DEVICE ++ DEVICE_HETERO, ++ DEVICE_UNC_CBO = 10, // UNCORE DEVICES START ++ DEVICE_UNC_HA, ++ DEVICE_UNC_IMC, ++ DEVICE_UNC_IRP, ++ DEVICE_UNC_NCU, ++ DEVICE_UNC_PCU, ++ DEVICE_UNC_POWER, ++ DEVICE_UNC_QPI, ++ DEVICE_UNC_R2PCIE, ++ DEVICE_UNC_R3QPI, ++ DEVICE_UNC_SBOX, ++ DEVICE_UNC_GT, ++ DEVICE_UNC_UBOX, ++ DEVICE_UNC_WBOX, ++ DEVICE_UNC_COREI7, ++ DEVICE_UNC_CHA, ++ DEVICE_UNC_EDC, ++ DEVICE_UNC_IIO, ++ DEVICE_UNC_M2M, ++ DEVICE_UNC_EDRAM, ++ DEVICE_UNC_FPGA_CACHE, ++ DEVICE_UNC_FPGA_FAB, ++ DEVICE_UNC_FPGA_THERMAL, ++ DEVICE_UNC_FPGA_POWER, ++ DEVICE_UNC_FPGA_GB, ++ DEVICE_UNC_TELEMETRY 
= 150, // TELEMETRY DEVICE ++ DEVICE_UNC_CHAP = 200, // CHIPSET DEVICES START ++ DEVICE_UNC_GMCH, ++ DEVICE_UNC_GFX, ++ DEVICE_UNC_SOCPERF = 300, // UNCORE VISA DEVICES START ++ DEVICE_UNC_HFI_RXE = 400, // STL HFI ++ DEVICE_UNC_HFI_TXE, ++} DEVICE_TYPES; ++ ++typedef enum { ++ LBR_ENTRY_TOS = 0, ++ LBR_ENTRY_FROM_IP, ++ LBR_ENTRY_TO_IP, ++ LBR_ENTRY_INFO ++} LBR_ENTRY_TYPE; ++ ++// *************************************************************************** ++ ++/*!\struct EVENT_REG_ID_NODE ++ * \var reg_id - MSR index to r/w ++ * \var pci_id PCI based register and its details to operate on ++ */ ++typedef struct EVENT_REG_ID_NODE_S EVENT_REG_ID_NODE; ++typedef EVENT_REG_ID_NODE * EVENT_REG_ID; ++ ++struct EVENT_REG_ID_NODE_S { ++ U32 reg_id; ++ U32 pci_bus_no; ++ U32 pci_dev_no; ++ U32 pci_func_no; ++ U32 data_size; ++ U32 bar_index; // Points to the index (MMIO_INDEX_LIST) ++ // of bar memory map list to be used in mmio_bar_list of ECB ++ U32 reserved1; ++ U32 reserved2; ++ U64 reserved3; ++}; ++ ++// *************************************************************************** ++ ++typedef enum { ++ PMU_REG_RW_READ = 1, ++ PMU_REG_RW_WRITE, ++ PMU_REG_RW_READ_WRITE, ++} PMU_REG_RW_TYPES; ++ ++typedef enum { ++ PMU_REG_PROG_MSR = 1, ++ PMU_REG_PROG_PCI, ++ PMU_REG_PROG_MMIO, ++} PMU_REG_PROG_TYPES; ++ ++typedef enum { ++ PMU_REG_GLOBAL_CTRL = 1, ++ PMU_REG_UNIT_CTRL, ++ PMU_REG_UNIT_STATUS, ++ PMU_REG_DATA, ++ PMU_REG_EVENT_SELECT, ++ PMU_REG_FILTER, ++ PMU_REG_FIXED_CTRL, ++} PMU_REG_TYPES; ++ ++/*!\struct EVENT_REG_NODE ++ * \var reg_type - register type ++ * \var event_id_index - event ID index ++ * \var event_reg_id - register ID/pci register details ++ * \var desc_id - desc ID ++ * \var flags - flags ++ * \var reg_value - register value ++ * \var max_bits - max bits ++ * \var scheduled - boolean to specify if this event node has ++ * been scheduled already ++ * \var bus_no - PCI bus number ++ * \var dev_no - PCI device number ++ * \var func_no - PCI function 
number ++ * \var counter_type - Event counter type - static/freerun ++ * \var event_scope - Event scope - package/module/thread ++ * \var reg_prog_type - Register Programming type ++ * \var reg_rw_type - Register Read/Write type ++ * \var reg_order - Register order in the programming sequence ++ * \var ++ * \brief Data structure to describe the event registers ++ * ++ */ ++ ++typedef struct EVENT_REG_NODE_S EVENT_REG_NODE; ++typedef EVENT_REG_NODE * EVENT_REG; ++ ++struct EVENT_REG_NODE_S { ++ U8 reg_type; ++ U8 unit_id; ++ U16 event_id_index; ++ U16 counter_event_offset; ++ U16 reserved1; ++ EVENT_REG_ID_NODE event_reg_id; ++ U64 reg_value; ++ U16 desc_id; ++ U16 flags; ++ U32 reserved2; ++ U64 max_bits; ++ U8 scheduled; ++ S8 secondary_pci_offset_shift; ++ U16 secondary_pci_offset_offset; // offset of the offset... ++ U32 counter_type; ++ U32 event_scope; ++ U8 reg_prog_type; ++ U8 reg_rw_type; ++ U8 reg_order; ++ U8 bit_position; ++ U64 secondary_pci_offset_mask; ++ U32 core_event_id; ++ U32 uncore_buffer_offset_in_package; ++ U32 uncore_buffer_offset_in_system; ++ U32 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++ U64 reserved6; ++}; ++ ++// ++// Accessor macros for EVENT_REG node ++// Note: the flags field is not directly addressible to prevent hackery ++// ++#define EVENT_REG_reg_type(x, i) ((x)[(i)].reg_type) ++#define EVENT_REG_event_id_index(x, i) ((x)[(i)].event_id_index) ++#define EVENT_REG_unit_id(x, i) ((x)[(i)].unit_id) ++#define EVENT_REG_counter_event_offset(x, i) ((x)[(i)].counter_event_offset) ++#define EVENT_REG_reg_id(x, i) ((x)[(i)].event_reg_id.reg_id) ++#define EVENT_REG_bus_no(x, i) ((x)[(i)].event_reg_id.pci_bus_no) ++#define EVENT_REG_dev_no(x, i) ((x)[(i)].event_reg_id.pci_dev_no) ++#define EVENT_REG_func_no(x, i) ((x)[(i)].event_reg_id.pci_func_no) ++#define EVENT_REG_offset(x, i) \ ++ ((x)[(i)].event_reg_id.reg_id) // points to the reg_id ++#define EVENT_REG_data_size(x, i) ((x)[(i)].event_reg_id.data_size) ++#define 
EVENT_REG_desc_id(x, i) ((x)[(i)].desc_id) ++#define EVENT_REG_flags(x, i) ((x)[(i)].flags) ++#define EVENT_REG_reg_value(x, i) ((x)[(i)].reg_value) ++#define EVENT_REG_max_bits(x, i) ((x)[(i)].max_bits) ++#define EVENT_REG_scheduled(x, i) ((x)[(i)].scheduled) ++#define EVENT_REG_secondary_pci_offset_shift(x, i) \ ++ ((x)[(i)].secondary_pci_offset_shift) ++#define EVENT_REG_secondary_pci_offset_offset(x, i) \ ++ ((x)[(i)].secondary_pci_offset_offset) ++#define EVENT_REG_secondary_pci_offset_mask(x, i) \ ++ ((x)[(i)].secondary_pci_offset_mask) ++ ++#define EVENT_REG_counter_type(x, i) ((x)[(i)].counter_type) ++#define EVENT_REG_event_scope(x, i) ((x)[(i)].event_scope) ++#define EVENT_REG_reg_prog_type(x, i) ((x)[(i)].reg_prog_type) ++#define EVENT_REG_reg_rw_type(x, i) ((x)[(i)].reg_rw_type) ++#define EVENT_REG_reg_order(x, i) ((x)[(i)].reg_order) ++#define EVENT_REG_bit_position(x, i) ((x)[(i)].bit_position) ++ ++#define EVENT_REG_core_event_id(x, i) ((x)[(i)].core_event_id) ++#define EVENT_REG_uncore_buffer_offset_in_package(x, i) \ ++ ((x)[(i)].uncore_buffer_offset_in_package) ++#define EVENT_REG_uncore_buffer_offset_in_system(x, i) \ ++ ((x)[(i)].uncore_buffer_offset_in_system) ++ ++// ++// Config bits ++// ++#define EVENT_REG_precise_bit 0x00000001 ++#define EVENT_REG_global_bit 0x00000002 ++#define EVENT_REG_uncore_bit 0x00000004 ++#define EVENT_REG_uncore_q_rst_bit 0x00000008 ++#define EVENT_REG_latency_bit 0x00000010 ++#define EVENT_REG_is_gp_reg_bit 0x00000020 ++#define EVENT_REG_clean_up_bit 0x00000040 ++#define EVENT_REG_em_trigger_bit 0x00000080 ++#define EVENT_REG_lbr_value_bit 0x00000100 ++#define EVENT_REG_fixed_reg_bit 0x00000200 ++#define EVENT_REG_multi_pkg_evt_bit 0x00001000 ++#define EVENT_REG_branch_evt_bit 0x00002000 ++ ++// ++// Accessor macros for config bits ++// ++#define EVENT_REG_precise_get(x, i) ((x)[(i)].flags & EVENT_REG_precise_bit) ++#define EVENT_REG_precise_set(x, i) ((x)[(i)].flags |= EVENT_REG_precise_bit) ++#define 
EVENT_REG_precise_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_precise_bit) ++ ++#define EVENT_REG_global_get(x, i) ((x)[(i)].flags & EVENT_REG_global_bit) ++#define EVENT_REG_global_set(x, i) ((x)[(i)].flags |= EVENT_REG_global_bit) ++#define EVENT_REG_global_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_global_bit) ++ ++#define EVENT_REG_uncore_get(x, i) ((x)[(i)].flags & EVENT_REG_uncore_bit) ++#define EVENT_REG_uncore_set(x, i) ((x)[(i)].flags |= EVENT_REG_uncore_bit) ++#define EVENT_REG_uncore_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_uncore_bit) ++ ++#define EVENT_REG_uncore_q_rst_get(x, i) \ ++ ((x)[(i)].flags & EVENT_REG_uncore_q_rst_bit) ++#define EVENT_REG_uncore_q_rst_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_uncore_q_rst_bit) ++#define EVENT_REG_uncore_q_rst_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_uncore_q_rst_bit) ++ ++#define EVENT_REG_latency_get(x, i) ((x)[(i)].flags & EVENT_REG_latency_bit) ++#define EVENT_REG_latency_set(x, i) ((x)[(i)].flags |= EVENT_REG_latency_bit) ++#define EVENT_REG_latency_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_latency_bit) ++ ++#define EVENT_REG_is_gp_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_is_gp_reg_bit) ++#define EVENT_REG_is_gp_reg_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_is_gp_reg_bit) ++#define EVENT_REG_is_gp_reg_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_is_gp_reg_bit) ++ ++#define EVENT_REG_lbr_value_get(x, i) ((x)[(i)].flags & EVENT_REG_lbr_value_bit) ++#define EVENT_REG_lbr_value_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_lbr_value_bit) ++#define EVENT_REG_lbr_value_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_lbr_value_bit) ++ ++#define EVENT_REG_fixed_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_fixed_reg_bit) ++#define EVENT_REG_fixed_reg_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_fixed_reg_bit) ++#define EVENT_REG_fixed_reg_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_fixed_reg_bit) ++ ++#define EVENT_REG_multi_pkg_evt_bit_get(x, i) \ ++ ((x)[(i)].flags & EVENT_REG_multi_pkg_evt_bit) ++#define 
EVENT_REG_multi_pkg_evt_bit_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_multi_pkg_evt_bit) ++#define EVENT_REG_multi_pkg_evt_bit_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_multi_pkg_evt_bit) ++ ++#define EVENT_REG_clean_up_get(x, i) ((x)[(i)].flags & EVENT_REG_clean_up_bit) ++#define EVENT_REG_clean_up_set(x, i) ((x)[(i)].flags |= EVENT_REG_clean_up_bit) ++#define EVENT_REG_clean_up_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_clean_up_bit) ++ ++#define EVENT_REG_em_trigger_get(x, i) \ ++ ((x)[(i)].flags & EVENT_REG_em_trigger_bit) ++#define EVENT_REG_em_trigger_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_em_trigger_bit) ++#define EVENT_REG_em_trigger_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_em_trigger_bit) ++ ++#define EVENT_REG_branch_evt_get(x, i) \ ++ ((x)[(i)].flags & EVENT_REG_branch_evt_bit) ++#define EVENT_REG_branch_evt_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_branch_evt_bit) ++#define EVENT_REG_branch_evt_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_branch_evt_bit) ++ ++// *************************************************************************** ++ ++/*!\struct DRV_PCI_DEVICE_ENTRY_NODE_S ++ * \var bus_no - PCI bus no to read ++ * \var dev_no - PCI device no to read ++ * \var func_no PCI device no to read ++ * \var bar_offset BASE Address Register offset of the PCI based PMU ++ * \var bit_offset Bit offset of the same ++ * \var size size of read/write ++ * \var bar_address the actual BAR present ++ * \var enable_offset Offset info to enable/disable ++ * \var enabled Status of enable/disable ++ * \brief Data structure to describe the PCI Device ++ * ++ */ ++ ++typedef struct DRV_PCI_DEVICE_ENTRY_NODE_S DRV_PCI_DEVICE_ENTRY_NODE; ++typedef DRV_PCI_DEVICE_ENTRY_NODE * DRV_PCI_DEVICE_ENTRY; ++ ++struct DRV_PCI_DEVICE_ENTRY_NODE_S { ++ U32 bus_no; ++ U32 dev_no; ++ U32 func_no; ++ U32 bar_offset; ++ U64 bar_mask; ++ U32 bit_offset; ++ U32 size; ++ U64 bar_address; ++ U32 enable_offset; ++ U32 enabled; ++ U32 base_offset_for_mmio; ++ U32 
operation; ++ U32 bar_name; ++ U32 prog_type; ++ U32 config_type; ++ S8 bar_shift; // positive shifts right, negative shifts left ++ U8 reserved0; ++ U16 reserved1; ++ U64 value; ++ U64 mask; ++ U64 virtual_address; ++ U32 port_id; ++ U32 op_code; ++ U32 device_id; ++ U16 bar_num; ++ U16 feature_id; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++// ++// Accessor macros for DRV_PCI_DEVICE_NODE node ++// ++#define DRV_PCI_DEVICE_ENTRY_bus_no(x) ((x)->bus_no) ++#define DRV_PCI_DEVICE_ENTRY_dev_no(x) ((x)->dev_no) ++#define DRV_PCI_DEVICE_ENTRY_func_no(x) ((x)->func_no) ++#define DRV_PCI_DEVICE_ENTRY_bar_offset(x) ((x)->bar_offset) ++#define DRV_PCI_DEVICE_ENTRY_bar_mask(x) ((x)->bar_mask) ++#define DRV_PCI_DEVICE_ENTRY_bit_offset(x) ((x)->bit_offset) ++#define DRV_PCI_DEVICE_ENTRY_size(x) ((x)->size) ++#define DRV_PCI_DEVICE_ENTRY_bar_address(x) ((x)->bar_address) ++#define DRV_PCI_DEVICE_ENTRY_enable_offset(x) ((x)->enable_offset) ++#define DRV_PCI_DEVICE_ENTRY_enable(x) ((x)->enabled) ++#define DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(x) ((x)->base_offset_for_mmio) ++#define DRV_PCI_DEVICE_ENTRY_operation(x) ((x)->operation) ++#define DRV_PCI_DEVICE_ENTRY_bar_name(x) ((x)->bar_name) ++#define DRV_PCI_DEVICE_ENTRY_prog_type(x) ((x)->prog_type) ++#define DRV_PCI_DEVICE_ENTRY_config_type(x) ((x)->config_type) ++#define DRV_PCI_DEVICE_ENTRY_bar_shift(x) ((x)->bar_shift) ++#define DRV_PCI_DEVICE_ENTRY_value(x) ((x)->value) ++#define DRV_PCI_DEVICE_ENTRY_mask(x) ((x)->mask) ++#define DRV_PCI_DEVICE_ENTRY_virtual_address(x) ((x)->virtual_address) ++#define DRV_PCI_DEVICE_ENTRY_port_id(x) ((x)->port_id) ++#define DRV_PCI_DEVICE_ENTRY_op_code(x) ((x)->op_code) ++#define DRV_PCI_DEVICE_ENTRY_device_id(x) ((x)->device_id) ++#define DRV_PCI_DEVICE_ENTRY_bar_num(x) ((x)->bar_num) ++#define DRV_PCI_DEVICE_ENTRY_feature_id(x) ((x)->feature_id) ++ ++// *************************************************************************** ++typedef enum { ++ 
PMU_OPERATION_INITIALIZE = 0, ++ PMU_OPERATION_WRITE, ++ PMU_OPERATION_ENABLE, ++ PMU_OPERATION_DISABLE, ++ PMU_OPERATION_READ, ++ PMU_OPERATION_CLEANUP, ++ PMU_OPERATION_READ_LBRS, ++ PMU_OPERATION_GLOBAL_REGS, ++ PMU_OPERATION_CTRL_GP, ++ PMU_OPERATION_DATA_FIXED, ++ PMU_OPERATION_DATA_GP, ++ PMU_OPERATION_OCR, ++ PMU_OPERATION_HW_ERRATA, ++ PMU_OPERATION_CHECK_OVERFLOW_GP_ERRATA, ++ PMU_OPERATION_CHECK_OVERFLOW_ERRATA, ++ PMU_OPERATION_ALL_REG, ++ PMU_OPERATION_DATA_ALL, ++ PMU_OPERATION_GLOBAL_STATUS, ++ PMU_OPERATION_METRICS, ++} PMU_OPERATION_TYPES; ++#define MAX_OPERATION_TYPES 32 ++ ++/*!\struct PMU_OPERATIONS_NODE ++ * \var operation_type - Type of operation from enumeration PMU_OPERATION_TYPES ++ * \var register_start - Start index of the registers for a specific operation ++ * \var register_len - Number of registers for a specific operation ++ * ++ * \brief ++ * Structure for defining start and end indices in the ECB entries array for ++ * each type of operation performed in the driver ++ * initialize, write, read, enable, disable, etc. 
++ */ ++typedef struct PMU_OPERATIONS_NODE_S PMU_OPERATIONS_NODE; ++typedef PMU_OPERATIONS_NODE * PMU_OPERATIONS; ++struct PMU_OPERATIONS_NODE_S { ++ U32 operation_type; ++ U32 register_start; ++ U32 register_len; ++ U32 reserved1; ++ U32 reserved2; ++ U32 reserved3; ++}; ++#define PMU_OPERATIONS_operation_type(x) ((x)->operation_type) ++#define PMU_OPERATIONS_register_start(x) ((x)->register_start) ++#define PMU_OPERATIONS_register_len(x) ((x)->register_len) ++#define PMU_OPER_operation_type(x, i) ((x)[(i)].operation_type) ++#define PMU_OPER_register_start(x, i) ((x)[(i)].register_start) ++#define PMU_OPER_register_len(x, i) ((x)[(i)].register_len) ++ ++typedef enum { ++ ECB_MMIO_BAR1 = 1, ++ ECB_MMIO_BAR2 = 2, ++ ECB_MMIO_BAR3 = 3, ++ ECB_MMIO_BAR4 = 4, ++ ECB_MMIO_BAR5 = 5, ++ ECB_MMIO_BAR6 = 6, ++ ECB_MMIO_BAR7 = 7, ++ ECB_MMIO_BAR8 = 8, ++} MMIO_INDEX_LIST; ++#define MAX_MMIO_BARS 8 ++ ++/*!\struct MMIO_BAR_INFO_NODE ++ */ ++typedef struct MMIO_BAR_INFO_NODE_S MMIO_BAR_INFO_NODE; ++typedef MMIO_BAR_INFO_NODE * MMIO_BAR_INFO; ++ ++struct MMIO_BAR_INFO_NODE_S { ++ U32 bus_no; ++ U32 dev_no; ++ U32 func_no; ++ U32 offset; ++ U32 addr_size; ++ U32 map_size; ++ S8 bar_shift; ++ U8 reserved1; ++ U16 reserved2; ++ U32 reserved3; ++ U32 reserved4; ++ U32 reserved5; ++ U64 bar_mask; ++ U64 base_mmio_offset; ++ U64 physical_address; ++ U64 virtual_address; ++ U64 reserved6; ++ U64 reserved7; ++}; ++ ++/*!\struct ECB_NODE_S ++ * \var num_entries - Total number of entries in "entries". ++ * \var group_id - Group ID. ++ * \var num_events - Number of events in this group. ++ * \var cccr_start - Starting index of counter configuration control ++ * registers in "entries". ++ * \var cccr_pop - Number of counter configuration control ++ * registers in "entries". ++ * \var escr_start - Starting index of event selection control ++ * registers in "entries". ++ * \var escr_pop - Number of event selection control registers ++ * in "entries". 
++ * \var data_start - Starting index of data registers in "entries". ++ * \var data_pop - Number of data registers in "entries". ++ * \var pcidev_entry_node PCI device details for one device ++ * \var entries - . All the register nodes required for programming ++ * ++ * \brief ++ */ ++ ++typedef struct ECB_NODE_S ECB_NODE; ++typedef ECB_NODE * ECB; ++ ++struct ECB_NODE_S { ++ U8 version; ++ U8 reserved1; ++ U16 reserved2; ++ U32 num_entries; ++ U32 group_id; ++ U32 num_events; ++ U32 cccr_start; ++ U32 cccr_pop; ++ U32 escr_start; ++ U32 escr_pop; ++ U32 data_start; ++ U32 data_pop; ++ U16 flags; ++ U8 pmu_timer_interval; ++ U8 reserved3; ++ U32 size_of_allocation; ++ U32 group_offset; ++ U32 reserved4; ++ DRV_PCI_DEVICE_ENTRY_NODE pcidev_entry_node; ++ U32 num_pci_devices; ++ U32 pcidev_list_offset; ++ DRV_PCI_DEVICE_ENTRY pcidev_entry_list; ++ U32 device_type; ++ U32 dev_node; ++ PMU_OPERATIONS_NODE operations[MAX_OPERATION_TYPES]; ++ U32 descriptor_id; ++ U32 reserved5; ++ U32 metric_start; ++ U32 metric_pop; ++ MMIO_BAR_INFO_NODE mmio_bar_list[MAX_MMIO_BARS]; ++ U64 reserved6; ++ U64 reserved7; ++ U64 reserved8; ++ EVENT_REG_NODE entries[]; ++}; ++ ++// ++// Accessor macros for ECB node ++// ++#define ECB_version(x) ((x)->version) ++#define ECB_num_entries(x) ((x)->num_entries) ++#define ECB_group_id(x) ((x)->group_id) ++#define ECB_num_events(x) ((x)->num_events) ++#define ECB_cccr_start(x) ((x)->cccr_start) ++#define ECB_cccr_pop(x) ((x)->cccr_pop) ++#define ECB_escr_start(x) ((x)->escr_start) ++#define ECB_escr_pop(x) ((x)->escr_pop) ++#define ECB_data_start(x) ((x)->data_start) ++#define ECB_data_pop(x) ((x)->data_pop) ++#define ECB_metric_start(x) ((x)->metric_start) ++#define ECB_metric_pop(x) ((x)->metric_pop) ++#define ECB_pcidev_entry_node(x) ((x)->pcidev_entry_node) ++#define ECB_num_pci_devices(x) ((x)->num_pci_devices) ++#define ECB_pcidev_list_offset(x) ((x)->pcidev_list_offset) ++#define ECB_pcidev_entry_list(x) ((x)->pcidev_entry_list) ++#define 
ECB_flags(x) ((x)->flags) ++#define ECB_pmu_timer_interval(x) ((x)->pmu_timer_interval) ++#define ECB_size_of_allocation(x) ((x)->size_of_allocation) ++#define ECB_group_offset(x) ((x)->group_offset) ++#define ECB_device_type(x) ((x)->device_type) ++#define ECB_dev_node(x) ((x)->dev_node) ++#define ECB_operations(x) ((x)->operations) ++#define ECB_descriptor_id(x) ((x)->descriptor_id) ++#define ECB_entries(x) ((x)->entries) ++ ++// for flag bit field ++#define ECB_direct2core_bit 0x0001 ++#define ECB_bl_bypass_bit 0x0002 ++#define ECB_pci_id_offset_bit 0x0003 ++#define ECB_pcu_ccst_debug 0x0004 ++ ++#define ECB_VERSION 2 ++ ++#define ECB_CONSTRUCT(x, num_entries, group_id, cccr_start, escr_start, \ ++ data_start, size_of_allocation) \ ++ { \ ++ ECB_num_entries((x)) = (num_entries); \ ++ ECB_group_id((x)) = (group_id); \ ++ ECB_cccr_start((x)) = (cccr_start); \ ++ ECB_cccr_pop((x)) = 0; \ ++ ECB_escr_start((x)) = (escr_start); \ ++ ECB_escr_pop((x)) = 0; \ ++ ECB_data_start((x)) = (data_start); \ ++ ECB_data_pop((x)) = 0; \ ++ ECB_metric_start((x)) = 0; \ ++ ECB_metric_pop((x)) = 0; \ ++ ECB_num_pci_devices((x)) = 0; \ ++ ECB_version((x)) = ECB_VERSION; \ ++ ECB_size_of_allocation((x)) = (size_of_allocation); \ ++ } ++ ++#define ECB_CONSTRUCT2(x, num_entries, group_id, size_of_allocation) \ ++ { \ ++ ECB_num_entries((x)) = (num_entries); \ ++ ECB_group_id((x)) = (group_id); \ ++ ECB_num_pci_devices((x)) = 0; \ ++ ECB_version((x)) = ECB_VERSION; \ ++ ECB_size_of_allocation((x)) = (size_of_allocation); \ ++ } ++ ++#define ECB_CONSTRUCT1(x, num_entries, group_id, cccr_start, escr_start, \ ++ data_start, num_pci_devices, size_of_allocation) \ ++ { \ ++ ECB_num_entries((x)) = (num_entries); \ ++ ECB_group_id((x)) = (group_id); \ ++ ECB_cccr_start((x)) = (cccr_start); \ ++ ECB_cccr_pop((x)) = 0; \ ++ ECB_escr_start((x)) = (escr_start); \ ++ ECB_escr_pop((x)) = 0; \ ++ ECB_data_start((x)) = (data_start); \ ++ ECB_data_pop((x)) = 0; \ ++ ECB_metric_start((x)) = 0; \ ++ 
ECB_metric_pop((x)) = 0; \ ++ ECB_num_pci_devices((x)) = (num_pci_devices); \ ++ ECB_version((x)) = ECB_VERSION; \ ++ ECB_size_of_allocation((x)) = (size_of_allocation); \ ++ } ++ ++ ++// ++// Accessor macros for ECB node entries ++// ++#define ECB_entries_reg_type(x, i) EVENT_REG_reg_type((ECB_entries(x)), (i)) ++#define ECB_entries_event_id_index(x, i) \ ++ EVENT_REG_event_id_index((ECB_entries(x)), (i)) ++#define ECB_entries_unit_id(x, i) EVENT_REG_unit_id((ECB_entries(x)), (i)) ++#define ECB_entries_counter_event_offset(x, i) \ ++ EVENT_REG_counter_event_offset((ECB_entries(x)), (i)) ++#define ECB_entries_reg_id(x, i) EVENT_REG_reg_id((ECB_entries(x)), (i)) ++#define ECB_entries_reg_prog_type(x, i) \ ++ EVENT_REG_reg_prog_type((ECB_entries(x)), (i)) ++#define ECB_entries_reg_offset(x, i) EVENT_REG_offset((ECB_entries(x)), (i)) ++#define ECB_entries_reg_data_size(x, i) \ ++ EVENT_REG_data_size((ECB_entries(x)), (i)) ++#define ECB_entries_desc_id(x, i) EVENT_REG_desc_id((ECB_entries(x)), i) ++#define ECB_entries_flags(x, i) EVENT_REG_flags((ECB_entries(x)), i) ++#define ECB_entries_reg_order(x, i) EVENT_REG_reg_order((ECB_entries(x)), i) ++#define ECB_entries_reg_value(x, i) EVENT_REG_reg_value((ECB_entries(x)), (i)) ++#define ECB_entries_max_bits(x, i) EVENT_REG_max_bits((ECB_entries(x)), (i)) ++#define ECB_entries_scheduled(x, i) EVENT_REG_scheduled((ECB_entries(x)), (i)) ++#define ECB_entries_counter_event_offset(x, i) \ ++ EVENT_REG_counter_event_offset((ECB_entries(x)), (i)) ++#define ECB_entries_bit_position(x, i) \ ++ EVENT_REG_bit_position((ECB_entries(x)), (i)) ++// PCI config-specific fields ++#define ECB_entries_bus_no(x, i) EVENT_REG_bus_no((ECB_entries(x)), (i)) ++#define ECB_entries_dev_no(x, i) EVENT_REG_dev_no((ECB_entries(x)), (i)) ++#define ECB_entries_func_no(x, i) EVENT_REG_func_no((ECB_entries(x)), (i)) ++#define ECB_entries_counter_type(x, i) \ ++ EVENT_REG_counter_type((ECB_entries(x)), (i)) ++#define ECB_entries_event_scope(x, i) \ ++ 
EVENT_REG_event_scope((ECB_entries(x)), (i)) ++#define ECB_entries_precise_get(x, i) \ ++ EVENT_REG_precise_get((ECB_entries(x)), (i)) ++#define ECB_entries_global_get(x, i) EVENT_REG_global_get((ECB_entries(x)), (i)) ++#define ECB_entries_uncore_get(x, i) EVENT_REG_uncore_get((ECB_entries(x)), (i)) ++#define ECB_entries_uncore_q_rst_get(x, i) \ ++ EVENT_REG_uncore_q_rst_get((ECB_entries(x)), (i)) ++#define ECB_entries_is_gp_reg_get(x, i) \ ++ EVENT_REG_is_gp_reg_get((ECB_entries(x)), (i)) ++#define ECB_entries_lbr_value_get(x, i) \ ++ EVENT_REG_lbr_value_get((ECB_entries(x)), (i)) ++#define ECB_entries_fixed_reg_get(x, i) \ ++ EVENT_REG_fixed_reg_get((ECB_entries(x)), (i)) ++#define ECB_entries_is_multi_pkg_bit_set(x, i) \ ++ EVENT_REG_multi_pkg_evt_bit_get((ECB_entries(x)), (i)) ++#define ECB_entries_clean_up_get(x, i) \ ++ EVENT_REG_clean_up_get((ECB_entries(x)), (i)) ++#define ECB_entries_em_trigger_get(x, i) \ ++ EVENT_REG_em_trigger_get((ECB_entries(x)), (i)) ++#define ECB_entries_branch_evt_get(x, i) \ ++ EVENT_REG_branch_evt_get((ECB_entries(x)), (i)) ++#define ECB_entries_reg_rw_type(x, i) \ ++ EVENT_REG_reg_rw_type((ECB_entries(x)), (i)) ++#define ECB_entries_secondary_pci_offset_offset(x, i) \ ++ EVENT_REG_secondary_pci_offset_offset((ECB_entries(x)), (i)) ++#define ECB_entries_secondary_pci_offset_shift(x, i) \ ++ EVENT_REG_secondary_pci_offset_shift((ECB_entries(x)), (i)) ++#define ECB_entries_secondary_pci_offset_mask(x, i) \ ++ EVENT_REG_secondary_pci_offset_mask((ECB_entries(x)), (i)) ++#define ECB_operations_operation_type(x, i) \ ++ PMU_OPER_operation_type((ECB_operations(x)), (i)) ++#define ECB_operations_register_start(x, i) \ ++ PMU_OPER_register_start((ECB_operations(x)), (i)) ++#define ECB_operations_register_len(x, i) \ ++ PMU_OPER_register_len((ECB_operations(x)), (i)) ++ ++#define ECB_entries_core_event_id(x, i) \ ++ EVENT_REG_core_event_id((ECB_entries(x)), (i)) ++#define ECB_entries_uncore_buffer_offset_in_package(x, i) \ ++ 
EVENT_REG_uncore_buffer_offset_in_package((ECB_entries(x)), (i)) ++#define ECB_entries_uncore_buffer_offset_in_system(x, i) \ ++ EVENT_REG_uncore_buffer_offset_in_system((ECB_entries(x)), (i)) ++ ++#define ECB_SET_OPERATIONS(x, operation_type, start, len) \ ++ { \ ++ ECB_operations_operation_type(x, operation_type) \ ++ = operation_type; \ ++ ECB_operations_register_start(x, operation_type) = start; \ ++ ECB_operations_register_len(x, operation_type) = len; \ ++ } ++ ++ ++// *************************************************************************** ++ ++/*!\struct LBR_ENTRY_NODE_S ++ * \var etype TOS = 0; FROM = 1; TO = 2 ++ * \var type_index ++ * \var reg_id ++ */ ++ ++typedef struct LBR_ENTRY_NODE_S LBR_ENTRY_NODE; ++typedef LBR_ENTRY_NODE * LBR_ENTRY; ++ ++struct LBR_ENTRY_NODE_S { ++ U16 etype; ++ U16 type_index; ++ U32 reg_id; ++}; ++ ++// ++// Accessor macros for LBR entries ++// ++#define LBR_ENTRY_NODE_etype(lentry) ((lentry).etype) ++#define LBR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) ++#define LBR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct LBR_NODE_S ++ * \var num_entries - The number of entries ++ * \var entries - The entries in the list ++ * ++ * \brief Data structure to describe the LBR registers that need to be read ++ * ++ */ ++ ++typedef struct LBR_NODE_S LBR_NODE; ++typedef LBR_NODE * LBR; ++ ++struct LBR_NODE_S { ++ U32 size; ++ U32 num_entries; ++ LBR_ENTRY_NODE entries[]; ++}; ++ ++// ++// Accessor macros for LBR node ++// ++#define LBR_size(lbr) ((lbr)->size) ++#define LBR_num_entries(lbr) ((lbr)->num_entries) ++#define LBR_entries_etype(lbr, idx) ((lbr)->entries[idx].etype) ++#define LBR_entries_type_index(lbr, idx) ((lbr)->entries[idx].type_index) ++#define LBR_entries_reg_id(lbr, idx) ((lbr)->entries[idx].reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct PWR_ENTRY_NODE_S 
++ * \var etype none as yet ++ * \var type_index ++ * \var reg_id ++ */ ++ ++typedef struct PWR_ENTRY_NODE_S PWR_ENTRY_NODE; ++typedef PWR_ENTRY_NODE * PWR_ENTRY; ++ ++struct PWR_ENTRY_NODE_S { ++ U16 etype; ++ U16 type_index; ++ U32 reg_id; ++}; ++ ++// ++// Accessor macros for PWR entries ++// ++#define PWR_ENTRY_NODE_etype(lentry) ((lentry).etype) ++#define PWR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) ++#define PWR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct PWR_NODE_S ++ * \var num_entries - The number of entries ++ * \var entries - The entries in the list ++ * ++ * \brief Data structure to describe the PWR registers that need to be read ++ * ++ */ ++ ++typedef struct PWR_NODE_S PWR_NODE; ++typedef PWR_NODE * PWR; ++ ++struct PWR_NODE_S { ++ U32 size; ++ U32 num_entries; ++ PWR_ENTRY_NODE entries[]; ++}; ++ ++// ++// Accessor macros for PWR node ++// ++#define PWR_size(lbr) ((lbr)->size) ++#define PWR_num_entries(lbr) ((lbr)->num_entries) ++#define PWR_entries_etype(lbr, idx) ((lbr)->entries[idx].etype) ++#define PWR_entries_type_index(lbr, idx) ((lbr)->entries[idx].type_index) ++#define PWR_entries_reg_id(lbr, idx) ((lbr)->entries[idx].reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct RO_ENTRY_NODE_S ++ * \var type - DEAR, IEAR, BTB. ++ */ ++ ++typedef struct RO_ENTRY_NODE_S RO_ENTRY_NODE; ++typedef RO_ENTRY_NODE * RO_ENTRY; ++ ++struct RO_ENTRY_NODE_S { ++ U32 reg_id; ++}; ++ ++// ++// Accessor macros for RO entries ++// ++#define RO_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct RO_NODE_S ++ * \var size - The total size including header and entries. ++ * \var num_entries - The number of entries. ++ * \var entries - The entries in the list. 
++ * ++ * \brief Data structure to describe the RO registers that need to be read. ++ * ++ */ ++ ++typedef struct RO_NODE_S RO_NODE; ++typedef RO_NODE * RO; ++ ++struct RO_NODE_S { ++ U32 size; ++ U32 num_entries; ++ RO_ENTRY_NODE entries[]; ++}; ++ ++// ++// Accessor macros for RO node ++// ++#define RO_size(ro) ((ro)->size) ++#define RO_num_entries(ro) ((ro)->num_entries) ++#define RO_entries_reg_id(ro, idx) ((ro)->entries[idx].reg_id) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h b/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h +new file mode 100644 +index 000000000000..fe6583e2c44c +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h +@@ -0,0 +1,33 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2011-2018 Intel Corporation. All Rights Reserved. 
++ * ------------------------------------------------------------------------- ++ ***/ ++ ++#ifndef _LWPMUDRV_GFX_H_ ++#define _LWPMUDRV_GFX_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define GFX_BASE_ADDRESS 0xFF200000 ++#define GFX_BASE_NEW_OFFSET 0x00080000 ++#define GFX_PERF_REG 0x040 // location of GFX counter relative to base ++#define GFX_NUM_COUNTERS 9 // max number of GFX counters per counter group ++#define GFX_CTR_OVF_VAL 0xFFFFFFFF // overflow value for GFX counters ++ ++#define GFX_REG_CTR_CTRL 0x01FF ++#define GFX_CTRL_DISABLE 0x1E00 ++ ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h +new file mode 100644 +index 000000000000..a8d32466a4bd +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h +@@ -0,0 +1,284 @@ ++/**** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. 
++ * ------------------------------------------------------------------------- ++ ****/ ++ ++#ifndef _LWPMUDRV_IOCTL_H_ ++#define _LWPMUDRV_IOCTL_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++//SEP Driver Operation defines ++/* ++ "NOTE THAT the definition must be identical across all OSes" ++ "DO NOT add any OS specific compile flag" ++*/ ++#define DRV_OPERATION_START 1 ++#define DRV_OPERATION_STOP 2 ++#define DRV_OPERATION_INIT_PMU 3 ++#define DRV_OPERATION_INIT 4 ++#define DRV_OPERATION_EM_GROUPS 5 ++#define DRV_OPERATION_SET_CPU_MASK 17 ++#define DRV_OPERATION_PCI_READ 18 ++#define DRV_OPERATION_PCI_WRITE 19 ++#define DRV_OPERATION_READ_PCI_CONFIG 20 ++#define DRV_OPERATION_FD_PHYS 21 ++#define DRV_OPERATION_WRITE_PCI_CONFIG 22 ++#define DRV_OPERATION_INSERT_MARKER 23 ++#define DRV_OPERATION_GET_NORMALIZED_TSC 24 ++#define DRV_OPERATION_EM_CONFIG_NEXT 25 ++#define DRV_OPERATION_SYS_CONFIG 26 ++#define DRV_OPERATION_TSC_SKEW_INFO 27 ++#define DRV_OPERATION_NUM_CORES 28 ++#define DRV_OPERATION_COLLECT_SYS_CONFIG 29 ++#define DRV_OPERATION_GET_SYS_CONFIG 30 ++#define DRV_OPERATION_PAUSE 31 ++#define DRV_OPERATION_RESUME 32 ++#define DRV_OPERATION_SET_ASYNC_EVENT 33 ++#define DRV_OPERATION_ASYNC_STOP 34 ++#define DRV_OPERATION_TERMINATE 35 ++#define DRV_OPERATION_READ_MSRS 36 ++#define DRV_OPERATION_LBR_INFO 37 ++#define DRV_OPERATION_RESERVE 38 ++#define DRV_OPERATION_MARK 39 ++#define DRV_OPERATION_AWAIT_STOP 40 ++#define DRV_OPERATION_SEED_NAME 41 ++#define DRV_OPERATION_KERNEL_CS 42 ++#define DRV_OPERATION_SET_UID 43 ++#define DRV_OPERATION_VERSION 51 ++#define DRV_OPERATION_CHIPSET_INIT 52 ++#define DRV_OPERATION_GET_CHIPSET_DEVICE_ID 53 ++#define DRV_OPERATION_SWITCH_GROUP 54 ++#define DRV_OPERATION_GET_NUM_CORE_CTRS 55 ++#define DRV_OPERATION_PWR_INFO 56 ++#define DRV_OPERATION_NUM_DESCRIPTOR 57 ++#define DRV_OPERATION_DESC_NEXT 58 ++#define DRV_OPERATION_MARK_OFF 59 ++#define DRV_OPERATION_CREATE_MARKER 60 ++#define 
DRV_OPERATION_GET_DRIVER_STATE 61 ++#define DRV_OPERATION_READ_SWITCH_GROUP 62 ++#define DRV_OPERATION_EM_GROUPS_UNC 63 ++#define DRV_OPERATION_EM_CONFIG_NEXT_UNC 64 ++#define DRV_OPERATION_INIT_UNC 65 ++#define DRV_OPERATION_RO_INFO 66 ++#define DRV_OPERATION_READ_MSR 67 ++#define DRV_OPERATION_WRITE_MSR 68 ++#define DRV_OPERATION_THREAD_SET_NAME 69 ++#define DRV_OPERATION_GET_PLATFORM_INFO 70 ++#define DRV_OPERATION_GET_NORMALIZED_TSC_STANDALONE 71 ++#define DRV_OPERATION_READ_AND_RESET 72 ++#define DRV_OPERATION_SET_CPU_TOPOLOGY 73 ++#define DRV_OPERATION_INIT_NUM_DEV 74 ++#define DRV_OPERATION_SET_GFX_EVENT 75 ++#define DRV_OPERATION_GET_NUM_SAMPLES 76 ++#define DRV_OPERATION_SET_PWR_EVENT 77 ++#define DRV_OPERATION_SET_DEVICE_NUM_UNITS 78 ++#define DRV_OPERATION_TIMER_TRIGGER_READ 79 ++#define DRV_OPERATION_GET_INTERVAL_COUNTS 80 ++#define DRV_OPERATION_FLUSH 81 ++#define DRV_OPERATION_SET_SCAN_UNCORE_TOPOLOGY_INFO 82 ++#define DRV_OPERATION_GET_UNCORE_TOPOLOGY 83 ++#define DRV_OPERATION_GET_MARKER_ID 84 ++#define DRV_OPERATION_GET_SAMPLE_DROP_INFO 85 ++#define DRV_OPERATION_GET_DRV_SETUP_INFO 86 ++#define DRV_OPERATION_GET_PLATFORM_TOPOLOGY 87 ++#define DRV_OPERATION_GET_THREAD_COUNT 88 ++#define DRV_OPERATION_GET_THREAD_INFO 89 ++#define DRV_OPERATION_GET_DRIVER_LOG 90 ++#define DRV_OPERATION_CONTROL_DRIVER_LOG 91 ++#define DRV_OPERATION_SET_OSID 92 ++#define DRV_OPERATION_GET_AGENT_MODE 93 ++#define DRV_OPERATION_INIT_DRIVER 94 ++#define DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER 95 ++// Only used by MAC OS ++#define DRV_OPERATION_GET_ASLR_OFFSET 997 // this may not need ++#define DRV_OPERATION_SET_OSX_VERSION 998 ++#define DRV_OPERATION_PROVIDE_FUNCTION_PTRS 999 ++ ++// IOCTL_SETUP ++ ++// IOCTL_ARGS ++typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE; ++typedef IOCTL_ARGS_NODE * IOCTL_ARGS; ++ ++#if defined(DRV_EM64T) ++struct IOCTL_ARGS_NODE_S { ++ U64 len_drv_to_usr; ++ // buffer send from driver(target) to user(host), stands for read buffer ++ char 
*buf_drv_to_usr;
++ // length of the driver(target) to user(host) buffer
++ U64 len_usr_to_drv;
++ // buffer send from user(host) to driver(target) stands for write buffer
++ char *buf_usr_to_drv; // length of user(host) to driver(target) buffer
++ U32 command;
++};
++#endif
++#if defined(DRV_IA32)
++struct IOCTL_ARGS_NODE_S {
++ U64 len_drv_to_usr;
++ // buffer send from driver(target) to user(host),stands for read buffer
++ char *buf_drv_to_usr; // length of driver(target) to user(host) buffer
++ char *reserved1;
++ U64 len_usr_to_drv;
++ // send from user(host) to driver(target),stands for write buffer
++ char *buf_usr_to_drv; // length of user(host) to driver(target) buffer
++ char *reserved2;
++ U32 command;
++};
++#endif
++
++#if defined(DRV_OS_WINDOWS)
++
++//
++// NtDeviceIoControlFile IoControlCode values for this device.
++//
++// Warning: Remember that the low two bits of the code specify how the
++// buffers are passed to the driver!
++//
++// 16 bit device type. 12 bit function codes
++#define LWPMUDRV_IOCTL_DEVICE_TYPE 0xA000
++// values 0-32768 reserved for Microsoft
++#define LWPMUDRV_IOCTL_FUNCTION 0x0A00 // values 0-2047 reserved for Microsoft
++
++//
++// Basic CTL CODE macro to reduce typographical errors
++// Use for FILE_READ_ACCESS
++//
++#define LWPMUDRV_CTL_READ_CODE(x) \
++ CTL_CODE(LWPMUDRV_IOCTL_DEVICE_TYPE, LWPMUDRV_IOCTL_FUNCTION + (x), \
++ METHOD_BUFFERED, FILE_READ_ACCESS)
++
++/* Reference https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/defining-i-o-control-codes
++ CTL_CODE (DeviceType, Function, Method, Access) generates 32 bit code
++ ------------------------------------------------- ----------------
++ | 31 | 30 ... 16 | 15 14 | 13 | 12 ... 
2 | 1 0 | ++ ------------------------------------------------------------------- ++ | common | device | req access | custom | func code | transfer | ++ | | type | | | | type | ++ ------------------------------------------------------------------- ++*/ ++#define LWPMUDRV_DEVICE_TYPE(x) ((x & 0xFFFF0000) >> 16) ++#define LWPMUDRV_METHOD(x) (x & 3) ++#define LWPMUDRV_FUNCTION(x) (((x >> 2) & 0x00000FFF) - 0x0A00) ++ ++#define LWPMUDRV_IOCTL_CODE(x) LWPMUDRV_CTL_READ_CODE(x) ++ ++#elif defined(SEP_ESX) ++ ++typedef struct CPU_ARGS_NODE_S CPU_ARGS_NODE; ++typedef CPU_ARGS_NODE * CPU_ARGS; ++struct CPU_ARGS_NODE_S { ++ U64 len_drv_to_usr; ++ char *buf_drv_to_usr; ++ U32 command; ++ U32 CPU_ID; ++ U32 BUCKET_ID; ++}; ++ ++// IOCTL_SETUP ++#define LWPMU_IOC_MAGIC 99 ++#define OS_SUCCESS 0 ++#define OS_STATUS int ++//#define OS_ILLEGAL_IOCTL -ENOTTY ++//#define OS_NO_MEM -ENOMEM ++//#define OS_FAULT -EFAULT ++ ++#define LWPMUDRV_IOCTL_IO(x) (x) ++#define LWPMUDRV_IOCTL_IOR(x) (x) ++#define LWPMUDRV_IOCTL_IOW(x) (x) ++#define LWPMUDRV_IOCTL_IORW(x) (x) ++ ++#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ ++ defined(DRV_OS_ANDROID) ++// IOCTL_ARGS ++ ++// COMPAT IOCTL_ARGS ++#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) ++typedef struct IOCTL_COMPAT_ARGS_NODE_S IOCTL_COMPAT_ARGS_NODE; ++typedef IOCTL_COMPAT_ARGS_NODE * IOCTL_COMPAT_ARGS; ++struct IOCTL_COMPAT_ARGS_NODE_S { ++ U64 len_drv_to_usr; ++ compat_uptr_t buf_drv_to_usr; ++ U64 len_usr_to_drv; ++ compat_uptr_t buf_usr_to_drv; ++}; ++#endif ++ ++// COMPAT IOCTL_SETUP ++// ++#define LWPMU_IOC_MAGIC 99 ++ ++#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) ++#define LWPMUDRV_IOCTL_IO(x) _IO(LWPMU_IOC_MAGIC, (x)) ++#define LWPMUDRV_IOCTL_IOR(x) _IOR(LWPMU_IOC_MAGIC, (x), compat_uptr_t) ++#define LWPMUDRV_IOCTL_IOW(x) _IOW(LWPMU_IOC_MAGIC, (x), compat_uptr_t) ++#define LWPMUDRV_IOCTL_IORW(x) _IOW(LWPMU_IOC_MAGIC, (x), compat_uptr_t) ++#else ++#define LWPMUDRV_IOCTL_IO(x) _IO(LWPMU_IOC_MAGIC, (x)) ++#define 
LWPMUDRV_IOCTL_IOR(x) _IOR(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_IOW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_IORW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS) ++#endif ++ ++#elif defined(DRV_OS_FREEBSD) ++ ++// IOCTL_SETUP ++// ++#define LWPMU_IOC_MAGIC 99 ++ ++/* FreeBSD is very strict about IOR/IOW/IOWR specifications on IOCTLs. ++ * Since these IOCTLs all pass down the real read/write buffer lengths ++ * and addresses inside of an IOCTL_ARGS_NODE data structure, we ++ * need to specify all of these as _IOW so that the kernel will ++ * view it as userspace passing the data to the driver, rather than ++ * the reverse. There are also some cases where Linux is passing ++ * a smaller type than IOCTL_ARGS_NODE, even though its really ++ * passing an IOCTL_ARGS_NODE. These needed to be fixed for FreeBSD. ++ */ ++#define LWPMUDRV_IOCTL_IO(x) _IO(LWPMU_IOC_MAGIC, (x)) ++#define LWPMUDRV_IOCTL_IOR(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_IOW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_IORW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS_NODE) ++ ++#elif defined(DRV_OS_MAC) ++ ++typedef struct CPU_ARGS_NODE_S CPU_ARGS_NODE; ++typedef CPU_ARGS_NODE * CPU_ARGS; ++struct CPU_ARGS_NODE_S { ++ U64 len_drv_to_usr; ++ char *buf_drv_to_usr; ++ U32 command; ++ U32 CPU_ID; ++ U32 BUCKET_ID; ++}; ++ ++// IOCTL_SETUP ++// ++#define LWPMU_IOC_MAGIC 99 ++#define OS_SUCCESS 0 ++#define OS_STATUS int ++#define OS_ILLEGAL_IOCTL -ENOTTY ++#define OS_NO_MEM -ENOMEM ++#define OS_FAULT -EFAULT ++ ++// Task file Opcodes. ++// keeping the definitions as IOCTL but in MAC OSX ++// these are really OpCodes consumed by Execute command. 
++ ++#else ++#error "unknown OS in lwpmudrv_ioctl.h" ++#endif ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h b/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h +new file mode 100644 +index 000000000000..e26a478a9bb1 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h +@@ -0,0 +1,100 @@ ++/**** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2011-2018 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++****/ ++ ++#ifndef _LWPMUDRV_PWR_H_ ++#define _LWPMUDRV_PWR_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define MAX_EVENT_NAME_LEN 512 ++#define MAX_EVENT_DESC_LEN 1024 ++ ++// Power event groups ++enum PWR_EVENT_GROUPS { ++ IO_DEV_STATES = 1, ++ MMIO_DEV_STATES, ++ MMIO_SYS_STATES, ++ MMIO_IPC_DEV_RES, ++ MMIO_IPC_SYS_RES ++}; ++ ++typedef struct PWR_EVENT_INFO_NODE_S PWR_EVENT_INFO_NODE; ++typedef PWR_EVENT_INFO_NODE * PWR_EVENT_INFO; ++ ++struct PWR_EVENT_INFO_NODE_S { ++ U32 event_id; ++ U32 group_id; ++ char name[MAX_EVENT_NAME_LEN]; ++ char desc[MAX_EVENT_DESC_LEN]; ++ U32 io_baseaddr1; ++ U32 io_range1; ++ U32 io_baseaddr2; ++ U32 io_range2; ++ U32 offset; ++ U32 virtual_address; ++}; ++ ++#define PWR_EVENT_INFO_event_id(pwr_event) ((pwr_event)->event_id) ++#define PWR_EVENT_INFO_group_id(pwr_event) ((pwr_event)->group_id) ++#define PWR_EVENT_INFO_name(pwr_event) ((pwr_event)->name) ++#define PWR_EVENT_INFO_desc(pwr_event) ((pwr_event)->desc) ++#define PWR_EVENT_INFO_io_baseaddr1(pwr_event) ((pwr_event)->io_baseaddr1) ++#define 
PWR_EVENT_INFO_io_range1(pwr_event) ((pwr_event)->io_range1) ++#define PWR_EVENT_INFO_io_baseaddr2(pwr_event) ((pwr_event)->io_baseaddr2) ++#define PWR_EVENT_INFO_io_range2(pwr_event) ((pwr_event)->io_range2) ++#define PWR_EVENT_INFO_offset(pwr_event) ((pwr_event)->offset) ++#define PWR_EVENT_INFO_virtual_address(pwr_event) ((pwr_event)->virtual_address) ++ ++// IPC register offsets ++#define IPC_BASE_ADDRESS 0xFF11C000 ++#define IPC_CMD_OFFSET 0x00000000 ++#define IPC_STS_OFFSET 0x00000004 ++#define IPC_SPTR_OFFSET 0x00000008 ++#define IPC_DPTR_OFFSET 0x0000000C ++#define IPC_WBUF_OFFSET 0x00000080 ++#define IPC_RBUF_OFFSET 0x00000090 ++#define IPC_MAX_ADDR 0x100 ++ ++// Write 3bytes in IPC_WBUF (2bytes for address and 1byte for value) ++#define IPC_ADC_WRITE_1 0x000300FF ++// Write 2bytes in IPC_WBUF (2bytes for address) and read 1byte from IPC_RBUF ++#define IPC_ADC_READ_1 0x000210FF ++ ++// IPC commands ++#define IPC_MESSAGE_MSIC 0xFF ++#define IPC_MESSAGE_CC 0xEF ++#define IPC_MESSAGE_D_RESIDENCY 0xEA ++#define IPC_MESSAGE_S_RESIDENCY 0xEB ++ ++// IPC subcommands ++#define IPC_COMMAND_WRITE 0x0 ++#define IPC_COMMAND_READ 0x1 ++#define IPC_COMMAND_START_RESIDENCY 0x0 ++#define IPC_COMMAND_STOP_RESIDENCY 0x1 ++#define IPC_COMMAND_DUMP_RESIDENCY 0x2 ++ ++// IPC commands for S state residency counter ++#define S_RESIDENCY_BASE_ADDRESS 0xFFFF71E0 ++#define S_RESIDENCY_MAX_COUNTERS 0x4 ++#define S_RESIDENCY_MAX_STATES 0x3 ++// IPC commands for D state residency counter ++#define D_RESIDENCY_BASE_ADDRESS 0xFFFF7000 ++#define D_RESIDENCY_MAX_COUNTERS 0x78 // 40 LSS * 3 D states = 120 ++#define D_RESIDENCY_MAX_STATES 0x3 ++#define D_RESIDENCY_MAX_LSS 0x28 // 40 LSS ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* _LWPMUDRV_PWR_H_ */ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +new file mode 100644 +index 000000000000..c76ef5fa0e67 +--- /dev/null ++++ 
b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +@@ -0,0 +1,2059 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++***/ ++ ++#ifndef _LWPMUDRV_STRUCT_UTILS_H_ ++#define _LWPMUDRV_STRUCT_UTILS_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++// processor execution modes ++#define MODE_UNKNOWN 99 ++// the following defines must start at 0 ++#define MODE_64BIT 3 ++#define MODE_32BIT 2 ++#define MODE_16BIT 1 ++#define MODE_V86 0 ++ ++// sampling methods ++#define SM_RTC 2020 // real time clock ++#define SM_VTD 2021 // OS Virtual Timer Device ++#define SM_NMI 2022 // non-maskable interrupt time based ++#define SM_EBS 2023 // event based sampling ++#define SM_EBC 2024 // event based counting ++ ++// sampling mechanism bitmap definitions ++#define INTERRUPT_RTC 0x1 ++#define INTERRUPT_VTD 0x2 ++#define INTERRUPT_NMI 0x4 ++#define INTERRUPT_EBS 0x8 ++ ++// Device types ++#define DEV_CORE 0x01 ++#define DEV_UNC 0x02 ++ ++// eflags defines ++#define EFLAGS_VM 0x00020000 // V86 mode ++#define EFLAGS_IOPL0 0 ++#define EFLAGS_IOPL1 0x00001000 ++#define EFLAGS_IOPL2 0x00002000 ++#define EFLAGS_IOPL3 0x00003000 ++#define MAX_EMON_GROUPS 1000 ++#define MAX_PCI_BUSNO 256 ++#define MAX_DEVICES 30 ++#define MAX_REGS 64 ++#define MAX_EMON_GROUPS 1000 ++#define MAX_PCI_DEVNO 32 ++#define MAX_PCI_FUNCNO 8 ++#define MAX_PCI_DEVUNIT 16 ++#define MAX_TURBO_VALUES 32 ++#define REG_BIT_MASK 0xFFFFFFFFFFFFFFFFULL ++ ++extern float freq_multiplier; ++ ++// Enumeration for invoking dispatch on multiple cpus 
or not ++typedef enum { DRV_MULTIPLE_INSTANCE = 0, DRV_SINGLE_INSTANCE } DRV_PROG_TYPE; ++ ++typedef struct DRV_CONFIG_NODE_S DRV_CONFIG_NODE; ++typedef DRV_CONFIG_NODE * DRV_CONFIG; ++ ++struct DRV_CONFIG_NODE_S { ++ U32 size; ++ U16 version; ++ U16 reserved1; ++ U32 num_events; ++ U32 num_chipset_events; ++ U32 chipset_offset; ++ S32 seed_name_len; ++ union { ++ S8 *seed_name; ++ U64 dummy1; ++ } u1; ++ union { ++ S8 *cpu_mask; ++ U64 dummy2; ++ } u2; ++ union { ++ U64 collection_config; ++ struct { ++ U64 start_paused : 1; ++ U64 counting_mode : 1; ++ U64 enable_chipset : 1; ++ U64 enable_gfx : 1; ++ U64 enable_pwr : 1; ++ U64 emon_mode : 1; ++ U64 debug_inject : 1; ++ U64 virt_phys_translation : 1; ++ U64 enable_p_state : 1; ++ U64 enable_cp_mode : 1; ++ U64 read_pstate_msrs : 1; ++ U64 use_pcl : 1; ++ U64 enable_ebc : 1; ++ U64 enable_tbc : 1; ++ U64 ds_area_available : 1; ++ U64 per_cpu_tsc : 1; ++ U64 reserved_field1 : 48; ++ } s1; ++ } u3; ++ U64 target_pid; ++ U32 os_of_interest; ++ U16 unc_timer_interval; ++ U16 unc_em_factor; ++ S32 p_state_trigger_index; ++ DRV_BOOL multi_pebs_enabled; ++ U32 reserved2; ++ U32 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++ U64 reserved6; ++}; ++ ++#define DRV_CONFIG_size(cfg) ((cfg)->size) ++#define DRV_CONFIG_version(cfg) ((cfg)->version) ++#define DRV_CONFIG_num_events(cfg) ((cfg)->num_events) ++#define DRV_CONFIG_num_chipset_events(cfg) ((cfg)->num_chipset_events) ++#define DRV_CONFIG_chipset_offset(cfg) ((cfg)->chipset_offset) ++ ++#define DRV_CONFIG_seed_name(cfg) ((cfg)->u1.seed_name) ++#define DRV_CONFIG_seed_name_len(cfg) ((cfg)->seed_name_len) ++#define DRV_CONFIG_cpu_mask(cfg) ((cfg)->u2.cpu_mask) ++#define DRV_CONFIG_start_paused(cfg) ((cfg)->u3.s1.start_paused) ++#define DRV_CONFIG_counting_mode(cfg) ((cfg)->u3.s1.counting_mode) ++#define DRV_CONFIG_enable_chipset(cfg) ((cfg)->u3.s1.enable_chipset) ++#define DRV_CONFIG_enable_gfx(cfg) ((cfg)->u3.s1.enable_gfx) ++#define DRV_CONFIG_enable_pwr(cfg) 
((cfg)->u3.s1.enable_pwr) ++#define DRV_CONFIG_emon_mode(cfg) ((cfg)->u3.s1.emon_mode) ++#define DRV_CONFIG_debug_inject(cfg) ((cfg)->u3.s1.debug_inject) ++#define DRV_CONFIG_virt_phys_translation(cfg) \ ++ ((cfg)->u3.s1.virt_phys_translation) ++#define DRV_CONFIG_enable_p_state(cfg) ((cfg)->u3.s1.enable_p_state) ++#define DRV_CONFIG_enable_cp_mode(cfg) ((cfg)->u3.s1.enable_cp_mode) ++#define DRV_CONFIG_read_pstate_msrs(cfg) ((cfg)->u3.s1.read_pstate_msrs) ++#define DRV_CONFIG_use_pcl(cfg) ((cfg)->u3.s1.use_pcl) ++#define DRV_CONFIG_event_based_counts(cfg) ((cfg)->u3.s1.enable_ebc) ++#define DRV_CONFIG_timer_based_counts(cfg) ((cfg)->u3.s1.enable_tbc) ++#define DRV_CONFIG_ds_area_available(cfg) ((cfg)->u3.s1.ds_area_available) ++#define DRV_CONFIG_per_cpu_tsc(cfg) ((cfg)->u3.s1.per_cpu_tsc) ++#define DRV_CONFIG_target_pid(cfg) ((cfg)->target_pid) ++#define DRV_CONFIG_os_of_interest(cfg) ((cfg)->os_of_interest) ++#define DRV_CONFIG_unc_timer_interval(cfg) ((cfg)->unc_timer_interval) ++#define DRV_CONFIG_unc_em_factor(cfg) ((cfg)->unc_em_factor) ++#define DRV_CONFIG_p_state_trigger_index(cfg) ((cfg)->p_state_trigger_index) ++#define DRV_CONFIG_multi_pebs_enabled(cfg) ((cfg)->multi_pebs_enabled) ++ ++#define DRV_CONFIG_VERSION 1 ++ ++typedef struct DEV_CONFIG_NODE_S DEV_CONFIG_NODE; ++typedef DEV_CONFIG_NODE * DEV_CONFIG; ++ ++struct DEV_CONFIG_NODE_S { ++ U16 size; ++ U16 version; ++ U32 dispatch_id; ++ U32 pebs_mode; ++ U32 pebs_record_num; ++ U32 results_offset; // to store the offset for this device's results ++ U32 max_gp_counters; ++ U32 device_type; ++ U32 core_type; ++ union { ++ U64 enable_bit_fields; ++ struct { ++ U64 pebs_capture : 1; ++ U64 collect_lbrs : 1; ++ U64 collect_callstacks : 1; ++ U64 collect_kernel_callstacks : 1; ++ U64 latency_capture : 1; ++ U64 power_capture : 1; ++ U64 htoff_mode : 1; ++ U64 eventing_ip_capture : 1; ++ U64 hle_capture : 1; ++ U64 precise_ip_lbrs : 1; ++ U64 store_lbrs : 1; ++ U64 tsc_capture : 1; ++ U64 
enable_perf_metrics : 1; ++ U64 enable_adaptive_pebs : 1; ++ U64 apebs_collect_mem_info : 1; ++ U64 apebs_collect_gpr : 1; ++ U64 apebs_collect_xmm : 1; ++ U64 apebs_collect_lbrs : 1; ++ U64 collect_fixed_counter_pebs : 1; ++ U64 collect_os_callstacks : 1; ++ U64 reserved_field1 : 44; ++ } s1; ++ } u1; ++ U32 emon_unc_offset[MAX_EMON_GROUPS]; ++ U32 ebc_group_id_offset; ++ U8 num_perf_metrics; ++ U8 apebs_num_lbr_entries; ++ U16 emon_perf_metrics_offset; ++ U32 device_scope; ++ U32 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DEV_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id) ++#define DEV_CONFIG_pebs_mode(cfg) ((cfg)->pebs_mode) ++#define DEV_CONFIG_pebs_record_num(cfg) ((cfg)->pebs_record_num) ++#define DEV_CONFIG_results_offset(cfg) ((cfg)->results_offset) ++#define DEV_CONFIG_max_gp_counters(cfg) ((cfg)->max_gp_counters) ++ ++#define DEV_CONFIG_device_type(cfg) ((cfg)->device_type) ++#define DEV_CONFIG_core_type(cfg) ((cfg)->core_type) ++ ++#define DEV_CONFIG_pebs_capture(cfg) ((cfg)->u1.s1.pebs_capture) ++#define DEV_CONFIG_collect_lbrs(cfg) ((cfg)->u1.s1.collect_lbrs) ++#define DEV_CONFIG_collect_callstacks(cfg) ((cfg)->u1.s1.collect_callstacks) ++#define DEV_CONFIG_collect_kernel_callstacks(cfg) \ ++ ((cfg)->u1.s1.collect_kernel_callstacks) ++#define DEV_CONFIG_latency_capture(cfg) ((cfg)->u1.s1.latency_capture) ++#define DEV_CONFIG_power_capture(cfg) ((cfg)->u1.s1.power_capture) ++#define DEV_CONFIG_htoff_mode(cfg) ((cfg)->u1.s1.htoff_mode) ++#define DEV_CONFIG_eventing_ip_capture(cfg) ((cfg)->u1.s1.eventing_ip_capture) ++#define DEV_CONFIG_hle_capture(cfg) ((cfg)->u1.s1.hle_capture) ++#define DEV_CONFIG_precise_ip_lbrs(cfg) ((cfg)->u1.s1.precise_ip_lbrs) ++#define DEV_CONFIG_store_lbrs(cfg) ((cfg)->u1.s1.store_lbrs) ++#define DEV_CONFIG_tsc_capture(cfg) ((cfg)->u1.s1.tsc_capture) ++#define DEV_CONFIG_enable_perf_metrics(cfg) ((cfg)->u1.s1.enable_perf_metrics) ++#define DEV_CONFIG_enable_adaptive_pebs(cfg) 
((cfg)->u1.s1.enable_adaptive_pebs) ++#define DEV_CONFIG_apebs_collect_mem_info(cfg) \ ++ ((cfg)->u1.s1.apebs_collect_mem_info) ++#define DEV_CONFIG_apebs_collect_gpr(cfg) ((cfg)->u1.s1.apebs_collect_gpr) ++#define DEV_CONFIG_apebs_collect_xmm(cfg) ((cfg)->u1.s1.apebs_collect_xmm) ++#define DEV_CONFIG_apebs_collect_lbrs(cfg) ((cfg)->u1.s1.apebs_collect_lbrs) ++#define DEV_CONFIG_collect_fixed_counter_pebs(cfg) \ ++ ((cfg)->u1.s1.collect_fixed_counter_pebs) ++#define DEV_CONFIG_collect_os_callstacks(cfg) \ ++ ((cfg)->u1.s1.collect_os_callstacks) ++#define DEV_CONFIG_enable_bit_fields(cfg) ((cfg)->u1.enable_bit_fields) ++#define DEV_CONFIG_emon_unc_offset(cfg, grp_num) \ ++ ((cfg)->emon_unc_offset[grp_num]) ++#define DEV_CONFIG_ebc_group_id_offset(cfg) ((cfg)->ebc_group_id_offset) ++#define DEV_CONFIG_num_perf_metrics(cfg) ((cfg)->num_perf_metrics) ++#define DEV_CONFIG_apebs_num_lbr_entries(cfg) ((cfg)->apebs_num_lbr_entries) ++#define DEV_CONFIG_emon_perf_metrics_offset(cfg) \ ++ ((cfg)->emon_perf_metrics_offset) ++#define DEV_CONFIG_device_scope(cfg) ((cfg)->device_scope) ++ ++typedef struct DEV_UNC_CONFIG_NODE_S DEV_UNC_CONFIG_NODE; ++typedef DEV_UNC_CONFIG_NODE * DEV_UNC_CONFIG; ++ ++struct DEV_UNC_CONFIG_NODE_S { ++ U16 size; ++ U16 version; ++ U32 dispatch_id; ++ U32 results_offset; ++ U32 device_type; ++ U32 device_scope; ++ U32 reserved1; ++ U32 emon_unc_offset[MAX_EMON_GROUPS]; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DEV_UNC_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id) ++#define DEV_UNC_CONFIG_results_offset(cfg) ((cfg)->results_offset) ++#define DEV_UNC_CONFIG_emon_unc_offset(cfg, grp_num) \ ++ ((cfg)->emon_unc_offset[grp_num]) ++#define DEV_UNC_CONFIG_device_type(cfg) ((cfg)->device_type) ++#define DEV_UNC_CONFIG_device_scope(cfg) ((cfg)->device_scope) ++ ++/* ++ * X86 processor code descriptor ++ */ ++typedef struct CodeDescriptor_s { ++ union { ++ U32 lowWord; // low dword of descriptor ++ struct { // low broken out by 
fields
++ U16 limitLow; // segment limit 15:00
++ U16 baseLow; // segment base 15:00
++ } s1;
++ } u1;
++ union {
++ U32 highWord; // high word of descriptor
++ struct { // high broken out by bit fields
++ U32 baseMid : 8; // base 23:16
++ U32 accessed : 1; // accessed
++ U32 readable : 1; // readable
++ U32 conforming : 1; // conforming code segment
++ U32 oneOne : 2; // always 11
++ U32 dpl : 2; // Dpl
++ U32 pres : 1; // present bit
++ U32 limitHi : 4; // limit 19:16
++ U32 sys : 1; // available for use by system
++ U32 reserved_0 : 1; // reserved, always 0
++ U32 default_size : 1;
++ // default operation size (1=32bit, 0=16bit)
++ U32 granularity : 1; // granularity (1=32 bit, 0=20 bit)
++ U32 baseHi : 8; // base hi 31:24
++ } s2;
++ } u2;
++} CodeDescriptor;
++
++/*
++ * Module record. These are emitted whenever a DLL/EXE is loaded or unloaded.
++ * The filename fields may be 0 on an unload. The records represent a module
++ * for a certain span of time, delineated by the load / unload samplecounts.
++ * Note:
++ * The structure contains 64 bit fields which may cause the compiler to pad the
++ * length of the structure to an 8 byte boundary.
++ */
++typedef struct ModuleRecord_s {
++ U16 recLength; // total length of this record (including this length,
++ // always U32 multiple) output from sampler is variable
++ // length (pathname at end of record) sampfile builder moves
++ // path names to a separate "literal pool" area
++ // so that these records become fixed length, and can be treated
++ // as an array see modrecFixedLen in header
++
++ U16 segmentType : 2;
++ // V86, 16, 32, 64 (see MODE_ defines), maybe inaccurate for Win95
++ // .. 
a 16 bit module may become a 32 bit module, inferred by ++ // ..looking at 1st sample record that matches the module selector ++ U16 loadEvent : 1; // 0 for load, 1 for unload ++ U16 processed : 1; // 0 for load, 1 for unload ++ U16 reserved0 : 12; ++ ++ U16 selector; // code selector or V86 segment ++ U16 segmentNameLength; ++ // length of the segment name if the segmentNameSet bit is set ++ U32 segmentNumber; ++ // segment number, Win95 can have multiple pieces for one module ++ union { ++ U32 flags; // all the flags as one dword ++ struct { ++ U32 exe : 1; // this module is an exe ++ U32 globalModule : 1; ++ // globally loaded module. There may be multiple ++ // module records for a global module, but the samples ++ // will only point to the 1st one, the others will be ++ // ignored. NT's Kernel32 is an example of this. ++ // REVISIT this?? ++ U32 bogusWin95 : 1; ++ // "bogus" win95 module. By bogus, we mean a ++ // module that has a pid of 0, no length and no base. ++ // Selector actually used as a 32 bit module. 
++ U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid ++ U32 sampleFound : 1; ++ // at least one sample referenced this module ++ U32 tscUsed : 1; // tsc set when record written ++ U32 duplicate : 1; ++ // 1st pass analysis has determined this is a ++ // duplicate load ++ U32 globalModuleTB5 : 1; ++ // module mapped into all processes on system ++ U32 segmentNameSet : 1; ++ // set if the segment name was collected ++ // (initially done for xbox collections) ++ U32 firstModuleRecInProcess : 1; ++ // if the pidCreatesTrackedInModuleRecs flag is set ++ // in the SampleHeaderEx struct and this flag ++ // is set, the associated module indicates ++ // the beginning of a new process ++ U32 source : 1; ++ // 0 for path in target system, ++ // 1 for path in host system ++ U32 unknownLoadAddress : 1; ++ // for 0 valid loadAddr64 value, ++ // 1 for invalid loadAddr64 value ++ U32 reserved1 : 20; ++ } s1; ++ } u2; ++ U64 length64; // module length ++ U64 loadAddr64; // load address ++ U32 pidRecIndex; ++ // process ID rec index (index into start of pid record section) ++ // .. (see pidRecIndexRaw). If pidRecIndex == 0 and pidRecIndexRaw == 1 ++ // ..then this is a kernel or global module. Can validly ++ // ..be 0 if not raw (array index). 
Use ReturnPid() to access this
++ // ..field
++ U32 osid; // OS identifier
++ U64 unloadTsc; // TSC collected on an unload event
++ U32 path; // module path name (section offset on disk)
++ // ..when initially written by sampler name is at end of this
++ // ..struct, when merged with main file names are pooled at end
++ // ..of ModuleRecord Section so ModulesRecords can be
++ // ..fixed length
++ U16 pathLength; // path name length (includes terminating \0)
++ U16 filenameOffset; // offset into path name of base filename
++ U32 segmentName; // offset to the segmentName from the beginning of the
++ // module section in a processed module section
++ // (s/b 0 in a raw module record)
++ // in a raw module record, the segment name will follow the
++ // module name and the module name's terminating NULL char
++ U32 page_offset_high;
++ U64 tsc; // time stamp counter module event occurred
++ U32 parent_pid; // Parent PID of the process
++ U32 page_offset_low;
++} ModuleRecord;
++
++#define MR_unloadTscSet(x, y) { (x)->unloadTsc = (y); }
++#define MR_unloadTscGet(x) ((x)->unloadTsc)
++
++#define MR_page_offset_Set(x, y) \
++ { \
++ (x)->page_offset_low = (y)&0xFFFFFFFF; \
++ (x)->page_offset_high = ((y) >> 32) & 0xFFFFFFFF; \
++ }
++
++#define MR_page_offset_Get(x) \
++ ((((U64)(x)->page_offset_high) << 32) | (x)->page_offset_low)
++
++// Accessor macros for ModuleRecord
++#define MODULE_RECORD_rec_length(x) ((x)->recLength)
++#define MODULE_RECORD_segment_type(x) ((x)->segmentType)
++#define MODULE_RECORD_load_event(x) ((x)->loadEvent)
++#define MODULE_RECORD_processed(x) ((x)->processed)
++#define MODULE_RECORD_selector(x) ((x)->selector)
++#define MODULE_RECORD_segment_name_length(x) ((x)->segmentNameLength)
++#define MODULE_RECORD_segment_number(x) ((x)->segmentNumber)
++#define MODULE_RECORD_flags(x) ((x)->u2.flags)
++#define MODULE_RECORD_exe(x) ((x)->u2.s1.exe)
++#define MODULE_RECORD_global_module(x) ((x)->u2.s1.globalModule)
++#define MODULE_RECORD_bogus_win95(x) 
((x)->u2.s1.bogusWin95) ++#define MODULE_RECORD_pid_rec_index_raw(x) ((x)->u2.s1.pidRecIndexRaw) ++#define MODULE_RECORD_sample_found(x) ((x)->u2.s1.sampleFound) ++#define MODULE_RECORD_tsc_used(x) ((x)->u2.s1.tscUsed) ++#define MODULE_RECORD_duplicate(x) ((x)->u2.s1.duplicate) ++#define MODULE_RECORD_global_module_tb5(x) ((x)->u2.s1.globalModuleTB5) ++#define MODULE_RECORD_segment_name_set(x) ((x)->u2.s1.segmentNameSet) ++#define MODULE_RECORD_first_module_rec_in_process(x) \ ++ ((x)->u2.s1.firstModuleRecInProcess) ++#define MODULE_RECORD_source(x) ((x)->u2.s1.source) ++#define MODULE_RECORD_unknown_load_address(x) ((x)->u2.s1.unknownLoadAddress) ++#define MODULE_RECORD_length64(x) ((x)->length64) ++#define MODULE_RECORD_load_addr64(x) ((x)->loadAddr64) ++#define MODULE_RECORD_pid_rec_index(x) ((x)->pidRecIndex) ++#define MODULE_RECORD_load_sample_count(x) ((x)->u5.s2.loadSampleCount) ++#define MODULE_RECORD_unload_sample_count(x) ((x)->u5.s2.unloadSampleCount) ++#define MODULE_RECORD_unload_tsc(x) ((x)->unloadTsc) ++#define MODULE_RECORD_path(x) ((x)->path) ++#define MODULE_RECORD_path_length(x) ((x)->pathLength) ++#define MODULE_RECORD_filename_offset(x) ((x)->filenameOffset) ++#define MODULE_RECORD_segment_name(x) ((x)->segmentName) ++#define MODULE_RECORD_tsc(x) ((x)->tsc) ++#define MODULE_RECORD_parent_pid(x) ((x)->parent_pid) ++#define MODULE_RECORD_osid(x) ((x)->osid) ++ ++/* ++ * Sample record. Size can be determined by looking at the header record. ++ * There can be up to 3 sections. The SampleFileHeader defines the presence ++ * of sections and their offsets. Within a sample file, all of the sample ++ * records have the same number of sections and the same size. However, ++ * different sample record sections and sizes can exist in different ++ * sample files. Since recording counters and the time stamp counter for ++ * each sample can be space consuming, the user can determine whether or not ++ * this information is kept at sample collection time. 
++ */ ++ ++typedef struct SampleRecordPC_s { // Program Counter section ++ U32 descriptor_id; ++ U32 osid; // OS identifier ++ union { ++ struct { ++ U64 iip; // IA64 interrupt instruction pointer ++ U64 ipsr; // IA64 interrupt processor status register ++ } s1; ++ struct { ++ U32 eip; // IA32 instruction pointer ++ U32 eflags; // IA32 eflags ++ CodeDescriptor csd; // IA32 code seg descriptor(8 bytes) ++ } s2; ++ } u1; ++ U16 cs; // IA32 cs (0 for IA64) ++ union { ++ U16 cpuAndOS; // cpu and OS info as one word ++ struct { // cpu and OS info broken out ++ U16 cpuNum : 12; // cpu number (0 - 4096) ++ U16 notVmid0 : 1; ++ // win95, vmid0 flag(1 means NOT vmid 0) ++ U16 codeMode : 2; // processor mode, see MODE_ defines ++ U16 uncore_valid : 1; ++ // identifies if the uncore count is valid ++ } s3; ++ } u2; ++ U32 tid; // OS thread ID (may get reused, see tidIsRaw) ++ U32 pidRecIndex; // process ID rec index (index into start of pid ++ // record section) .. can validly be 0 if not raw ++ // (array index). Use ReturnPid() to ++ // ..access this field .. (see pidRecIndexRaw) ++ union { ++ U32 bitFields2; ++ struct { ++ U32 mrIndex : 20; ++ // module record index (index into start of ++ // module rec section) .. 
(see mrIndexNone) ++ U32 eventIndex : 8; // index into the Events section ++ U32 tidIsRaw : 1; // tid is raw OS tid ++ U32 IA64PC : 1; // TRUE=this is a IA64 PC sample record ++ U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid ++ U32 mrIndexNone : 1; // no mrIndex (unknown module) ++ } s4; ++ } u3; ++ U64 tsc; // processor timestamp counter ++} SampleRecordPC, *PSampleRecordPC; ++ ++#define SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id) ++#define SAMPLE_RECORD_osid(x) ((x)->osid) ++#define SAMPLE_RECORD_iip(x) ((x)->u1.s1.iip) ++#define SAMPLE_RECORD_ipsr(x) ((x)->u1.s1.ipsr) ++#define SAMPLE_RECORD_eip(x) ((x)->u1.s2.eip) ++#define SAMPLE_RECORD_eflags(x) ((x)->u1.s2.eflags) ++#define SAMPLE_RECORD_csd(x) ((x)->u1.s2.csd) ++#define SAMPLE_RECORD_cs(x) ((x)->cs) ++#define SAMPLE_RECORD_cpu_and_os(x) ((x)->u2.cpuAndOS) ++#define SAMPLE_RECORD_cpu_num(x) ((x)->u2.s3.cpuNum) ++#define SAMPLE_RECORD_uncore_valid(x) ((x)->u2.s3.uncore_valid) ++#define SAMPLE_RECORD_not_vmid0(x) ((x)->u2.s3.notVmid0) ++#define SAMPLE_RECORD_code_mode(x) ((x)->u2.s3.codeMode) ++#define SAMPLE_RECORD_tid(x) ((x)->tid) ++#define SAMPLE_RECORD_pid_rec_index(x) ((x)->pidRecIndex) ++#define SAMPLE_RECORD_bit_fields2(x) ((x)->u3.bitFields2) ++#define SAMPLE_RECORD_mr_index(x) ((x)->u3.s4.mrIndex) ++#define SAMPLE_RECORD_event_index(x) ((x)->u3.s4.eventIndex) ++#define SAMPLE_RECORD_tid_is_raw(x) ((x)->u3.s4.tidIsRaw) ++#define SAMPLE_RECORD_ia64_pc(x) ((x)->u3.s4.IA64PC) ++#define SAMPLE_RECORD_pid_rec_index_raw(x) ((x)->u3.s4.pidRecIndexRaw) ++#define SAMPLE_RECORD_mr_index_none(x) ((x)->u3.s4.mrIndexNone) ++#define SAMPLE_RECORD_tsc(x) ((x)->tsc) ++ ++// end of SampleRecord sections ++ ++/* Uncore Sample Record definition. This is a skinny sample record used by ++ * uncore boxes to record samples. ++ * The sample record consists of a descriptor id, cpu info and timestamp. 
++ */ ++ ++typedef struct UncoreSampleRecordPC_s { ++ U32 descriptor_id; ++ U32 osid; ++ U16 cpuNum; ++ U16 pkgNum; ++ union { ++ U32 flags; ++ struct { ++ U32 uncore_valid : 1; ++ // identifies if the uncore count is valid ++ U32 reserved1 : 31; ++ } s1; ++ } u1; ++ U64 reserved2; ++ U64 tsc; // processor timestamp counter ++} UncoreSampleRecordPC, *PUnocreSampleRecordPC; ++ ++#define UNCORE_SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id) ++#define UNCORE_SAMPLE_RECORD_osid(x) ((x)->osid) ++#define UNCORE_SAMPLE_RECORD_cpu_num(x) ((x)->cpuNum) ++#define UNCORE_SAMPLE_RECORD_pkg_num(x) ((x)->pkgNum) ++#define UNCORE_SAMPLE_RECORD_uncore_valid(x) ((x)->u1.s1.uncore_valid) ++#define UNCORE_SAMPLE_RECORD_tsc(x) ((x)->tsc) ++ ++// end of UncoreSampleRecord section ++ ++// Definitions for user markers data ++// The instances of these structures will be written to user markers temp file ++#define MARKER_DEFAULT_TYPE "Default_Marker" ++#define MARKER_DEFAULT_ID 0 ++#define MAX_MARKER_LENGTH 136 ++ ++#define MARK_ID 4 ++#define MARK_DATA 2 ++#define THREAD_INFO 8 ++ ++/* ++ * Common Register descriptions ++ */ ++ ++/* ++ * Bits used in the debug control register ++ */ ++#define DEBUG_CTL_LBR 0x0000001 ++#define DEBUG_CTL_BTF 0x0000002 ++#define DEBUG_CTL_TR 0x0000040 ++#define DEBUG_CTL_BTS 0x0000080 ++#define DEBUG_CTL_BTINT 0x0000100 ++#define DEBUG_CTL_BT_OFF_OS 0x0000200 ++#define DEBUG_CTL_BTS_OFF_USR 0x0000400 ++#define DEBUG_CTL_FRZ_LBR_ON_PMI 0x0000800 ++#define DEBUG_CTL_FRZ_PMON_ON_PMI 0x0001000 ++#define DEBUG_CTL_ENABLE_UNCORE_PMI_BIT 0x0002000 ++ ++#define DEBUG_CTL_NODE_lbr_get(reg) ((reg)&DEBUG_CTL_LBR) ++#define DEBUG_CTL_NODE_lbr_set(reg) ((reg) |= DEBUG_CTL_LBR) ++#define DEBUG_CTL_NODE_lbr_clear(reg) ((reg) &= ~DEBUG_CTL_LBR) ++ ++#define DEBUG_CTL_NODE_btf_get(reg) ((reg)&DEBUG_CTL_BTF) ++#define DEBUG_CTL_NODE_btf_set(reg) ((reg) |= DEBUG_CTL_BTF) ++#define DEBUG_CTL_NODE_btf_clear(reg) ((reg) &= ~DEBUG_CTL_BTF) ++ ++#define 
DEBUG_CTL_NODE_tr_get(reg) ((reg)&DEBUG_CTL_TR) ++#define DEBUG_CTL_NODE_tr_set(reg) ((reg) |= DEBUG_CTL_TR) ++#define DEBUG_CTL_NODE_tr_clear(reg) ((reg) &= ~DEBUG_CTL_TR) ++ ++#define DEBUG_CTL_NODE_bts_get(reg) ((reg)&DEBUG_CTL_BTS) ++#define DEBUG_CTL_NODE_bts_set(reg) ((reg) |= DEBUG_CTL_BTS) ++#define DEBUG_CTL_NODE_bts_clear(reg) ((reg) &= ~DEBUG_CTL_BTS) ++ ++#define DEBUG_CTL_NODE_btint_get(reg) ((reg)&DEBUG_CTL_BTINT) ++#define DEBUG_CTL_NODE_btint_set(reg) ((reg) |= DEBUG_CTL_BTINT) ++#define DEBUG_CTL_NODE_btint_clear(reg) ((reg) &= ~DEBUG_CTL_BTINT) ++ ++#define DEBUG_CTL_NODE_bts_off_os_get(reg) ((reg)&DEBUG_CTL_BTS_OFF_OS) ++#define DEBUG_CTL_NODE_bts_off_os_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_OS) ++#define DEBUG_CTL_NODE_bts_off_os_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_OS) ++ ++#define DEBUG_CTL_NODE_bts_off_usr_get(reg) ((reg)&DEBUG_CTL_BTS_OFF_USR) ++#define DEBUG_CTL_NODE_bts_off_usr_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_USR) ++#define DEBUG_CTL_NODE_bts_off_usr_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_USR) ++ ++#define DEBUG_CTL_NODE_frz_lbr_on_pmi_get(reg) ((reg)&DEBUG_CTL_FRZ_LBR_ON_PMI) ++#define DEBUG_CTL_NODE_frz_lbr_on_pmi_set(reg) \ ++ ((reg) |= DEBUG_CTL_FRZ_LBR_ON_PMI) ++#define DEBUG_CTL_NODE_frz_lbr_on_pmi_clear(reg) \ ++ ((reg) &= ~DEBUG_CTL_FRZ_LBR_ON_PMI) ++ ++#define DEBUG_CTL_NODE_frz_pmon_on_pmi_get(reg) \ ++ ((reg)&DEBUG_CTL_FRZ_PMON_ON_PMI) ++#define DEBUG_CTL_NODE_frz_pmon_on_pmi_set(reg) \ ++ ((reg) |= DEBUG_CTL_FRZ_PMON_ON_PMI) ++#define DEBUG_CTL_NODE_frz_pmon_on_pmi_clear(reg) \ ++ ((reg) &= ~DEBUG_CTL_FRZ_PMON_ON_PMI) ++ ++#define DEBUG_CTL_NODE_enable_uncore_pmi_get(reg) \ ++ ((reg)&DEBUG_CTL_ENABLE_UNCORE_PMI) ++#define DEBUG_CTL_NODE_enable_uncore_pmi_set(reg) \ ++ ((reg) |= DEBUG_CTL_ENABLE_UNCORE_PMI) ++#define DEBUG_CTL_NODE_enable_uncore_pmi_clear(reg) \ ++ ((reg) &= ~DEBUG_CTL_ENABLE_UNCORE_PMI) ++ ++/* ++ * @macro SEP_VERSION_NODE_S ++ * @brief ++ * This structure supports versioning in Sep. 
The field major indicates major, ++ * version minor indicates the minor version and api indicates the api version ++ * for the current sep build. This structure is initialized at the time when ++ * the driver is loaded. ++ */ ++ ++typedef struct SEP_VERSION_NODE_S SEP_VERSION_NODE; ++typedef SEP_VERSION_NODE * SEP_VERSION; ++ ++struct SEP_VERSION_NODE_S { ++ union { ++ U32 sep_version; ++ struct { ++ S32 major : 8; ++ S32 minor : 8; ++ S32 api : 8; ++ S32 update : 8; ++ } s1; ++ } u1; ++}; ++ ++#define SEP_VERSION_NODE_sep_version(version) ((version)->u1.sep_version) ++#define SEP_VERSION_NODE_major(version) ((version)->u1.s1.major) ++#define SEP_VERSION_NODE_minor(version) ((version)->u1.s1.minor) ++#define SEP_VERSION_NODE_api(version) ((version)->u1.s1.api) ++#define SEP_VERSION_NODE_update(version) ((version)->u1.s1.update) ++ ++/* ++ * The VTSA_SYS_INFO_STRUCT information that is shared across kernel mode ++ * and user mode code, very specifically for tb5 file generation ++ */ ++ ++typedef enum { ++ GT_UNK = 0, ++ GT_PER_CPU, ++ GT_PER_CHIPSET, ++ GT_CPUID, ++ GT_NODE, ++ GT_SYSTEM, ++ GT_SAMPLE_RECORD_INFO ++} GEN_ENTRY_TYPES; ++ ++typedef enum { ++ GST_UNK = 0, ++ GST_X86, ++ GST_ITANIUM, ++ GST_SA, //strong arm ++ GST_XSC, ++ GST_EM64T, ++ GST_CS860 ++} GEN_ENTRY_SUBTYPES; ++ ++typedef struct __fixed_size_pointer { ++ union { ++ U64 fs_force_alignment; ++ struct { ++ U32 fs_unused; ++ U32 is_ptr : 1; ++ } s1; ++ } u1; ++ union { ++ U64 fs_offset; ++ void *fs_ptr; ++ } u2; ++} VTSA_FIXED_SIZE_PTR; ++ ++#define VTSA_FIXED_SIZE_PTR_is_ptr(fsp) ((fsp)->u1.s1.is_ptr) ++#define VTSA_FIXED_SIZE_PTR_fs_offset(fsp) ((fsp)->u2.fs_offset) ++#define VTSA_FIXED_SIZE_PTR_fs_ptr(fsp) ((fsp)->u2.fs_ptr) ++ ++typedef struct __generic_array_header { ++ // ++ // Information realted to the generic header ++ // ++ U32 hdr_size; // size of this generic header ++ // (for versioning and real data starts ++ // after the header) ++ ++ U32 next_field_hdr_padding; // make sure next 
field is 8-byte aligned ++ ++ // ++ // VTSA_FIXED_SIZE_PTR should always be on an 8-byte boundary... ++ // ++ // pointer to the next generic header if there is one ++ // ++ VTSA_FIXED_SIZE_PTR hdr_next_gen_hdr; ++ ++ U32 hdr_reserved[7]; // padding for future use - force to 64 bytes... ++ ++ // ++ // Information related to the array this header is describing ++ // ++ U32 array_num_entries; ++ U32 array_entry_size; ++ U16 array_type; // from the GEN_ENTRY_TYPES enumeration ++ U16 array_subtype; // from the GEN_ENTRY_SUBTYPES enumeration ++} VTSA_GEN_ARRAY_HDR; ++ ++#define VTSA_GEN_ARRAY_HDR_hdr_size(gah) ((gah)->hdr_size) ++#define VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gah) ((gah)->hdr_next_gen_hdr) ++#define VTSA_GEN_ARRAY_HDR_array_num_entries(gah) ((gah)->array_num_entries) ++#define VTSA_GEN_ARRAY_HDR_array_entry_size(gah) ((gah)->array_entry_size) ++#define VTSA_GEN_ARRAY_HDR_array_type(gah) ((gah)->array_type) ++#define VTSA_GEN_ARRAY_HDR_array_subtype(gah) ((gah)->array_subtype) ++ ++typedef struct __cpuid_x86 { ++ U32 cpuid_eax_input; ++ U32 cpuid_eax; ++ U32 cpuid_ebx; ++ U32 cpuid_ecx; ++ U32 cpuid_edx; ++} VTSA_CPUID_X86; ++ ++#define VTSA_CPUID_X86_cpuid_eax_input(cid) ((cid)->cpuid_eax_input) ++#define VTSA_CPUID_X86_cpuid_eax(cid) ((cid)->cpuid_eax) ++#define VTSA_CPUID_X86_cpuid_ebx(cid) ((cid)->cpuid_ebx) ++#define VTSA_CPUID_X86_cpuid_ecx(cid) ((cid)->cpuid_ecx) ++#define VTSA_CPUID_X86_cpuid_edx(cid) ((cid)->cpuid_edx) ++ ++typedef struct __cpuid_ipf { ++ U64 cpuid_select; ++ U64 cpuid_val; ++} VTSA_CPUID_IPF; ++ ++#define VTSA_CPUID_IPF_cpuid_select(cid) ((cid)->cpuid_select) ++#define VTSA_CPUID_IPF_cpuid_val(cid) ((cid)->cpuid_val) ++ ++typedef struct __generic_per_cpu { ++ // ++ // per cpu information ++ // ++ U32 cpu_number; // cpu number (as defined by the OS) ++ U32 cpu_speed_mhz; // cpu speed (in Mhz) ++ U32 cpu_fsb_mhz; // Front Side Bus speed (in Mhz) (if known) ++ U32 cpu_cache_L2; ++ // ??? 
USER: cpu L2 (marketing definition) cache size (if known) ++ ++ // ++ // And pointer to other structures. Keep this on an 8-byte boundary ++ // ++ // "pointer" to generic array header that should contain ++ // cpuid information for this cpu ++ // ++ VTSA_FIXED_SIZE_PTR cpu_cpuid_array; ++ ++ S64 cpu_tsc_offset; ++ // TSC offset from CPU 0 computed as (TSC CPU N - TSC CPU 0) ++ // ++ // intel processor number (from mkting). ++ // Currently 3 decimal digits (3xx, 5xx and 7xx) ++ // ++ U32 cpu_intel_processor_number; ++ ++ U32 cpu_cache_L3; ++ // ??? USER: cpu L3 (marketing definition) cache size (if known) ++ ++ U64 platform_id; ++ ++ // ++ // package/mapping information ++ // ++ // The hierarchy for uniquely identifying a logical processor ++ // in a system is node number/id (from the node structure), ++ // package number, core number, and thread number. ++ // Core number is for identifying a core within a package. ++ // ++ // Actually, on Itanium getting all this information is ++ // pretty involved with complicated algorithm using PAL calls. ++ // I don't know how important all this stuff is to the user. ++ // Maybe we can just have the place holder now and figure out ++ // how to fill them later. 
++ // ++ U16 cpu_package_num; // package number for this cpu (if known) ++ U16 cpu_core_num; // core number (if known) ++ U16 cpu_hw_thread_num; // hw thread number inside the core (if known) ++ ++ U16 cpu_threads_per_core; // total number of h/w threads per core ++ U16 cpu_module_id; // Processor module number ++ U16 cpu_num_modules; // Number of processor modules ++ U32 cpu_core_type; // Core type for hetero ++ U32 arch_perfmon_ver; ++ U32 num_gp_counters; ++ U32 num_fixed_counters; ++ U32 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ ++} VTSA_GEN_PER_CPU; ++ ++#define VTSA_GEN_PER_CPU_cpu_number(p_cpu) ((p_cpu)->cpu_number) ++#define VTSA_GEN_PER_CPU_cpu_speed_mhz(p_cpu) ((p_cpu)->cpu_speed_mhz) ++#define VTSA_GEN_PER_CPU_cpu_fsb_mhz(p_cpu) ((p_cpu)->cpu_fsb_mhz) ++#define VTSA_GEN_PER_CPU_cpu_cache_L2(p_cpu) ((p_cpu)->cpu_cache_L2) ++#define VTSA_GEN_PER_CPU_cpu_cpuid_array(p_cpu) ((p_cpu)->cpu_cpuid_array) ++#define VTSA_GEN_PER_CPU_cpu_tsc_offset(p_cpu) ((p_cpu)->cpu_tsc_offset) ++#define VTSA_GEN_PER_CPU_cpu_intel_processor_number(p_cpu) \ ++ ((p_cpu)->cpu_intel_processor_number) ++#define VTSA_GEN_PER_CPU_cpu_cache_L3(p_cpu) ((p_cpu)->cpu_cache_L3) ++#define VTSA_GEN_PER_CPU_platform_id(p_cpu) ((p_cpu)->platform_id) ++#define VTSA_GEN_PER_CPU_cpu_package_num(p_cpu) ((p_cpu)->cpu_package_num) ++#define VTSA_GEN_PER_CPU_cpu_core_num(p_cpu) ((p_cpu)->cpu_core_num) ++#define VTSA_GEN_PER_CPU_cpu_hw_thread_num(p_cpu) ((p_cpu)->cpu_hw_thread_num) ++#define VTSA_GEN_PER_CPU_cpu_threads_per_core(p_cpu) \ ++ ((p_cpu)->cpu_threads_per_core) ++#define VTSA_GEN_PER_CPU_cpu_module_num(p_cpu) ((p_cpu)->cpu_module_id) ++#define VTSA_GEN_PER_CPU_cpu_num_modules(p_cpu) ((p_cpu)->cpu_num_modules) ++#define VTSA_GEN_PER_CPU_cpu_core_type(p_cpu) ((p_cpu)->cpu_core_type) ++#define VTSA_GEN_PER_CPU_arch_perfmon_ver(p_cpu) ((p_cpu)->arch_perfmon_ver) ++#define VTSA_GEN_PER_CPU_num_gp_counters(p_cpu) ((p_cpu)->num_gp_counters) ++#define 
VTSA_GEN_PER_CPU_num_fixed_counters(p_cpu) ((p_cpu)->num_fixed_counters) ++ ++typedef struct __node_info { ++ U32 node_type_from_shell; ++ U32 node_id; // The node number/id (if known) ++ ++ U32 node_num_available; // total number cpus on this node ++ U32 node_num_used; // USER: number used based on cpu mask at time of run ++ ++ U64 node_physical_memory; ++ // amount of physical memory (bytes) on this node ++ ++ // ++ // pointer to the first generic header that ++ // contains the per-cpu information ++ // ++ // Keep the VTSA_FIXED_SIZE_PTR on an 8-byte boundary... ++ // ++ VTSA_FIXED_SIZE_PTR node_percpu_array; ++ ++ U32 node_reserved[2]; // leave some space ++ ++} VTSA_NODE_INFO; ++ ++#define VTSA_NODE_INFO_node_type_from_shell(vni) ((vni)->node_type_from_shell) ++#define VTSA_NODE_INFO_node_id(vni) ((vni)->node_id) ++#define VTSA_NODE_INFO_node_num_available(vni) ((vni)->node_num_available) ++#define VTSA_NODE_INFO_node_num_used(vni) ((vni)->node_num_used) ++#define VTSA_NODE_INFO_node_physical_memory(vni) ((vni)->node_physical_memory) ++#define VTSA_NODE_INFO_node_percpu_array(vni) ((vni)->node_percpu_array) ++ ++typedef struct __sys_info { ++ // ++ // Keep this on an 8-byte boundary ++ // ++ VTSA_FIXED_SIZE_PTR node_array; // the per-node information ++ ++ U64 min_app_address; ++ // USER: lower allowed user space address (if known) ++ U64 max_app_address; ++ // USER: upper allowed user space address (if known) ++ U32 page_size; // Current page size ++ U32 allocation_granularity; ++ // USER: Granularity of allocation requests (if known) ++ U32 reserved1; // added for future fields ++ U32 reserved2; // alignment purpose ++ U64 reserved3[3]; // added for future fields ++ ++} VTSA_SYS_INFO; ++ ++#define VTSA_SYS_INFO_node_array(sys_info) ((sys_info)->node_array) ++#define VTSA_SYS_INFO_min_app_address(sys_info) ((sys_info)->min_app_address) ++#define VTSA_SYS_INFO_max_app_address(sys_info) ((sys_info)->max_app_address) ++#define VTSA_SYS_INFO_page_size(sys_info) 
((sys_info)->page_size) ++#define VTSA_SYS_INFO_allocation_granularity(sys_info) \ ++ ((sys_info)->allocation_granularity) ++ ++typedef struct DRV_TOPOLOGY_INFO_NODE_S DRV_TOPOLOGY_INFO_NODE; ++typedef DRV_TOPOLOGY_INFO_NODE * DRV_TOPOLOGY_INFO; ++ ++struct DRV_TOPOLOGY_INFO_NODE_S { ++ U32 cpu_number; // cpu number (as defined by the OS) ++ U16 cpu_package_num; // package number for this cpu (if known) ++ U16 cpu_core_num; // core number (if known) ++ U16 cpu_hw_thread_num; // T0 or T1 if HT enabled ++ U16 reserved1; ++ S32 socket_master; ++ S32 core_master; ++ S32 thr_master; ++ U32 cpu_module_num; ++ U32 cpu_module_master; ++ U32 cpu_num_modules; ++ U32 cpu_core_type; ++ U32 arch_perfmon_ver; ++ U32 num_gp_counters; ++ U32 num_fixed_counters; ++ U32 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DRV_TOPOLOGY_INFO_cpu_number(dti) ((dti)->cpu_number) ++#define DRV_TOPOLOGY_INFO_cpu_package_num(dti) ((dti)->cpu_package_num) ++#define DRV_TOPOLOGY_INFO_cpu_core_num(dti) ((dti)->cpu_core_num) ++#define DRV_TOPOLOGY_INFO_socket_master(dti) ((dti)->socket_master) ++#define DRV_TOPOLOGY_INFO_core_master(dti) ((dti)->core_master) ++#define DRV_TOPOLOGY_INFO_thr_master(dti) ((dti)->thr_master) ++#define DRV_TOPOLOGY_INFO_cpu_hw_thread_num(dti) ((dti)->cpu_hw_thread_num) ++#define DRV_TOPOLOGY_INFO_cpu_module_num(dti) ((dti)->cpu_module_num) ++#define DRV_TOPOLOGY_INFO_cpu_module_master(dti) ((dti)->cpu_module_master) ++#define DRV_TOPOLOGY_INFO_cpu_num_modules(dti) ((dti)->cpu_num_modules) ++#define DRV_TOPOLOGY_INFO_cpu_core_type(dti) ((dti)->cpu_core_type) ++#define DRV_TOPOLOGY_INFO_arch_perfmon_ver(dti) ((dti)->arch_perfmon_ver) ++#define DRV_TOPOLOGY_INFO_num_gp_counters(dti) ((dti)->num_gp_counters) ++#define DRV_TOPOLOGY_INFO_num_fixed_counters(dti) ((dti)->num_fixed_counters) ++ ++#define VALUE_TO_BE_DISCOVERED 0 ++ ++// dimm information ++typedef struct DRV_DIMM_INFO_NODE_S DRV_DIMM_INFO_NODE; ++typedef DRV_DIMM_INFO_NODE * DRV_DIMM_INFO; ++ 
++struct DRV_DIMM_INFO_NODE_S { ++ U32 platform_id; ++ U32 channel_num; ++ U32 rank_num; ++ U32 value; ++ U8 mc_num; ++ U8 dimm_valid; ++ U8 valid_value; ++ U8 rank_value; ++ U8 density_value; ++ U8 width_value; ++ U16 socket_num; ++ U64 reserved1; ++ U64 reserved2; ++}; ++ ++#define DRV_DIMM_INFO_platform_id(di) ((di)->platform_id) ++#define DRV_DIMM_INFO_channel_num(di) ((di)->channel_num) ++#define DRV_DIMM_INFO_rank_num(di) ((di)->rank_num) ++#define DRV_DIMM_INFO_value(di) ((di)->value) ++#define DRV_DIMM_INFO_mc_num(di) ((di)->mc_num) ++#define DRV_DIMM_INFO_dimm_valid(di) ((di)->dimm_valid) ++#define DRV_DIMM_INFO_valid_value(di) ((di)->valid_value) ++#define DRV_DIMM_INFO_rank_value(di) ((di)->rank_value) ++#define DRV_DIMM_INFO_density_value(di) ((di)->density_value) ++#define DRV_DIMM_INFO_width_value(di) ((di)->width_value) ++#define DRV_DIMM_INFO_socket_num(di) ((di)->socket_num) ++ ++//platform information. need to get from driver ++#define MAX_PACKAGES 16 ++#define MAX_CHANNELS 8 ++#define MAX_RANKS 3 ++ ++typedef struct DRV_PLATFORM_INFO_NODE_S DRV_PLATFORM_INFO_NODE; ++typedef DRV_PLATFORM_INFO_NODE * DRV_PLATFORM_INFO; ++ ++struct DRV_PLATFORM_INFO_NODE_S { ++ U64 info; // platform info ++ U64 ddr_freq_index; // freq table index ++ U8 misc_valid; // misc enabled valid bit ++ U8 reserved1; // added for alignment purpose ++ U16 reserved2; ++ U32 vmm_timer_freq; // timer frequency from VMM on SoFIA (in HZ) ++ U64 misc_info; // misc enabled info ++ U64 ufs_freq; // ufs frequency (HSX only) ++ DRV_DIMM_INFO_NODE dimm_info[MAX_PACKAGES * MAX_CHANNELS * MAX_RANKS]; ++ U64 energy_multiplier; // Value of energy multiplier ++ U64 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++ U64 reserved6; ++}; ++ ++#define DRV_PLATFORM_INFO_info(data) ((data)->info) ++#define DRV_PLATFORM_INFO_ddr_freq_index(data) ((data)->ddr_freq_index) ++#define DRV_PLATFORM_INFO_misc_valid(data) ((data)->misc_valid) ++#define DRV_PLATFORM_INFO_misc_info(data) ((data)->misc_info) 
++#define DRV_PLATFORM_INFO_ufs_freq(data) ((data)->ufs_freq) ++#define DRV_PLATFORM_INFO_dimm_info(data) ((data)->dimm_info) ++#define DRV_PLATFORM_INFO_energy_multiplier(data) ((data)->energy_multiplier) ++#define DRV_PLATFORM_INFO_vmm_timer_freq(data) ((data)->vmm_timer_freq) ++ ++//platform information. need to get from Platform picker ++typedef struct PLATFORM_FREQ_INFO_NODE_S PLATFORM_FREQ_INFO_NODE; ++typedef PLATFORM_FREQ_INFO_NODE * PLATFORM_FREQ_INFO; ++ ++struct PLATFORM_FREQ_INFO_NODE_S { ++ float multiplier; // freq multiplier ++ double *table; // freq table ++ U32 table_size; // freq table size ++ U64 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++#define PLATFORM_FREQ_INFO_multiplier(data) ((data)->multiplier) ++#define PLATFORM_FREQ_INFO_table(data) ((data)->table) ++#define PLATFORM_FREQ_INFO_table_size(data) ((data)->table_size) ++ ++typedef struct DEVICE_INFO_NODE_S DEVICE_INFO_NODE; ++typedef DEVICE_INFO_NODE * DEVICE_INFO; //NEEDED in PP ++ ++struct DEVICE_INFO_NODE_S { ++ S8 *dll_name; ++ PVOID dll_handle; ++ S8 *cpu_name; ++ S8 *pmu_name; ++ DRV_STCHAR *event_db_file_name; ++ //PLATFORM_IDENTITY plat_identity; ++ // is undefined right now. Please take this as structure containing U64 ++ U32 plat_type; ++ // device type (e.g., DEVICE_INFO_CORE, etc. ... see enum below) ++ U32 plat_sub_type; ++ // cti_type (e.g., CTI_Sandybridge, etc., ... see env_info_types.h) ++ S32 dispatch_id; ++ // this will be set in user mode dlls and will be unique across all ++ // IPF, IA32 (including MIDS). 
++ ECB *ecb; ++ EVENT_CONFIG ec; ++ DEV_CONFIG pcfg; ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 num_of_groups; ++ U32 size_of_alloc; // size of each event control block ++ PVOID drv_event; ++ U32 num_events; ++ U32 event_id_index; ++ // event id index of device ++ // (basically how many events processed before this device) ++ U32 num_counters; ++ U32 group_index; ++ U32 num_packages; ++ U32 num_units; ++ U32 device_type; ++ U32 core_type; ++ U32 pmu_clone_id; // cti_type of platform to impersonate in device DLLs ++ U32 device_scope; ++ U32 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++}; ++ ++#define MAX_EVENT_NAME_LENGTH 256 ++ ++#define DEVICE_INFO_dll_name(pdev) ((pdev)->dll_name) ++#define DEVICE_INFO_dll_handle(pdev) ((pdev)->dll_handle) ++#define DEVICE_INFO_cpu_name(pdev) ((pdev)->cpu_name) ++#define DEVICE_INFO_pmu_name(pdev) ((pdev)->pmu_name) ++#define DEVICE_INFO_event_db_file_name(pdev) ((pdev)->event_db_file_name) ++#define DEVICE_INFO_plat_type(pdev) ((pdev)->plat_type) ++#define DEVICE_INFO_plat_sub_type(pdev) ((pdev)->plat_sub_type) ++#define DEVICE_INFO_pmu_clone_id(pdev) ((pdev)->pmu_clone_id) ++#define DEVICE_INFO_dispatch_id(pdev) ((pdev)->dispatch_id) ++#define DEVICE_INFO_ecb(pdev) ((pdev)->ecb) ++#define DEVICE_INFO_ec(pdev) ((pdev)->ec) ++#define DEVICE_INFO_pcfg(pdev) ((pdev)->pcfg) ++#define DEVICE_INFO_pcfg_unc(pdev) ((pdev)->pcfg_unc) ++#define DEVICE_INFO_num_groups(pdev) ((pdev)->num_of_groups) ++#define DEVICE_INFO_size_of_alloc(pdev) ((pdev)->size_of_alloc) ++#define DEVICE_INFO_drv_event(pdev) ((pdev)->drv_event) ++#define DEVICE_INFO_num_events(pdev) ((pdev)->num_events) ++#define DEVICE_INFO_event_id_index(pdev) ((pdev)->event_id_index) ++#define DEVICE_INFO_num_counters(pdev) ((pdev)->num_counters) ++#define DEVICE_INFO_group_index(pdev) ((pdev)->group_index) ++#define DEVICE_INFO_num_packages(pdev) ((pdev)->num_packages) ++#define DEVICE_INFO_num_units(pdev) ((pdev)->num_units) ++#define DEVICE_INFO_device_type(pdev) 
((pdev)->device_type) ++#define DEVICE_INFO_core_type(pdev) ((pdev)->core_type) ++#define DEVICE_INFO_device_scope(pdev) ((pdev)->device_scope) ++ ++typedef struct DEVICE_INFO_DATA_NODE_S DEVICE_INFO_DATA_NODE; ++typedef DEVICE_INFO_DATA_NODE * DEVICE_INFO_DATA; //NEEDED in PP ++ ++struct DEVICE_INFO_DATA_NODE_S { ++ DEVICE_INFO pdev_info; ++ U32 num_elements; ++ U32 num_allocated; ++ U64 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DEVICE_INFO_DATA_pdev_info(d) ((d)->pdev_info) ++#define DEVICE_INFO_DATA_num_elements(d) ((d)->num_elements) ++#define DEVICE_INFO_DATA_num_allocated(d) ((d)->num_allocated) ++ ++typedef enum { ++ DEVICE_INFO_CORE = 0, ++ DEVICE_INFO_UNCORE = 1, ++ DEVICE_INFO_CHIPSET = 2, ++ DEVICE_INFO_GFX = 3, ++ DEVICE_INFO_PWR = 4, ++ DEVICE_INFO_TELEMETRY = 5 ++} DEVICE_INFO_TYPE; ++ ++typedef enum { ++ INVALID_TERMINATE_TYPE = 0, ++ STOP_TERMINATE, ++ CANCEL_TERMINATE ++} ABNORMAL_TERMINATE_TYPE; ++ ++typedef enum { ++ DEVICE_SCOPE_PACKAGE = 0, ++ DEVICE_SCOPE_SYSTEM = 1 ++} DEVICE_SCOPE_TYPE; ++ ++typedef struct PCIFUNC_INFO_NODE_S PCIFUNC_INFO_NODE; ++typedef PCIFUNC_INFO_NODE * PCIFUNC_INFO; ++ ++struct PCIFUNC_INFO_NODE_S { ++ U32 valid; ++ U32 num_entries; ++ // the number of entries found with same ++ // but difference bus_no. 
++ U64 deviceId; ++ U64 reserved1; ++ U64 reserved2; ++}; ++ ++#define PCIFUNC_INFO_NODE_funcno(x) ((x)->funcno) ++#define PCIFUNC_INFO_NODE_valid(x) ((x)->valid) ++#define PCIFUNC_INFO_NODE_deviceId(x) ((x)->deviceId) ++#define PCIFUNC_INFO_NODE_num_entries(x) ((x)->num_entries) ++ ++typedef struct PCIDEV_INFO_NODE_S PCIDEV_INFO_NODE; ++typedef PCIDEV_INFO_NODE * PCIDEV_INFO; ++ ++struct PCIDEV_INFO_NODE_S { ++ PCIFUNC_INFO_NODE func_info[MAX_PCI_FUNCNO]; ++ U32 valid; ++ U32 dispatch_id; ++ U64 reserved1; ++ U64 reserved2; ++}; ++ ++#define PCIDEV_INFO_NODE_func_info(x, i) ((x).func_info[i]) ++#define PCIDEV_INFO_NODE_valid(x) ((x).valid) ++ ++typedef struct UNCORE_PCIDEV_NODE_S UNCORE_PCIDEV_NODE; ++ ++struct UNCORE_PCIDEV_NODE_S { ++ PCIDEV_INFO_NODE pcidev[MAX_PCI_DEVNO]; ++ U32 dispatch_id; ++ U32 scan; ++ U32 num_uncore_units; ++ U32 num_deviceid_entries; ++ U8 dimm_device1; ++ U8 dimm_device2; ++ U16 reserved1; ++ U32 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++ U32 deviceid_list[MAX_PCI_DEVNO]; ++}; ++ ++// Structure used to perform uncore device discovery ++ ++typedef struct UNCORE_TOPOLOGY_INFO_NODE_S UNCORE_TOPOLOGY_INFO_NODE; ++typedef UNCORE_TOPOLOGY_INFO_NODE * UNCORE_TOPOLOGY_INFO; ++ ++struct UNCORE_TOPOLOGY_INFO_NODE_S { ++ UNCORE_PCIDEV_NODE device[MAX_DEVICES]; ++}; ++ ++#define UNCORE_TOPOLOGY_INFO_device(x, dev_index) ((x)->device[dev_index]) ++#define UNCORE_TOPOLOGY_INFO_device_dispatch_id(x, dev_index) \ ++ ((x)->device[dev_index].dispatch_id) ++#define UNCORE_TOPOLOGY_INFO_device_scan(x, dev_index) \ ++ ((x)->device[dev_index].scan) ++#define UNCORE_TOPOLOGY_INFO_pcidev_valid(x, dev_index, devno) \ ++ ((x)->device[dev_index].pcidev[devno].valid) ++#define UNCORE_TOPOLOGY_INFO_pcidev_dispatch_id(x, dev_index, devno) \ ++ ((x)->device[dev_index].pcidev[devno].dispatch_id) ++#define UNCORE_TOPOLOGY_INFO_pcidev(x, dev_index, devno) \ ++ ((x)->device[dev_index].pcidev[devno]) ++#define UNCORE_TOPOLOGY_INFO_num_uncore_units(x, dev_index) \ 
++ ((x)->device[dev_index].num_uncore_units) ++#define UNCORE_TOPOLOGY_INFO_num_deviceid_entries(x, dev_index) \ ++ ((x)->device[dev_index].num_deviceid_entries) ++#define UNCORE_TOPOLOGY_INFO_dimm_device1(x, dev_index) \ ++ ((x)->device[dev_index].dimm_device1) ++#define UNCORE_TOPOLOGY_INFO_dimm_device2(x, dev_index) \ ++ ((x)->device[dev_index].dimm_device2) ++#define UNCORE_TOPOLOGY_INFO_deviceid(x, dev_index, deviceid_idx) \ ++ ((x)->device[dev_index].deviceid_list[deviceid_idx]) ++#define UNCORE_TOPOLOGY_INFO_pcidev_set_funcno_valid(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].valid = 1) ++#define UNCORE_TOPOLOGY_INFO_pcidev_is_found_in_platform(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries) ++#define UNCORE_TOPOLOGY_INFO_pcidev_is_devno_funcno_valid(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].valid ? TRUE : \ ++ FALSE) ++#define UNCORE_TOPOLOGY_INFO_pcidev_is_device_found(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries > 0) ++ ++#define UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries) ++ ++typedef enum { ++ CORE_TOPOLOGY_NODE = 0, ++ UNCORE_TOPOLOGY_NODE_IMC = 1, ++ UNCORE_TOPOLOGY_NODE_UBOX = 2, ++ UNCORE_TOPOLOGY_NODE_QPI = 3, ++ MAX_TOPOLOGY_DEV = 4, ++ // When you adding new topo node to this enum, ++ // make sue MAX_TOPOLOGY_DEV is always the last one. 
++} UNCORE_TOPOLOGY_NODE_INDEX_TYPE; ++ ++typedef struct PLATFORM_TOPOLOGY_REG_NODE_S PLATFORM_TOPOLOGY_REG_NODE; ++typedef PLATFORM_TOPOLOGY_REG_NODE * PLATFORM_TOPOLOGY_REG; ++ ++struct PLATFORM_TOPOLOGY_REG_NODE_S { ++ U32 bus; ++ U32 device; ++ U32 function; ++ U32 reg_id; ++ U64 reg_mask; ++ U64 reg_value[MAX_PACKAGES]; ++ U8 reg_type; ++ U8 device_valid; ++ U16 reserved1; ++ U32 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define PLATFORM_TOPOLOGY_REG_bus(x, i) ((x)[(i)].bus) ++#define PLATFORM_TOPOLOGY_REG_device(x, i) ((x)[(i)].device) ++#define PLATFORM_TOPOLOGY_REG_function(x, i) ((x)[(i)].function) ++#define PLATFORM_TOPOLOGY_REG_reg_id(x, i) ((x)[(i)].reg_id) ++#define PLATFORM_TOPOLOGY_REG_reg_mask(x, i) ((x)[(i)].reg_mask) ++#define PLATFORM_TOPOLOGY_REG_reg_type(x, i) ((x)[(i)].reg_type) ++#define PLATFORM_TOPOLOGY_REG_device_valid(x, i) ((x)[(i)].device_valid) ++#define PLATFORM_TOPOLOGY_REG_reg_value(x, i, package_no) \ ++ ((x)[(i)].reg_value[package_no]) ++ ++typedef struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S ++ PLATFORM_TOPOLOGY_DISCOVERY_NODE; ++typedef PLATFORM_TOPOLOGY_DISCOVERY_NODE * PLATFORM_TOPOLOGY_DISCOVERY; ++ ++struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S { ++ U32 device_index; ++ U32 device_id; ++ U32 num_registers; ++ U8 scope; ++ U8 prog_valid; ++ U16 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++ PLATFORM_TOPOLOGY_REG_NODE topology_regs[MAX_REGS]; ++}; ++ ++//Structure used to discover the uncore device topology_device ++ ++typedef struct PLATFORM_TOPOLOGY_PROG_NODE_S PLATFORM_TOPOLOGY_PROG_NODE; ++typedef PLATFORM_TOPOLOGY_PROG_NODE * PLATFORM_TOPOLOGY_PROG; ++ ++struct PLATFORM_TOPOLOGY_PROG_NODE_S { ++ U32 num_devices; ++ PLATFORM_TOPOLOGY_DISCOVERY_NODE topology_device[MAX_TOPOLOGY_DEV]; ++}; ++ ++#define PLATFORM_TOPOLOGY_PROG_num_devices(x) ((x)->num_devices) ++#define PLATFORM_TOPOLOGY_PROG_topology_device(x, dev_index) \ ++ ((x)->topology_device[dev_index]) ++#define 
PLATFORM_TOPOLOGY_PROG_topology_device_device_index(x, dev_index) \ ++ ((x)->topology_device[dev_index].device_index) ++#define PLATFORM_TOPOLOGY_PROG_topology_device_device_id(x, dev_index) \ ++ ((x)->topology_device[dev_index].device_id) ++#define PLATFORM_TOPOLOGY_PROG_topology_device_scope(x, dev_index) \ ++ ((x)->topology_device[dev_index].scope) ++#define PLATFORM_TOPOLOGY_PROG_topology_device_num_registers(x, dev_index) \ ++ ((x)->topology_device[dev_index].num_registers) ++#define PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid(x, dev_index) \ ++ ((x)->topology_device[dev_index].prog_valid) ++#define PLATFORM_TOPOLOGY_PROG_topology_topology_regs(x, dev_index) \ ++ ((x)->topology_device[dev_index].topology_regs) ++ ++typedef struct FPGA_GB_DISCOVERY_NODE_S FPGA_GB_DISCOVERY_NODE; ++ ++struct FPGA_GB_DISCOVERY_NODE_S { ++ U16 bar_num; ++ U16 feature_id; ++ U32 device_id; ++ U64 afu_id_l; ++ U64 afu_id_h; ++ U32 feature_offset; ++ U32 feature_len; ++ U8 scan; ++ U8 valid; ++ U16 reserved1; ++ U32 reserved2; ++}; ++ ++typedef struct FPGA_GB_DEV_NODE_S FPGA_GB_DEV_NODE; ++typedef FPGA_GB_DEV_NODE * FPGA_GB_DEV; ++ ++struct FPGA_GB_DEV_NODE_S { ++ U32 num_devices; ++ FPGA_GB_DISCOVERY_NODE fpga_gb_device[MAX_DEVICES]; ++}; ++ ++#define FPGA_GB_DEV_num_devices(x) ((x)->num_devices) ++#define FPGA_GB_DEV_device(x, dev_index) ((x)->fpga_gb_device[dev_index]) ++#define FPGA_GB_DEV_bar_num(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].bar_num) ++#define FPGA_GB_DEV_feature_id(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].feature_id) ++#define FPGA_GB_DEV_device_id(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].device_id) ++#define FPGA_GB_DEV_afu_id_low(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].afu_id_l) ++#define FPGA_GB_DEV_afu_id_high(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].afu_id_h) ++#define FPGA_GB_DEV_feature_offset(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].feature_offset) ++#define FPGA_GB_DEV_feature_len(x, 
dev_index) \ ++ ((x)->fpga_gb_device[dev_index].feature_len) ++#define FPGA_GB_DEV_scan(x, dev_index) ((x)->fpga_gb_device[dev_index].scan) ++#define FPGA_GB_DEV_valid(x, dev_index) ((x)->fpga_gb_device[dev_index].valid) ++ ++typedef enum { ++ UNCORE_TOPOLOGY_INFO_NODE_IMC = 0, ++ UNCORE_TOPOLOGY_INFO_NODE_QPILL = 1, ++ UNCORE_TOPOLOGY_INFO_NODE_HA = 2, ++ UNCORE_TOPOLOGY_INFO_NODE_R3 = 3, ++ UNCORE_TOPOLOGY_INFO_NODE_R2 = 4, ++ UNCORE_TOPOLOGY_INFO_NODE_IRP = 5, ++ UNCORE_TOPOLOGY_INFO_NODE_IMC_UCLK = 6, ++ UNCORE_TOPOLOGY_INFO_NODE_EDC_ECLK = 7, ++ UNCORE_TOPOLOGY_INFO_NODE_EDC_UCLK = 8, ++ UNCORE_TOPOLOGY_INFO_NODE_M2M = 9, ++ UNCORE_TOPOLOGY_INFO_NODE_HFI_RXE = 10, ++ UNCORE_TOPOLOGY_INFO_NODE_HFI_TXE = 11, ++ UNCORE_TOPOLOGY_INFO_NODE_FPGA_CACHE = 12, ++ UNCORE_TOPOLOGY_INFO_NODE_FPGA_FAB = 13, ++ UNCORE_TOPOLOGY_INFO_NODE_FPGA_THERMAL = 14, ++ UNCORE_TOPOLOGY_INFO_NODE_FPGA_POWER = 15, ++} UNCORE_TOPOLOGY_INFO_NODE_INDEX_TYPE; ++ ++typedef struct SIDEBAND_INFO_NODE_S SIDEBAND_INFO_NODE; ++typedef SIDEBAND_INFO_NODE * SIDEBAND_INFO; ++ ++struct SIDEBAND_INFO_NODE_S { ++ U32 tid; ++ U32 pid; ++ U64 tsc; ++}; ++ ++#define SIDEBAND_INFO_pid(x) ((x)->pid) ++#define SIDEBAND_INFO_tid(x) ((x)->tid) ++#define SIDEBAND_INFO_tsc(x) ((x)->tsc) ++ ++typedef struct SAMPLE_DROP_NODE_S SAMPLE_DROP_NODE; ++typedef SAMPLE_DROP_NODE * SAMPLE_DROP; ++ ++struct SAMPLE_DROP_NODE_S { ++ U32 os_id; ++ U32 cpu_id; ++ U32 sampled; ++ U32 dropped; ++}; ++ ++#define SAMPLE_DROP_os_id(x) ((x)->os_id) ++#define SAMPLE_DROP_cpu_id(x) ((x)->cpu_id) ++#define SAMPLE_DROP_sampled(x) ((x)->sampled) ++#define SAMPLE_DROP_dropped(x) ((x)->dropped) ++ ++#define MAX_SAMPLE_DROP_NODES 20 ++ ++typedef struct SAMPLE_DROP_INFO_NODE_S SAMPLE_DROP_INFO_NODE; ++typedef SAMPLE_DROP_INFO_NODE * SAMPLE_DROP_INFO; ++ ++struct SAMPLE_DROP_INFO_NODE_S { ++ U32 size; ++ SAMPLE_DROP_NODE drop_info[MAX_SAMPLE_DROP_NODES]; ++}; ++ ++#define SAMPLE_DROP_INFO_size(x) ((x)->size) ++#define 
SAMPLE_DROP_INFO_drop_info(x, index) ((x)->drop_info[index]) ++ ++#define IS_PEBS_SAMPLE_RECORD(sample_record) \ ++ ((SAMPLE_RECORD_pid_rec_index(sample_record) == (U32)-1) && \ ++ (SAMPLE_RECORD_tid(sample_record) == (U32)-1)) ++ ++/* ++ * VMM vendor information ++ */ ++#define KVM_SIGNATURE "KVMKVMKVM\0\0\0" ++#define XEN_SIGNATURE "XenVMMXenVMM" ++#define VMWARE_SIGNATURE "VMwareVMware" ++#define HYPERV_SIGNATURE "Microsoft Hv" ++ ++#define DRV_VMM_UNKNOWN 0 ++#define DRV_VMM_MOBILEVISOR 1 ++#define DRV_VMM_KVM 2 ++#define DRV_VMM_XEN 3 ++#define DRV_VMM_HYPERV 4 ++#define DRV_VMM_VMWARE 5 ++#define DRV_VMM_ACRN 6 ++ ++/* ++ * @macro DRV_SETUP_INFO_NODE_S ++ * @brief ++ * This structure supports driver information such as NMI profiling mode. ++ */ ++ ++typedef struct DRV_SETUP_INFO_NODE_S DRV_SETUP_INFO_NODE; ++typedef DRV_SETUP_INFO_NODE * DRV_SETUP_INFO; ++ ++struct DRV_SETUP_INFO_NODE_S { ++ union { ++ U64 modes; ++ struct { ++ U64 nmi_mode : 1; ++ U64 vmm_mode : 1; ++ U64 vmm_vendor : 8; ++ U64 vmm_guest_vm : 1; ++ U64 pebs_accessible : 1; ++ U64 cpu_hotplug_mode : 1; ++ U64 matrix_inaccessible : 1; ++ U64 page_table_isolation : 2; ++ U64 pebs_ignored_by_pti : 1; ++ U64 reserved1 : 47; ++ } s1; ++ } u1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DRV_SETUP_INFO_nmi_mode(info) ((info)->u1.s1.nmi_mode) ++#define DRV_SETUP_INFO_vmm_mode(info) ((info)->u1.s1.vmm_mode) ++#define DRV_SETUP_INFO_vmm_vendor(info) ((info)->u1.s1.vmm_vendor) ++#define DRV_SETUP_INFO_vmm_guest_vm(info) ((info)->u1.s1.vmm_guest_vm) ++#define DRV_SETUP_INFO_pebs_accessible(info) ((info)->u1.s1.pebs_accessible) ++#define DRV_SETUP_INFO_cpu_hotplug_mode(info) ((info)->u1.s1.cpu_hotplug_mode) ++#define DRV_SETUP_INFO_matrix_inaccessible(info) \ ++ ((info)->u1.s1.matrix_inaccessible) ++#define DRV_SETUP_INFO_page_table_isolation(info) \ ++ ((info)->u1.s1.page_table_isolation) ++#define DRV_SETUP_INFO_pebs_ignored_by_pti(info) \ ++ 
((info)->u1.s1.pebs_ignored_by_pti) ++ ++#define DRV_SETUP_INFO_PTI_DISABLED 0 ++#define DRV_SETUP_INFO_PTI_KPTI 1 ++#define DRV_SETUP_INFO_PTI_KAISER 2 ++#define DRV_SETUP_INFO_PTI_VA_SHADOW 3 ++#define DRV_SETUP_INFO_PTI_UNKNOWN 4 ++ ++/* ++ Type: task_info_t ++ Description: ++ Represents the equivalent of a Linux Thread. ++ Fields: ++ o id: A unique identifier. May be `NULL_TASK_ID`. ++ o name: Human-readable name for this task ++ o executable_name: Literal path to the binary elf that this task's ++ entry point is executing from. ++ o address_space_id: The unique ID for the address space this task is ++ running in. ++ */ ++struct task_info_node_s { ++ U64 id; ++ char name[32]; ++ U64 address_space_id; ++}; ++ ++/* ++ Type: REMOTE_SWITCH ++ Description: ++ Collection switch set on target ++*/ ++typedef struct REMOTE_SWITCH_NODE_S REMOTE_SWITCH_NODE; ++typedef REMOTE_SWITCH_NODE * REMOTE_SWITCH; ++ ++struct REMOTE_SWITCH_NODE_S { ++ U32 auto_mode : 1; ++ U32 adv_hotspot : 1; ++ U32 lbr_callstack : 2; ++ U32 full_pebs : 1; ++ U32 uncore_supported : 1; ++ U32 agent_mode : 2; ++ U32 sched_switch_enabled : 1; ++ U32 data_transfer_mode : 1; ++ U32 reserved1 : 22; ++ U32 reserved2; ++}; ++ ++#define REMOTE_SWITCH_auto_mode(x) ((x).auto_mode) ++#define REMOTE_SWITCH_adv_hotspot(x) ((x).adv_hotspot) ++#define REMOTE_SWITCH_lbr_callstack(x) ((x).lbr_callstack) ++#define REMOTE_SWITCH_full_pebs(x) ((x).full_pebs) ++#define REMOTE_SWITCH_uncore_supported(x) ((x).uncore_supported) ++#define REMOTE_SWITCH_agent_mode(x) ((x).agent_mode) ++#define REMOTE_SWITCH_sched_switch_enabled(x) ((x).sched_switch_enabled) ++#define REMOTE_SWITCH_data_transfer_mode(x) ((x).data_transfer_mode) ++ ++/* ++ Type: REMOTE_OS_INFO ++ Description: ++ Remote target OS system information ++*/ ++#define OSINFOLEN 64 ++typedef struct REMOTE_OS_INFO_NODE_S REMOTE_OS_INFO_NODE; ++typedef REMOTE_OS_INFO_NODE * REMOTE_OS_INFO; ++ ++struct REMOTE_OS_INFO_NODE_S { ++ U32 os_family; ++ U32 reserved1; ++ S8 
sysname[OSINFOLEN]; ++ S8 release[OSINFOLEN]; ++ S8 version[OSINFOLEN]; ++}; ++ ++#define REMOTE_OS_INFO_os_family(x) ((x).os_family) ++#define REMOTE_OS_INFO_sysname(x) ((x).sysname) ++#define REMOTE_OS_INFO_release(x) ((x).release) ++#define REMOTE_OS_INFO_version(x) ((x).version) ++ ++/* ++ Type: REMOTE_HARDWARE_INFO ++ Description: ++ Remote target hardware information ++*/ ++typedef struct REMOTE_HARDWARE_INFO_NODE_S REMOTE_HARDWARE_INFO_NODE; ++typedef REMOTE_HARDWARE_INFO_NODE * REMOTE_HARDWARE_INFO; ++ ++struct REMOTE_HARDWARE_INFO_NODE_S { ++ U32 num_cpus; ++ U32 family; ++ U32 model; ++ U32 stepping; ++ U64 tsc_freq; ++ U64 reserved2; ++ U64 reserved3; ++}; ++ ++#define REMOTE_HARDWARE_INFO_num_cpus(x) ((x).num_cpus) ++#define REMOTE_HARDWARE_INFO_family(x) ((x).family) ++#define REMOTE_HARDWARE_INFO_model(x) ((x).model) ++#define REMOTE_HARDWARE_INFO_stepping(x) ((x).stepping) ++#define REMOTE_HARDWARE_INFO_tsc_frequency(x) ((x).tsc_freq) ++ ++/* ++ Type: SEP_AGENT_MODE ++ Description: ++ SEP mode on target agent ++*/ ++typedef enum { ++ NATIVE_AGENT = 0, ++ HOST_VM_AGENT, // Service OS in ACRN ++ GUEST_VM_AGENT // User OS in ACRN ++} SEP_AGENT_MODE; ++ ++/* ++ Type: DATA_TRANSFER_MODE ++ Description: ++ Data transfer mode from target agent to remote host ++*/ ++typedef enum { ++ IMMEDIATE_TRANSFER = 0, ++ DELAYED_TRANSFER // Send after collection is done ++} DATA_TRANSFER_MODE; ++ ++#define MAX_NUM_OS_ALLOWED 6 ++#define TARGET_IP_NAMELEN 64 ++ ++typedef struct TARGET_INFO_NODE_S TARGET_INFO_NODE; ++typedef TARGET_INFO_NODE * TARGET_INFO; ++ ++struct TARGET_INFO_NODE_S { ++ U32 num_of_agents; ++ U32 reserved; ++ U32 os_id[MAX_NUM_OS_ALLOWED]; ++ S8 ip_address[MAX_NUM_OS_ALLOWED][TARGET_IP_NAMELEN]; ++ REMOTE_OS_INFO_NODE os_info[MAX_NUM_OS_ALLOWED]; ++ REMOTE_HARDWARE_INFO_NODE hardware_info[MAX_NUM_OS_ALLOWED]; ++ REMOTE_SWITCH_NODE remote_switch[MAX_NUM_OS_ALLOWED]; ++}; ++ ++#define TARGET_INFO_num_of_agents(x) ((x)->num_of_agents) ++#define 
TARGET_INFO_os_id(x, i) ((x)->os_id[i]) ++#define TARGET_INFO_os_info(x, i) ((x)->os_info[i]) ++#define TARGET_INFO_ip_address(x, i) ((x)->ip_address[i]) ++#define TARGET_INFO_hardware_info(x, i) ((x)->hardware_info[i]) ++#define TARGET_INFO_remote_switch(x, i) ((x)->remote_switch[i]) ++ ++typedef struct CPU_MAP_TRACE_NODE_S CPU_MAP_TRACE_NODE; ++typedef CPU_MAP_TRACE_NODE * CPU_MAP_TRACE; ++ ++struct CPU_MAP_TRACE_NODE_S { ++ U64 tsc; ++ U32 os_id; ++ U32 vcpu_id; ++ U32 pcpu_id; ++ U8 is_static : 1; ++ U8 initial : 1; ++ U8 reserved1 : 6; ++ U8 reserved2; ++ U16 reserved3; ++ U64 reserved4; ++}; ++ ++#define CPU_MAP_TRACE_tsc(x) ((x)->tsc) ++#define CPU_MAP_TRACE_os_id(x) ((x)->os_id) ++#define CPU_MAP_TRACE_vcpu_id(x) ((x)->vcpu_id) ++#define CPU_MAP_TRACE_pcpu_id(x) ((x)->pcpu_id) ++#define CPU_MAP_TRACE_is_static(x) ((x)->is_static) ++#define CPU_MAP_TRACE_initial(x) ((x)->initial) ++ ++typedef struct VM_SWITCH_TRACE_NODE_S VM_SWITCH_TRACE_NODE; ++typedef VM_SWITCH_TRACE_NODE * VM_SWITCH_TRACE; ++ ++struct VM_SWITCH_TRACE_NODE_S { ++ U64 tsc; ++ U32 from_os_id; ++ U32 to_os_id; ++ U64 reason; ++ U64 reserved1; ++ U64 reserved2; ++}; ++ ++#define VM_SWITCH_TRACE_tsc(x) ((x)->tsc) ++#define VM_SWITCH_TRACE_from_os_id(x) ((x)->from_os_id) ++#define VM_SWITCH_TRACE_to_os_id(x) ((x)->to_os_id) ++#define VM_SWITCH_TRACE_reason(x) ((x)->reason) ++ ++typedef struct EMON_BUFFER_DRIVER_HELPER_NODE_S EMON_BUFFER_DRIVER_HELPER_NODE; ++typedef EMON_BUFFER_DRIVER_HELPER_NODE * EMON_BUFFER_DRIVER_HELPER; ++ ++struct EMON_BUFFER_DRIVER_HELPER_NODE_S { ++ U32 num_entries_per_package; ++ U32 num_cpu; ++ U32 power_num_package_events; ++ U32 power_num_module_events; ++ U32 power_num_thread_events; ++ U32 power_device_offset_in_package; ++ U32 core_num_events; ++ U32 core_index_to_thread_offset_map[]; ++}; ++ ++#define EMON_BUFFER_DRIVER_HELPER_num_entries_per_package(x) \ ++ ((x)->num_entries_per_package) ++#define EMON_BUFFER_DRIVER_HELPER_num_cpu(x) ((x)->num_cpu) ++#define 
EMON_BUFFER_DRIVER_HELPER_power_num_package_events(x) \ ++ ((x)->power_num_package_events) ++#define EMON_BUFFER_DRIVER_HELPER_power_num_module_events(x) \ ++ ((x)->power_num_module_events) ++#define EMON_BUFFER_DRIVER_HELPER_power_num_thread_events(x) \ ++ ((x)->power_num_thread_events) ++#define EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package(x) \ ++ ((x)->power_device_offset_in_package) ++#define EMON_BUFFER_DRIVER_HELPER_core_num_events(x) ((x)->core_num_events) ++#define EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map(x) \ ++ ((x)->core_index_to_thread_offset_map) ++ ++// EMON counts buffer follow this hardware topology: ++// package -> device -> unit/thread -> event ++ ++// Calculate the CORE thread offset ++// Using for initialization: calculate the cpu_index_to_thread_offset_map ++// in emon_Create_Emon_Buffer_Descriptor() ++// EMON_BUFFER_CORE_THREAD_OFFSET = ++// package_id * num_entries_per_package + //package offset ++// device_offset_in_package + //device base offset ++// (core_id * threads_per_core + thread_id) * num_core_events + //thread offset ++#define EMON_BUFFER_CORE_THREAD_OFFSET(package_id, num_entries_per_package, \ ++ device_offset_in_package, core_id, \ ++ threads_per_core, thread_id, \ ++ num_core_events) \ ++ (package_id * num_entries_per_package + device_offset_in_package + \ ++ (core_id * threads_per_core + thread_id) * num_core_events) ++ ++// Take cpu_index and cpu_index_to_thread_offset_map to get thread_offset, ++// and calculate the CORE event offset ++// Using for kernel and emon_output.c printing function ++// EMON_BUFFER_CORE_EVENT_OFFSET = ++// cpu_index_to_thread_offset + //thread offset ++// core_event_id //event_offset ++#define EMON_BUFFER_CORE_EVENT_OFFSET(cpu_index_to_thread_offset, \ ++ core_event_id) \ ++ (cpu_index_to_thread_offset + core_event_id) ++ ++// Calculate the device level to UNCORE event offset ++// Using for kernel and emon_output.c printing function ++// 
EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE = ++// device_offset_in_package + //device_offset_in_package ++// device_unit_id * num_unit_events + //unit_offset ++// device_event_id //event_offset ++#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE( \ ++ device_offset_in_package, device_unit_id, num_unit_events, \ ++ device_event_id) \ ++ (device_offset_in_package + device_unit_id * num_unit_events + \ ++ device_event_id) ++ ++// Take 'device level to UNCORE event offset' and package_id, ++// calculate the UNCORE package level event offset ++// Using for emon_output.c printing function ++// EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET = ++// package_id * num_entries_per_package + //package_offset ++// uncore_offset_in_package; //offset_in_package ++#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( \ ++ package_id, num_entries_per_package, uncore_offset_in_package) \ ++ (package_id * num_entries_per_package + uncore_offset_in_package) ++ ++// Take 'device level to UNCORE event offset', ++// calculate the UNCORE system level event offset ++// Using for emon_output.c printing function ++// EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET = ++// device_offset_in_system + //device_offset_in_system ++// device_unit_id * num_system_events + //device_unit_offset ++// device_event_id //event_offset ++#define EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET(device_offset_in_system, \ ++ device_unit_id, \ ++ num_system_events, \ ++ device_event_id) \ ++ (device_offset_in_system + device_unit_id * num_system_events + \ ++ device_event_id) ++ ++// Calculate the package level power event offset ++// Using for kernel and emon_output.c printing function ++// EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET = ++// package_id * num_entries_per_package + //package offset ++// device_offset_in_package + //device offset ++// package_event_offset //power package event offset ++#define EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( \ ++ package_id, num_entries_per_package, device_offset_in_package, \ 
++ device_event_offset) \ ++ (package_id * num_entries_per_package + device_offset_in_package + \ ++ device_event_offset) ++ ++// Calculate the module level power event offset ++// Using for kernel and emon_output.c printing function ++// EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET = ++// package_id * num_entries_per_package + //package offset ++// device_offset_in_package + //device offset ++// num_package_events + //package event offset ++// module_id * num_module_events + //module offset ++// module_event_offset //power module event offset ++#define EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( \ ++ package_id, num_entries_per_package, device_offset_in_package, \ ++ num_package_events, module_id, num_module_events, device_event_offset) \ ++ (package_id * num_entries_per_package + device_offset_in_package + \ ++ num_package_events + module_id * num_module_events + \ ++ device_event_offset) ++ ++// Calculate the package level power event offset ++// Using for kernel and emon_output.c printing function ++// EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET = ++// package_id * num_entries_per_package + //package offset ++// device_offset_in_package + //device offset ++// num_package_events + //package offset ++// num_modules_per_package * num_module_events + //module offset ++// (core_id*threads_per_core+thread_id)*num_thread_events + //thread offset ++// thread_event_offset //power thread event offset ++#define EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( \ ++ package_id, num_entries_per_package, device_offset_in_package, \ ++ num_package_events, num_modules_per_package, num_module_events, \ ++ core_id, threads_per_core, thread_id, num_unit_events, \ ++ device_event_offset) \ ++ (package_id * num_entries_per_package + device_offset_in_package + \ ++ num_package_events + \ ++ num_modules_per_package * num_module_events + \ ++ (core_id * threads_per_core + thread_id) * num_unit_events + \ ++ device_event_offset) ++ ++/* ++ ************************************ ++ * 
DRIVER LOG BUFFER DECLARATIONS * ++ ************************************ ++ */ ++ ++#define DRV_MAX_NB_LOG_CATEGORIES 256 // Must be a multiple of 8 ++#define DRV_NB_LOG_CATEGORIES 14 ++#define DRV_LOG_CATEGORY_LOAD 0 ++#define DRV_LOG_CATEGORY_INIT 1 ++#define DRV_LOG_CATEGORY_DETECTION 2 ++#define DRV_LOG_CATEGORY_ERROR 3 ++#define DRV_LOG_CATEGORY_STATE_CHANGE 4 ++#define DRV_LOG_CATEGORY_MARK 5 ++#define DRV_LOG_CATEGORY_DEBUG 6 ++#define DRV_LOG_CATEGORY_FLOW 7 ++#define DRV_LOG_CATEGORY_ALLOC 8 ++#define DRV_LOG_CATEGORY_INTERRUPT 9 ++#define DRV_LOG_CATEGORY_TRACE 10 ++#define DRV_LOG_CATEGORY_REGISTER 11 ++#define DRV_LOG_CATEGORY_NOTIFICATION 12 ++#define DRV_LOG_CATEGORY_WARNING 13 ++ ++#define LOG_VERBOSITY_UNSET 0xFF ++#define LOG_VERBOSITY_DEFAULT 0xFE ++#define LOG_VERBOSITY_NONE 0 ++ ++#define LOG_CHANNEL_MEMLOG 0x1 ++#define LOG_CHANNEL_AUXMEMLOG 0x2 ++#define LOG_CHANNEL_PRINTK 0x4 ++#define LOG_CHANNEL_TRACEK 0x8 ++#define LOG_CHANNEL_MOSTWHERE \ ++ (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK) ++#define LOG_CHANNEL_EVERYWHERE \ ++ (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK | \ ++ LOG_CHANNEL_TRACEK) ++#define LOG_CHANNEL_MASK LOG_CATEGORY_VERBOSITY_EVERYWHERE ++ ++#define LOG_CONTEXT_REGULAR 0x10 ++#define LOG_CONTEXT_INTERRUPT 0x20 ++#define LOG_CONTEXT_NOTIFICATION 0x40 ++#define LOG_CONTEXT_ALL \ ++ (LOG_CONTEXT_REGULAR | LOG_CONTEXT_INTERRUPT | LOG_CONTEXT_NOTIFICATION) ++#define LOG_CONTEXT_MASK LOG_CONTEXT_ALL ++#define LOG_CONTEXT_SHIFT 4 ++ ++#define DRV_LOG_NOTHING 0 ++#define DRV_LOG_FLOW_IN 1 ++#define DRV_LOG_FLOW_OUT 2 ++ ++/* ++ * @macro DRV_LOG_ENTRY_NODE_S ++ * @brief ++ * This structure is used to store a log message from the driver. 
++ */ ++ ++#define DRV_LOG_MESSAGE_LENGTH 64 ++#define DRV_LOG_FUNCTION_NAME_LENGTH 32 ++ ++typedef struct DRV_LOG_ENTRY_NODE_S DRV_LOG_ENTRY_NODE; ++typedef DRV_LOG_ENTRY_NODE * DRV_LOG_ENTRY; ++struct DRV_LOG_ENTRY_NODE_S { ++ char function_name[DRV_LOG_FUNCTION_NAME_LENGTH]; ++ char message[DRV_LOG_MESSAGE_LENGTH]; ++ ++ U16 temporal_tag; ++ U16 integrity_tag; ++ ++ U8 category; ++ U8 secondary_info; // Secondary attribute: ++ // former driver state for STATE category ++ // 'ENTER' or 'LEAVE' for FLOW and TRACE categories ++ U16 processor_id; ++ // NB: not guaranteed to be accurate (due to preemption/core migration) ++ ++ U64 tsc; ++ ++ U16 nb_active_interrupts; // never 100% accurate, merely indicative ++ U8 active_drv_operation; // only 100% accurate IOCTL-called functions ++ U8 driver_state; ++ ++ U16 line_number; // as per the __LINE__ macro ++ ++ U16 nb_active_notifications; ++ ++ U64 reserved; // need padding to reach 128 bytes ++}; // this structure should be exactly 128-byte long ++ ++#define DRV_LOG_ENTRY_temporal_tag(ent) ((ent)->temporal_tag) ++#define DRV_LOG_ENTRY_integrity_tag(ent) ((ent)->integrity_tag) ++#define DRV_LOG_ENTRY_category(ent) ((ent)->category) ++#define DRV_LOG_ENTRY_secondary_info(ent) ((ent)->secondary_info) ++#define DRV_LOG_ENTRY_processor_id(ent) ((ent)->processor_id) ++#define DRV_LOG_ENTRY_tsc(ent) ((ent)->tsc) ++#define DRV_LOG_ENTRY_driver_state(ent) ((ent)->driver_state) ++#define DRV_LOG_ENTRY_active_drv_operation(ent) ((ent)->active_drv_operation) ++#define DRV_LOG_ENTRY_nb_active_interrupts(ent) ((ent)->nb_active_interrupts) ++#define DRV_LOG_ENTRY_nb_active_notifications(ent) \ ++ ((ent)->nb_active_notifications) ++#define DRV_LOG_ENTRY_line_number(ent) ((ent)->line_number) ++#define DRV_LOG_ENTRY_message(ent) ((ent)->message) ++#define DRV_LOG_ENTRY_function_name(ent) ((ent)->function_name) ++ ++/* ++ * @macro DRV_LOG_BUFFER_NODE_S ++ * @brief Circular buffer structure storing the latest ++ * DRV_LOG_MAX_NB_ENTRIES 
driver messages ++ */ ++ ++#define DRV_LOG_SIGNATURE_SIZE 8 // Must be a multiple of 8 ++#define DRV_LOG_SIGNATURE_0 'S' ++#define DRV_LOG_SIGNATURE_1 'e' ++#define DRV_LOG_SIGNATURE_2 'P' ++#define DRV_LOG_SIGNATURE_3 'd' ++#define DRV_LOG_SIGNATURE_4 'R' ++#define DRV_LOG_SIGNATURE_5 'v' ++#define DRV_LOG_SIGNATURE_6 '5' ++#define DRV_LOG_SIGNATURE_7 '\0' ++// The signature is "SePdRv4"; not declared as string on purpose to avoid ++// false positives when trying to identify the log buffer in a crash dump ++ ++#define DRV_LOG_VERSION 1 ++#define DRV_LOG_FILLER_BYTE 1 ++ ++#define DRV_LOG_DRIVER_VERSION_SIZE 64 // Must be a multiple of 8 ++#define DRV_LOG_MAX_NB_PRI_ENTRIES (8192 * 2) ++// 2MB buffer [*HAS TO BE* a power of 2!] [8192 entries = 1 MB] ++#define DRV_LOG_MAX_NB_AUX_ENTRIES (8192) ++// 1MB buffer [*HAS TO BE* a power of 2!] ++#define DRV_LOG_MAX_NB_ENTRIES \ ++ (DRV_LOG_MAX_NB_PRI_ENTRIES + DRV_LOG_MAX_NB_AUX_ENTRIES) ++ ++typedef struct DRV_LOG_BUFFER_NODE_S DRV_LOG_BUFFER_NODE; ++typedef DRV_LOG_BUFFER_NODE * DRV_LOG_BUFFER; ++struct DRV_LOG_BUFFER_NODE_S { ++ char header_signature[DRV_LOG_SIGNATURE_SIZE]; ++ // some signature to be able to locate the log even without -g; ASCII ++ // would help should we change the signature for each log's version ++ // instead of keeping it in a dedicated field? ++ ++ U32 log_size; // filled with sizeof(this structure) at init. ++ U32 max_nb_pri_entries; ++ // filled with the driver's "DRV_LOG_MAX_NB_PRIM_ENTRIES" at init. ++ ++ U32 max_nb_aux_entries; ++ // filled with the driver's "DRV_LOG_MAX_NB_AUX_ENTRIES" at init. ++ U32 reserved1; ++ ++ U64 init_time; // primary log disambiguator ++ ++ U32 disambiguator; ++ // used to differentiate the driver's version of the log when a ++ // full memory dump can contain some from userland ++ U32 log_version; // 0 at first, increase when format changes? 
++ ++ U32 pri_entry_index; ++ // should be incremented *atomically* as a means to (re)allocate ++ // the next primary log entry. ++ U32 aux_entry_index; ++ // should be incremented *atomically* as a means to (re)allocate ++ // the next auxiliary log entry. ++ ++ char driver_version[DRV_LOG_DRIVER_VERSION_SIZE]; ++ ++ U8 driver_state; ++ U8 active_drv_operation; ++ U16 reserved2; ++ U32 nb_drv_operations; ++ ++ U32 nb_interrupts; ++ U16 nb_active_interrupts; ++ U16 nb_active_notifications; ++ ++ U32 nb_notifications; ++ U32 nb_driver_state_transitions; ++ ++ U8 contiguous_physical_memory; ++ U8 reserved3; ++ U16 reserved4; ++ U32 reserved5; ++ ++ U8 verbosities[DRV_MAX_NB_LOG_CATEGORIES]; ++ ++ DRV_LOG_ENTRY_NODE entries[DRV_LOG_MAX_NB_ENTRIES]; ++ ++ char footer_signature[DRV_LOG_SIGNATURE_SIZE]; ++}; ++ ++#define DRV_LOG_BUFFER_pri_entry_index(log) ((log)->pri_entry_index) ++#define DRV_LOG_BUFFER_aux_entry_index(log) ((log)->aux_entry_index) ++#define DRV_LOG_BUFFER_header_signature(log) ((log)->header_signature) ++#define DRV_LOG_BUFFER_footer_signature(log) ((log)->footer_signature) ++#define DRV_LOG_BUFFER_log_size(log) ((log)->log_size) ++#define DRV_LOG_BUFFER_driver_version(log) ((log)->driver_version) ++#define DRV_LOG_BUFFER_driver_state(log) ((log)->driver_state) ++#define DRV_LOG_BUFFER_active_drv_operation(log) ((log)->active_drv_operation) ++#define DRV_LOG_BUFFER_nb_interrupts(log) ((log)->nb_interrupts) ++#define DRV_LOG_BUFFER_nb_active_interrupts(log) ((log)->nb_active_interrupts) ++#define DRV_LOG_BUFFER_nb_notifications(log) ((log)->nb_notifications) ++#define DRV_LOG_BUFFER_nb_active_notifications(log) \ ++ ((log)->nb_active_notifications) ++#define DRV_LOG_BUFFER_nb_driver_state_transitions(log) \ ++ ((log)->nb_driver_state_transitions) ++#define DRV_LOG_BUFFER_nb_drv_operations(log) ((log)->nb_drv_operations) ++#define DRV_LOG_BUFFER_max_nb_pri_entries(log) ((log)->max_nb_pri_entries) ++#define DRV_LOG_BUFFER_max_nb_aux_entries(log) 
((log)->max_nb_aux_entries) ++#define DRV_LOG_BUFFER_init_time(log) ((log)->init_time) ++#define DRV_LOG_BUFFER_disambiguator(log) ((log)->disambiguator) ++#define DRV_LOG_BUFFER_log_version(log) ((log)->log_version) ++#define DRV_LOG_BUFFER_entries(log) ((log)->entries) ++#define DRV_LOG_BUFFER_contiguous_physical_memory(log) \ ++ ((log)->contiguous_physical_memory) ++#define DRV_LOG_BUFFER_verbosities(log) ((log)->verbosities) ++ ++#define DRV_LOG_CONTROL_MAX_DATA_SIZE \ ++ DRV_MAX_NB_LOG_CATEGORIES // Must be a multiple of 8 ++ ++typedef struct DRV_LOG_CONTROL_NODE_S DRV_LOG_CONTROL_NODE; ++typedef DRV_LOG_CONTROL_NODE * DRV_LOG_CONTROL; ++ ++struct DRV_LOG_CONTROL_NODE_S { ++ U32 command; ++ U32 reserved1; ++ U8 data[DRV_LOG_CONTROL_MAX_DATA_SIZE]; ++ // only DRV_NB_LOG_CATEGORIES elements will be used, but let's plan for ++ // backwards compatibility if LOG_CATEGORY_UNSET, READ instead of WRITE ++ ++ U64 reserved2; ++ // may later want to add support for resizing the buffer, ++ // or only log 100 first interrupts, etc. ++ U64 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++}; ++ ++#define DRV_LOG_CONTROL_command(x) ((x)->command) ++#define DRV_LOG_CONTROL_verbosities(x) ((x)->data) ++#define DRV_LOG_CONTROL_message(x) \ ++ ((x)->data) // Userland 'MARK' messages use the 'data' field too. 
++#define DRV_LOG_CONTROL_log_size(x) (*((U32 *)((x)->data))) ++ ++#define DRV_LOG_CONTROL_COMMAND_NONE 0 ++#define DRV_LOG_CONTROL_COMMAND_ADJUST_VERBOSITY 1 ++#define DRV_LOG_CONTROL_COMMAND_MARK 2 ++#define DRV_LOG_CONTROL_COMMAND_QUERY_SIZE 3 ++#define DRV_LOG_CONTROL_COMMAND_BENCHMARK 4 ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_types.h b/drivers/platform/x86/sepdk/include/lwpmudrv_types.h +new file mode 100644 +index 000000000000..7fe842eee890 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_types.h +@@ -0,0 +1,159 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. 
++ * ------------------------------------------------------------------------- ++ ***/ ++ ++#ifndef _LWPMUDRV_TYPES_H_ ++#define _LWPMUDRV_TYPES_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#if defined(BUILD_DRV_ESX) ++//SR: added size_t def ++typedef unsigned long size_t; ++typedef unsigned long ssize_t; ++#endif ++ ++typedef unsigned char U8; ++typedef char S8; ++typedef short S16; ++typedef unsigned short U16; ++typedef unsigned int U32; ++typedef int S32; ++#if defined(DRV_OS_WINDOWS) ++typedef unsigned __int64 U64; ++typedef __int64 S64; ++#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ ++ defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || \ ++ defined(DRV_OS_FREEBSD) ++typedef unsigned long long U64; ++typedef long long S64; ++typedef unsigned long ULONG; ++typedef void VOID; ++typedef void *LPVOID; ++ ++#if defined(BUILD_DRV_ESX) ++//SR: added UWORD64 def ++typedef union _UWORD64 { ++ struct { ++ U32 low; ++ S32 hi; ++ } c; ++ S64 qword; ++} UWORD64, *PWORD64; ++#endif ++#else ++#error "Undefined OS" ++#endif ++ ++#if defined(DRV_IA32) ++typedef S32 SIOP; ++typedef U32 UIOP; ++#elif defined(DRV_EM64T) ++typedef S64 SIOP; ++typedef U64 UIOP; ++#else ++#error "Unexpected Architecture seen" ++#endif ++ ++typedef U32 DRV_BOOL; ++typedef void *PVOID; ++ ++#if !defined(__DEFINE_STCHAR__) ++#define __DEFINE_STCHAR__ ++#if defined(UNICODE) ++typedef wchar_t STCHAR; ++#define VTSA_T(x) L##x ++#else ++typedef char STCHAR; ++#define VTSA_T(x) x ++#endif ++#endif ++ ++#if defined(DRV_OS_WINDOWS) ++#include ++typedef wchar_t DRV_STCHAR; ++typedef wchar_t VTSA_CHAR; ++#else ++typedef char DRV_STCHAR; ++#endif ++ ++// ++// Handy Defines ++// ++typedef U32 DRV_STATUS; ++ ++#define MAX_STRING_LENGTH 1024 ++#define MAXNAMELEN 256 ++ ++#if defined(DRV_OS_WINDOWS) ++#define UNLINK _unlink ++#define RENAME rename ++#define WCSDUP _wcsdup ++#endif ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || \ ++ 
defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD) ++#define UNLINK unlink ++#define RENAME rename ++#endif ++ ++#if defined(DRV_OS_SOLARIS) && !defined(_KERNEL) ++//wcsdup is missing on Solaris ++#include ++#include ++ ++static inline wchar_t *solaris_wcsdup(const wchar_t *wc) ++{ ++ wchar_t *tmp = (wchar_t *)malloc((wcslen(wc) + 1) * sizeof(wchar_t)); ++ ++ wcscpy(tmp, wc); ++ return tmp; ++} ++#define WCSDUP solaris_wcsdup ++#endif ++ ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC) ++#define WCSDUP wcsdup ++#endif ++ ++#if !defined(_WCHAR_T_DEFINED) ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || defined(DRV_OS_SOLARIS) ++#if !defined(_GNU_SOURCE) ++#define _GNU_SOURCE ++#endif ++#endif ++#endif ++ ++#if (defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID)) && !defined(__KERNEL__) ++#include ++typedef wchar_t VTSA_CHAR; ++#endif ++ ++#if (defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) || \ ++ defined(DRV_OS_SOLARIS)) && \ ++ !defined(_KERNEL) ++#include ++typedef wchar_t VTSA_CHAR; ++#endif ++ ++#define TRUE 1 ++#define FALSE 0 ++ ++#define ALIGN_4(x) (((x) + 3) & ~3) ++#define ALIGN_8(x) (((x) + 7) & ~7) ++#define ALIGN_16(x) (((x) + 15) & ~15) ++#define ALIGN_32(x) (((x) + 31) & ~31) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_version.h b/drivers/platform/x86/sepdk/include/lwpmudrv_version.h +new file mode 100644 +index 000000000000..a2cbedd44573 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_version.h +@@ -0,0 +1,111 @@ ++/**** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2010-2018 Intel Corporation. 
All Rights Reserved. ++ * ------------------------------------------------------------------------- ++****/ ++/* ++ * File : lwpmudrv_version.h ++ */ ++ ++#ifndef _LWPMUDRV_VERSION_H_ ++#define _LWPMUDRV_VERSION_H_ ++ ++#define _STRINGIFY(x) #x ++#define STRINGIFY(x) _STRINGIFY(x) ++#define _STRINGIFY_W(x) L#x ++#define STRINGIFY_W(x) _STRINGIFY_W(x) ++ ++#define SEP_MAJOR_VERSION 5 ++#define SEP_MINOR_VERSION 0 ++#define SEP_UPDATE_VERSION 0 ++#define SEP_API_VERSION SEP_UPDATE_VERSION ++#if SEP_UPDATE_VERSION > 0 ++#define SEP_UPDATE_STRING " Update " STRINGIFY(SEP_UPDATE_VERSION) ++#else ++#define SEP_UPDATE_STRING "" ++#endif ++#define SEP_RELEASE_STRING "" ++ ++#define EMON_MAJOR_VERSION SEP_MAJOR_VERSION ++#define EMON_MINOR_VERSION SEP_MINOR_VERSION ++#define EMON_PRODUCT_RELEASE_STRING SEP_UPDATE_VERSION ++ ++#if defined(SEP_ENABLE_PRIVATE_CPUS) ++#define PRODUCT_TYPE "private" ++#define SEP_NAME "sepint" ++#define SEP_NAME_W L"sepint" ++#else ++#define PRODUCT_TYPE "public" ++#define SEP_NAME "sep" ++#define SEP_NAME_W L"sep" ++#endif ++ ++#if !defined(PRODUCT_BUILDER) ++#define PRODUCT_BUILDER unknown ++#endif ++ ++#define TB_FILE_EXT ".tb7" ++#define TB_FILE_EXT_W L".tb7" ++ ++#define SEP_PRODUCT_NAME "Sampling Enabling Product" ++#define EMON_PRODUCT_NAME "EMON" ++ ++#define PRODUCT_VERSION_DATE __DATE__ " at " __TIME__ ++ ++#define SEP_PRODUCT_COPYRIGHT \ ++ "Copyright(C) 2007-2018 Intel Corporation. All rights reserved." ++#define EMON_PRODUCT_COPYRIGHT \ ++ "Copyright(C) 1993-2018 Intel Corporation. All rights reserved." ++ ++#define PRODUCT_DISCLAIMER \ ++ "Warning: This computer program is protected under U.S. and \n" \ ++ "international copyright laws, and may only be used or copied in \n" \ ++ "accordance with the terms of the license agreement. 
Except as \n" \ ++ "permitted by such license, no part of this computer program may \n" \ ++ "be reproduced, stored in a retrieval system, or transmitted \n" \ ++ "in any form or by any means without the express written consent \n" \ ++ "of Intel Corporation." ++ ++#define PRODUCT_VERSION \ ++ STRINGIFY(SEP_MAJOR_VERSION) "." STRINGIFY(SEP_MINOR_VERSION) ++ ++#define SEP_MSG_PREFIX \ ++ SEP_NAME "" STRINGIFY(SEP_MAJOR_VERSION) "_" STRINGIFY( \ ++ SEP_MINOR_VERSION) ":" ++#define SEP_VERSION_STR \ ++ STRINGIFY(SEP_MAJOR_VERSION) \ ++ "." STRINGIFY(SEP_MINOR_VERSION) "." STRINGIFY(SEP_API_VERSION) ++ ++#if defined(DRV_OS_WINDOWS) ++ ++#define SEP_DRIVER_NAME SEP_NAME "drv" STRINGIFY(SEP_MAJOR_VERSION) ++#define SEP_DRIVER_NAME_W SEP_NAME_W L"drv" STRINGIFY_W(SEP_MAJOR_VERSION) ++#define SEP_DEVICE_NAME SEP_DRIVER_NAME ++ ++#endif ++ ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ ++ defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD) ++ ++#define SEP_DRIVER_NAME SEP_NAME "" STRINGIFY(SEP_MAJOR_VERSION) ++#define SEP_SAMPLES_NAME SEP_DRIVER_NAME "_s" ++#define SEP_UNCORE_NAME SEP_DRIVER_NAME "_u" ++#define SEP_SIDEBAND_NAME SEP_DRIVER_NAME "_b" ++#define SEP_DEVICE_NAME "/dev/" SEP_DRIVER_NAME ++ ++#endif ++ ++#if defined(DRV_OS_MAC) ++ ++#define SEP_DRIVER_NAME SEP_NAME "" STRINGIFY(SEP_MAJOR_VERSION) ++#define SEP_SAMPLES_NAME SEP_DRIVER_NAME "_s" ++#define SEP_DEVICE_NAME SEP_DRIVER_NAME ++ ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/pax_shared.h b/drivers/platform/x86/sepdk/include/pax_shared.h +new file mode 100644 +index 000000000000..a706232c9b4a +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/pax_shared.h +@@ -0,0 +1,180 @@ ++/**** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be 
copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++****/ ++ ++/* ++ * ++ * Description: types and definitions shared between PAX kernel ++ * and user modes ++ * ++ * NOTE: alignment on page boundaries is required on 64-bit platforms! ++ * ++*/ ++ ++#ifndef _PAX_SHARED_H_ ++#define _PAX_SHARED_H_ ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++ ++#define _STRINGIFY(x) #x ++#define STRINGIFY(x) _STRINGIFY(x) ++ ++// PAX versioning ++ ++#define PAX_MAJOR_VERSION 1 // major version ++// (increment only when PAX driver is incompatible with previous versions) ++#define PAX_MINOR_VERSION 0 // minor version ++// (increment only when new APIs added, but driver remains backwards compatible) ++#define PAX_BUGFIX_VERSION 2 // bugfix version ++// (increment only for bug fix that don't affect usermode/driver compatibility) ++ ++#define PAX_VERSION_STR \ ++ STRINGIFY(PAX_MAJOR_VERSION) \ ++ "." STRINGIFY(PAX_MINOR_VERSION) "." 
STRINGIFY(PAX_BUGFIX_VERSION) ++ ++// PAX device name ++ ++#if defined(DRV_OS_WINDOWS) ++#define PAX_NAME "sepdal" ++#define PAX_NAME_W L"sepdal" ++#else ++#define PAX_NAME "pax" ++#endif ++ ++// PAX PMU reservation states ++ ++#define PAX_PMU_RESERVED 1 ++#define PAX_PMU_UNRESERVED 0 ++ ++#define PAX_GUID_UNINITIALIZED 0 ++ ++// PAX_IOCTL definitions ++ ++#if defined(DRV_OS_WINDOWS) ++ ++// ++// The name of the device as seen by the driver ++// ++#define LSTRING(x) L#x ++#define PAX_OBJECT_DEVICE_NAME L"\\Device\\sepdal" // LSTRING(PAX_NAME) ++#define PAX_OBJECT_LINK_NAME L"\\DosDevices\\sepdal" // LSTRING(PAX_NAME) ++ ++#define PAX_DEVICE_NAME PAX_NAME // for CreateFile called by app ++ ++#define PAX_IOCTL_DEVICE_TYPE 0xA000 // values 0-32768 reserved for Microsoft ++#define PAX_IOCTL_FUNCTION 0xA00 // values 0-2047 reserved for Microsoft ++ ++// ++// Basic CTL CODE macro to reduce typographical errors ++// ++#define PAX_CTL_READ_CODE(x) \ ++ CTL_CODE(PAX_IOCTL_DEVICE_TYPE, PAX_IOCTL_FUNCTION + (x), \ ++ METHOD_BUFFERED, FILE_READ_ACCESS) ++ ++#define PAX_IOCTL_INFO PAX_CTL_READ_CODE(1) ++#define PAX_IOCTL_STATUS PAX_CTL_READ_CODE(2) ++#define PAX_IOCTL_RESERVE_ALL PAX_CTL_READ_CODE(3) ++#define PAX_IOCTL_UNRESERVE PAX_CTL_READ_CODE(4) ++ ++#elif defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || \ ++ defined(DRV_OS_SOLARIS) ++ ++#define PAX_DEVICE_NAME "/dev/" PAX_NAME ++ ++#define PAX_IOC_MAGIC 100 ++#define PAX_IOCTL_INFO _IOW(PAX_IOC_MAGIC, 1, IOCTL_ARGS) ++#define PAX_IOCTL_STATUS _IOW(PAX_IOC_MAGIC, 2, IOCTL_ARGS) ++#define PAX_IOCTL_RESERVE_ALL _IO(PAX_IOC_MAGIC, 3) ++#define PAX_IOCTL_UNRESERVE _IO(PAX_IOC_MAGIC, 4) ++ ++#if defined(HAVE_COMPAT_IOCTL) && defined(DRV_EM64T) ++#define PAX_IOCTL_COMPAT_INFO _IOW(PAX_IOC_MAGIC, 1, compat_uptr_t) ++#define PAX_IOCTL_COMPAT_STATUS _IOW(PAX_IOC_MAGIC, 2, compat_uptr_t) ++#define PAX_IOCTL_COMPAT_RESERVE_ALL _IO(PAX_IOC_MAGIC, 3) ++#define PAX_IOCTL_COMPAT_UNRESERVE _IO(PAX_IOC_MAGIC, 4) ++#endif ++ ++#elif 
defined(DRV_OS_FREEBSD) ++ ++#define PAX_DEVICE_NAME "/dev/" PAX_NAME ++ ++#define PAX_IOC_MAGIC 100 ++#define PAX_IOCTL_INFO _IOW(PAX_IOC_MAGIC, 1, IOCTL_ARGS_NODE) ++#define PAX_IOCTL_STATUS _IOW(PAX_IOC_MAGIC, 2, IOCTL_ARGS_NODE) ++#define PAX_IOCTL_RESERVE_ALL _IO(PAX_IOC_MAGIC, 3) ++#define PAX_IOCTL_UNRESERVE _IO(PAX_IOC_MAGIC, 4) ++ ++#elif defined(DRV_OS_MAC) ++ ++// OSX driver names are always in reverse DNS form. ++#define PAXDriverClassName com_intel_driver_PAX ++#define kPAXDriverClassName "com_intel_driver_PAX" ++#define PAX_DEVICE_NAME "com.intel.driver.PAX" ++ ++// User client method dispatch selectors. ++enum { kPAXUserClientOpen, ++ kPAXUserClientClose, ++ kPAXReserveAll, ++ kPAXUnreserve, ++ kPAXGetStatus, ++ kPAXGetInfo, ++ kPAXDataIO, ++ kNumberOfMethods // Must be last ++}; ++ ++#else ++#warning "unknown OS in pax_shared.h" ++#endif ++ ++// data for PAX_IOCTL_INFO call ++ ++struct PAX_INFO_NODE_S { ++ volatile U64 managed_by; // entity managing PAX ++ volatile U32 version; // PAX version number ++ volatile U64 reserved1; // force 8-byte alignment ++ volatile U32 reserved2; // unreserved ++}; ++ ++typedef struct PAX_INFO_NODE_S PAX_INFO_NODE; ++typedef PAX_INFO_NODE * PAX_INFO; ++ ++// data for PAX_IOCTL_STATUS call ++ ++struct PAX_STATUS_NODE_S { ++ volatile U64 guid; // reservation ID (globally unique identifier) ++ volatile DRV_FILE_DESC pid; // pid of process that has the reservation ++ volatile U64 start_time; // reservation start time ++ volatile U32 is_reserved; // 1 if there is a reservation, 0 otherwise ++}; ++ ++typedef struct PAX_STATUS_NODE_S PAX_STATUS_NODE; ++typedef PAX_STATUS_NODE * PAX_STATUS; ++ ++struct PAX_VERSION_NODE_S { ++ union { ++ U32 version; ++ struct { ++ U32 major : 8; ++ U32 minor : 8; ++ U32 bugfix : 16; ++ } s1; ++ } u1; ++}; ++ ++typedef struct PAX_VERSION_NODE_S PAX_VERSION_NODE; ++typedef PAX_VERSION_NODE * PAX_VERSION; ++ ++#define PAX_VERSION_NODE_version(v) ((v)->u1.version) ++#define 
PAX_VERSION_NODE_major(v) ((v)->u1.s1.major) ++#define PAX_VERSION_NODE_minor(v) ((v)->u1.s1.minor) ++#define PAX_VERSION_NODE_bugfix(v) ((v)->u1.s1.bugfix) ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/include/rise_errors.h b/drivers/platform/x86/sepdk/include/rise_errors.h +new file mode 100644 +index 000000000000..29fb278def7d +--- /dev/null ++++ b/drivers/platform/x86/sepdk/include/rise_errors.h +@@ -0,0 +1,326 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2004-2018 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++***/ ++ ++#ifndef _RISE_ERRORS_H_ ++#define _RISE_ERRORS_H_ ++ ++// ++// NOTE: ++// ++// 1) Before adding an error code, first make sure the error code doesn't ++// already exist. If it does, use that, don't create a new one just because... ++// ++// 2) When adding an error code, add it to the end of the list. Don't insert ++// error numbers in the middle of the list! For backwards compatibility, ++// we don't want the numbers changing unless we really need them ++// to for some reason (like we want to switch to negative error numbers) ++// ++// 3) Change the VT_LAST_ERROR_CODE macro to point to the (newly added) ++// last error. This is done so SW can verify the number of error codes ++// possible matches the number of error strings it has ++// ++// 4) Don't forget to update the error string table to include your ++// error code (rise.c). Since the goal is something human readable ++// you don't need to use abbreviations in there (ie. 
don't say "bad param", ++// say "bad parameter" or "illegal parameter passed in") ++// ++// 5) Compile and run the test_rise app (in the test_rise directory) to ++// verify things are still working ++// ++// ++ ++#define VT_SUCCESS 0 ++#define VT_FAILURE -1 ++ ++/*************************************************************/ ++ ++#define VT_INVALID_MAX_SAMP 1 ++#define VT_INVALID_SAMP_PER_BUFF 2 ++#define VT_INVALID_SAMP_INTERVAL 3 ++#define VT_INVALID_PATH 4 ++#define VT_TB5_IN_USE 5 ++#define VT_INVALID_NUM_EVENTS 6 ++#define VT_INTERNAL_ERROR 8 ++#define VT_BAD_EVENT_NAME 9 ++#define VT_NO_SAMP_SESSION 10 ++#define VT_NO_EVENTS 11 ++#define VT_MULTIPLE_RUNS 12 ++#define VT_NO_SAM_PARAMS 13 ++#define VT_SDB_ALREADY_EXISTS 14 ++#define VT_SAMPLING_ALREADY_STARTED 15 ++#define VT_TBS_NOT_SUPPORTED 16 ++#define VT_INVALID_SAMPARAMS_SIZE 17 ++#define VT_INVALID_EVENT_SIZE 18 ++#define VT_ALREADY_PROCESSES 19 ++#define VT_INVALID_EVENTS_PATH 20 ++#define VT_INVALID_LICENSE 21 ++ ++/******************************************************/ ++//SEP error codes ++ ++#define VT_SAM_ERROR 22 ++#define VT_SAMPLE_FILE_ALREADY_MAPPED 23 ++#define VT_INVALID_SAMPLE_FILE 24 ++#define VT_UNKNOWN_SECTION_NUMBER 25 ++#define VT_NO_MEMORY 26 ++#define VT_ENV_VAR_NOT_FOUND 27 ++#define VT_SAMPLE_FILE_NOT_MAPPED 28 ++#define VT_BUFFER_OVERFLOW 29 ++#define VT_USER_OP_COMPLETED 30 ++#define VT_BINARY_NOT_FOUND 31 ++#define VT_ISM_NOT_INITIALIZED 32 ++#define VT_NO_SYMBOLS 33 ++#define VT_SAMPLE_FILE_MAPPING_ERROR 34 ++#define VT_BUFFER_NULL 35 ++#define VT_UNEXPECTED_NULL_PTR 36 ++#define VT_BINARY_LOAD_FAILED 37 ++#define VT_FUNCTION_NOT_FOUND_IN_BINARY 38 ++#define VT_ENTRY_NOT_FOUND 39 ++#define VT_SEP_SYNTAX_ERROR 40 ++#define VT_SEP_OPTIONS_ERROR 41 ++#define VT_BAD_EVENT_MODIFIER 42 ++#define VT_INCOMPATIBLE_PARAMS 43 ++#define VT_FILE_OPEN_FAILED 44 ++#define VT_EARLY_EXIT 45 ++#define VT_TIMEOUT_RETURN 46 ++#define VT_NO_CHILD_PROCESS 47 ++#define VT_DRIVER_RUNNING 48 ++#define 
VT_DRIVER_STOPPED 49 ++#define VT_MULTIPLE_RUNS_NEEDED 50 ++#define VT_QUIT_IMMEDIATE 51 ++#define VT_DRIVER_INIT_FAILED 52 ++#define VT_NO_TB5_CREATED 53 ++#define VT_NO_WRITE_PERMISSION 54 ++#define VT_DSA_INIT_FAILED 55 ++#define VT_INVALID_CPU_MASK 56 ++#define VT_SAMP_IN_RUNNING_STATE 57 ++#define VT_SAMP_IN_PAUSE_STATE 58 ++#define VT_SAMP_IN_STOP_STATE 59 ++#define VT_SAMP_NO_SESSION 60 ++#define VT_NOT_CONFIGURED 61 ++#define VT_LAUNCH_BUILD64_FAILED 62 ++#define VT_BAD_PARAMETER 63 ++#define VT_ISM_INIT_FAILED 64 ++#define VT_INVALID_STATE_TRANS 65 ++#define VT_EARLY_EXIT_N_CANCEL 66 ++#define VT_EVT_MGR_NOT_INIT 67 ++#define VT_ISM_SECTION_ENUM_FAILED 68 ++#define VT_VG_PARSER_ERROR 69 ++#define VT_MISSING_VALUE_FOR_TOKEN 70 ++#define VT_EMPTY_SAMPLE_FILE_NAME 71 ++#define VT_UNEXPECTED_VALUE 72 ++#define VT_NOT_IMPLEMENTED 73 ++#define VT_MISSING_COL_DEPNDNCIES 74 ++#define VT_DEP_COL_NOT_LIB_DEFINED 75 ++#define VT_COL_NOT_REG_WITH_LIB 76 ++#define VT_SECTION_ALREADY_IN_USE 77 ++#define VT_SECTION_NOT_EXIST 78 ++#define VT_STREAM_NOT_EXIST 79 ++#define VT_INVALID_STREAM 80 ++#define VT_STREAM_ALREADY_IN_USE 81 ++#define VT_DATA_DESC_NOT_EXIST 82 ++#define VT_INVALID_ERROR_CODE 83 ++#define VT_INCOMPATIBLE_VERSION 84 ++#define VT_LEGACY_DATA_NOT_EXIST 85 ++#define VT_INVALID_READ_START 86 ++#define VT_DRIVER_OPEN_FAILED 87 ++#define VT_DRIVER_IOCTL_FAILED 88 ++#define VT_SAMP_FILE_CREATE_FAILED 89 ++#define VT_MODULE_FILE_CREATE_FAILED 90 ++#define VT_INVALID_SAMPLE_FILE_NAME 91 ++#define VT_INVALID_MODULE_FILE_NAME 92 ++#define VT_FORK_CHILD_PROCESS_FAILED 93 ++#define VT_UNEXPECTED_MISMATCH_IN_STRING_TYPES 94 ++#define VT_INCOMPLETE_TB5_ENCOUNTERED 95 ++#define VT_ERR_CONVERSION_FROM_STRING_2_NUMBER 96 ++#define VT_INVALID_STRING 97 ++#define VT_UNSUPPORTED_DATA_SIZE 98 ++#define VT_TBRW_INIT_FAILED 99 ++#define VT_PLUGIN_UNLOAD 100 ++#define VT_PLUGIN_ENTRY_NULL 101 ++#define VT_UNKNOWN_PLUGIN 102 ++#define VT_BUFFER_TOO_SMALL 103 ++#define 
VT_CANNOT_MODIFY_COLUMN 104 ++#define VT_MULT_FILTERS_NOT_ALLOWED 105 ++#define VT_ADDRESS_IN_USE 106 ++#define VT_NO_MORE_MMAPS 107 ++#define VT_MAX_PAGES_IN_DS_EXCEEDED 108 ++#define VT_INVALID_COL_TYPE_IN_GROUP_INFO 109 ++#define VT_AGG_FN_ON_VARCHAR_NOT_SUPP 110 ++#define VT_INVALID_ACCESS_PERMS 111 ++#define VT_NO_DATA_TO_DISPLAY 112 ++#define VT_TB5_IS_NOT_BOUND 113 ++#define VT_MISSING_GROUP_BY_COLUMN 114 ++#define VT_SMRK_MAX_STREAMS_EXCEEDED 115 ++#define VT_SMRK_STREAM_NOT_CREATED 116 ++#define VT_SMRK_NOT_IMPL 117 ++#define VT_SMRK_TYPE_NOT_IMPL 118 ++#define VT_SMRK_TYPE_ALREADY_SET 119 ++#define VT_SMRK_NO_STREAM 120 ++#define VT_SMRK_INVALID_STREAM_TYPE 121 ++#define VT_SMRK_STREAM_NOT_FOUND 122 ++#define VT_SMRK_FAIL 123 ++#define VT_SECTION_NOT_READABLE 124 ++#define VT_SECTION_NOT_WRITEABLE 125 ++#define VT_GLOBAL_SECTION_NOT_CLOSED 126 ++#define VT_STREAM_SECTION_NOT_CLOSED 127 ++#define VT_STREAM_NOT_CLOSED 128 ++#define VT_STREAM_NOT_BOUND 129 ++#define VT_NO_COLS_SPECIFIED 130 ++#define VT_NOT_ALL_SECTIONS_CLOSED 131 ++#define VT_SMRK_INVALID_PTR 132 ++#define VT_UNEXPECTED_BIND_MISMATCH 133 ++#define VT_WIN_TIMER_ERROR 134 ++#define VT_ONLY_SNGL_DEPNDT_COL_ALLWD 135 ++#define VT_BAD_MODULE 136 ++#define VT_INPUT_SOURCE_INFO_NOT_SET 137 ++#define VT_UNSUPPORTED_TIME_GRAN 138 ++#define VT_NO_SAMPLES_COLLECTED 139 ++#define VT_INVALID_CPU_TYPE_VERSION 140 ++#define VT_BIND_UNEXPECTED_1STMODREC 141 ++#define VT_BIND_MODULES_NOT_SORTED 142 ++#define VT_UNEXPECTED_NUM_CPUIDS 143 ++#define VT_UNSUPPORTED_ARCH_TYPE 144 ++#define VT_NO_DATA_TO_WRITE 145 ++#define VT_EM_TIME_SLICE_TOO_SMALL 146 ++#define VT_EM_TOO_MANY_EVENT_GROUPS 147 ++#define VT_EM_ZERO_GROUPS 148 ++#define VT_EM_NOT_SUPPORTED 149 ++#define VT_PMU_IN_USE 150 ++#define VT_TOO_MANY_INTERRUPTS 151 ++#define VT_MAX_SAMPLES_REACHED 152 ++#define VT_MODULE_COLLECTION_FAILED 153 ++#define VT_INCOMPATIBLE_DRIVER 154 ++#define VT_UNABLE_LOCATE_TRIGGER_EVENT 155 ++#define 
VT_COMMAND_NOT_HANDLED 156 ++#define VT_DRIVER_VERSION_MISMATCH 157 ++#define VT_MAX_MARKERS 158 ++#define VT_DRIVER_COMM_FAILED 159 ++#define VT_CHIPSET_CONFIG_FAILED 160 ++#define VT_BAD_DATA_BASE 161 ++#define VT_PAX_SERVICE_NOT_CONNECTED 162 ++#define VT_PAX_SERVICE_ERROR 163 ++#define VT_PAX_PMU_RESERVE_FAILED 164 ++#define VT_INVALID_CPU_INFO_TYPE 165 ++#define VT_CACHE_DOESNT_EXIST 166 ++#define VT_UNSUPPORTED_UNCORE_ARCH_TYPE 167 ++#define VT_EXCEEDED_MAX_EVENTS 168 ++#define VT_MARKER_TIMER_FAILED 169 ++#define VT_PAX_PMU_UNRESERVE_FAILED 170 ++#define VT_MULTIPLE_PROCESSES_FOUND 171 ++#define VT_NO_SUCH_PROCESS_FOUND 172 ++#define VT_PCL_NOT_ENABLED 173 ++#define VT_PCL_UID_CHECK 174 ++#define VT_DEL_RESULTS_DIR_FAILED 175 ++#define VT_NO_VALID_EVENTS 176 ++#define VT_INVALID_EVENT 177 ++#define VT_EVENTS_COUNTED 178 ++#define VT_EVENTS_COLLECTED 179 ++#define VT_UNSUPPORTED_GFX_ARCH_TYPE 180 ++#define VT_GFX_CONFIG_FAILED 181 ++#define VT_UNSUPPORTED_NON_NATIVE_MODE 182 ++#define VT_INVALID_DEVICE 183 ++#define VT_ENV_SETUP_FAILED 184 ++#define VT_RESUME_NOT_RECEIVED 185 ++#define VT_UNSUPPORTED_PWR_ARCH_TYPE 186 ++#define VT_PWR_CONFIG_FAILED 187 ++#define VT_NMI_WATCHDOG_FOUND 188 ++#define VT_NO_PMU_RESOURCES 189 ++#define VT_MIC_CARD_NOT_ONLINE 190 ++#define VT_FREEZE_ON_PMI_NOT_AVAIL 191 ++#define VT_FLUSH_FAILED 192 ++#define VT_FLUSH_SUCCESS 193 ++#define VT_WRITE_ERROR 194 ++#define VT_NO_SPACE 195 ++#define VT_MSR_ACCESS_ERROR 196 ++#define VT_PEBS_NOT_SUPPORTED 197 ++#define VT_LUA_PARSE_ERROR 198 ++#define VT_COMM_CONNECTION_CLOSED_BY_REMOTE 199 ++#define VT_COMM_LISTEN_ERROR 200 ++#define VT_COMM_BIND_ERROR 201 ++#define VT_COMM_ACCEPT_ERROR 202 ++#define VT_COMM_SEND_ERROR 203 ++#define VT_COMM_RECV_ERROR 204 ++#define VT_COMM_SOCKET_ERROR 205 ++#define VT_COMM_CONNECT_ERROR 206 ++#define VT_TARGET_COLLECTION_MISMATCH 207 ++#define VT_INVALID_SEP_DRIVER_LOG 208 ++#define VT_COMM_PROTOCOL_VERSION_MISTMATCH 209 ++#define 
VT_SAMP_IN_UNEXPECTED_STATE 210 ++#define VT_COMM_RECV_BUF_RESIZE_ERROR 211 ++ ++/* ++ * define error code for checking on async marker request ++ */ ++#define VT_INVALID_MARKER_ID -1 ++ ++/* ++ * ************************************************************ ++ * NOTE: after adding new error code(s), remember to also ++ * update the following: ++ * 1) VT_LAST_ERROR_CODE below ++ * 2) viewer/sampling_utils/src/rise.c ++ * 3) collector/controller/sep_msg_catalog.xmc ++ * 4) qnx_kernel/sepdk/include/rise_errors.h ++ * ++ * ************************************************************ ++ */ ++ ++// ++// To make error checking easier, the special VT_LAST_ERROR_CODE ++// should be set to whatever is the last error on the list above ++// ++#define VT_LAST_ERROR_CODE VT_COMM_RECV_BUF_RESIZE_ERROR ++ ++// ++// Define a macro to determine success or failure. Users of this ++// error header file should use the macros instead of direct ++// checks so that we can change the error numbers in the future ++// (such as making negative numbers be an error indication and positive ++// numbers being a success with a value indication) ++// ++#define VTSA_SUCCESS(x) ((x) == VT_SUCCESS) ++#define VTSA_FAILED(x) (!VTSA_SUCCESS(x)) ++ ++// ++// These should be deprecated, but we'll keep them here just in case ++// ++#define SEP_IS_SUCCESS(x) VTSA_SUCCESS(x) ++#define SEP_IS_FAILED(x) VTSA_FAILED(x) ++ ++/************************************************************* ++ * API Error Codes ++ *************************************************************/ ++#define VTAPI_INVALID_MAX_SAMP VT_INVALID_MAX_SAMP ++#define VTAPI_INVALID_SAMP_PER_BUFF VT_INVALID_SAMP_PER_BUFF ++#define VTAPI_INVALID_SAMP_INTERVAL VT_INVALID_SAMP_INTERVAL ++#define VTAPI_INVALID_PATH VT_INVALID_PATH ++#define VTAPI_TB5_IN_USE VT_TB5_IN_USE ++#define VTAPI_INVALID_NUM_EVENTS VT_INVALID_NUM_EVENTS ++#define VTAPI_INTERNAL_ERROR VT_INTERNAL_ERROR ++#define VTAPI_BAD_EVENT_NAME VT_BAD_EVENT_NAME ++#define 
VTAPI_NO_SAMP_SESSION VT_NO_SAMP_SESSION ++#define VTAPI_NO_EVENTS VT_NO_EVENTS ++#define VTAPI_MULTIPLE_RUNS VT_MULTIPLE_RUNS ++#define VTAPI_NO_SAM_PARAMS VT_NO_SAM_PARAMS ++#define VTAPI_SDB_ALREADY_EXISTS VT_SDB_ALREADY_EXISTS ++#define VTAPI_SAMPLING_ALREADY_STARTED VT_SAMPLING_ALREADY_STARTED ++#define VTAPI_TBS_NOT_SUPPORTED VT_TBS_NOT_SUPPORTED ++#define VTAPI_INVALID_SAMPARAMS_SIZE VT_INVALID_SAMPARAMS_SIZE ++#define VTAPI_INVALID_EVENT_SIZE VT_INVALID_EVENT_SIZE ++#define VTAPI_ALREADY_PROCESSES VT_ALREADY_PROCESSES ++#define VTAPI_INVALID_EVENTS_PATH VT_INVALID_EVENTS_PATH ++#define VTAPI_INVALID_LICENSE VT_INVALID_LICENSE ++ ++typedef int RISE_ERROR; ++typedef void *RISE_PTR; ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/pax/Makefile b/drivers/platform/x86/sepdk/pax/Makefile +new file mode 100755 +index 000000000000..267d70eeaab5 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/pax/Makefile +@@ -0,0 +1,4 @@ ++ccflags-y := -I$(src)/../include -I$(src)/../inc ++ ++obj-$(CONFIG_SEP_PAX) += pax.o ++ +diff --git a/drivers/platform/x86/sepdk/pax/pax.c b/drivers/platform/x86/sepdk/pax/pax.c +new file mode 100755 +index 000000000000..f8eebf989b0e +--- /dev/null ++++ b/drivers/platform/x86/sepdk/pax/pax.c +@@ -0,0 +1,967 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#if defined(CONFIG_HARDLOCKUP_DETECTOR) && \ ++ LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++#include ++#include ++#include ++#include ++#endif ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv.h" ++#include "lwpmudrv_ioctl.h" ++ ++#include "control.h" ++#include "pax_shared.h" ++#include "pax.h" ++ ++MODULE_AUTHOR("Copyright(C) 2009-2018 Intel Corporation"); ++MODULE_VERSION(PAX_NAME "_" PAX_VERSION_STR); ++MODULE_LICENSE("Dual BSD/GPL"); ++ ++typedef struct PAX_DEV_NODE_S PAX_DEV_NODE; ++typedef PAX_DEV_NODE * PAX_DEV; ++ ++struct PAX_DEV_NODE_S { ++ long buffer; ++ struct semaphore sem; ++ struct cdev cdev; ++}; ++ ++#define PAX_DEV_buffer(dev) ((dev)->buffer) ++#define PAX_DEV_sem(dev) ((dev)->sem) ++#define PAX_DEV_cdev(dev) ((dev)->cdev) ++ ++// global variables for the PAX driver ++ ++static PAX_DEV pax_control; // main control ++static dev_t pax_devnum; // the major char device number for PAX ++static PAX_VERSION_NODE pax_version; // version of PAX ++static PAX_INFO_NODE pax_info; // information on PAX ++static PAX_STATUS_NODE pax_status; // PAX reservation status ++ ++static struct class *pax_class; ++ ++#define NMI_WATCHDOG_PATH 
"/proc/sys/kernel/nmi_watchdog" ++static S8 nmi_watchdog_restore = '0'; ++ ++static struct proc_dir_entry *pax_version_file; ++ ++static int pax_version_proc_read(struct seq_file *, void *); ++static int pax_version_proc_open(struct inode *, struct file *); ++static struct file_operations pax_version_ops = { ++ .owner = THIS_MODULE, ++ .open = pax_version_proc_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++// Print macros for kernel debugging ++ ++#if defined(DEBUG) ++#define PAX_PRINT_DEBUG(fmt, args...) \ ++ { \ ++ printk(KERN_INFO "PAX: [DEBUG] " fmt, ##args); \ ++ } ++#else ++#define PAX_PRINT_DEBUG(fmt, args...) \ ++ { \ ++ ; \ ++ } ++#endif ++#define PAX_PRINT(fmt, args...) \ ++ { \ ++ printk(KERN_INFO "PAX: " fmt, ##args); \ ++ } ++#define PAX_PRINT_WARNING(fmt, args...) \ ++ { \ ++ printk(KERN_ALERT "PAX: [Warning] " fmt, ##args); \ ++ } ++#define PAX_PRINT_ERROR(fmt, args...) \ ++ { \ ++ printk(KERN_CRIT "PAX: [ERROR] " fmt, ##args); \ ++ } ++ ++// various other useful macros ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25) ++#define PAX_FIND_TASK_BY_PID(pid) find_task_by_pid(pid) ++#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) ++#define PAX_FIND_TASK_BY_PID(pid) \ ++ pid_task(find_pid_ns(pid, &init_pid_ns), PIDTYPE_PID); ++#else ++#define PAX_FIND_TASK_BY_PID(pid) pid_task(find_get_pid(pid), PIDTYPE_PID); ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) ++#define PAX_TASKLIST_READ_LOCK() read_lock(&tasklist_lock) ++#define PAX_TASKLIST_READ_UNLOCK() read_unlock(&tasklist_lock) ++#else ++#define PAX_TASKLIST_READ_LOCK() rcu_read_lock() ++#define PAX_TASKLIST_READ_UNLOCK() rcu_read_unlock() ++#endif ++ ++#if defined(CONFIG_HARDLOCKUP_DETECTOR) && \ ++ LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++ ++static struct task_struct *pax_Enable_NMIWatchdog_Thread; ++static struct semaphore pax_Enable_NMIWatchdog_Sem; ++static struct task_struct *pax_Disable_NMIWatchdog_Thread; ++static struct 
semaphore pax_Disable_NMIWatchdog_Sem; ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn S32 pax_Disable_NMIWatchdog(PVOID data) ++ * ++ * @param data - Pointer to data ++ * ++ * @return S32 ++ * ++ * @brief Disable nmi watchdog ++ * ++ * Special Notes ++ */ ++static S32 pax_Disable_NMIWatchdog(PVOID data) ++{ ++ struct file *fd; ++ mm_segment_t old_fs; ++ struct cred *kcred; ++ loff_t pos = 0; ++ S8 new_val = '0'; ++ ++ up(&pax_Disable_NMIWatchdog_Sem); ++ ++ kcred = prepare_kernel_cred(NULL); ++ if (kcred) { ++ commit_creds(kcred); ++ } else { ++ PAX_PRINT_ERROR( ++ "pax_Disable_NMIWatchdog: prepare_kernel_cred returns NULL\n"); ++ } ++ ++ fd = filp_open(NMI_WATCHDOG_PATH, O_RDWR, 0); ++ ++ if (fd) { ++ fd->f_op->read(fd, (char __user *)&nmi_watchdog_restore, 1, &fd->f_pos); ++ PAX_PRINT_DEBUG("Existing nmi_watchdog value = %c\n", ++ nmi_watchdog_restore); ++ ++ if (nmi_watchdog_restore != '0') { ++ old_fs = get_fs(); ++ set_fs(KERNEL_DS); ++ fd->f_op->write(fd, (char __user *)&new_val, 1, &pos); ++ set_fs(old_fs); ++ } else { ++ PAX_PRINT_DEBUG( ++ "pax_Disable_NMIWatchdog: NMI watchdog already disabled!\n"); ++ } ++ ++ filp_close(fd, NULL); ++ } else { ++ PAX_PRINT_ERROR( ++ "pax_Disable_NMIWatchdog: filp_open returns NULL\n"); ++ } ++ ++ while (!kthread_should_stop()) { ++ schedule(); ++ } ++ ++ return 0; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn S32 pax_Check_NMIWatchdog(PVOID data) ++ * ++ * @param data - Pointer to data ++ * ++ * @return S32 ++ * ++ * @brief Check nmi watchdog ++ * ++ * Special Notes ++ */ ++ ++#if 0 ++static S32 pax_Check_NMIWatchdog(PVOID data) ++{ ++ struct file *fd; ++ struct cred *kcred; ++ ++ kcred = prepare_kernel_cred(NULL); ++ if (kcred) { ++ commit_creds(kcred); ++ } ++ ++ fd = filp_open(NMI_WATCHDOG_PATH, O_RDWR, 0); ++ ++ if (fd) { ++ fd->f_op->read(fd, &nmi_watchdog_restore, 1, &fd->f_pos); ++ PAX_PRINT_DEBUG("Checking nmi_watchdog value = %c\n", ++ nmi_watchdog_restore); ++ filp_close(fd, NULL); ++ } ++ ++ do_exit(0); ++ ++ return 0; ++} ++#endif ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn S32 pax_Enable_NMIWatchdog(PVOID data) ++ * ++ * @param data - Pointer to data ++ * ++ * @return S32 ++ * ++ * @brief Enable nmi watchdog ++ * ++ * Special Notes ++ */ ++static S32 pax_Enable_NMIWatchdog(PVOID data) ++{ ++ struct file *fd; ++ mm_segment_t old_fs; ++ struct cred *kcred; ++ loff_t pos = 0; ++ S8 new_val = '1'; ++ ++ up(&pax_Enable_NMIWatchdog_Sem); ++ ++ kcred = prepare_kernel_cred(NULL); ++ if (kcred) { ++ commit_creds(kcred); ++ } else { ++ PAX_PRINT_ERROR( ++ "pax_Enable_NMIWatchdog: prepare_kernel_cred returns NULL!\n"); ++ } ++ ++ fd = filp_open(NMI_WATCHDOG_PATH, O_WRONLY, 0); ++ ++ if (fd) { ++ old_fs = get_fs(); ++ set_fs(KERNEL_DS); ++ fd->f_op->write(fd, (char __user *)&new_val, 1, &pos); ++ set_fs(old_fs); ++ ++ filp_close(fd, NULL); ++ } else { ++ PAX_PRINT_ERROR( ++ "pax_Enable_NMIWatchdog: filp_open returns NULL!\n"); ++ } ++ ++ while (!kthread_should_stop()) { ++ schedule(); ++ } ++ ++ return 0; ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn void pax_Init() ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * @brief Initialize PAX system ++ * ++ * Special Notes ++ */ ++static void pax_Init(void) ++{ ++ // ++ // Initialize PAX driver version (done once at driver load time) ++ // ++ ++ PAX_VERSION_NODE_major(&pax_version) = PAX_MAJOR_VERSION; ++ PAX_VERSION_NODE_minor(&pax_version) = PAX_MINOR_VERSION; ++ PAX_VERSION_NODE_bugfix(&pax_version) = PAX_BUGFIX_VERSION; ++ ++ // initialize PAX_Info ++ pax_info.version = PAX_VERSION_NODE_version(&pax_version); ++ pax_info.managed_by = 1; // THIS_MODULE->name; ++ ++ // initialize PAX_Status ++ pax_status.guid = PAX_GUID_UNINITIALIZED; ++ pax_status.pid = 0; ++ pax_status.start_time = 0; ++ pax_status.is_reserved = PAX_PMU_UNRESERVED; ++ ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void pax_Cleanup() ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * @brief UnInitialize PAX system ++ * ++ * Special Notes ++ */ ++static void pax_Cleanup(void) ++{ ++ // uninitialize PAX_Info ++ pax_info.managed_by = 0; ++ ++ // uninitialize PAX_Status ++ pax_status.guid = PAX_GUID_UNINITIALIZED; ++ pax_status.pid = 0; ++ pax_status.start_time = 0; ++ pax_status.is_reserved = PAX_PMU_UNRESERVED; ++ ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn U32 pax_Process_Valid() ++ * ++ * @param U32 pid - process ID ++ * ++ * @return TRUE or FALSE ++ * ++ * @brief Check whether process with pid still exists, and if so, ++ * whether it is still "alive". If so, then process is ++ * deemed valid. Otherwise, process is deemed invalid. 
++ * ++ * Special Notes ++ */ ++static U32 pax_Process_Valid(U32 pid) ++{ ++ struct task_struct *process_task; ++ U32 valid_process; ++ ++ // ++ // There doesn't seem to be a way to force the process_task to continue ++ // to exist after the read_lock is released (SMP system could delete the ++ // process after lock is released on another processor), so we need to ++ // do all the work with the lock held... There is a routine on later ++ // 2.6 kernels (get_task_struct() and put_task_struct()) which seems ++ // to do what we want, but the code behind the macro calls a function ++ // that isn't EXPORT'ed so we can't use it in a device driver... ++ // ++ PAX_TASKLIST_READ_LOCK(); ++ process_task = PAX_FIND_TASK_BY_PID(pax_status.pid); ++ if ((process_task == NULL) || ++ (process_task->exit_state == EXIT_ZOMBIE) || ++ (process_task->exit_state == EXIT_DEAD)) { ++ // not a valid process ++ valid_process = FALSE; ++ } else { ++ // process is "alive", so assume it is still valid ... ++ valid_process = TRUE; ++ } ++ PAX_TASKLIST_READ_UNLOCK(); ++ ++ return valid_process; ++} ++ ++// ************************************************************************** ++// ++// below are PAX Open/Read/Write device functions (appears in /proc/kallsyms) ++// ++// ************************************************************************** ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn int pax_Open() ++ * ++ * @param struct inode *inode ++ * @param struct file *filp ++ * ++ * @return int (TODO: check for open failure) ++ * ++ * @brief This function is called when doing an open(/dev/pax) ++ * ++ * Special Notes ++ */ ++static int pax_Open(struct inode *inode, struct file *filp) ++{ ++ PAX_PRINT_DEBUG("open called on maj:%d, min:%d\n", imajor(inode), ++ iminor(inode)); ++ filp->private_data = container_of(inode->i_cdev, PAX_DEV_NODE, cdev); ++ ++ return 0; ++} ++ ++// ************************************************************************** ++// ++// below are PAX IOCTL function handlers ++// ++// ************************************************************************** ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS pax_Get_Info() ++ * ++ * @param IOCTL_ARGS arg - pointer to the output buffer ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the PAX_IOCTL_INFO call ++ * Returns static information related to PAX (e.g., version) ++ * ++ * Special Notes ++ */ ++static OS_STATUS pax_Get_Info(IOCTL_ARGS arg) ++{ ++ int error; ++ ++ error = copy_to_user((void __user *)(arg->buf_usr_to_drv), ++ &pax_info, sizeof(PAX_INFO_NODE)); ++ ++ if (error != 0) { ++ PAX_PRINT_ERROR( ++ "pax_Get_Info: unable to copy to user (error=%d)!\n", ++ error); ++ return OS_FAULT; ++ } ++ ++ PAX_PRINT_DEBUG("pax_Get_Info: sending PAX info (%ld bytes):\n", ++ sizeof(PAX_INFO_NODE)); ++ PAX_PRINT_DEBUG("pax_Get_Info: raw_version = %u (0x%x)\n", ++ pax_info.version, pax_info.version); ++ PAX_PRINT_DEBUG("pax_Get_Info: major = %u\n", ++ PAX_VERSION_NODE_major(&pax_version)); ++ PAX_PRINT_DEBUG("pax_Get_Info: minor = %u\n", ++ PAX_VERSION_NODE_minor(&pax_version)); ++ PAX_PRINT_DEBUG("pax_Get_Info: bugfix = %u\n", ++ PAX_VERSION_NODE_bugfix(&pax_version)); ++ PAX_PRINT_DEBUG("pax_Get_Info: managed_by = %lu\n", ++ (long unsigned int)pax_info.managed_by); ++ PAX_PRINT_DEBUG("pax_Get_Info: 
information sent.\n"); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS pax_Get_Status() ++ * ++ * @param IOCTL_ARGS arg - pointer to the output buffer ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the PAX_IOCTL_STATUS call ++ * Returns status of the reservation (e.g., who owns) ++ * ++ * Special Notes ++ */ ++static OS_STATUS pax_Get_Status(IOCTL_ARGS arg) ++{ ++ int error; ++ ++ error = copy_to_user((void __user *)(arg->buf_usr_to_drv), ++ &pax_status, sizeof(PAX_STATUS_NODE)); ++ if (error != 0) { ++ PAX_PRINT_ERROR( ++ "pax_Get_Status: unable to copy to user (error=%d)!\n", ++ error); ++ return OS_FAULT; ++ } ++ ++ PAX_PRINT_DEBUG("pax_Get_Status: sending PAX status (%ld bytes):\n", ++ sizeof(PAX_STATUS_NODE)); ++ PAX_PRINT_DEBUG("pax_Get_Status: guid = %lu\n", ++ (long unsigned int)pax_status.guid); ++ PAX_PRINT_DEBUG("pax_Get_Status: pid = %lu\n", ++ (long unsigned int)pax_status.pid); ++ PAX_PRINT_DEBUG("pax_Get_Status: start_time = %lu\n", ++ (long unsigned int)pax_status.start_time); ++ PAX_PRINT_DEBUG("pax_Get_Status: is_reserved = %u\n", ++ pax_status.is_reserved); ++ PAX_PRINT_DEBUG("pax_Get_Status: status sent.\n"); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS pax_Unreserve() ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the PAX_IOCTL_UNRESERVE call ++ * Returns OS_SUCCESS if PMU unreservation succeeded, otherwise failure ++ * ++ * Special Notes ++ */ ++static OS_STATUS pax_Unreserve(void) ++{ ++ // if no reservation is currently held, then return success ++ if (pax_status.is_reserved == PAX_PMU_UNRESERVED) { ++ PAX_PRINT_DEBUG("pax_Unreserve: currently unreserved\n"); ++ return OS_SUCCESS; ++ } ++ ++ // otherwise, there is a reservation ... 
++ // allow the process which started the reservation to unreserve ++ // or if that process is invalid, then any other process can unreserve ++ if ((pax_status.pid == current->pid) || ++ (!pax_Process_Valid(pax_status.pid))) { ++ S32 reservation = -1; ++ PAX_PRINT_DEBUG( ++ "pax_Unreserve: pid %d attempting to unreserve PMU held by pid %d\n", ++ (U32)current->pid, (U32)pax_status.pid); ++ ++#if !defined(DRV_ANDROID) && !defined(DRV_CHROMEOS) && \ ++ defined(CONFIG_HARDLOCKUP_DETECTOR) && \ ++ LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++ if (nmi_watchdog_restore != '0') { ++ PAX_PRINT_DEBUG( ++ "Attempting to enable NMI watchdog...\n"); ++ ++ sema_init(&pax_Enable_NMIWatchdog_Sem, 0); ++ ++ pax_Enable_NMIWatchdog_Thread = ++ kthread_run(&pax_Enable_NMIWatchdog, NULL, ++ "pax_enable_nmi_watchdog"); ++ if (!pax_Enable_NMIWatchdog_Thread || ++ pax_Enable_NMIWatchdog_Thread == ERR_PTR(-ENOMEM)) { ++ PAX_PRINT_ERROR( ++ "pax_Unreserve: could not create pax_enable_nmi_watchdog kthread."); ++ } else { ++ down(&pax_Enable_NMIWatchdog_Sem); ++ kthread_stop(pax_Enable_NMIWatchdog_Thread); ++ } ++ pax_Enable_NMIWatchdog_Thread = NULL; ++ nmi_watchdog_restore = '0'; ++ } ++#endif ++ ++ reservation = cmpxchg(&pax_status.is_reserved, PAX_PMU_RESERVED, ++ PAX_PMU_UNRESERVED); ++ if (reservation < 0) { ++ // no-op ... eliminates "variable not used" compiler warning ++ } ++ PAX_PRINT_DEBUG("pax_Unreserve: reserve=%d, is_reserved=%d\n", ++ reservation, pax_status.is_reserved); ++ // unreserve but keep track of last PID/GUID that had reservation ++ } ++ ++ PAX_PRINT_DEBUG("pax_Unreserve: pid %d unreserve status: %d\n", ++ current->pid, pax_status.is_reserved); ++ ++ return ((pax_status.is_reserved == PAX_PMU_UNRESERVED) ? OS_SUCCESS : ++ OS_FAULT); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn OS_STATUS pax_Reserve_All() ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the PAX_IOCTL_RESERVE_ALL call ++ * Returns OS_SUCCESS if PMU reservation succeeded, otherwise failure ++ * ++ * Special Notes ++ */ ++static OS_STATUS pax_Reserve_All(void) ++{ ++ S32 reservation = -1; // previous reservation state (initially, unknown) ++ ++ // check if PMU can be unreserved ++ if (pax_status.is_reserved == PAX_PMU_RESERVED) { ++ OS_STATUS unreserve_err = pax_Unreserve(); ++ if (unreserve_err != OS_SUCCESS) { ++ return unreserve_err; // attempt to unreserve failed, so return error ++ } ++ } ++ ++ PAX_PRINT_DEBUG("pax_Reserve_All: pid %d attempting to reserve PMU\n", ++ current->pid); ++ ++ // at this point, there is no reservation, so commence race to reserve ... ++ reservation = cmpxchg(&pax_status.is_reserved, PAX_PMU_UNRESERVED, ++ PAX_PMU_RESERVED); ++ ++ // only one request to reserve will succeed, and when it does, update status ++ // information with the successful request ++ if ((reservation == PAX_PMU_UNRESERVED) && ++ (pax_status.is_reserved == PAX_PMU_RESERVED)) { ++ pax_status.start_time = rdtsc_ordered(); ++ pax_status.pid = current->pid; ++ ++#if !defined(DRV_ANDROID) && !defined(DRV_CHROMEOS) && \ ++ defined(CONFIG_HARDLOCKUP_DETECTOR) && \ ++ LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++ sema_init(&pax_Disable_NMIWatchdog_Sem, 0); ++ pax_Disable_NMIWatchdog_Thread = ++ kthread_run(&pax_Disable_NMIWatchdog, NULL, ++ "pax_disable_nmi_watchdog"); ++ if (!pax_Disable_NMIWatchdog_Thread || ++ pax_Disable_NMIWatchdog_Thread == ERR_PTR(-ENOMEM)) { ++ PAX_PRINT_ERROR( ++ "pax_Reserve_All: could not create pax_disable_nmi_watchdog kthread."); ++ } else { ++ down(&pax_Disable_NMIWatchdog_Sem); ++ kthread_stop(pax_Disable_NMIWatchdog_Thread); ++ } ++ pax_Disable_NMIWatchdog_Thread = NULL; ++#endif ++ ++ return OS_SUCCESS; ++ } ++ ++ return OS_FAULT; ++} ++ ++/* 
------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS pax_Service_IOCTL() ++ * ++ * @param inode - pointer to the device object ++ * @param filp - pointer to the file object ++ * @param cmd - ioctl value (defined in lwpmu_ioctl.h) ++ * @param arg - arg or arg pointer ++ * ++ * @return OS_STATUS ++ * ++ * @brief Worker function that handles IOCTL requests from the user mode ++ * ++ * Special Notes ++ */ ++static IOCTL_OP_TYPE pax_Service_IOCTL(IOCTL_USE_INODE struct file *filp, ++ unsigned int cmd, ++ IOCTL_ARGS_NODE local_args) ++{ ++ int status = OS_SUCCESS; ++ ++ // dispatch to appropriate PAX IOCTL function ++ switch (cmd) { ++ case PAX_IOCTL_INFO: ++ PAX_PRINT_DEBUG("PAX_IOCTL_INFO\n"); ++ status = pax_Get_Info(&local_args); ++ break; ++ ++ case PAX_IOCTL_STATUS: ++ PAX_PRINT_DEBUG("PAX_IOCTL_STATUS\n"); ++ status = pax_Get_Status(&local_args); ++ break; ++ ++ case PAX_IOCTL_RESERVE_ALL: ++ PAX_PRINT_DEBUG("PAX_IOCTL_RESERVE_ALL\n"); ++ status = pax_Reserve_All(); ++ break; ++ ++ case PAX_IOCTL_UNRESERVE: ++ PAX_PRINT_DEBUG("PAX_IOCTL_UNRESERVE\n"); ++ status = pax_Unreserve(); ++ break; ++ ++ default: ++ PAX_PRINT_ERROR("unknown IOCTL cmd: %d magic:%d number:%d\n", ++ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd)); ++ status = OS_ILLEGAL_IOCTL; ++ break; ++ } ++ ++ return status; ++} ++ ++static long pax_Device_Control(IOCTL_USE_INODE struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ int status = OS_SUCCESS; ++ IOCTL_ARGS_NODE local_args; ++ ++ memset(&local_args, 0, sizeof(IOCTL_ARGS_NODE)); ++ if (arg) { ++ status = copy_from_user(&local_args, (void __user *)arg, ++ sizeof(IOCTL_ARGS_NODE)); ++ if (status != OS_SUCCESS) ++ return status; ++ } ++ ++ status = pax_Service_IOCTL(IOCTL_USE_INODE filp, cmd, local_args); ++ return status; ++} ++ ++#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) ++static IOCTL_OP_TYPE pax_Device_Control_Compat(struct file *filp, ++ unsigned int cmd, ++ unsigned long arg) ++{ ++ 
int status = OS_SUCCESS; ++ IOCTL_COMPAT_ARGS_NODE local_args_compat; ++ IOCTL_ARGS_NODE local_args; ++ ++ memset(&local_args_compat, 0, sizeof(IOCTL_COMPAT_ARGS_NODE)); ++ if (arg) { ++ status = copy_from_user(&local_args_compat, ++ (void __user *)arg, ++ sizeof(IOCTL_COMPAT_ARGS_NODE)); ++ if (status != OS_SUCCESS) ++ return status; ++ } ++ ++ local_args.len_drv_to_usr = local_args_compat.len_drv_to_usr; ++ local_args.len_usr_to_drv = local_args_compat.len_usr_to_drv; ++ local_args.buf_drv_to_usr = ++ (char *)compat_ptr(local_args_compat.buf_drv_to_usr); ++ local_args.buf_usr_to_drv = ++ (char *)compat_ptr(local_args_compat.buf_usr_to_drv); ++ ++ if (cmd == PAX_IOCTL_COMPAT_INFO) { ++ cmd = PAX_IOCTL_INFO; ++ } ++ local_args.command = cmd; ++ ++ status = pax_Service_IOCTL(filp, cmd, local_args); ++ ++ return status; ++} ++#endif ++ ++// ************************************************************************** ++// ++// PAX device file operation definitions (required by kernel) ++// ++// ************************************************************************** ++ ++/* ++ * Structure that declares the usual file access functions ++ * First one is for pax, the control functions ++ */ ++static struct file_operations pax_Fops = { ++ .owner = THIS_MODULE, ++ IOCTL_OP = pax_Device_Control, ++#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) ++ .compat_ioctl = pax_Device_Control_Compat, ++#endif ++ .read = NULL, ++ .write = NULL, ++ .open = pax_Open, ++ .release = NULL, ++ .llseek = NULL, ++}; ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn int pax_Setup_Cdev() ++ * ++ * @param dev - pointer to the device object ++ * @param devnum - major/minor device number ++ * @param fops - point to file operations struct ++ * ++ * @return int ++ * ++ * @brief Set up functions to be handled by PAX device ++ * ++ * Special Notes ++ */ ++static int pax_Setup_Cdev(PAX_DEV dev, struct file_operations *fops, ++ dev_t devnum) ++{ ++ cdev_init(&PAX_DEV_cdev(dev), fops); ++ PAX_DEV_cdev(dev).owner = THIS_MODULE; ++ PAX_DEV_cdev(dev).ops = fops; ++ ++ return cdev_add(&PAX_DEV_cdev(dev), devnum, 1); ++} ++ ++static int pax_version_proc_read(struct seq_file *file, void *v) ++{ ++ seq_printf(file, "%u", PAX_VERSION_NODE_version(&pax_version)); ++ ++ return 0; ++} ++ ++static int pax_version_proc_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, pax_version_proc_read, NULL); ++} ++ ++// ************************************************************************** ++// ++// Exported PAX functions (see pax.h) ; will appear under /proc/kallsyms ++// ++// ************************************************************************** ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn int pax_Load() ++ * ++ * @param none ++ * ++ * @return int ++ * ++ * @brief Load the PAX subsystem ++ * ++ * Special Notes ++ */ ++int pax_Load(void) ++{ ++ int result; ++ struct device *pax_device; ++ ++ pax_control = NULL; ++ ++ PAX_PRINT_DEBUG("checking for %s interface...\n", PAX_NAME); ++ ++ /* If PAX interface does not exist, create it */ ++ pax_devnum = MKDEV(0, 0); ++ PAX_PRINT_DEBUG("got major device %d\n", pax_devnum); ++ /* allocate character device */ ++ result = alloc_chrdev_region(&pax_devnum, 0, 1, PAX_NAME); ++ if (result < 0) { ++ PAX_PRINT_ERROR("unable to alloc chrdev_region for %s!\n", ++ PAX_NAME); ++ return result; ++ } ++ ++ pax_class = class_create(THIS_MODULE, "pax"); ++ if (IS_ERR(pax_class)) { ++ PAX_PRINT_ERROR("Error registering pax class\n"); ++ } ++ pax_device = device_create(pax_class, NULL, pax_devnum, NULL, "pax"); ++ if (pax_device == NULL) { ++ return OS_INVALID; ++ } ++ ++ PAX_PRINT_DEBUG("%s major number is %d\n", PAX_NAME, MAJOR(pax_devnum)); ++ /* Allocate memory for the PAX control device */ ++ pax_control = (PVOID)kzalloc(sizeof(PAX_DEV_NODE), GFP_KERNEL); ++ if (!pax_control) { ++ PAX_PRINT_ERROR("Unable to allocate memory for %s device\n", ++ PAX_NAME); ++ return OS_NO_MEM; ++ } ++ // /* Initialize memory for the PAX control device */ ++ // memset(pax_control, '\0', sizeof(PAX_DEV_NODE)); ++ /* Register PAX file operations with the OS */ ++ result = pax_Setup_Cdev(pax_control, &pax_Fops, pax_devnum); ++ if (result) { ++ PAX_PRINT_ERROR("Unable to add %s as char device (error=%d)\n", ++ PAX_NAME, result); ++ return result; ++ } ++ ++ pax_Init(); ++ ++ pax_version_file = ++ proc_create("pax_version", 0, NULL, &pax_version_ops); ++ if (pax_version_file == NULL) { ++ SEP_PRINT_ERROR("Unalbe to create the pax_version proc file\n"); ++ } ++ ++ // ++ // Display driver version information ++ // ++ PAX_PRINT("PMU arbitration service v%d.%d.%d has been started.\n", ++ PAX_VERSION_NODE_major(&pax_version), ++ 
PAX_VERSION_NODE_minor(&pax_version), ++ PAX_VERSION_NODE_bugfix(&pax_version)); ++ ++ return result; ++} ++ ++EXPORT_SYMBOL(pax_Load); ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn int pax_Unload() ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * @brief Unload the PAX subsystem ++ * ++ * Special Notes ++ */ ++void pax_Unload(void) ++{ ++ // warn if unable to unreserve ++ if (pax_Unreserve() != OS_SUCCESS) { ++ PAX_PRINT_WARNING( ++ "Unloading driver with existing reservation ...."); ++ PAX_PRINT_WARNING(" guid = %lu\n", ++ (long unsigned int)pax_status.guid); ++ PAX_PRINT_WARNING(" pid = %ld\n", ++ (long int)pax_status.pid); ++ PAX_PRINT_WARNING(" start_time = %lu\n", ++ (long unsigned int)pax_status.start_time); ++ PAX_PRINT_WARNING(" is_reserved = %u\n", ++ pax_status.is_reserved); ++ } ++ ++ // unregister PAX device ++ unregister_chrdev(MAJOR(pax_devnum), "pax"); ++ device_destroy(pax_class, pax_devnum); ++ class_destroy(pax_class); ++ ++ cdev_del(&PAX_DEV_cdev(pax_control)); ++ unregister_chrdev_region(pax_devnum, 1); ++ if (pax_control != NULL) { ++ kfree(pax_control); ++ } ++ ++ remove_proc_entry("pax_version", NULL); ++ ++ // ++ // Display driver version information ++ // ++ PAX_PRINT("PMU arbitration service v%d.%d.%d has been stopped.\n", ++ PAX_VERSION_NODE_major(&pax_version), ++ PAX_VERSION_NODE_minor(&pax_version), ++ PAX_VERSION_NODE_bugfix(&pax_version)); ++ ++ // clean up resources used by PAX ++ pax_Cleanup(); ++ ++} ++ ++EXPORT_SYMBOL(pax_Unload); ++ ++/* Declaration of the init and exit functions */ ++module_init(pax_Load); ++module_exit(pax_Unload); +diff --git a/drivers/platform/x86/sepdk/pax/pax.h b/drivers/platform/x86/sepdk/pax/pax.h +new file mode 100755 +index 000000000000..b7d48f874958 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/pax/pax.h +@@ -0,0 +1,33 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel 
Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#ifndef _PAX_H_ ++#define _PAX_H_ ++ ++int pax_Load(void); ++ ++void pax_Unload(void); ++ ++#endif +diff --git a/drivers/platform/x86/sepdk/sep/Makefile b/drivers/platform/x86/sepdk/sep/Makefile +new file mode 100755 +index 000000000000..405e55d53c97 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/Makefile +@@ -0,0 +1,67 @@ ++ccflags-y := -I$(src)/../include -I$(src)/../inc -I$(src)/.. ++ccflags-y += -DSEP_CONFIG_MODULE_LAYOUT ++# TODO: verify kaiser.h ++#ccflags-y += -DKAISER_HEADER_PRESENT ++ccflags-y += -DDRV_CPU_HOTPLUG -DDRV_USE_TASKLET_WORKAROUND ++ ++asflags-y := -I$(src)/.. 
++ ++ifdef CONFIG_SEP_PER_USER_MODE ++ ccflags-y += -DSECURE_SEP ++endif ++ ++ifdef CONFIG_SEP_MINLOG_MODE ++ ccflags-y += -DDRV_MINIMAL_LOGGING ++endif ++ ++ifdef CONFIG_SEP_MAXLOG_MODE ++ ccflags-y += -DDRV_MAXIMAL_LOGGING ++endif ++ ++ifdef CONFIG_SEP_PRIVATE_BUILD ++ ccflags-y += -DENABLE_CPUS -DBUILD_CHIPSET -DBUILD_GFX ++endif ++ ++ifdef CONFIG_SEP_ACRN ++ ccflags-y += -DDRV_SEP_ACRN_ON ++endif ++ ++obj-$(CONFIG_SEP) += sep5.o ++ ++sep5-y := lwpmudrv.o \ ++ control.o \ ++ cpumon.o \ ++ eventmux.o \ ++ linuxos.o \ ++ output.o \ ++ pmi.o \ ++ sys_info.o \ ++ utility.o \ ++ valleyview_sochap.o \ ++ unc_power.o \ ++ core2.o \ ++ perfver4.o \ ++ silvermont.o \ ++ pci.o \ ++ apic.o \ ++ pebs.o \ ++ unc_gt.o \ ++ unc_mmio.o \ ++ unc_msr.o \ ++ unc_common.o \ ++ unc_pci.o \ ++ sepdrv_p_state.o ++ ++ ++ifdef CONFIG_X86_64 ++ sep5-y += sys64.o ++endif ++ ++ifdef CONFIG_X86_32 ++ sep5-y += sys32.o ++endif ++ ++sep5-$(CONFIG_SEP_PRIVATE_BUILD) += chap.o \ ++ gmch.o \ ++ gfx.o \ ++ unc_sa.o +diff --git a/drivers/platform/x86/sepdk/sep/apic.c b/drivers/platform/x86/sepdk/sep/apic.c +new file mode 100755 +index 000000000000..693c526d63de +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/apic.c +@@ -0,0 +1,228 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++#include ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) ++#include ++#endif ++#if defined(CONFIG_XEN_DOM0) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0) ++#include ++#include ++#endif ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "apic.h" ++#include "lwpmudrv.h" ++#include "control.h" ++#include "utility.h" ++ ++ ++#if defined(DRV_SEP_ACRN_ON) ++extern struct profiling_vm_info_list *vm_info_list; ++#else ++static DEFINE_PER_CPU(unsigned long, saved_apic_lvtpc); ++#endif ++ ++/*! 
++ * @fn VOID apic_Get_APIC_ID(S32 cpu) ++ * ++ * @brief Obtain APIC ID ++ * ++ * @param S32 cpuid - cpu index ++ * ++ * @return U32 APIC ID ++ */ ++static VOID apic_Get_APIC_ID(S32 cpu) ++{ ++ U32 apic_id = 0; ++ CPU_STATE pcpu; ++#if defined(DRV_SEP_ACRN_ON) ++ U32 i; ++#endif ++ ++ SEP_DRV_LOG_TRACE_IN("CPU: %d.", cpu); ++ pcpu = &pcb[cpu]; ++ ++#if defined(CONFIG_XEN_DOM0) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0) ++ if (xen_initial_domain()) { ++ S32 ret = 0; ++ struct xen_platform_op op = { ++ .cmd = XENPF_get_cpuinfo, ++ .interface_version = XENPF_INTERFACE_VERSION, ++ .u.pcpu_info.xen_cpuid = cpu, ++ }; ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) ++ ret = HYPERVISOR_platform_op(&op); ++#else ++ ret = HYPERVISOR_dom0_op(&op); ++#endif ++ if (ret) { ++ SEP_DRV_LOG_ERROR( ++ "apic_Get_APIC_ID:Error in reading APIC ID on Xen PV"); ++ apic_id = 0; ++ } else { ++ apic_id = op.u.pcpu_info.apic_id; ++ } ++ } else { ++#endif ++#ifdef CONFIG_X86_LOCAL_APIC ++ apic_id = read_apic_id(); ++#endif ++#if defined(CONFIG_XEN_DOM0) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0) ++ } ++#endif ++ ++#if defined(DRV_SEP_ACRN_ON) ++ CPU_STATE_apic_id(pcpu) = 0; ++ if (vm_info_list == NULL) { ++ SEP_PRINT_ERROR( ++ "apic_Get_APIC_ID: Error in reading APIC ID on ACRN\n"); ++ } else { ++ for (i = 0; i < vm_info_list->num_vms; i++) { ++ if (vm_info_list->vm_list[i].vm_id == 0xFFFFFFFF) { ++ CPU_STATE_apic_id(pcpu) = ++ vm_info_list->vm_list[i] ++ .cpu_map[cpu] ++ .apic_id; ++ break; ++ } ++ } ++ } ++#else ++ CPU_STATE_apic_id(pcpu) = apic_id; ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT("Apic_id[%d] is %d.", cpu, ++ CPU_STATE_apic_id(pcpu)); ++} ++ ++/*! 
++ * @fn extern VOID APIC_Init(param) ++ * ++ * @brief initialize the local APIC ++ * ++ * @param int cpu_idx - The cpu to deinit ++ * ++ * @return None ++ * ++ * Special Notes: ++ * This routine is expected to be called via the CONTROL_Parallel routine ++ */ ++VOID APIC_Init(PVOID param) ++{ ++ S32 me; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ if (param == NULL) { ++ preempt_disable(); ++ me = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ } else { ++ me = *(S32 *)param; ++ } ++ ++ apic_Get_APIC_ID(me); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn extern VOID APIC_Install_Interrupt_Handler(param) ++ * ++ * @brief Install the interrupt handler ++ * ++ * @param int param - The linear address of the Local APIC ++ * ++ * @return None ++ * ++ * Special Notes: ++ * The linear address is necessary if the LAPIC is used. If X2APIC is ++ * used the linear address is not necessary. ++ */ ++VOID APIC_Install_Interrupt_Handler(PVOID param) ++{ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ per_cpu(saved_apic_lvtpc, CONTROL_THIS_CPU()) = apic_read(APIC_LVTPC); ++ apic_write(APIC_LVTPC, APIC_DM_NMI); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn extern VOID APIC_Enable_PMI(void) ++ * ++ * @brief Enable the PMU interrupt ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ * ++ */ ++VOID APIC_Enable_Pmi(VOID) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ apic_write(APIC_LVTPC, APIC_DM_NMI); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn extern VOID APIC_Restore_LVTPC(void) ++ * ++ * @brief Restore APIC LVTPC value ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ * ++ */ ++VOID APIC_Restore_LVTPC(PVOID param) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ apic_write(APIC_LVTPC, per_cpu(saved_apic_lvtpc, CONTROL_THIS_CPU())); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} +diff --git a/drivers/platform/x86/sepdk/sep/chap.c b/drivers/platform/x86/sepdk/sep/chap.c +new file mode 100755 +index 000000000000..434e9aeb658e +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/chap.c +@@ -0,0 +1,474 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include ++#include ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "rise_errors.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++#include "lwpmudrv_chipset.h" ++#include "inc/lwpmudrv.h" ++#include "inc/control.h" ++#include "inc/ecb_iterators.h" ++#include "inc/utility.h" ++#include "inc/chap.h" ++ ++extern DRV_CONFIG drv_cfg; ++extern CHIPSET_CONFIG pma; ++extern CPU_STATE pcb; ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static U32 chap_Init_Chipset(void) ++ * ++ * @brief Chipset PMU initialization ++ * ++ * @param None ++ * ++ * @return VT_SUCCESS if successful, otherwise error ++ * ++ * Special Notes: ++ * ++ */ ++static U32 chap_Init_Chipset(void) ++{ ++ U32 i; ++ CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma); ++ CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma); ++ CHIPSET_SEGMENT noa_chipset_seg = &CHIPSET_CONFIG_noa(pma); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ SEP_DRV_LOG_TRACE("Initializing chipset ..."); ++ ++ if (DRV_CONFIG_enable_chipset(drv_cfg)) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ pcb[i].chipset_count_init = TRUE; ++ } ++ if ((CHIPSET_CONFIG_mch_chipset(pma)) && ++ (CHIPSET_SEGMENT_virtual_address(mch_chipset_seg) == 0)) { ++ // Map virtual address of PCI CHAP interface ++ CHIPSET_SEGMENT_virtual_address( ++ mch_chipset_seg) = ++ (U64)(UIOP)ioremap_nocache( ++ CHIPSET_SEGMENT_physical_address( ++ mch_chipset_seg), ++ CHIPSET_SEGMENT_size( ++ mch_chipset_seg)); ++ } ++ ++ if ((CHIPSET_CONFIG_ich_chipset(pma)) && ++ (CHIPSET_SEGMENT_virtual_address(ich_chipset_seg) == 0)) { ++ // Map the virtual address of PCI CHAP interface ++ CHIPSET_SEGMENT_virtual_address( ++ ich_chipset_seg) = ++ (U64)(UIOP)ioremap_nocache( ++ CHIPSET_SEGMENT_physical_address( ++ ich_chipset_seg), ++ CHIPSET_SEGMENT_size( ++ 
ich_chipset_seg)); ++ } ++ ++ // Here we map the MMIO registers for the Gen X processors. ++ if ((CHIPSET_CONFIG_noa_chipset(pma)) && ++ (CHIPSET_SEGMENT_virtual_address(noa_chipset_seg) == 0)) { ++ // Map the virtual address of PCI CHAP interface ++ CHIPSET_SEGMENT_virtual_address( ++ noa_chipset_seg) = ++ (U64)(UIOP)ioremap_nocache( ++ CHIPSET_SEGMENT_physical_address( ++ noa_chipset_seg), ++ CHIPSET_SEGMENT_size( ++ noa_chipset_seg)); ++ } ++ ++ // ++ // always collect processor events ++ // ++ CHIPSET_CONFIG_processor(pma) = 1; ++ } else { ++ CHIPSET_CONFIG_processor(pma) = 0; ++ } ++ SEP_DRV_LOG_TRACE("Initializing chipset done."); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++ return VT_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static U32 chap_Start_Chipset(void) ++ * @param None ++ * @return VT_SUCCESS if successful, otherwise error ++ * @brief Start collection on the Chipset PMU ++ * ++ * Special Notes: ++ * ++ */ ++static VOID chap_Start_Chipset(void) ++{ ++ U32 i; ++ CHAP_INTERFACE chap; ++ CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma); ++ CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // ++ // reset and start chipset counters ++ // ++ SEP_DRV_LOG_TRACE("Starting chipset counters...\n"); ++ if (pma) { ++ chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address( ++ mch_chipset_seg); ++ if (chap != NULL) { ++ for (i = 0; ++ i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); ++ i++) { ++ CHAP_INTERFACE_command_register(&chap[i]) = ++ 0x00040000; // Reset to zero ++ CHAP_INTERFACE_command_register(&chap[i]) = ++ 0x00010000; // Restart ++ } ++ } ++ ++ chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address( ++ ich_chipset_seg); ++ if (chap != NULL) { ++ for (i = 0; ++ i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); ++ i++) { ++ CHAP_INTERFACE_command_register(&chap[i]) = ++ 0x00040000; // Reset to zero ++ 
CHAP_INTERFACE_command_register(&chap[i]) = ++ 0x00010000; // Restart ++ } ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE("Starting chipset counters done.\n"); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static U32 chap_Read_Counters(PVOID param) ++ * ++ * @brief Read the CHAP counter data ++ * ++ * @param PVOID param - address of the buffer to write into ++ * ++ * @return None ++ * ++ * Special Notes: ++ * ++ */ ++static VOID chap_Read_Counters(PVOID param) ++{ ++ U64 *data; ++ CHAP_INTERFACE chap; ++ U32 mch_cpu; ++ int i, data_index; ++ U64 tmp_data; ++ U64 *mch_data; ++ U64 *ich_data; ++ U64 *mmio_data; ++ U64 *mmio; ++ U32 this_cpu; ++ CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma); ++ CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma); ++ CHIPSET_SEGMENT noa_chipset_seg = &CHIPSET_CONFIG_noa(pma); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ data = param; ++ data_index = 0; ++ ++ // Save the Motherboard time. This is universal time for this ++ // system. This is the only 64-bit timer so we save it first so ++ // always aligned on 64-bit boundary that way. ++ ++ if (CHIPSET_CONFIG_mch_chipset(pma)) { ++ mch_data = data + data_index; ++ // Save the MCH counters. ++ chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address( ++ mch_chipset_seg); ++ for (i = CHIPSET_SEGMENT_start_register(mch_chipset_seg); ++ i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); i++) { ++ CHAP_INTERFACE_command_register(&chap[i]) = ++ 0x00020000; // Sample ++ } ++ ++ // The StartingReadRegister is only used for special event ++ // configs that use CHAP counters to trigger events in other ++ // CHAP counters. This is an unusual request but useful in ++ // getting the number of lit subspans - implying a count of the ++ // number of triangles. I am not sure it will be used ++ // elsewhere. 
We cannot read some of the counters because it ++ // will invalidate their configuration to trigger other CHAP ++ // counters. Yuk! ++ data_index += CHIPSET_SEGMENT_start_register(mch_chipset_seg); ++ for (i = CHIPSET_SEGMENT_start_register(mch_chipset_seg); ++ i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); i++) { ++ data[data_index++] = ++ CHAP_INTERFACE_data_register(&chap[i]); ++ } ++ ++ // Initialize the counters on the first interrupt ++ if (pcb[this_cpu].chipset_count_init == TRUE) { ++ for (i = 0; ++ i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); ++ i++) { ++ pcb[this_cpu].last_mch_count[i] = mch_data[i]; ++ } ++ } ++ ++ // Now compute the delta! ++ // NOTE: Special modification to accomodate Gen 4 work - count ++ // everything since last interrupt - regardless of cpu! This ++ // way there is only one count of the Gen 4 counters. ++ // ++ mch_cpu = CHIPSET_CONFIG_host_proc_run(pma) ? this_cpu : 0; ++ for (i = 0; i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); ++ i++) { ++ tmp_data = mch_data[i]; ++ if (mch_data[i] < pcb[mch_cpu].last_mch_count[i]) { ++ mch_data[i] = mch_data[i] + (U32)(-1) - ++ pcb[mch_cpu].last_mch_count[i]; ++ } else { ++ mch_data[i] = mch_data[i] - ++ pcb[mch_cpu].last_mch_count[i]; ++ } ++ pcb[mch_cpu].last_mch_count[i] = tmp_data; ++ } ++ } ++ ++ if (CHIPSET_CONFIG_ich_chipset(pma)) { ++ // Save the ICH counters. 
++ ich_data = data + data_index; ++ chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address( ++ ich_chipset_seg); ++ for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); ++ i++) { ++ CHAP_INTERFACE_command_register(&chap[i]) = ++ 0x00020000; // Sample ++ } ++ ++ for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); ++ i++) { ++ data[data_index++] = ++ CHAP_INTERFACE_data_register(&chap[i]); ++ } ++ ++ // Initialize the counters on the first interrupt ++ if (pcb[this_cpu].chipset_count_init == TRUE) { ++ for (i = 0; ++ i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); ++ i++) { ++ ++ pcb[this_cpu].last_ich_count[i] = ich_data[i]; ++ } ++ } ++ ++ // Now compute the delta! ++ for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); ++ i++) { ++ tmp_data = ich_data[i]; ++ if (ich_data[i] < pcb[this_cpu].last_ich_count[i]) { ++ ich_data[i] = ich_data[i] + (U32)(-1) - ++ pcb[this_cpu].last_ich_count[i]; ++ } else { ++ ich_data[i] = ich_data[i] - ++ pcb[this_cpu].last_ich_count[i]; ++ } ++ pcb[this_cpu].last_ich_count[i] = tmp_data; ++ } ++ } ++ ++ if (CHIPSET_CONFIG_noa_chipset(pma)) { ++ // Save the MMIO counters. ++ mmio_data = data + data_index; ++ mmio = (U64 *)(UIOP)CHIPSET_SEGMENT_virtual_address( ++ noa_chipset_seg); ++ ++ for (i = 0; i < CHIPSET_SEGMENT_total_events(noa_chipset_seg); ++ i++) { ++ data[data_index++] = ++ mmio[i * 2 + 2244]; // 64-bit quantity ++ } ++ ++ // Initialize the counters on the first interrupt ++ if (pcb[this_cpu].chipset_count_init == TRUE) { ++ for (i = 0; ++ i < CHIPSET_SEGMENT_total_events(noa_chipset_seg); ++ i++) { ++ pcb[this_cpu].last_mmio_count[i] = mmio_data[i]; ++ } ++ } ++ ++ // Now compute the delta! 
++ for (i = 0; i < CHIPSET_SEGMENT_total_events(noa_chipset_seg); ++ i++) { ++ tmp_data = mmio_data[i]; ++ if (mmio_data[i] < pcb[this_cpu].last_mmio_count[i]) { ++ mmio_data[i] = mmio_data[i] + (U32)(-1) - ++ pcb[this_cpu].last_mmio_count[i]; ++ } else { ++ mmio_data[i] = mmio_data[i] - ++ pcb[this_cpu].last_mmio_count[i]; ++ } ++ pcb[this_cpu].last_mmio_count[i] = tmp_data; ++ } ++ } ++ ++ pcb[this_cpu].chipset_count_init = FALSE; ++ ++ FOR_EACH_DATA_REG(pecb, i) ++ { ++ data[data_index++] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), (U64)0); ++ } ++ END_FOR_EACH_DATA_REG; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static VOID chap_Stop_Chipset(void) ++ * ++ * @brief Stop the Chipset PMU ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ * ++ */ ++static VOID chap_Stop_Chipset(void) ++{ ++ U32 i; ++ CHAP_INTERFACE chap; ++ CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma); ++ CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // ++ // reset and start chipset counters ++ // ++ SEP_DRV_LOG_TRACE("Stopping chipset counters..."); ++ ++ if (pma == NULL) { ++ return; ++ } ++ ++ if (CHIPSET_CONFIG_mch_chipset(pma)) { ++ chap = (CHAP_INTERFACE)(UIOP) ++ CHIPSET_SEGMENT_virtual_address(mch_chipset_seg); ++ if (chap != NULL) { ++ for (i = 0; ++ i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); ++ i++) { ++ CHAP_INTERFACE_command_register(&chap[i]) ++ = 0x00000000; // Stop ++ CHAP_INTERFACE_command_register(&chap[i]) ++ = 0x00040000; // Reset to Zero ++ } ++ } ++ } ++ ++ if (CHIPSET_CONFIG_ich_chipset(pma)) { ++ chap = (CHAP_INTERFACE)(UIOP) ++ CHIPSET_SEGMENT_virtual_address( ++ ich_chipset_seg); ++ if (chap != NULL) { ++ for (i = 0; ++ i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); ++ i++) { ++ CHAP_INTERFACE_command_register(&chap[i]) ++ = 0x00000000; 
// Stop ++ CHAP_INTERFACE_command_register(&chap[i]) ++ = 0x00040000; // Reset to Zero ++ } ++ } ++ } ++ ++ if (CHIPSET_CONFIG_mch_chipset(pma) && ++ CHIPSET_SEGMENT_virtual_address(mch_chipset_seg)) { ++ ++ iounmap((void __iomem *)(UIOP)CHIPSET_SEGMENT_virtual_address( ++ mch_chipset_seg)); ++ CHIPSET_SEGMENT_virtual_address(mch_chipset_seg) = 0; ++ } ++ ++ if (CHIPSET_CONFIG_ich_chipset(pma) && ++ CHIPSET_SEGMENT_virtual_address(ich_chipset_seg)) { ++ ++ iounmap((void __iomem *)(UIOP)CHIPSET_SEGMENT_virtual_address( ++ ich_chipset_seg)); ++ CHIPSET_SEGMENT_virtual_address(ich_chipset_seg) = 0; ++ } ++ CONTROL_Free_Memory(pma); ++ pma = NULL; ++ ++ SEP_DRV_LOG_TRACE("Stopped chipset counters."); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static VOID chap_Fini_Chipset(void) ++ * ++ * @brief Finish routine on a per-logical-core basis ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ * ++ */ ++static VOID chap_Fini_Chipset(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ SEP_DRV_LOG_TRACE_OUT("Empty function."); ++} ++ ++CS_DISPATCH_NODE chap_dispatch = { ++ .init_chipset = chap_Init_Chipset, ++ .start_chipset = chap_Start_Chipset, ++ .read_counters = chap_Read_Counters, ++ .stop_chipset = chap_Stop_Chipset, ++ .fini_chipset = chap_Fini_Chipset, ++ .Trigger_Read = NULL ++}; +diff --git a/drivers/platform/x86/sepdk/sep/control.c b/drivers/platform/x86/sepdk/sep/control.c +new file mode 100755 +index 000000000000..474de2c3e578 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/control.c +@@ -0,0 +1,896 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "rise_errors.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv.h" ++#include "control.h" ++#include "utility.h" ++#include ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) ++#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \ ++ smp_call_function((func), (ctx), (wait)) ++#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \ ++ smp_call_function_single((cpuid), (func), (ctx), (wait)) ++#define ON_EACH_CPU(func, ctx, retry, wait) on_each_cpu((func), (ctx), (wait)) ++#else ++#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \ ++ smp_call_function((func), (ctx), (retry), (wait)) ++#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \ ++ smp_call_function_single((cpuid), (func), (ctx), (retry), (wait)) ++#define ON_EACH_CPU(func, ctx, retry, wait) \ ++ on_each_cpu((func), (ctx), (retry), (wait)) ++#endif ++ ++#if defined(DRV_SEP_ACRN_ON) ++void (*local_vfree_atomic)(const void *addr) = NULL; ++#endif ++ ++/* ++ */ ++GLOBAL_STATE_NODE driver_state; ++MSR_DATA msr_data; ++static MEM_TRACKER mem_tr_head; // start of the mem tracker list ++static MEM_TRACKER mem_tr_tail; // end of mem tracker list ++static spinlock_t mem_tr_lock; // spinlock for mem tracker list ++static unsigned long flags; ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID CONTROL_Invoke_Cpu (func, ctx, arg) ++ * ++ * @brief Set up a DPC call and insert it into the queue ++ * ++ * @param IN cpu_idx - the core id to dispatch this function to ++ * IN func - function to be invoked by the specified core(s) ++ * IN ctx - pointer to the parameter block for each function ++ * invocation ++ * ++ * @return None ++ * ++ * Special Notes: ++ * ++ */ ++VOID CONTROL_Invoke_Cpu(int cpu_idx, VOID (*func)(PVOID), PVOID ctx) ++{ ++ SEP_DRV_LOG_TRACE_IN("CPU: %d, function: %p, ctx: %p.", cpu_idx, func, ++ ctx); ++ SMP_CALL_FUNCTION_SINGLE(cpu_idx, func, ctx, 0, 1); ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * @param blocking - Wait for invoked function to complete ++ * @param exclude - exclude the current core from executing the code ++ * ++ * @returns None ++ * ++ * @brief Service routine to handle all kinds of parallel invoke on all CPU calls ++ * ++ * Special Notes: ++ * Invoke the function provided in parallel in either a blocking or ++ * non-blocking mode. The current core may be excluded if desired. ++ * NOTE - Do not call this function directly from source code. ++ * Use the aliases CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), ++ * or CONTROL_Invoke_Parallel_XS(). 
++ * ++ */ ++VOID CONTROL_Invoke_Parallel_Service(VOID (*func)(PVOID), PVOID ctx, ++ int blocking, int exclude) ++{ ++ SEP_DRV_LOG_TRACE_IN("Fn: %p, ctx: %p, block: %d, excl: %d.", ++ func, ctx, blocking, exclude); ++ ++ GLOBAL_STATE_cpu_count(driver_state) = 0; ++ GLOBAL_STATE_dpc_count(driver_state) = 0; ++ ++ if (GLOBAL_STATE_num_cpus(driver_state) == 1) { ++ if (!exclude) { ++ func(ctx); ++ } ++ SEP_DRV_LOG_TRACE_OUT(""); ++ return; ++ } ++ if (!exclude) { ++ ON_EACH_CPU(func, ctx, 0, blocking); ++ SEP_DRV_LOG_TRACE_OUT(""); ++ return; ++ } ++ ++ preempt_disable(); ++ SMP_CALL_FUNCTION(func, ctx, 0, blocking); ++ preempt_enable(); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID control_Memory_Tracker_Delete_Node(mem_tr) ++ * ++ * @param IN mem_tr - memory tracker node to delete ++ * ++ * @returns None ++ * ++ * @brief Delete specified node in the memory tracker ++ * ++ * Special Notes: ++ * Assumes mem_tr_lock is already held while calling this function! 
++ */ ++static VOID control_Memory_Tracker_Delete_Node(MEM_TRACKER mem_tr) ++{ ++ MEM_TRACKER prev_tr = NULL; ++ MEM_TRACKER next_tr = NULL; ++ U32 size = 0; ++ ++ SEP_DRV_LOG_ALLOC_IN(""); ++ ++ if (!mem_tr) { ++ SEP_DRV_LOG_ALLOC_OUT("mem_tr is NULL!"); ++ return; ++ } ++ size = MEM_TRACKER_max_size(mem_tr) * sizeof(MEM_EL_NODE); ++ // update the linked list ++ prev_tr = MEM_TRACKER_prev(mem_tr); ++ next_tr = MEM_TRACKER_next(mem_tr); ++ if (prev_tr) { ++ MEM_TRACKER_next(prev_tr) = next_tr; ++ } ++ if (next_tr) { ++ MEM_TRACKER_prev(next_tr) = prev_tr; ++ } ++ ++ // free the allocated mem_el array (if any) ++ if (MEM_TRACKER_mem(mem_tr)) { ++ if (MEM_TRACKER_array_vmalloc(mem_tr)) { ++ vfree(MEM_TRACKER_mem(mem_tr)); ++ } else { ++ if (size < MAX_KMALLOC_SIZE) { ++ kfree(MEM_TRACKER_mem(mem_tr)); ++ } else { ++ free_pages( ++ (unsigned long)MEM_TRACKER_mem(mem_tr), ++ get_order(size)); ++ } ++ } ++ } ++ ++ // free the mem_tracker node ++ if (MEM_TRACKER_node_vmalloc(mem_tr)) { ++ vfree(mem_tr); ++ } else { ++ kfree(mem_tr); ++ } ++ SEP_DRV_LOG_ALLOC_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID control_Memory_Tracker_Create_Node(void) ++ * ++ * @param None - size of the memory to allocate ++ * ++ * @returns OS_SUCCESS if successful, otherwise error ++ * ++ * @brief Initialize the memory tracker ++ * ++ * Special Notes: ++ * Assumes mem_tr_lock is already held while calling this function! ++ * ++ * Since this function can be called within either GFP_KERNEL or ++ * GFP_ATOMIC contexts, the most restrictive allocation is used ++ * (viz., GFP_ATOMIC). 
++ */ ++static U32 control_Memory_Tracker_Create_Node(void) ++{ ++ U32 size = MEM_EL_MAX_ARRAY_SIZE * sizeof(MEM_EL_NODE); ++ PVOID location = NULL; ++ MEM_TRACKER mem_tr = NULL; ++ ++ SEP_DRV_LOG_ALLOC_IN(""); ++ ++ // create a mem tracker node ++ mem_tr = (MEM_TRACKER)kmalloc(sizeof(MEM_TRACKER_NODE), GFP_ATOMIC); ++ if (!mem_tr) { ++ mem_tr = (MEM_TRACKER)vmalloc(sizeof(MEM_TRACKER_NODE)); ++ if (mem_tr) { ++ MEM_TRACKER_node_vmalloc(mem_tr) = 1; ++ } else { ++ SEP_DRV_LOG_ERROR_ALLOC_OUT( ++ "Failed to allocate mem tracker node."); ++ return OS_FAULT; ++ } ++ } else { ++ MEM_TRACKER_node_vmalloc(mem_tr) = 0; ++ } ++ SEP_DRV_LOG_TRACE("Node %p, vmalloc %d.", mem_tr, ++ MEM_TRACKER_node_vmalloc(mem_tr)); ++ ++ // create an initial array of mem_el's inside the mem tracker node ++ MEM_TRACKER_array_vmalloc(mem_tr) = 0; ++ if (size < MAX_KMALLOC_SIZE) { ++ location = (PVOID)kmalloc(size, GFP_ATOMIC); ++ SEP_DRV_LOG_ALLOC("Allocated small memory (0x%p, %d).", ++ location, (S32)size); ++ } else { ++ location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size)); ++ SEP_DRV_LOG_ALLOC("Allocated large memory (0x%p, %d).", ++ location, (S32)size); ++ } ++ if (!location) { ++ location = (PVOID)vmalloc(size); ++ if (location) { ++ MEM_TRACKER_array_vmalloc(mem_tr) = 1; ++ SEP_DRV_LOG_ALLOC( ++ "Allocated memory (vmalloc) (0x%p, %d).", ++ location, (S32)size); ++ } else { ++ if (MEM_TRACKER_node_vmalloc(mem_tr)) { ++ vfree(mem_tr); ++ } else { ++ kfree(mem_tr); ++ } ++ SEP_DRV_LOG_ERROR_ALLOC_OUT( ++ "Failed to allocate mem_el array... 
deleting node."); ++ return OS_FAULT; ++ } ++ } ++ ++ // initialize new mem tracker node ++ MEM_TRACKER_mem(mem_tr) = location; ++ MEM_TRACKER_prev(mem_tr) = NULL; ++ MEM_TRACKER_next(mem_tr) = NULL; ++ ++ // initialize mem_tracker's mem_el array ++ MEM_TRACKER_max_size(mem_tr) = MEM_EL_MAX_ARRAY_SIZE; ++ MEM_TRACKER_elements(mem_tr) = 0; ++ memset(MEM_TRACKER_mem(mem_tr), 0, size); ++ ++ // update the linked list ++ if (!mem_tr_head) { ++ mem_tr_head = mem_tr; ++ } else { ++ MEM_TRACKER_prev(mem_tr) = mem_tr_tail; ++ MEM_TRACKER_next(mem_tr_tail) = mem_tr; ++ } ++ mem_tr_tail = mem_tr; ++ ++ SEP_DRV_LOG_ALLOC_OUT("Allocated node=0x%p, max_elements=%d, size=%d.", ++ MEM_TRACKER_mem(mem_tr_tail), ++ MEM_EL_MAX_ARRAY_SIZE, size); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID control_Memory_Tracker_Add(location, size, vmalloc_flag) ++ * ++ * @param IN location - memory location ++ * @param IN size - size of the memory to allocate ++ * @param IN vmalloc_flag - flag that indicates if the allocation was done with vmalloc ++ * ++ * @returns None ++ * ++ * @brief Keep track of allocated memory with memory tracker ++ * ++ * Special Notes: ++ * Starting from first mem_tracker node, the algorithm ++ * finds the first "hole" in the mem_tracker list and ++ * tracks the memory allocation there. 
++ */ ++static U32 control_Memory_Tracker_Add(PVOID location, ssize_t size, ++ DRV_BOOL vmalloc_flag) ++{ ++ S32 i, n; ++ U32 status; ++ DRV_BOOL found; ++ MEM_TRACKER mem_tr; ++ ++ SEP_DRV_LOG_ALLOC_IN("Location: %p, size: %u, flag: %u.", location, ++ (U32)size, vmalloc_flag); ++ ++ spin_lock_irqsave(&mem_tr_lock, flags); ++ ++ // check if there is space in ANY of mem_tracker's nodes for the memory item ++ mem_tr = mem_tr_head; ++ found = FALSE; ++ status = OS_SUCCESS; ++ i = n = 0; ++ while (mem_tr && (!found)) { ++ if (MEM_TRACKER_elements(mem_tr) < ++ MEM_TRACKER_max_size(mem_tr)) { ++ for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) { ++ if (!MEM_TRACKER_mem_address(mem_tr, i)) { ++ SEP_DRV_LOG_ALLOC( ++ "Found index %d of %d available.", ++ i, ++ MEM_TRACKER_max_size(mem_tr) - ++ 1); ++ n = i; ++ found = TRUE; ++ break; ++ } ++ } ++ } ++ if (!found) { ++ mem_tr = MEM_TRACKER_next(mem_tr); ++ } ++ } ++ ++ if (!found) { ++ // extend into (i.e., create new) mem_tracker node ... ++ status = control_Memory_Tracker_Create_Node(); ++ if (status != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR("Unable to create mem tracker node."); ++ goto finish_add; ++ } ++ // use mem tracker tail node and first available entry in mem_el array ++ mem_tr = mem_tr_tail; ++ n = 0; ++ } ++ ++ // we now have a location in mem tracker to keep track of the memory item ++ MEM_TRACKER_mem_address(mem_tr, n) = location; ++ MEM_TRACKER_mem_size(mem_tr, n) = size; ++ MEM_TRACKER_mem_vmalloc(mem_tr, n) = vmalloc_flag; ++ MEM_TRACKER_elements(mem_tr)++; ++ SEP_DRV_LOG_ALLOC("Tracking (0x%p, %d) in node %d of %d.", location, ++ (S32)size, n, MEM_TRACKER_max_size(mem_tr) - 1); ++ ++finish_add: ++ spin_unlock_irqrestore(&mem_tr_lock, flags); ++ ++ SEP_DRV_LOG_ALLOC_OUT("Result: %u.", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID CONTROL_Memory_Tracker_Init(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * 
@brief Initializes Memory Tracker ++ * ++ * Special Notes: ++ * This should only be called when the driver is being loaded. ++ */ ++VOID CONTROL_Memory_Tracker_Init(void) ++{ ++ SEP_DRV_LOG_ALLOC_IN("Initializing mem tracker."); ++ ++ mem_tr_head = NULL; ++ mem_tr_tail = NULL; ++ ++ spin_lock_init(&mem_tr_lock); ++ ++ SEP_DRV_LOG_ALLOC_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID CONTROL_Memory_Tracker_Free(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Frees memory used by Memory Tracker ++ * ++ * Special Notes: ++ * This should only be called when the driver is being unloaded. ++ */ ++VOID CONTROL_Memory_Tracker_Free(void) ++{ ++ S32 i; ++ MEM_TRACKER temp; ++ ++ SEP_DRV_LOG_ALLOC_IN("Destroying mem tracker."); ++ ++ spin_lock_irqsave(&mem_tr_lock, flags); ++ ++ // check for any memory that was not freed, and free it ++ while (mem_tr_head) { ++ if (MEM_TRACKER_elements(mem_tr_head)) { ++ for (i = 0; i < MEM_TRACKER_max_size(mem_tr_head); ++ i++) { ++ if (MEM_TRACKER_mem_address(mem_tr_head, i)) { ++ SEP_DRV_LOG_WARNING( ++ "Index %d of %d, not freed (0x%p, %d) ... 
freeing now.", ++ i, ++ MEM_TRACKER_max_size( ++ mem_tr_head) - ++ 1, ++ MEM_TRACKER_mem_address( ++ mem_tr_head, i), ++ MEM_TRACKER_mem_size( ++ mem_tr_head, i)); ++ ++ if (MEM_TRACKER_mem_vmalloc(mem_tr_head, ++ i)) { ++ vfree(MEM_TRACKER_mem_address( ++ mem_tr_head, i)); ++ } else { ++ free_pages( ++ (unsigned long) ++ MEM_TRACKER_mem_address( ++ mem_tr_head, ++ i), ++ get_order(MEM_TRACKER_mem_size( ++ mem_tr_head, ++ i))); ++ } ++ MEM_TRACKER_mem_address(mem_tr_head, ++ i) = NULL; ++ MEM_TRACKER_mem_size(mem_tr_head, i) = ++ 0; ++ MEM_TRACKER_mem_vmalloc(mem_tr_head, ++ i) = 0; ++ } ++ } ++ } ++ temp = mem_tr_head; ++ mem_tr_head = MEM_TRACKER_next(mem_tr_head); ++ control_Memory_Tracker_Delete_Node(temp); ++ } ++ ++ mem_tr_tail = NULL; ++ ++ spin_unlock_irqrestore(&mem_tr_lock, flags); ++ ++ SEP_DRV_LOG_ALLOC_OUT("Mem tracker destruction complete."); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID CONTROL_Memory_Tracker_Compaction(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Compacts the memory allocator if holes are detected ++ * ++ * Special Notes: ++ * The algorithm compacts mem_tracker nodes such that ++ * node entries are full starting from mem_tr_head ++ * up until the first empty node is detected, after ++ * which nodes up to mem_tr_tail will be empty. ++ * At end of collection (or at other safe sync point), ++ * we reclaim/compact space used by mem tracker. ++ */ ++VOID CONTROL_Memory_Tracker_Compaction(void) ++{ ++ S32 i, j, n, m, c, d; ++ DRV_BOOL found, overlap; ++ MEM_TRACKER mem_tr1, mem_tr2, empty_tr; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ spin_lock_irqsave(&mem_tr_lock, flags); ++ ++ mem_tr1 = mem_tr_head; ++ ++ i = j = n = c = d = 0; ++ ++ /* ++ * step1: free up the track node which does not contain any elements. 
++ */ ++ while (mem_tr1) { ++ SEP_DRV_LOG_ALLOC("Node %p, index %d, elememts %d.", mem_tr1, n, ++ MEM_TRACKER_elements(mem_tr1)); ++ if (MEM_TRACKER_elements(mem_tr1)) { ++ mem_tr1 = MEM_TRACKER_next(mem_tr1); ++ } else { ++ empty_tr = mem_tr1; ++ mem_tr1 = MEM_TRACKER_next(mem_tr1); ++ if (empty_tr == mem_tr_head) { ++ mem_tr_head = mem_tr1; ++ } ++ if (empty_tr == mem_tr_tail) { ++ mem_tr_tail = MEM_TRACKER_prev(empty_tr); ++ } ++ control_Memory_Tracker_Delete_Node(empty_tr); ++ d++; ++ SEP_DRV_LOG_ALLOC("Delete node %p.", mem_tr1); ++ } ++ } ++ ++ mem_tr1 = mem_tr_head; ++ mem_tr2 = mem_tr_tail; ++ ++ /* ++ * there is no need to compact if memory tracker was never used, or only have one track node ++ */ ++ overlap = (mem_tr1 == mem_tr2); ++ if (!mem_tr1 || !mem_tr2 || overlap) { ++ goto finish_compact; ++ } ++ ++ /* ++ * step2: there are more than 2 track node. ++ * starting from head node, find an empty element slot in a node ++ * if there is no empty slot or the node is tail, the compact is done. ++ * find an element in tail node, and move it to the empty slot fount below. ++ * if tail node is empty after moving, free it up. ++ * repeat until only one node. 
++ */ ++ m = MEM_TRACKER_max_size(mem_tr2) - 1; ++ while (!overlap) { ++ // find an empty node ++ found = FALSE; ++ while (!found && !overlap && mem_tr1) { ++ SEP_DRV_LOG_TRACE( ++ "Looking at mem_tr1 0x%p, index=%d, elements %d.", ++ mem_tr1, n, MEM_TRACKER_elements(mem_tr1)); ++ if (MEM_TRACKER_elements(mem_tr1) < ++ MEM_TRACKER_max_size(mem_tr1)) { ++ for (i = n; i < MEM_TRACKER_max_size(mem_tr1); ++ i++) { ++ if (!MEM_TRACKER_mem_address(mem_tr1, ++ i)) { ++ SEP_DRV_LOG_TRACE( ++ "Found index %d of %d empty.", ++ i, ++ MEM_TRACKER_max_size( ++ mem_tr1) - ++ 1); ++ found = TRUE; ++ break; // tentative ++ } ++ } ++ } ++ ++ // if no overlap and an empty node was not found, then advance to next node ++ if (!found) { ++ mem_tr1 = MEM_TRACKER_next(mem_tr1); ++ // check for overlap ++ overlap = (mem_tr1 == mem_tr2); ++ n = 0; ++ } ++ } ++ // all nodes going in forward direction are full, so exit ++ if (!found || overlap || !mem_tr1) { ++ goto finish_compact; ++ } ++ ++ // find a non-empty node ++ found = FALSE; ++ while (!found && !overlap && mem_tr2) { ++ SEP_DRV_LOG_ALLOC( ++ "Looking at mem_tr2 0x%p, index=%d, elements %d.", ++ mem_tr2, m, MEM_TRACKER_elements(mem_tr2)); ++ if (MEM_TRACKER_elements(mem_tr2)) { ++ for (j = m; j >= 0; j--) { ++ if (MEM_TRACKER_mem_address(mem_tr2, ++ j)) { ++ SEP_DRV_LOG_ALLOC( ++ "Found index %d of %d non-empty.", ++ j, ++ MEM_TRACKER_max_size( ++ mem_tr2) - ++ 1); ++ found = TRUE; ++ // Any reason why we are not 'breaking' here? 
++ } ++ } ++ } ++ ++ // if no overlap and no non-empty node was found, then retreat to prev node ++ if (!found) { ++ empty_tr = mem_tr2; // keep track of empty node ++ mem_tr2 = MEM_TRACKER_prev(mem_tr2); ++ m = MEM_TRACKER_max_size(mem_tr2) - 1; ++ mem_tr_tail = mem_tr2; // keep track of new tail ++ // reclaim empty mem_tracker node ++ control_Memory_Tracker_Delete_Node(empty_tr); ++ // keep track of number of node deletions performed ++ d++; ++ // check for overlap ++ overlap = (mem_tr1 == mem_tr2); ++ } ++ } ++ // all nodes going in reverse direction are empty, so exit ++ if (!found || overlap || !mem_tr2) { ++ goto finish_compact; ++ } ++ ++ // swap empty node with non-empty node so that "holes" get bubbled towards the end of list ++ MEM_TRACKER_mem_address(mem_tr1, i) = ++ MEM_TRACKER_mem_address(mem_tr2, j); ++ MEM_TRACKER_mem_size(mem_tr1, i) = ++ MEM_TRACKER_mem_size(mem_tr2, j); ++ MEM_TRACKER_mem_vmalloc(mem_tr1, i) = ++ MEM_TRACKER_mem_vmalloc(mem_tr2, j); ++ MEM_TRACKER_elements(mem_tr1)++; ++ ++ MEM_TRACKER_mem_address(mem_tr2, j) = NULL; ++ MEM_TRACKER_mem_size(mem_tr2, j) = 0; ++ MEM_TRACKER_mem_vmalloc(mem_tr2, j) = FALSE; ++ MEM_TRACKER_elements(mem_tr2)--; ++ ++ SEP_DRV_LOG_ALLOC( ++ "Node <%p, elemts %d, index %d> moved to <%p, elemts %d, index %d>.", ++ mem_tr2, MEM_TRACKER_elements(mem_tr2), j, mem_tr1, ++ MEM_TRACKER_elements(mem_tr1), i); ++ ++ // keep track of number of memory compactions performed ++ c++; ++ ++ // start new search starting from next element in mem_tr1 ++ n = i + 1; ++ ++ // start new search starting from prev element in mem_tr2 ++ m = j - 1; ++ } ++ ++finish_compact: ++ spin_unlock_irqrestore(&mem_tr_lock, flags); ++ ++ SEP_DRV_LOG_FLOW_OUT( ++ "Number of elements compacted = %d, nodes deleted = %d.", c, d); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn PVOID CONTROL_Allocate_Memory(size) ++ * ++ * @param IN size - size of the memory to allocate ++ * ++ * @returns 
char* - pointer to the allocated memory block ++ * ++ * @brief Allocate and zero memory ++ * ++ * Special Notes: ++ * Allocate memory in the GFP_KERNEL pool. ++ * ++ * Use this if memory is to be allocated within a context where ++ * the allocator can block the allocation (e.g., by putting ++ * the caller to sleep) while it tries to free up memory to ++ * satisfy the request. Otherwise, if the allocation must ++ * occur atomically (e.g., caller cannot sleep), then use ++ * CONTROL_Allocate_KMemory instead. ++ */ ++PVOID CONTROL_Allocate_Memory(size_t size) ++{ ++ U32 status; ++ PVOID location = NULL; ++ ++ SEP_DRV_LOG_ALLOC_IN("Attempting to allocate %d bytes.", (S32)size); ++ ++ if (size <= 0) { ++ SEP_DRV_LOG_WARNING_ALLOC_OUT( ++ "Cannot allocate a number of bytes <= 0."); ++ return NULL; ++ } ++ ++ // determine whether to use mem_tracker or not ++ if (size < MAX_KMALLOC_SIZE) { ++ location = (PVOID)kmalloc(size, GFP_KERNEL); ++ SEP_DRV_LOG_ALLOC("Allocated small memory (0x%p, %d)", location, ++ (S32)size); ++ } ++ if (!location) { ++ location = (PVOID)vmalloc(size); ++ if (location) { ++ status = control_Memory_Tracker_Add(location, size, ++ TRUE); ++ SEP_DRV_LOG_ALLOC("Allocated large memory (0x%p, %d)", ++ location, (S32)size); ++ if (status != OS_SUCCESS) { ++ // failed to track in mem_tracker, so free up memory and return NULL ++ SEP_DRV_LOG_ERROR( ++ "Allocated %db; failed to track w/ MEM_TRACKER. 
Freeing...", ++ (S32)size); ++ vfree(location); ++ location = NULL; ++ } ++ } ++ } ++ ++ if (!location) { ++ SEP_DRV_LOG_ERROR("Failed to allocated %db.", (S32)size); ++ } else { ++ memset(location, 0, size); ++ } ++ ++ SEP_DRV_LOG_ALLOC_OUT("Returning %p.", location); ++ return location; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn PVOID CONTROL_Allocate_KMemory(size) ++ * ++ * @param IN size - size of the memory to allocate ++ * ++ * @returns char* - pointer to the allocated memory block ++ * ++ * @brief Allocate and zero memory ++ * ++ * Special Notes: ++ * Allocate memory in the GFP_ATOMIC pool. ++ * ++ * Use this if memory is to be allocated within a context where ++ * the allocator cannot block the allocation (e.g., by putting ++ * the caller to sleep) as it tries to free up memory to ++ * satisfy the request. Examples include interrupt handlers, ++ * process context code holding locks, etc. ++ */ ++PVOID CONTROL_Allocate_KMemory(size_t size) ++{ ++ U32 status; ++ PVOID location; ++ ++ SEP_DRV_LOG_ALLOC_IN("Attempting to allocate %d bytes.", (S32)size); ++ ++ if (size <= 0) { ++ SEP_DRV_LOG_ALLOC_OUT( ++ "Cannot allocate a number of bytes <= 0."); ++ return NULL; ++ } ++ ++ if (size < MAX_KMALLOC_SIZE) { ++ location = (PVOID)kmalloc(size, GFP_ATOMIC); ++ SEP_DRV_LOG_ALLOC("Allocated small memory (0x%p, %d)", location, ++ (S32)size); ++ } else { ++ location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size)); ++ if (location) { ++ status = control_Memory_Tracker_Add(location, size, ++ FALSE); ++ SEP_DRV_LOG_ALLOC("Allocated large memory (0x%p, %d)", ++ location, (S32)size); ++ if (status != OS_SUCCESS) { ++ // failed to track in mem_tracker, so free up memory and return NULL ++ SEP_DRV_LOG_ERROR( ++ "Allocated %db; failed to track w/ MEM_TRACKER. 
Freeing...", ++ (S32)size); ++ free_pages((unsigned long)location, ++ get_order(size)); ++ location = NULL; ++ } ++ } ++ } ++ ++ if (!location) { ++ SEP_DRV_LOG_ERROR("Failed to allocated %db.", (S32)size); ++ } else { ++ memset(location, 0, size); ++ } ++ ++ SEP_DRV_LOG_ALLOC_OUT("Returning %p.", location); ++ return location; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn PVOID CONTROL_Free_Memory(location) ++ * ++ * @param IN location - size of the memory to allocate ++ * ++ * @returns pointer to the allocated memory block ++ * ++ * @brief Frees the memory block ++ * ++ * Special Notes: ++ * Does not try to free memory if fed with a NULL pointer ++ * Expected usage: ++ * ptr = CONTROL_Free_Memory(ptr); ++ * Does not do compaction ... can have "holes" in ++ * mem_tracker list after this operation. ++ */ ++PVOID CONTROL_Free_Memory(PVOID location) ++{ ++ S32 i; ++ DRV_BOOL found; ++ MEM_TRACKER mem_tr; ++ ++ SEP_DRV_LOG_ALLOC_IN("Attempting to free %p.", location); ++ ++ if (!location) { ++ SEP_DRV_LOG_ALLOC_OUT("Cannot free NULL."); ++ return NULL; ++ } ++ ++#if defined(DRV_SEP_ACRN_ON) ++ if (!local_vfree_atomic) { ++ local_vfree_atomic = (PVOID)UTILITY_Find_Symbol("vfree_atomic"); ++ if (!local_vfree_atomic) { ++ SEP_PRINT_ERROR("Could not find 'vfree_atomic'!\n"); ++ } ++ } ++#endif ++ spin_lock_irqsave(&mem_tr_lock, flags); ++ ++ // scan through mem_tracker nodes for matching entry (if any) ++ mem_tr = mem_tr_head; ++ found = FALSE; ++ while (mem_tr) { ++ for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) { ++ if (location == MEM_TRACKER_mem_address(mem_tr, i)) { ++ SEP_DRV_LOG_ALLOC( ++ "Freeing large memory location 0x%p", ++ location); ++ found = TRUE; ++ if (MEM_TRACKER_mem_vmalloc(mem_tr, i)) { ++#if defined(DRV_SEP_ACRN_ON) ++ if (unlikely(in_atomic() && ++ local_vfree_atomic)) { ++ local_vfree_atomic(location); ++ } else { ++#endif ++ vfree(location); ++ } ++ ++#if defined(DRV_SEP_ACRN_ON) ++ } 
++#endif ++ else { ++ free_pages( ++ (unsigned long)location, ++ get_order(MEM_TRACKER_mem_size( ++ mem_tr, i))); ++ } ++ MEM_TRACKER_mem_address(mem_tr, i) = NULL; ++ MEM_TRACKER_mem_size(mem_tr, i) = 0; ++ MEM_TRACKER_mem_vmalloc(mem_tr, i) = 0; ++ MEM_TRACKER_elements(mem_tr)--; ++ goto finish_free; ++ } ++ } ++ mem_tr = MEM_TRACKER_next(mem_tr); ++ } ++ ++finish_free: ++ spin_unlock_irqrestore(&mem_tr_lock, flags); ++ ++ // must have been of smaller than the size limit for mem tracker nodes ++ if (!found) { ++ SEP_DRV_LOG_ALLOC("Freeing small memory location 0x%p", ++ location); ++ kfree(location); ++ } ++ ++ SEP_DRV_LOG_ALLOC_OUT("Success. Returning NULL."); ++ return NULL; ++} +diff --git a/drivers/platform/x86/sepdk/sep/core2.c b/drivers/platform/x86/sepdk/sep/core2.c +new file mode 100755 +index 000000000000..a56ad28cd097 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/core2.c +@@ -0,0 +1,2137 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "lwpmudrv.h" ++#include "utility.h" ++#include "control.h" ++#include "output.h" ++#include "core2.h" ++#include "ecb_iterators.h" ++#include "pebs.h" ++#include "apic.h" ++ ++#if !defined(DRV_ANDROID) ++#include "jkt_unc_ha.h" ++#include "jkt_unc_qpill.h" ++#include "pci.h" ++#endif ++ ++extern EVENT_CONFIG global_ec; ++extern U64 *read_counter_info; ++extern LBR lbr; ++extern DRV_CONFIG drv_cfg; ++extern DEV_CONFIG pcfg; ++extern PWR pwr; ++extern U64 *interrupt_counts; ++extern DRV_SETUP_INFO_NODE req_drv_setup_info; ++extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++ ++#if !defined(DRV_ANDROID) ++static U32 direct2core_data_saved; ++static U32 bl_bypass_data_saved; ++#endif ++ ++static U32 restore_reg_addr[3]; ++ ++typedef struct SADDR_S { ++ S64 addr : CORE2_LBR_DATA_BITS; ++} SADDR; ++ ++#define SADDR_addr(x) ((x).addr) ++#define MSR_ENERGY_MULTIPLIER 0x606 // Energy Multiplier MSR ++ ++#if !defined(DRV_ANDROID) ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn void core2_Disable_Direct2core(ECB) ++ * ++ * @param pecb ECB of group being scheduled ++ * ++ * @return None No return needed ++ * ++ * @brief program the QPILL and HA register for disabling of direct2core ++ * ++ * Special Notes ++ */ ++static VOID core2_Disable_Direct2core(ECB pecb) ++{ ++ U32 busno = 0; ++ U32 dev_idx = 0; ++ U32 base_idx = 0; ++ U32 device_id = 0; ++ U32 value = 0; ++ U32 vendor_id = 0; ++ U32 core2_qpill_dev_no[2] = { 8, 9 }; ++ U32 this_cpu; ++ ++ SEP_DRV_LOG_TRACE_IN("PECB: %p.", pecb); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ ++ // Discover the bus # for HA ++ for (busno = 0; busno < MAX_BUSNO; busno++) { ++ value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, ++ JKTUNC_HA_D2C_FUNC_NO, 0); ++ vendor_id = value & VENDOR_ID_MASK; ++ device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; ++ ++ if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { ++ continue; ++ } ++ if (device_id != JKTUNC_HA_D2C_DID) { ++ continue; ++ } ++ value = 0; ++ // now program at the offset ++ value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, ++ JKTUNC_HA_D2C_FUNC_NO, ++ JKTUNC_HA_D2C_OFFSET); ++ restore_ha_direct2core[this_cpu][busno] = 0; ++ restore_ha_direct2core[this_cpu][busno] = value; ++ } ++ for (busno = 0; busno < MAX_BUSNO; busno++) { ++ value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, ++ JKTUNC_HA_D2C_FUNC_NO, 0); ++ vendor_id = value & VENDOR_ID_MASK; ++ device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; ++ ++ if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { ++ continue; ++ } ++ if (device_id != JKTUNC_HA_D2C_DID) { ++ continue; ++ } ++ ++ // now program at the offset ++ value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, ++ JKTUNC_HA_D2C_FUNC_NO, ++ JKTUNC_HA_D2C_OFFSET); ++ value |= value | JKTUNC_HA_D2C_BITMASK; ++ PCI_Write_U32(busno, JKTUNC_HA_DEVICE_NO, JKTUNC_HA_D2C_FUNC_NO, ++ JKTUNC_HA_D2C_OFFSET, value); ++ } ++ ++ // Discover the bus # for QPI ++ for (dev_idx = 0; dev_idx < 2; dev_idx++) { ++ base_idx = dev_idx * MAX_BUSNO; ++ for (busno 
= 0; busno < MAX_BUSNO; busno++) { ++ value = PCI_Read_U32(busno, core2_qpill_dev_no[dev_idx], ++ JKTUNC_QPILL_D2C_FUNC_NO, 0); ++ vendor_id = value & VENDOR_ID_MASK; ++ device_id = ++ (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; ++ ++ if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { ++ continue; ++ } ++ if ((device_id != JKTUNC_QPILL0_D2C_DID) && ++ (device_id != JKTUNC_QPILL1_D2C_DID)) { ++ continue; ++ } ++ // now program at the corresponding offset ++ value = PCI_Read_U32(busno, core2_qpill_dev_no[dev_idx], ++ JKTUNC_QPILL_D2C_FUNC_NO, ++ JKTUNC_QPILL_D2C_OFFSET); ++ restore_qpi_direct2core[this_cpu][base_idx + busno] = 0; ++ restore_qpi_direct2core[this_cpu][base_idx + busno] = ++ value; ++ } ++ } ++ for (dev_idx = 0; dev_idx < 2; dev_idx++) { ++ for (busno = 0; busno < MAX_BUSNO; busno++) { ++ value = PCI_Read_U32(busno, core2_qpill_dev_no[dev_idx], ++ JKTUNC_QPILL_D2C_FUNC_NO, 0); ++ vendor_id = value & VENDOR_ID_MASK; ++ device_id = ++ (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; ++ ++ if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { ++ continue; ++ } ++ if ((device_id != JKTUNC_QPILL0_D2C_DID) && ++ (device_id != JKTUNC_QPILL1_D2C_DID)) { ++ continue; ++ } ++ // now program at the corresponding offset ++ value = PCI_Read_U32(busno, core2_qpill_dev_no[dev_idx], ++ JKTUNC_QPILL_D2C_FUNC_NO, ++ JKTUNC_QPILL_D2C_OFFSET); ++ value |= value | JKTUNC_QPILL_D2C_BITMASK; ++ PCI_Write_U32(busno, core2_qpill_dev_no[dev_idx], ++ JKTUNC_QPILL_D2C_FUNC_NO, ++ JKTUNC_QPILL_D2C_OFFSET, value); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn void core2_Disable_BL_Bypass(ECB) ++ * ++ * @param pecb ECB of group being scheduled ++ * ++ * @return None No return needed ++ * ++ * @brief Disable the BL Bypass ++ * ++ * Special Notes ++ */ ++static VOID core2_Disable_BL_Bypass(ECB pecb) ++{ ++ U64 value; ++ U32 this_cpu; ++ ++ SEP_DRV_LOG_TRACE_IN("PECB: %p.", pecb); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ ++ value = SYS_Read_MSR(CORE2UNC_DISABLE_BL_BYPASS_MSR); ++ restore_bl_bypass[this_cpu] = 0; ++ restore_bl_bypass[this_cpu] = value; ++ value |= CORE2UNC_BLBYPASS_BITMASK; ++ SYS_Write_MSR(CORE2UNC_DISABLE_BL_BYPASS_MSR, value); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void core2_Write_PMU(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Initial set up of the PMU registers ++ * ++ * Special Notes ++ * Initial write of PMU registers. ++ * Walk through the enties and write the value of the register accordingly. ++ * Assumption: For CCCR registers the enable bit is set to value 0. ++ * When current_group = 0, then this is the first time this routine is called, ++ * initialize the locks and set up EM tables. 
++ */ ++static VOID core2_Write_PMU(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ EVENT_CONFIG ec; ++ DISPATCH dispatch; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ if (CPU_STATE_current_group(pcpu) == 0) { ++ if (EVENT_CONFIG_mode(ec) != EM_DISABLED) { ++ U32 index; ++ U32 st_index; ++ U32 j; ++ ++ /* Save all the initialization values away into an array for Event Multiplexing. */ ++ for (j = 0; j < EVENT_CONFIG_num_groups(ec); j++) { ++ CPU_STATE_current_group(pcpu) = j; ++ st_index = CPU_STATE_current_group(pcpu) * ++ EVENT_CONFIG_max_gp_events(ec); ++ FOR_EACH_REG_CORE_OPERATION( ++ pecb, i, PMU_OPERATION_DATA_GP) ++ { ++ index = st_index + i - ++ ECB_operations_register_start( ++ pecb, ++ PMU_OPERATION_DATA_GP); ++ CPU_STATE_em_tables(pcpu)[index] = ++ ECB_entries_reg_value(pecb, i); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ /* Reset the current group to the very first one. */ ++ CPU_STATE_current_group(pcpu) = ++ this_cpu % EVENT_CONFIG_num_groups(ec); ++ } ++ } ++ ++ if (dispatch->hw_errata) { ++ dispatch->hw_errata(); ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) ++ { ++ /* ++ * Writing the GLOBAL Control register enables the PMU to start counting. ++ * So write 0 into the register to prevent any counting from starting. 
++ */ ++ if (i == ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ continue; ++ } ++ /* ++ * PEBS is enabled for this collection session ++ */ ++ if (DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) && ++ i == ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS) && ++ ECB_entries_reg_value(pecb, i)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ continue; ++ } ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++#if defined(MYDEBUG) ++ { ++ U64 val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SEP_DRV_LOG_TRACE( ++ "Register 0x%x: wrvalue 0x%llx, rdvalue 0x%llx.", ++ ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i), val); ++ } ++#endif ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void core2_Disable_PMU(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Zero out the global control register. This automatically disables the PMU counters. 
++ * ++ */ ++static VOID core2_Disable_PMU(PVOID param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { ++ SEP_DRV_LOG_TRACE("Driver state is not RUNNING."); ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void core2_Enable_PMU(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Set the enable bit for all the Control registers ++ * ++ */ ++static VOID core2_Enable_PMU(PVOID param) ++{ ++ /* ++ * Get the value from the event block ++ * 0 == location of the global control reg for this block. 
++ * Generalize this location awareness when possible ++ */ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ if (KVM_guest_mode) { ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { ++ APIC_Enable_Pmi(); ++ if (CPU_STATE_reset_mask(pcpu)) { ++ SEP_DRV_LOG_TRACE("Overflow reset mask %llx.", ++ CPU_STATE_reset_mask(pcpu)); ++ // Reinitialize the global overflow control register ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ CPU_STATE_reset_mask(pcpu) = 0LL; ++ } ++ if (CPU_STATE_group_swap(pcpu)) { ++ CPU_STATE_group_swap(pcpu) = 0; ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ 
ECB_SECTION_REG_INDEX( ++ pecb, ++ PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ } ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++#if defined(MYDEBUG) ++ { ++ U64 val; ++ val = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ SEP_DRV_LOG_TRACE( ++ "Write reg 0x%x--- read 0x%llx.", ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ val); ++ } ++#endif ++ } ++ } ++ SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.", ++ ECB_entries_reg_value(pecb, 0)); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void corei7_Enable_PMU_2(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Set the enable bit for all the Control registers ++ * ++ */ ++static VOID corei7_Enable_PMU_2(PVOID param) ++{ ++ /* ++ * Get the value from the event block ++ * 0 == location of the global control reg for this block. 
++ */ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U64 pebs_val = 0; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ if (KVM_guest_mode) { ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { ++ APIC_Enable_Pmi(); ++ if (CPU_STATE_group_swap(pcpu)) { ++ CPU_STATE_group_swap(pcpu) = 0; ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ pebs_val = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ if (ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)) != ++ 0) { ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ } else if (pebs_val != 0) { ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ } ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, 
GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++#if defined(MYDEBUG) ++ SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.", ++ ECB_entries_reg_value(pecb, 0)); ++#endif ++ } ++ if (CPU_STATE_reset_mask(pcpu)) { ++#if defined(MYDEBUG) ++ SEP_DRV_LOG_TRACE("Overflow reset mask %llx.", ++ CPU_STATE_reset_mask(pcpu)); ++#endif ++ // Reinitialize the global overflow control register ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ CPU_STATE_reset_mask(pcpu) = 0LL; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn core2_Read_PMU_Data(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. 
++ * ++ */ ++static void core2_Read_PMU_Data(PVOID param) ++{ ++ U32 j; ++ U64 *buffer = read_counter_info; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ SEP_DRV_LOG_TRACE("PMU control_data 0x%p, buffer 0x%p.", ++ LWPMU_DEVICE_PMU_register_data(&devices[dev_idx]), ++ buffer); ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ j = EMON_BUFFER_CORE_EVENT_OFFSET( ++ EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map( ++ emon_buffer_driver_helper)[this_cpu], ++ ECB_entries_core_event_id(pecb, i)); ++ ++ buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j, ++ buffer[j], this_cpu, ++ ECB_entries_core_event_id(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn core2_Check_Overflow_Errata(pecb, index, overflow_status) ++ * ++ * @param pecb: The current event control block ++ * @param index: index of the register to process ++ * @param overflow_status: current overflow mask ++ * ++ * @return Updated Event mask of the overflowed registers. ++ * ++ * @brief Go through the overflow errata for the architecture and set the mask ++ * ++ * Special Notes ++ * fixed_counter1 on some architectures gets interfered by ++ * other event counts. Overcome this problem by reading the ++ * counter value and resetting the overflow mask. 
++ * ++ */ ++static U64 core2_Check_Overflow_Errata(ECB pecb, U32 index, U64 overflow_status) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (DRV_CONFIG_num_events(drv_cfg) == 1) { ++ SEP_DRV_LOG_TRACE_OUT("Res: %llu. (num_events = 1)", ++ overflow_status); ++ return overflow_status; ++ } ++ if (ECB_entries_reg_id(pecb, index) == IA32_FIXED_CTR1 && ++ (overflow_status & 0x200000000LL) == 0LL) { ++ U64 val = SYS_Read_MSR(IA32_FIXED_CTR1); ++ val &= ECB_entries_max_bits(pecb, index); ++ if (val < ECB_entries_reg_value(pecb, index)) { ++ overflow_status |= 0x200000000LL; ++ SEP_DRV_LOG_TRACE( ++ "Reset -- clk count %llx, status %llx.", val, ++ overflow_status); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %llu.", overflow_status); ++ return overflow_status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void core2_Check_Overflow(masks) ++ * ++ * @param masks the mask structure to populate ++ * ++ * @return None No return needed ++ * ++ * @brief Called by the data processing method to figure out which registers have overflowed. 
++ * ++ */ ++static void core2_Check_Overflow(DRV_MASKS masks) ++{ ++ U32 index; ++ U64 overflow_status = 0; ++ U32 this_cpu; ++ BUFFER_DESC bd; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ DISPATCH dispatch; ++ U64 overflow_status_clr = 0; ++ DRV_EVENT_MASK_NODE event_flag; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ bd = &cpu_buf[this_cpu]; ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ // initialize masks ++ DRV_MASKS_masks_num(masks) = 0; ++ ++ overflow_status = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, ++ PMU_OPERATION_GLOBAL_STATUS))); ++ ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); ++ } ++ overflow_status_clr = overflow_status; ++ ++ if (dispatch->check_overflow_gp_errata) { ++ overflow_status = dispatch->check_overflow_gp_errata( ++ pecb, &overflow_status_clr); ++ } ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, ++ overflow_status); ++ index = 0; ++ BUFFER_DESC_sample_count(bd) = 0; ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ index = i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_FIXED) + ++ 0x20; ++ if (dispatch->check_overflow_errata) { ++ overflow_status = ++ dispatch->check_overflow_errata( ++ pecb, i, overflow_status); ++ } ++ } else if (ECB_entries_is_gp_reg_get(pecb, i)) { ++ index = i - ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ } else { ++ continue; ++ } ++ if (overflow_status & 
((U64)1 << index)) { ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", ++ this_cpu, index); ++ SEP_DRV_LOG_TRACE( ++ "register 0x%x --- val 0%llx.", ++ ECB_entries_reg_id(pecb, i), ++ SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ /* Increment the interrupt count. */ ++ if (interrupt_counts) { ++ interrupt_counts ++ [this_cpu * ++ DRV_CONFIG_num_events( ++ drv_cfg) + ++ ECB_entries_event_id_index( ++ pecb, i)] += 1; ++ } ++ } ++ ++ DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ CPU_STATE_p_state_counting(pcpu) = 1; ++ } ++ if (ECB_entries_precise_get(pecb, i)) { ++ DRV_EVENT_MASK_precise(&event_flag) = 1; ++ } ++ if (ECB_entries_lbr_value_get(pecb, i)) { ++ DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; ++ } ++ if (ECB_entries_uncore_get(pecb, i)) { ++ DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; ++ } ++ if (ECB_entries_branch_evt_get(pecb, i)) { ++ DRV_EVENT_MASK_branch(&event_flag) = 1; ++ } ++ ++ if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { ++ DRV_EVENT_MASK_bitFields1( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ DRV_EVENT_MASK_bitFields1(&event_flag); ++ DRV_EVENT_MASK_event_idx( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ ECB_entries_event_id_index(pecb, i); ++ DRV_MASKS_masks_num(masks)++; ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "The array for event masks is full."); ++ } ++ ++ SEP_DRV_LOG_TRACE("overflow -- 0x%llx, index 0x%llx.", ++ overflow_status, (U64)1 << index); ++ SEP_DRV_LOG_TRACE("slot# %d, reg_id 0x%x, index %d.", i, ++ ECB_entries_reg_id(pecb, i), index); ++ if (ECB_entries_event_id_index(pecb, i) == ++ CPU_STATE_trigger_event_num(pcpu)) { ++ CPU_STATE_trigger_count(pcpu)--; ++ } ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ CPU_STATE_reset_mask(pcpu) = overflow_status_clr; ++ // Reinitialize the global 
overflow control register ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_OVF_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ overflow_status_clr); ++ ++ SEP_DRV_LOG_TRACE("Check Overflow completed %d.", this_cpu); ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn core2_Swap_Group(restart) ++ * ++ * @param restart dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Perform the mechanics of swapping the event groups for event mux operations ++ * ++ * Special Notes ++ * Swap function for event multiplexing. ++ * Freeze the counting. ++ * Swap the groups. ++ * Enable the counting. ++ * Reset the event trigger count ++ * ++ */ ++static VOID core2_Swap_Group(DRV_BOOL restart) ++{ ++ U32 index; ++ U32 next_group; ++ U32 st_index; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ ++ st_index = ++ CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); ++ next_group = (CPU_STATE_current_group(pcpu) + 1); ++ if (next_group >= EVENT_CONFIG_num_groups(ec)) { ++ next_group = 0; ++ } ++ ++ SEP_DRV_LOG_TRACE("current group : 0x%x.", ++ CPU_STATE_current_group(pcpu)); ++ SEP_DRV_LOG_TRACE("next group : 0x%x.", next_group); ++ ++ // Save the counters for the current group ++ if (!DRV_CONFIG_event_based_counts(drv_cfg)) { ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) ++ { ++ index = st_index + i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ CPU_STATE_em_tables(pcpu)[index] = ++ SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SEP_DRV_LOG_TRACE("Saved value for reg 0x%x : 0x%llx.", ++ 
ECB_entries_reg_id(pecb, i), ++ CPU_STATE_em_tables(pcpu)[index]); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ ++ CPU_STATE_current_group(pcpu) = next_group; ++ ++ if (dispatch->hw_errata) { ++ dispatch->hw_errata(); ++ } ++ ++ // First write the GP control registers (eventsel) ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_CTRL_GP) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ if (DRV_CONFIG_event_based_counts(drv_cfg)) { ++ // In EBC mode, reset the counts for all events except for trigger event ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_event_id_index(pecb, i) != ++ CPU_STATE_trigger_event_num(pcpu)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } else { ++ // Then write the gp count registers ++ st_index = CPU_STATE_current_group(pcpu) * ++ EVENT_CONFIG_max_gp_events(ec); ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) ++ { ++ index = st_index + i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ CPU_STATE_em_tables(pcpu)[index]); ++ SEP_DRV_LOG_TRACE( ++ "Restore value for reg 0x%x : 0x%llx.", ++ ECB_entries_reg_id(pecb, i), ++ CPU_STATE_em_tables(pcpu)[index]); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_OCR) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ /* ++ * reset the em factor when a group is swapped ++ */ ++ CPU_STATE_trigger_count(pcpu) = EVENT_CONFIG_em_factor(ec); ++ ++ /* ++ * The enable routine needs to rewrite the control registers ++ */ ++ CPU_STATE_reset_mask(pcpu) = 0LL; ++ CPU_STATE_group_swap(pcpu) = 1; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* 
------------------------------------------------------------------------- */ ++/*! ++ * @fn core2_Initialize(params) ++ * ++ * @param params dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Initialize the PMU setting up for collection ++ * ++ * Special Notes ++ * Saves the relevant PMU state (minimal set of MSRs required ++ * to avoid conflicts with other Linux tools, such as Oprofile). ++ * This function should be called in parallel across all CPUs ++ * prior to the start of sampling, before PMU state is changed. ++ * ++ */ ++static VOID core2_Initialize(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ U32 i = 0; ++ ECB pecb = NULL; ++ U32 cur_grp; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (pcb == NULL) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ pcpu = &pcb[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ CPU_STATE_pmu_state(pcpu) = pmu_state + (this_cpu * 3); ++ if (CPU_STATE_pmu_state(pcpu) == NULL) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT( ++ "Unable to save PMU state on CPU %d.", this_cpu); ++ return; ++ } ++ ++ restore_reg_addr[0] = ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)); ++ restore_reg_addr[1] = ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)); ++ restore_reg_addr[2] = ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, FIXED_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)); ++ // save the original PMU state on this CPU (NOTE: must only be called ONCE per collection) ++ CPU_STATE_pmu_state(pcpu)[0] = SYS_Read_MSR(restore_reg_addr[0]); ++ CPU_STATE_pmu_state(pcpu)[1] = 
SYS_Read_MSR(restore_reg_addr[1]); ++ CPU_STATE_pmu_state(pcpu)[2] = SYS_Read_MSR(restore_reg_addr[2]); ++ ++ if (DRV_CONFIG_ds_area_available(drv_cfg) && ++ DEV_CONFIG_pebs_mode(pcfg)) { ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ ++ SEP_DRV_LOG_TRACE("Saving PMU state on CPU %d:", this_cpu); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x%llx.", ++ CPU_STATE_pmu_state(pcpu)[0]); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x%llx.", ++ CPU_STATE_pmu_state(pcpu)[1]); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x%llx.", ++ CPU_STATE_pmu_state(pcpu)[2]); ++ ++#if !defined(DRV_ANDROID) ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Not socket master."); ++ return; ++ } ++ ++ direct2core_data_saved = 0; ++ bl_bypass_data_saved = 0; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ ++ if (restore_ha_direct2core && restore_qpi_direct2core) { ++ for (i = 0; i < GLOBAL_STATE_num_em_groups(driver_state); i++) { ++ pecb = LWPMU_DEVICE_PMU_register_data( ++ &devices[dev_idx])[i]; ++ if (pecb && (ECB_flags(pecb) & ECB_direct2core_bit)) { ++ core2_Disable_Direct2core( ++ LWPMU_DEVICE_PMU_register_data( ++ &devices[dev_idx])[cur_grp]); ++ direct2core_data_saved = 1; ++ break; ++ } ++ } ++ } ++ if (restore_bl_bypass) { ++ for (i = 0; i < GLOBAL_STATE_num_em_groups(driver_state); i++) { ++ pecb = LWPMU_DEVICE_PMU_register_data( ++ &devices[dev_idx])[i]; ++ if (pecb && (ECB_flags(pecb) & ECB_bl_bypass_bit)) { ++ core2_Disable_BL_Bypass( ++ LWPMU_DEVICE_PMU_register_data( ++ &devices[dev_idx])[cur_grp]); ++ bl_bypass_data_saved = 1; ++ break; ++ } ++ } ++ } ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn core2_Destroy(params) ++ * ++ * @param params dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Reset the PMU setting up after collection ++ * ++ * Special Notes ++ * Restores the previously saved PMU state done in core2_Initialize. ++ * This function should be called in parallel across all CPUs ++ * after sampling collection ends/terminates. ++ * ++ */ ++static VOID core2_Destroy(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (pcb == NULL) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (CPU_STATE_pmu_state(pcpu) == NULL) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT( ++ "Unable to restore PMU state on CPU %d.", this_cpu); ++ return; ++ } ++ ++ SEP_DRV_LOG_TRACE("Clearing PMU state on CPU %d:", this_cpu); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x0."); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x0."); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x0."); ++ ++ // Tentative code below (trying to avoid race conditions with the NMI watchdog). Should be evaluated in the coming few days. 
(2018/05/21) ++ SYS_Write_MSR(restore_reg_addr[0], 0); ++ SYS_Write_MSR(restore_reg_addr[1], 0); ++ SYS_Write_MSR(restore_reg_addr[2], 0); ++ ++ CPU_STATE_pmu_state(pcpu) = NULL; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn core2_Read_LBRs(buffer) ++ * ++ * @param IN buffer - pointer to the buffer to write the data into ++ * @return Last branch source IP address ++ * ++ * @brief Read all the LBR registers into the buffer provided and return ++ * ++ */ ++static U64 core2_Read_LBRs(VOID *buffer, PVOID data) ++{ ++ U32 i, count = 0; ++ U64 *lbr_buf = NULL; ++ U64 value = 0; ++ U64 tos_ip_addr = 0; ++ U64 tos_ptr = 0; ++ SADDR saddr; ++ U32 this_cpu; ++ U32 dev_idx; ++ LBR lbr; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); ++ ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ lbr_buf = (U64 *)buffer; ++ } ++ ++ for (i = 0; i < LBR_num_entries(lbr); i++) { ++ value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ *lbr_buf = value; ++ } ++ SEP_DRV_LOG_TRACE("core2_Read_LBRs %u, 0x%llx.", i, value); ++ if (i == 0) { ++ tos_ptr = value; ++ } else { ++ if (LBR_entries_etype(lbr, i) == ++ LBR_ENTRY_FROM_IP) { // LBR from register ++ if (tos_ptr == count) { ++ SADDR_addr(saddr) = ++ value & CORE2_LBR_BITMASK; ++ tos_ip_addr = (U64)SADDR_addr( ++ saddr); // Add signed extension ++ SEP_DRV_LOG_TRACE( ++ "Tos_ip_addr %llu, 0x%llx.", ++ tos_ptr, value); ++ } ++ count++; ++ } ++ } ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ lbr_buf++; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); ++ return tos_ip_addr; ++} ++ ++/* ++ * @fn corei7_Read_LBRs(buffer) ++ * ++ * @param IN buffer - pointer to the buffer to write the data into ++ * @return Last branch source IP address ++ * ++ * @brief Read all the LBR registers into the buffer 
provided and return ++ * ++ */ ++static U64 corei7_Read_LBRs(VOID *buffer, PVOID data) ++{ ++ U32 i, count = 0; ++ U64 *lbr_buf = NULL; ++ U64 value = 0; ++ U64 tos_ip_addr = 0; ++ U64 tos_ptr = 0; ++ SADDR saddr; ++ U32 pairs = 0; ++ U32 this_cpu; ++ U32 dev_idx; ++ LBR lbr; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); ++ ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ lbr_buf = (U64 *)buffer; ++ } ++ ++ if (LBR_num_entries(lbr) > 0) { ++ pairs = (LBR_num_entries(lbr) - 1) / 2; ++ } ++ for (i = 0; i < LBR_num_entries(lbr); i++) { ++ value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ *lbr_buf = value; ++ } ++ if (DEV_CONFIG_collect_callstacks(pcfg)) { ++ if ((LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP && ++ i > tos_ptr + 1) || ++ (LBR_entries_etype(lbr, i) == LBR_ENTRY_TO_IP && ++ i > tos_ptr + pairs + 1)) { ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ *lbr_buf = 0x0ULL; ++ lbr_buf++; ++ } ++ continue; ++ } ++ } ++#if defined(DRV_SEP_ACRN_ON) ++ if (DEV_CONFIG_collect_callstacks(pcfg)) { ++ if ((LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP && ++ i > tos_ptr + 1) || ++ (LBR_entries_etype(lbr, i) == LBR_ENTRY_TO_IP && ++ i > tos_ptr + pairs + 1)) { ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ *lbr_buf = 0x0ULL; ++ lbr_buf++; ++ } ++ continue; ++ } ++ } ++#endif ++ SEP_DRV_LOG_TRACE("I: %u, value: 0x%llx.", i, value); ++ if (i == 0) { ++ tos_ptr = value; ++ } else { ++ if (LBR_entries_etype(lbr, i) == ++ LBR_ENTRY_FROM_IP) { // LBR from register ++ if (tos_ptr == count) { ++ SADDR_addr(saddr) = ++ value & CORE2_LBR_BITMASK; ++ tos_ip_addr = (U64)SADDR_addr( ++ saddr); // Add signed extension ++ SEP_DRV_LOG_TRACE( ++ "tos_ip_addr %llu, 0x%llx.", ++ tos_ptr, value); ++ } ++ count++; ++ } ++ } ++ if (buffer && 
DEV_CONFIG_store_lbrs(pcfg)) { ++ lbr_buf++; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); ++ return tos_ip_addr; ++} ++ ++static VOID core2_Clean_Up(VOID *param) ++{ ++#if !defined(DRV_ANDROID) ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 busno = 0; ++ U32 dev_idx = 0; ++ U32 base_idx = 0; ++ U32 device_id = 0; ++ U32 value = 0; ++ U32 vendor_id = 0; ++ U32 core2_qpill_dev_no[2] = { 8, 9 }; ++#endif ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++#if !defined(DRV_ANDROID) ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++#endif ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) ++ { ++ if (ECB_entries_clean_up_get(pecb, i)) { ++ SEP_DRV_LOG_TRACE("clean up set --- RegId --- %x.", ++ ECB_entries_reg_id(pecb, i)); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++#if !defined(DRV_ANDROID) ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Not socket master."); ++ return; ++ } ++ ++ if (restore_ha_direct2core && restore_qpi_direct2core && ++ direct2core_data_saved) { ++ // Discover the bus # for HA ++ for (busno = 0; busno < MAX_BUSNO; busno++) { ++ value = PCI_Read_U32(busno, JKTUNC_HA_DEVICE_NO, ++ JKTUNC_HA_D2C_FUNC_NO, 0); ++ vendor_id = value & VENDOR_ID_MASK; ++ device_id = ++ (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; ++ ++ if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { ++ continue; ++ } ++ if (device_id != JKTUNC_HA_D2C_DID) { ++ continue; ++ } ++ ++ // now program at the offset ++ PCI_Write_U32(busno, JKTUNC_HA_DEVICE_NO, ++ JKTUNC_HA_D2C_FUNC_NO, ++ JKTUNC_HA_D2C_OFFSET, ++ restore_ha_direct2core[this_cpu][busno]); ++ } ++ ++ // Discover the bus # for QPI ++ for (dev_idx = 0; dev_idx < 2; dev_idx++) { ++ base_idx = dev_idx * MAX_BUSNO; ++ for (busno = 0; busno < MAX_BUSNO; busno++) { ++ value = PCI_Read_U32( ++ busno, core2_qpill_dev_no[dev_idx], ++ JKTUNC_QPILL_D2C_FUNC_NO, 0); ++ vendor_id = value & VENDOR_ID_MASK; ++ device_id = (value & 
DEVICE_ID_MASK) >> ++ DEVICE_ID_BITSHIFT; ++ ++ if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { ++ continue; ++ } ++ if ((device_id != JKTUNC_QPILL0_D2C_DID) && ++ (device_id != JKTUNC_QPILL1_D2C_DID)) { ++ continue; ++ } ++ // now program at the corresponding offset ++ PCI_Write_U32(busno, ++ core2_qpill_dev_no[dev_idx], ++ JKTUNC_QPILL_D2C_FUNC_NO, ++ JKTUNC_QPILL_D2C_OFFSET, ++ restore_qpi_direct2core[this_cpu] ++ [base_idx + ++ busno]); ++ } ++ } ++ } ++ if (restore_bl_bypass && bl_bypass_data_saved) { ++ SYS_Write_MSR(CORE2UNC_DISABLE_BL_BYPASS_MSR, ++ restore_bl_bypass[this_cpu]); ++ } ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++static VOID corei7_Errata_Fix(void) ++{ ++ U32 this_cpu = CONTROL_THIS_CPU(); ++ CPU_STATE pcpu = &pcb[this_cpu]; ++ ECB(pecb) = NULL; ++ U32 dev_idx, cur_grp; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_HW_ERRATA) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++static VOID corei7_Errata_Fix_2(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_HW_ERRATA) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn void core2_Check_Overflow_Htoff_Mode(masks) ++ * ++ * @param masks the mask structure to populate ++ * ++ * @return None No return needed ++ * ++ * @brief Called by the data processing method to figure out which registers have overflowed. ++ * ++ */ ++static void core2_Check_Overflow_Htoff_Mode(DRV_MASKS masks) ++{ ++ U32 index; ++ U64 value = 0; ++ U64 overflow_status = 0; ++ U32 this_cpu; ++ BUFFER_DESC bd; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ U32 cur_grp; ++ DISPATCH dispatch; ++ DEV_CONFIG pcfg; ++ ECB pecb; ++ U64 overflow_status_clr = 0; ++ DRV_EVENT_MASK_NODE event_flag; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ bd = &cpu_buf[this_cpu]; ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ SEP_DRV_LOG_TRACE(""); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ // initialize masks ++ DRV_MASKS_masks_num(masks) = 0; ++ ++ overflow_status = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, ++ PMU_OPERATION_GLOBAL_STATUS))); ++ ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); ++ } ++ overflow_status_clr = overflow_status; ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, ++ overflow_status); ++ index = 0; ++ BUFFER_DESC_sample_count(bd) = 0; ++ ++ if (dispatch->check_overflow_gp_errata) { ++ overflow_status = dispatch->check_overflow_gp_errata( ++ pecb, &overflow_status_clr); ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ index = i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_FIXED) + ++ 0x20; ++ } else if 
(ECB_entries_is_gp_reg_get(pecb, i) && ++ ECB_entries_reg_value(pecb, i) != 0) { ++ index = i - ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ if (i >= (ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP) + ++ 4) && ++ i <= (ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP) + ++ 7)) { ++ value = SYS_Read_MSR( ++ ECB_entries_reg_id(pecb, i)); ++ if (value > 0 && value <= 0x100000000LL) { ++ overflow_status |= ((U64)1 << index); ++ } ++ } ++ } else { ++ continue; ++ } ++ if (overflow_status & ((U64)1 << index)) { ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", ++ this_cpu, index); ++ SEP_DRV_LOG_TRACE( ++ "register 0x%x --- val 0%llx.", ++ ECB_entries_reg_id(pecb, i), ++ SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ /* Increment the interrupt count. */ ++ if (interrupt_counts) { ++ interrupt_counts ++ [this_cpu * ++ DRV_CONFIG_num_events( ++ drv_cfg) + ++ ECB_entries_event_id_index( ++ pecb, i)] += 1; ++ } ++ } ++ ++ DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ CPU_STATE_p_state_counting(pcpu) = 1; ++ } ++ if (ECB_entries_precise_get(pecb, i)) { ++ DRV_EVENT_MASK_precise(&event_flag) = 1; ++ } ++ if (ECB_entries_lbr_value_get(pecb, i)) { ++ DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; ++ } ++ if (ECB_entries_branch_evt_get(pecb, i)) { ++ DRV_EVENT_MASK_branch(&event_flag) = 1; ++ } ++ ++ if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { ++ DRV_EVENT_MASK_bitFields1( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ DRV_EVENT_MASK_bitFields1(&event_flag); ++ DRV_EVENT_MASK_event_idx( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ ECB_entries_event_id_index(pecb, i); ++ DRV_MASKS_masks_num(masks)++; ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "The array for event masks is full."); ++ } ++ ++ 
SEP_DRV_LOG_TRACE("overflow -- 0x%llx, index 0x%llx.", ++ overflow_status, (U64)1 << index); ++ SEP_DRV_LOG_TRACE("slot# %d, reg_id 0x%x, index %d.", i, ++ ECB_entries_reg_id(pecb, i), index); ++ if (ECB_entries_event_id_index(pecb, i) == ++ CPU_STATE_trigger_event_num(pcpu)) { ++ CPU_STATE_trigger_count(pcpu)--; ++ } ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ CPU_STATE_reset_mask(pcpu) = overflow_status_clr; ++ // Reinitialize the global overflow control register ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_OVF_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ overflow_status_clr); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void core2_Read_Power(buffer) ++ * ++ * @param buffer - pointer to the buffer to write the data into ++ * ++ * @return None No return needed ++ * ++ * @brief Read all the power MSRs into the buffer provided and return. ++ * ++ */ ++static VOID corei7_Read_Power(VOID *buffer) ++{ ++ U32 i; ++ U64 *pwr_buf = (U64 *)buffer; ++ U32 this_cpu; ++ U32 dev_idx; ++ PWR pwr; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pwr = LWPMU_DEVICE_pwr(&devices[dev_idx]); ++ ++ for (i = 0; i < PWR_num_entries(pwr); i++) { ++ *pwr_buf = SYS_Read_MSR(PWR_entries_reg_id(pwr, i)); ++ pwr_buf++; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn core2_Read_Counts(param, id) ++ * ++ * @param param The read thread node to process ++ * @param id The event id for the which the sample is generated ++ * ++ * @return None No return needed ++ * ++ * @brief Read CPU event based counts for the events with reg value=0 and store into the buffer param; ++ * ++ */ ++static VOID core2_Read_Counts(PVOID param, U32 id) ++{ ++ U64 *data; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ U32 event_id = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (DEV_CONFIG_ebc_group_id_offset(pcfg)) { ++ // Write GroupID ++ data = (U64 *)((S8 *)param + ++ DEV_CONFIG_ebc_group_id_offset(pcfg)); ++ *data = CPU_STATE_current_group(pcpu) + 1; ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_counter_event_offset(pecb, i) == 0) { ++ continue; ++ } ++ data = (U64 *)((S8 *)param + ++ ECB_entries_counter_event_offset(pecb, i)); ++ event_id = ECB_entries_event_id_index(pecb, i); ++ if (event_id == id) { ++ *data = ~(ECB_entries_reg_value(pecb, i) - 1) & ++ ECB_entries_max_bits(pecb, i); ++ ; ++ } else { ++ *data = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ if (DRV_CONFIG_enable_p_state(drv_cfg)) { ++ CPU_STATE_p_state_counting(pcpu) = 0; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn corei7_Check_Overflow_Errata(pecb) ++ * ++ * @param pecb: The current event control block ++ * @param overflow_status: current overflow mask ++ * ++ * @return Updated Event mask of the overflowed registers. 
++ * ++ * @brief There is a bug where highly correlated precise events do ++ * not raise an indication on overflows in Core i7 and SNB. ++ */ ++static U64 corei7_Check_Overflow_Errata(ECB pecb__, U64 *overflow_status_clr) ++{ ++ U64 index = 0, value = 0, overflow_status = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("PECB: %p, overflow_status_clr: %p.", pecb__, ++ overflow_status_clr); ++ ++ overflow_status = *overflow_status_clr; ++ ++ if (DRV_CONFIG_num_events(drv_cfg) == 1) { ++ SEP_DRV_LOG_TRACE_OUT("Res = %llu (num_events = 1).", ++ overflow_status); ++ return overflow_status; ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_reg_value(pecb, i) == 0) { ++ continue; ++ } ++ if (ECB_entries_is_gp_reg_get(pecb, i)) { ++ index = i - ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ value = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ if (value > 0LL && value <= 0x100000000LL) { ++ overflow_status |= ((U64)1 << index); ++ *overflow_status_clr |= ((U64)1 << index); ++ SEP_DRV_LOG_TRACE("Counter 0x%x value 0x%llx.", ++ ECB_entries_reg_id(pecb, i), ++ value); ++ } ++ continue; ++ } ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ index = i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_FIXED) + ++ 0x20; ++ if (!(overflow_status & ((U64)1 << index))) { ++ value = SYS_Read_MSR( ++ ECB_entries_reg_id(pecb, i)); ++ if (ECB_entries_reg_id(pecb, i) == ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, 0, ++ PMU_OPERATION_CHECK_OVERFLOW_GP_ERRATA))) { ++ if (!(value > 0LL && ++ value <= 0x1000000LL) && ++ (*overflow_status_clr & ++ ((U64)1 << index))) { ++ //Clear it only for overflow_status so that we do not create sample records ++ //Please do not remove the check for MSR index ++ overflow_status = ++ overflow_status & ++ ~((U64)1 << index); ++ continue; ++ } ++ } ++ if (value > 0LL && value <= 0x100000000LL) { ++ overflow_status |= ((U64)1 << index); ++ *overflow_status_clr |= ++ ((U64)1 << 
index); ++ SEP_DRV_LOG_TRACE( ++ "counter 0x%x value 0x%llx\n", ++ ECB_entries_reg_id(pecb, i), ++ value); ++ } ++ } ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT("Res = %llu.", overflow_status); ++ return overflow_status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn U64 corei7_Read_Platform_Info ++ * ++ * @brief Reads the MSR_PLATFORM_INFO register if present ++ * ++ * @param void ++ * ++ * @return value read from the register ++ * ++ * Special Notes: ++ * ++ */ ++static VOID corei7_Platform_Info(PVOID data) ++{ ++ DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; ++ U64 value = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Data: %p.", data); ++ ++ if (!platform_data) { ++ SEP_DRV_LOG_TRACE_OUT("Platform_data is NULL!"); ++ return; ++ } ++ ++ DRV_PLATFORM_INFO_energy_multiplier(platform_data) = 0; ++ ++#define IA32_MSR_PLATFORM_INFO 0xCE ++ value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); ++ ++ DRV_PLATFORM_INFO_info(platform_data) = value; ++ DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; ++#undef IA32_MSR_PLATFORM_INFO ++#define IA32_MSR_MISC_ENABLE 0x1A4 ++ DRV_PLATFORM_INFO_misc_valid(platform_data) = 1; ++ value = SYS_Read_MSR(IA32_MSR_MISC_ENABLE); ++ DRV_PLATFORM_INFO_misc_info(platform_data) = value; ++#undef IA32_MSR_MISC_ENABLE ++ SEP_DRV_LOG_TRACE("Read from MSR_ENERGY_MULTIPLIER reg is %llu.", ++ SYS_Read_MSR(MSR_ENERGY_MULTIPLIER)); ++ DRV_PLATFORM_INFO_energy_multiplier(platform_data) = ++ (U32)(SYS_Read_MSR(MSR_ENERGY_MULTIPLIER) & 0x00001F00) >> 8; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 corei7_Platform_Info_Nehalem ++ * ++ * @brief Reads the MSR_PLATFORM_INFO register if present ++ * ++ * @param void ++ * ++ * @return value read from the register ++ * ++ * Special Notes: ++ * ++ */ ++static VOID corei7_Platform_Info_Nehalem(PVOID data) ++{ ++ DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; ++ U64 value = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Data: %p.", data); ++ ++ if (!platform_data) { ++ SEP_DRV_LOG_TRACE_OUT("Platform_data is NULL!"); ++ return; ++ } ++ ++#define IA32_MSR_PLATFORM_INFO 0xCE ++ value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); ++ ++ DRV_PLATFORM_INFO_info(platform_data) = value; ++ DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; ++#undef IA32_MSR_PLATFORM_INFO ++#define IA32_MSR_MISC_ENABLE 0x1A4 ++ DRV_PLATFORM_INFO_misc_valid(platform_data) = 1; ++ value = SYS_Read_MSR(IA32_MSR_MISC_ENABLE); ++ DRV_PLATFORM_INFO_misc_info(platform_data) = value; ++#undef IA32_MSR_MISC_ENABLE ++ DRV_PLATFORM_INFO_energy_multiplier(platform_data) = 0; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++DISPATCH_NODE core2_dispatch = { .init = core2_Initialize, ++ .fini = core2_Destroy, ++ .write = core2_Write_PMU, ++ .freeze = core2_Disable_PMU, ++ .restart = core2_Enable_PMU, ++ .read_data = core2_Read_PMU_Data, ++ .check_overflow = core2_Check_Overflow, ++ .swap_group = core2_Swap_Group, ++ .read_lbrs = core2_Read_LBRs, ++ .cleanup = core2_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = ++ core2_Check_Overflow_Errata, ++ .read_counts = core2_Read_Counts, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = NULL, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; ++ ++DISPATCH_NODE corei7_dispatch = { .init = core2_Initialize, ++ .fini = core2_Destroy, ++ .write = core2_Write_PMU, ++ .freeze = core2_Disable_PMU, ++ .restart = core2_Enable_PMU, ++ .read_data = core2_Read_PMU_Data, ++ .check_overflow = 
core2_Check_Overflow, ++ .swap_group = core2_Swap_Group, ++ .read_lbrs = corei7_Read_LBRs, ++ .cleanup = core2_Clean_Up, ++ .hw_errata = corei7_Errata_Fix, ++ .read_power = corei7_Read_Power, ++ .check_overflow_errata = NULL, ++ .read_counts = core2_Read_Counts, ++ .check_overflow_gp_errata = ++ corei7_Check_Overflow_Errata, ++ .read_ro = NULL, ++ .platform_info = corei7_Platform_Info, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; ++ ++DISPATCH_NODE corei7_dispatch_2 = { .init = core2_Initialize, ++ .fini = core2_Destroy, ++ .write = core2_Write_PMU, ++ .freeze = core2_Disable_PMU, ++ .restart = corei7_Enable_PMU_2, ++ .read_data = core2_Read_PMU_Data, ++ .check_overflow = core2_Check_Overflow, ++ .swap_group = core2_Swap_Group, ++ .read_lbrs = corei7_Read_LBRs, ++ .cleanup = core2_Clean_Up, ++ .hw_errata = corei7_Errata_Fix_2, ++ .read_power = corei7_Read_Power, ++ .check_overflow_errata = NULL, ++ .read_counts = core2_Read_Counts, ++ .check_overflow_gp_errata = ++ corei7_Check_Overflow_Errata, ++ .read_ro = NULL, ++ .platform_info = corei7_Platform_Info, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; ++ ++DISPATCH_NODE corei7_dispatch_nehalem = { ++ .init = core2_Initialize, ++ .fini = core2_Destroy, ++ .write = core2_Write_PMU, ++ .freeze = core2_Disable_PMU, ++ .restart = core2_Enable_PMU, ++ .read_data = core2_Read_PMU_Data, ++ .check_overflow = core2_Check_Overflow, ++ .swap_group = core2_Swap_Group, ++ .read_lbrs = corei7_Read_LBRs, ++ .cleanup = core2_Clean_Up, ++ .hw_errata = corei7_Errata_Fix, ++ .read_power = corei7_Read_Power, ++ .check_overflow_errata = NULL, ++ .read_counts = core2_Read_Counts, ++ .check_overflow_gp_errata = corei7_Check_Overflow_Errata, ++ .read_ro = NULL, ++ .platform_info = corei7_Platform_Info_Nehalem, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL ++}; ++ ++DISPATCH_NODE corei7_dispatch_htoff_mode = { ++ .init = core2_Initialize, ++ 
.fini = core2_Destroy, ++ .write = core2_Write_PMU, ++ .freeze = core2_Disable_PMU, ++ .restart = core2_Enable_PMU, ++ .read_data = core2_Read_PMU_Data, ++ .check_overflow = core2_Check_Overflow_Htoff_Mode, ++ .swap_group = core2_Swap_Group, ++ .read_lbrs = corei7_Read_LBRs, ++ .cleanup = core2_Clean_Up, ++ .hw_errata = corei7_Errata_Fix, ++ .read_power = corei7_Read_Power, ++ .check_overflow_errata = NULL, ++ .read_counts = core2_Read_Counts, ++ .check_overflow_gp_errata = corei7_Check_Overflow_Errata, ++ .read_ro = NULL, ++ .platform_info = corei7_Platform_Info, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL ++}; ++ ++DISPATCH_NODE corei7_dispatch_htoff_mode_2 = { ++ .init = core2_Initialize, ++ .fini = core2_Destroy, ++ .write = core2_Write_PMU, ++ .freeze = core2_Disable_PMU, ++ .restart = corei7_Enable_PMU_2, ++ .read_data = core2_Read_PMU_Data, ++ .check_overflow = core2_Check_Overflow_Htoff_Mode, ++ .swap_group = core2_Swap_Group, ++ .read_lbrs = corei7_Read_LBRs, ++ .cleanup = core2_Clean_Up, ++ .hw_errata = corei7_Errata_Fix_2, ++ .read_power = corei7_Read_Power, ++ .check_overflow_errata = NULL, ++ .read_counts = core2_Read_Counts, ++ .check_overflow_gp_errata = corei7_Check_Overflow_Errata, ++ .read_ro = NULL, ++ .platform_info = corei7_Platform_Info, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL ++}; +diff --git a/drivers/platform/x86/sepdk/sep/cpumon.c b/drivers/platform/x86/sepdk/sep/cpumon.c +new file mode 100755 +index 000000000000..ac8ade14f106 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/cpumon.c +@@ -0,0 +1,357 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++/* ++ * CVS_Id="$Id$" ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#if defined(DRV_EM64T) ++#include ++#endif ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "apic.h" ++#include "lwpmudrv.h" ++#include "control.h" ++#include "utility.h" ++#include "cpumon.h" ++#include "pmi.h" ++#include "sys_info.h" ++ ++#include ++#include ++ ++#if !defined(DRV_SEP_ACRN_ON) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) ++#include ++static int cpumon_NMI_Handler(unsigned int cmd, struct pt_regs *regs) ++{ ++ U32 captured_state = GET_DRIVER_STATE(); ++ ++ if (DRIVER_STATE_IN(captured_state, STATE_BIT_RUNNING | ++ STATE_BIT_PAUSING | ++ STATE_BIT_PREPARE_STOP | ++ STATE_BIT_TERMINATING)) { ++ if (captured_state != DRV_STATE_TERMINATING) { ++ PMI_Interrupt_Handler(regs); ++ } ++ return NMI_HANDLED; ++ } else { ++ return NMI_DONE; ++ } ++} ++ ++#define EBS_NMI_CALLBACK cpumon_NMI_Handler ++ ++#else ++#include ++static int cpumon_NMI_Handler(struct notifier_block *self, unsigned long val, ++ void *data) ++{ ++ struct die_args *args = (struct die_args *)data; ++ U32 captured_state = GET_DRIVER_STATE(); ++ ++ if (args) { ++ switch (val) { ++ case DIE_NMI: ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) ++ case DIE_NMI_IPI: ++#endif ++ if (DRIVER_STATE_IN(captured_state, ++ STATE_BIT_RUNNING | ++ STATE_BIT_PAUSING | ++ STATE_BIT_PREPARE_STOP | ++ STATE_BIT_TERMINATING)) { ++ if (captured_state != DRV_STATE_TERMINATING) { ++ PMI_Interrupt_Handler(args->regs); ++ } ++ return NOTIFY_STOP; ++ } ++ } ++ } ++ return NOTIFY_DONE; ++} ++ ++static struct notifier_block cpumon_notifier = { .notifier_call = ++ cpumon_NMI_Handler, ++ .next = NULL, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) ++ .priority = 2 ++#else ++ .priority = NMI_LOCAL_LOW_PRIOR, ++#endif ++}; ++#endif ++#endif ++ ++static volatile S32 cpuhook_installed; ++ ++/* ++ * CPU 
Monitoring Functionality ++ */ ++ ++/* ++ * General per-processor initialization ++ */ ++#if defined(DRV_CPU_HOTPLUG) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn DRV_BOOL CPUMON_is_Online_Allowed() ++ * ++ * @param None ++ * ++ * @return DRV_BOOL TRUE if cpu is allowed to go Online, else FALSE ++ * ++ * @brief Checks if the cpu is allowed to go online during the ++ * @brief current driver state ++ * ++ */ ++DRV_BOOL CPUMON_is_Online_Allowed(void) ++{ ++ DRV_BOOL is_allowed = FALSE; ++#if !defined(DRV_SEP_ACRN_ON) ++ U32 cur_driver_state; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ cur_driver_state = GET_DRIVER_STATE(); ++ ++ switch (cur_driver_state) { ++ case DRV_STATE_IDLE: ++ case DRV_STATE_PAUSED: ++ case DRV_STATE_RUNNING: ++ case DRV_STATE_PAUSING: ++ is_allowed = TRUE; ++ break; ++ default: ++ SEP_DRV_LOG_TRACE( ++ "CPU is prohibited to online in driver state %d.", ++ cur_driver_state); ++ break; ++ } ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", is_allowed); ++ return is_allowed; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn DRV_BOOL CPUMON_is_Offline_Allowed() ++ * ++ * @param None ++ * ++ * @return DRV_BOOL TRUE if cpu is allowed to go Offline, else FALSE ++ * ++ * @brief Checks if the cpu is allowed to go offline during the ++ * @brief current driver state ++ * ++ */ ++DRV_BOOL CPUMON_is_Offline_Allowed(void) ++{ ++ DRV_BOOL is_allowed = FALSE; ++#if !defined(DRV_SEP_ACRN_ON) ++ U32 cur_driver_state; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ cur_driver_state = GET_DRIVER_STATE(); ++ ++ switch (cur_driver_state) { ++ case DRV_STATE_PAUSED: ++ case DRV_STATE_RUNNING: ++ case DRV_STATE_PAUSING: ++ is_allowed = TRUE; ++ break; ++ default: ++ SEP_DRV_LOG_TRACE( ++ "CPU is prohibited to offline in driver state %d.", ++ cur_driver_state); ++ break; ++ } ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", is_allowed); ++ return is_allowed; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID CPUMON_Online_Cpu( ++ * PVOID param) ++ * ++ * @param PVOID parm ++ * ++ * @return None ++ * ++ * @brief Sets a cpu online, initialize APIC on it, ++ * @brief Build the sys_info for this cpu ++ * ++ */ ++VOID CPUMON_Online_Cpu(PVOID param) ++{ ++ S32 this_cpu; ++ CPU_STATE pcpu; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy parm: %p.", parm); ++ ++ if (param == NULL) { ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ } else { ++ this_cpu = *(S32 *)param; ++ } ++ pcpu = &pcb[this_cpu]; ++ if (pcpu == NULL) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to set CPU %d online!", ++ this_cpu); ++ return; ++ } ++ SEP_DRV_LOG_INIT("Setting CPU %d online, PCPU = %p.", this_cpu, pcpu); ++ CPU_STATE_offlined(pcpu) = FALSE; ++ CPU_STATE_accept_interrupt(pcpu) = 1; ++ CPU_STATE_initial_mask(pcpu) = 1; ++ CPU_STATE_group_swap(pcpu) = 1; ++ APIC_Init(NULL); ++ APIC_Install_Interrupt_Handler(NULL); ++ ++ SYS_INFO_Build_Cpu(NULL); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- 
*/ ++/*! ++ * @fn VOID CPUMON_Offline_Cpu( ++ * PVOID param) ++ * ++ * @param PVOID parm ++ * ++ * @return None ++ * ++ * @brief Sets a cpu offline ++ * ++ */ ++VOID CPUMON_Offline_Cpu(PVOID param) ++{ ++ S32 this_cpu; ++ CPU_STATE pcpu; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy parm: %p.", parm); ++ ++ if (param == NULL) { ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ } else { ++ this_cpu = *(S32 *)param; ++ } ++ pcpu = &pcb[this_cpu]; ++ ++ if (pcpu == NULL) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to set CPU %d offline.", ++ this_cpu); ++ return; ++ } ++ SEP_DRV_LOG_INIT("Setting CPU %d offline.", this_cpu); ++ CPU_STATE_offlined(pcpu) = TRUE; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern void CPUMON_Install_Cpuhooks(void) ++ * ++ * @param None ++ * ++ * @return None No return needed ++ * ++ * @brief set up the interrupt handler (on a per-processor basis) ++ * @brief Initialize the APIC in two phases (current CPU, then others) ++ * ++ */ ++VOID CPUMON_Install_Cpuhooks(void) ++{ ++#if !defined(DRV_SEP_ACRN_ON) ++ S32 me = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (cpuhook_installed) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT("Cpuhook already installed."); ++ return; ++ } ++ ++ CONTROL_Invoke_Parallel(APIC_Init, NULL); ++ CONTROL_Invoke_Parallel(APIC_Install_Interrupt_Handler, ++ (PVOID)(size_t)me); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) ++ register_nmi_handler(NMI_LOCAL, EBS_NMI_CALLBACK, 0, "sep_pmi"); ++#else ++ register_die_notifier(&cpumon_notifier); ++#endif ++ ++ cpuhook_installed = 1; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++#endif ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern void CPUMON_Remove_Cpuhools(void) ++ * ++ * @param None ++ * ++ * @return None No return needed ++ * ++ * @brief De-Initialize the APIC in phases ++ * @brief clean up the interrupt handler (on a per-processor basis) ++ * ++ */ ++VOID CPUMON_Remove_Cpuhooks(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(APIC_Restore_LVTPC, NULL); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) ++ unregister_nmi_handler(NMI_LOCAL, "sep_pmi"); ++#else ++ unregister_die_notifier(&cpumon_notifier); ++#endif ++ ++ cpuhook_installed = 0; ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} +diff --git a/drivers/platform/x86/sepdk/sep/eventmux.c b/drivers/platform/x86/sepdk/sep/eventmux.c +new file mode 100755 +index 000000000000..1d8099dc674a +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/eventmux.c +@@ -0,0 +1,446 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. 
This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++#include "lwpmudrv.h" ++#include "control.h" ++#include "utility.h" ++#include "eventmux.h" ++ ++static PVOID em_tables; ++static size_t em_tables_size; ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID eventmux_Allocate_Groups ( ++ * VOID *params ++ * ) ++ * ++ * @brief Allocate memory need to support event multiplexing ++ * ++ * @param params - pointer to a S32 that holds the size of buffer to allocate ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Allocate the memory needed to save different group counters ++ * Called via the parallel control mechanism ++ */ ++static VOID eventmux_Allocate_Groups(PVOID params) ++{ ++ U32 this_cpu; ++ CPU_STATE cpu_state; ++ U32 dev_idx; ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ cpu_state = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ preempt_enable(); ++ ++ if (EVENT_CONFIG_mode(ec) == EM_DISABLED || ++ EVENT_CONFIG_num_groups(ec) == 1) { ++ return; ++ } ++ ++ CPU_STATE_em_tables(cpu_state) = ++ em_tables + CPU_STATE_em_table_offset(cpu_state); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID eventmux_Deallocate_Groups ( ++ * VOID *params ++ * ) ++ * ++ * @brief Free the scratch memory need to support event multiplexing ++ * ++ * @param params - pointer to NULL ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Free the memory needed to save different group counters ++ * Called via the parallel control mechanism ++ */ ++static VOID eventmux_Deallocate_Groups(PVOID params) ++{ ++ U32 this_cpu; ++ CPU_STATE cpu_state; ++ U32 dev_idx; ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ cpu_state = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ preempt_enable(); ++ ++ if (EVENT_CONFIG_mode(ec) == EM_DISABLED || ++ EVENT_CONFIG_num_groups(ec) == 1) { ++ return; ++ } ++ ++ CPU_STATE_em_tables(cpu_state) = NULL; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID eventmux_Timer_Callback_Thread ( ++ * ) ++ * ++ * @brief Stop all the timer threads and terminate them ++ * ++ * @param none ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * timer routine - The event multiplexing happens here. 
++ */ ++static VOID eventmux_Timer_Callback_Thread( ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ struct timer_list *tl ++#else ++ unsigned long arg ++#endif ++) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ SEP_DRV_LOG_TRACE_IN(""); ++#else ++ SEP_DRV_LOG_TRACE_IN("Arg: %u.", (U32)arg); ++#endif ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ preempt_enable(); ++ ++ if (CPU_STATE_em_tables(pcpu) == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Em_tables is NULL!"); ++ return; ++ } ++ ++ dispatch->swap_group(TRUE); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ mod_timer(CPU_STATE_em_timer(pcpu), ++ jiffies + CPU_STATE_em_timer_delay(pcpu)); ++#else ++ CPU_STATE_em_timer(pcpu)->expires = jiffies + arg; ++ add_timer(CPU_STATE_em_timer(pcpu)); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID eventmux_Prepare_Timer_Threads ( ++ * VOID ++ * ) ++ * ++ * @brief Stop all the timer threads and terminate them ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Set up the timer threads to prepare for event multiplexing. ++ * Do not start the threads as yet ++ */ ++static VOID eventmux_Prepare_Timer_Threads(PVOID arg) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // initialize and set up the timer for all cpus ++ // Do not start the timer as yet. 
++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ preempt_enable(); ++ ++ if (EVENT_CONFIG_mode(ec) != EM_TIMER_BASED) { ++ return; ++ } ++ ++ CPU_STATE_em_timer(pcpu) = (struct timer_list *)CONTROL_Allocate_Memory( ++ sizeof(struct timer_list)); ++ ++ if (CPU_STATE_em_timer(pcpu) == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Pcpu = NULL!"); ++ return; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID eventmux_Cancel_Timers ( ++ * VOID ++ * ) ++ * ++ * @brief Stop all the timer threads and terminate them ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Cancel all the timer threads that have been started ++ */ ++static VOID eventmux_Cancel_Timers(void) ++{ ++ CPU_STATE pcpu; ++ S32 i; ++ U32 dev_idx; ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ /* ++ * Cancel the timer for all active CPUs ++ */ ++ for (i = 0; i < GLOBAL_STATE_active_cpus(driver_state); i++) { ++ pcpu = &pcb[i]; ++ dev_idx = core_to_dev_map[i]; ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ if (EVENT_CONFIG_mode(ec) != EM_TIMER_BASED) { ++ continue; ++ } ++ del_timer_sync(CPU_STATE_em_timer(pcpu)); ++ CPU_STATE_em_timer(pcpu) = ++ (struct timer_list *)CONTROL_Free_Memory( ++ CPU_STATE_em_timer(pcpu)); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID eventmux_Start_Timers ( ++ * long unsigned arg ++ * ) ++ * ++ * @brief Start the timer on a single cpu ++ * ++ * @param delay interval time in jiffies ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * start the timer on a single cpu ++ * Call from each cpu to get cpu affinity for Timer_Callback_Thread ++ */ ++static VOID eventmux_Start_Timers(PVOID arg) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ EVENT_CONFIG ec; ++ unsigned long delay; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ preempt_enable(); ++ ++ if (EVENT_CONFIG_mode(ec) != EM_TIMER_BASED || ++ EVENT_CONFIG_num_groups(ec) == 1) { ++ return; ++ } ++ ++ /* ++ * notice we want to use group 0's time slice for the initial timer ++ */ ++ delay = msecs_to_jiffies(EVENT_CONFIG_em_factor(ec)); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ CPU_STATE_em_timer_delay(pcpu) = delay; ++ timer_setup(CPU_STATE_em_timer(pcpu), eventmux_Timer_Callback_Thread, ++ 0); ++ mod_timer(CPU_STATE_em_timer(pcpu), ++ jiffies + CPU_STATE_em_timer_delay(pcpu)); ++#else ++ init_timer(CPU_STATE_em_timer(pcpu)); ++ CPU_STATE_em_timer(pcpu)->function = eventmux_Timer_Callback_Thread; ++ CPU_STATE_em_timer(pcpu)->data = delay; ++ CPU_STATE_em_timer(pcpu)->expires = jiffies + delay; ++ add_timer(CPU_STATE_em_timer(pcpu)); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID EVENTMUX_Start ( ++ * VOID ++ * ) ++ * ++ * @brief Start the timers and enable all the threads ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * if event multiplexing has been enabled, set up the time slices and ++ * start the timer threads for all the timers ++ */ ++VOID EVENTMUX_Start(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ /* ++ * Start the timer for all cpus ++ */ ++ CONTROL_Invoke_Parallel(eventmux_Start_Timers, NULL); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID EVENTMUX_Initialize ( ++ * VOID ++ * ) ++ * ++ * @brief Initialize the event multiplexing module ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * if event multiplexing has been enabled, ++ * then allocate the memory needed to save and restore all the counter data ++ * set up the timers needed, but do not start them ++ */ ++VOID EVENTMUX_Initialize(void) ++{ ++ S32 size_of_vector; ++ S32 cpu_num; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_num++) { ++ pcpu = &pcb[cpu_num]; ++ dev_idx = core_to_dev_map[cpu_num]; ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ if (EVENT_CONFIG_mode(ec) == EM_DISABLED || ++ EVENT_CONFIG_num_groups(ec) == 1) { ++ continue; ++ } ++ size_of_vector = EVENT_CONFIG_num_groups(ec) * ++ EVENT_CONFIG_max_gp_events(ec) * sizeof(S64); ++ CPU_STATE_em_table_offset(pcpu) = em_tables_size; ++ em_tables_size += size_of_vector; ++ } ++ ++ if (em_tables_size) { ++ em_tables = CONTROL_Allocate_Memory(em_tables_size); ++ } ++ CONTROL_Invoke_Parallel(eventmux_Allocate_Groups, NULL); ++ ++ CONTROL_Invoke_Parallel(eventmux_Prepare_Timer_Threads, ++ (VOID *)(size_t)0); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID EVENTMUX_Destroy ( ++ * VOID ++ * ) ++ * ++ * @brief Clean up the event multiplexing threads ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * if event multiplexing has been enabled, then stop and cancel all the timers ++ * free up all the memory that is associated with EM ++ */ ++VOID EVENTMUX_Destroy(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ eventmux_Cancel_Timers(); ++ ++ if (em_tables) { ++ em_tables = CONTROL_Free_Memory(em_tables); ++ em_tables_size = 0; ++ } ++ CONTROL_Invoke_Parallel(eventmux_Deallocate_Groups, (VOID *)(size_t)0); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} +diff --git a/drivers/platform/x86/sepdk/sep/gfx.c b/drivers/platform/x86/sepdk/sep/gfx.c +new file mode 100755 +index 000000000000..38342f6740c4 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/gfx.c +@@ -0,0 +1,261 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. 
This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include ++#include ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_gfx.h" ++#include "lwpmudrv.h" ++#include "inc/pci.h" ++#include "gfx.h" ++#include "utility.h" ++ ++static char *gfx_virtual_addr; ++static SEP_MMIO_NODE gfx_map; ++static U32 gfx_code = GFX_CTRL_DISABLE; ++static U32 gfx_counter[GFX_NUM_COUNTERS]; ++static U32 gfx_overflow[GFX_NUM_COUNTERS]; ++ ++/*! ++ * @fn OS_STATUS GFX_Read ++ * ++ * @brief Reads the counters into the buffer provided for the purpose ++ * ++ * @param buffer - buffer to read the counts into ++ * ++ * @return STATUS_SUCCESS if read succeeded, otherwise error ++ * ++ * @note ++ */ ++OS_STATUS GFX_Read(S8 *buffer) ++{ ++ U64 *samp = (U64 *)buffer; ++ U32 i; ++ U32 val; ++ char *reg_addr; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); ++ ++ // GFX counting was not specified ++ if (gfx_virtual_addr == NULL || gfx_code == GFX_CTRL_DISABLE) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "OS_INVALID (!gfx_virtual_addr || gfx_code==GFX_CTRL_DISABLE)"); ++ return OS_INVALID; ++ } ++ ++ // check for sampling buffer ++ if (!samp) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (!samp)."); ++ return OS_INVALID; ++ } ++ ++ // set the GFX register address ++ reg_addr = gfx_virtual_addr + GFX_PERF_REG; ++ ++ // for all counters - save the information to the sampling stream ++ for (i = 0; i < GFX_NUM_COUNTERS; i++) { ++ // read the ith GFX event count ++ reg_addr += 4; ++ val = *(U32 *)(reg_addr); ++#if defined(GFX_COMPUTE_DELTAS) ++ // if the current count is bigger than the previous one, ++ // then the counter overflowed ++ // so make sure the delta gets adjusted to account for it ++ if (val < gfx_counter[i]) { ++ samp[i] = val + (GFX_CTR_OVF_VAL - 
gfx_counter[i]); ++ } else { ++ samp[i] = val - gfx_counter[i]; ++ } ++#else ++ // just keep track of raw count for this counter ++ // if the current count is bigger than the previous one, ++ // then the counter overflowed ++ if (val < gfx_counter[i]) { ++ gfx_overflow[i]++; ++ } ++ samp[i] = val + (U64)gfx_overflow[i] * GFX_CTR_OVF_VAL; ++#endif ++ // save the current count ++ gfx_counter[i] = val; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); ++ return OS_SUCCESS; ++} ++ ++/*! ++ * @fn OS_STATUS GFX_Set_Event_Code ++ * ++ * @brief Programs the Graphics PMU with the right event code ++ * ++ * @param arg - buffer containing graphics event code ++ * ++ * @return STATUS_SUCCESS if success, otherwise error ++ * ++ * @note ++ */ ++OS_STATUS GFX_Set_Event_Code(IOCTL_ARGS arg) ++{ ++ U32 i; ++ char *reg_addr; ++ U32 reg_value; ++ ++ SEP_DRV_LOG_FLOW_IN("Arg: %p.", arg); ++ ++ // extract the graphics event code from usermode ++ if (get_user(gfx_code, (int __user *)arg->buf_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "OS_FAULT (Unable to obtain gfx_code from usermode!)."); ++ return OS_FAULT; ++ } ++ SEP_DRV_LOG_TRACE("Got gfx_code=0x%x.", gfx_code); ++ ++ // memory map the address to GFX counters, if not already done ++ if (gfx_virtual_addr == NULL) { ++ PCI_Map_Memory(&gfx_map, GFX_BASE_ADDRESS + GFX_BASE_NEW_OFFSET, ++ PAGE_SIZE); ++ gfx_virtual_addr = ++ (char *)(UIOP)SEP_MMIO_NODE_virtual_address(&gfx_map); ++ } ++ ++ // initialize the GFX counts ++ for (i = 0; i < GFX_NUM_COUNTERS; i++) { ++ gfx_counter[i] = 0; ++ gfx_overflow[i] = 0; ++ // only used if storing raw counts ++ // (i.e., GFX_COMPUTE_DELTAS is undefined) ++ } ++ ++ // get current GFX event code ++ if (gfx_virtual_addr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "OS_INVALID (Invalid gfx_virtual_addr=0x%p!).", ++ gfx_virtual_addr); ++ return OS_INVALID; ++ } ++ ++ reg_addr = gfx_virtual_addr + GFX_PERF_REG; ++ reg_value = *(U32 *)(reg_addr); ++ SEP_DRV_LOG_TRACE("Read reg_value=0x%x from 
reg_addr=0x%p.", reg_value, ++ reg_addr); ++ ++ /* Update the GFX counter group */ ++ // write the GFX counter group with reset = 1 for all counters ++ reg_value = (gfx_code | GFX_REG_CTR_CTRL); ++ *(U32 *)(reg_addr) = reg_value; ++ SEP_DRV_LOG_TRACE("Wrote reg_value=0x%x to reg_addr=0x%p.", reg_value, ++ reg_addr); ++ ++ SEP_DRV_LOG_FLOW_OUT("OS_SUCCESS."); ++ return OS_SUCCESS; ++} ++ ++/*! ++ * @fn OS_STATUS GFX_Start ++ * ++ * @brief Starts the count of the Graphics PMU ++ * ++ * @param NONE ++ * ++ * @return OS_SUCCESS if success, otherwise error ++ * ++ * @note ++ */ ++OS_STATUS GFX_Start(void) ++{ ++ U32 reg_value; ++ char *reg_addr; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // GFX counting was not specified ++ if (gfx_virtual_addr == NULL || gfx_code == GFX_CTRL_DISABLE) { ++ SEP_DRV_LOG_ERROR( ++ "Invalid gfx_virtual_addr=0x%p or gfx_code=0x%x.", ++ gfx_virtual_addr, gfx_code); ++ SEP_DRV_LOG_TRACE_OUT("OS_INVALID."); ++ return OS_INVALID; ++ } ++ ++ // turn on GFX counters as per event code ++ reg_addr = gfx_virtual_addr + GFX_PERF_REG; ++ *(U32 *)(reg_addr) = gfx_code; ++ ++ // verify event code was written properly ++ reg_value = *(U32 *)reg_addr; ++ if (reg_value != gfx_code) { ++ SEP_DRV_LOG_ERROR("Got register value 0x%x, expected 0x%x.", ++ reg_value, gfx_code); ++ SEP_DRV_LOG_TRACE_OUT("OS_INVALID."); ++ return OS_INVALID; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); ++ return OS_SUCCESS; ++} ++ ++/*! 
++ * @fn OS_STATUS GFX_Stop ++ * ++ * @brief Stops the count of the Graphics PMU ++ * ++ * @param NONE ++ * ++ * @return OS_SUCCESS if success, otherwise error ++ * ++ * @note ++ */ ++OS_STATUS GFX_Stop(void) ++{ ++ char *reg_addr; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // GFX counting was not specified ++ if (gfx_virtual_addr == NULL || gfx_code == GFX_CTRL_DISABLE) { ++ SEP_DRV_LOG_ERROR( ++ "Invalid gfx_virtual_addr=0x%p or gfx_code=0x%x.", ++ gfx_virtual_addr, gfx_code); ++ SEP_DRV_LOG_TRACE_OUT("OS_INVALID."); ++ return OS_INVALID; ++ } ++ ++ // turn off GFX counters ++ reg_addr = gfx_virtual_addr + GFX_PERF_REG; ++ *(U32 *)(reg_addr) = GFX_CTRL_DISABLE; ++ ++ // unmap the memory mapped virtual address ++ PCI_Unmap_Memory(&gfx_map); ++ gfx_virtual_addr = NULL; ++ ++ // reset the GFX global variables ++ gfx_code = GFX_CTRL_DISABLE; ++ ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); ++ return OS_SUCCESS; ++} +diff --git a/drivers/platform/x86/sepdk/sep/gmch.c b/drivers/platform/x86/sepdk/sep/gmch.c +new file mode 100755 +index 000000000000..41b9ee8b67a5 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/gmch.c +@@ -0,0 +1,505 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include ++#include ++#include ++#include ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++ ++#if defined(PCI_HELPERS_API) ++#include ++#include ++#endif ++ ++#include "rise_errors.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++#include "lwpmudrv_chipset.h" ++#include "inc/lwpmudrv.h" ++#include "inc/control.h" ++#include "inc/utility.h" ++#include "inc/gmch.h" ++#include "inc/pci.h" ++ ++// global variables for determining which register offsets to use ++static U32 gmch_register_read; // value=0 indicates invalid read register ++static U32 gmch_register_write; // value=0 indicates invalid write register ++static U32 number_of_events; ++ ++//global variable for reading GMCH counter values ++static U64 *gmch_current_data; ++static U64 *gmch_to_read_data; ++ ++// global variable for tracking number of overflows per GMCH counter ++static U32 gmch_overflow[MAX_CHIPSET_COUNTERS]; ++static U64 last_gmch_count[MAX_CHIPSET_COUNTERS]; ++ ++extern DRV_CONFIG drv_cfg; ++extern CHIPSET_CONFIG pma; ++extern CPU_STATE pcb; ++ ++/* ++ * @fn gmch_PCI_Read32(address) ++ * ++ * @brief Read the 32bit value specified by the address ++ * ++ * @return the read value ++ * ++ */ ++#if defined(PCI_HELPERS_API) ++#define gmch_PCI_Read32 intel_mid_msgbus_read32_raw ++#else ++static U32 gmch_PCI_Read32(unsigned long address) ++{ ++ U32 read_value = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Address: %lx.", address); ++ ++ 
PCI_Write_U32(0, 0, 0, GMCH_MSG_CTRL_REG, (U32)address); ++ read_value = PCI_Read_U32(0, 0, 0, GMCH_MSG_DATA_REG); ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %x.", read_value); ++ return read_value; ++} ++#endif ++ ++/* ++ * @fn gmch_PCI_Write32(address, data) ++ * ++ * @brief Write the 32bit value into the address specified ++ * ++ * @return None ++ * ++ */ ++#if defined(PCI_HELPERS_API) ++#define gmch_PCI_Write32 intel_mid_msgbus_write32_raw ++#else ++static void gmch_PCI_Write32(unsigned long address, unsigned long data) ++{ ++ SEP_DRV_LOG_TRACE_IN("Address: %lx, data: %lx.", address, data); ++ ++ PCI_Write_U32(0, 0, 0, GMCH_MSG_DATA_REG, data); ++ PCI_Write_U32(0, 0, 0, GMCH_MSG_CTRL_REG, address); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++#endif ++ ++/* ++ * @fn gmch_Check_Enabled() ++ * ++ * @brief Read GMCH PMON capabilities ++ * ++ * @param None ++ * ++ * @return GMCH enable bits ++ * ++ */ ++static ULONG gmch_Check_Enabled(void) ++{ ++ ULONG enabled_value; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ enabled_value = ++ gmch_PCI_Read32(GMCH_PMON_CAPABILITIES + gmch_register_read); ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %lx.", enabled_value); ++ return enabled_value; ++} ++ ++/* ++ * @fn gmch_Init_Chipset() ++ * ++ * @brief Initialize GMCH Counters. See note below. ++ * ++ * @param None ++ * ++ * @note This function must be called BEFORE any other function ++ * in this file! 
++ * ++ * @return VT_SUCCESS if successful, error otherwise ++ * ++ */ ++static U32 gmch_Init_Chipset(void) ++{ ++ int i; ++ CHIPSET_SEGMENT cs; ++ CHIPSET_SEGMENT gmch_chipset_seg; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ cs = &CHIPSET_CONFIG_gmch(pma); ++ gmch_chipset_seg = &CHIPSET_CONFIG_gmch(pma); ++ ++ // configure read/write registers offsets according to usermode setting ++ if (cs) { ++ gmch_register_read = CHIPSET_SEGMENT_read_register(cs); ++ gmch_register_write = CHIPSET_SEGMENT_write_register(cs); ++ ; ++ } ++ if (gmch_register_read == 0 || gmch_register_write == 0) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "VT_CHIPSET_CONFIG_FAILED(Invalid GMCH read/write registers!)"); ++ return VT_CHIPSET_CONFIG_FAILED; ++ } ++ ++ number_of_events = CHIPSET_SEGMENT_total_events(gmch_chipset_seg); ++ SEP_DRV_LOG_INIT("Number of chipset events %d.", number_of_events); ++ ++ // Allocate memory for reading GMCH counter values + the group id ++ gmch_current_data = ++ CONTROL_Allocate_Memory((number_of_events + 1) * sizeof(U64)); ++ if (!gmch_current_data) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("OS_NO_MEM (!gmch_current_data)."); ++ return OS_NO_MEM; ++ } ++ gmch_to_read_data = ++ CONTROL_Allocate_Memory((number_of_events + 1) * sizeof(U64)); ++ if (!gmch_to_read_data) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("OS_NO_MEM (!gmch_to_read_data)."); ++ return OS_NO_MEM; ++ } ++ ++ if (!DRV_CONFIG_enable_chipset(drv_cfg)) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "VT_SUCCESS (!DRV_CONFIG_enable_chipset(drv_cfg))."); ++ return VT_SUCCESS; ++ } ++ ++ if (!CHIPSET_CONFIG_gmch_chipset(pma)) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "VT_SUCCESS (!CHIPSET_CONFIG_gmch_chipset(drv_cfg))."); ++ return VT_SUCCESS; ++ } ++ // initialize the GMCH per-counter overflow numbers ++ for (i = 0; i < MAX_CHIPSET_COUNTERS; i++) { ++ gmch_overflow[i] = 0; ++ last_gmch_count[i] = 0; ++ } ++ ++ // disable fixed and GP counters ++ gmch_PCI_Write32(GMCH_PMON_GLOBAL_CTRL + gmch_register_write, ++ 0x00000000); ++ // clear fixed counter filter ++ 
gmch_PCI_Write32(GMCH_PMON_FIXED_CTR_CTRL + gmch_register_write, ++ 0x00000000); ++ ++ SEP_DRV_LOG_TRACE_OUT("VT_SUCCESS."); ++ return VT_SUCCESS; ++} ++ ++/* ++ * @fn gmch_Start_Counters() ++ * ++ * @brief Start the GMCH Counters. ++ * ++ * @param None ++ * ++ * @return None ++ * ++ */ ++static VOID gmch_Start_Counters(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // reset and start chipset counters ++ if (pma == NULL) { ++ SEP_DRV_LOG_ERROR("gmch_Start_Counters: ERROR pma=NULL."); ++ } ++ ++ // enable fixed and GP counters ++ gmch_PCI_Write32(GMCH_PMON_GLOBAL_CTRL + gmch_register_write, ++ 0x0001000F); ++ // enable fixed counter filter ++ gmch_PCI_Write32(GMCH_PMON_FIXED_CTR_CTRL + gmch_register_write, ++ 0x00000001); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn gmch_Trigger_Read() ++ * ++ * @brief Read the GMCH counters through PCI Config space ++ * ++ * @return None ++ * ++ */ ++static VOID gmch_Trigger_Read(void) ++{ ++ U64 *data; ++ int i, data_index; ++ U64 val; ++ U64 *gmch_data; ++ U32 counter_data_low; ++ U32 counter_data_high; ++ U64 counter_data; ++ U64 cmd_register_low_read; ++ U64 cmd_register_high_read; ++ U32 gp_counter_index = 0; ++ U64 overflow; ++ U32 cur_driver_state; ++ ++ CHIPSET_SEGMENT gmch_chipset_seg; ++ CHIPSET_EVENT chipset_events; ++ U64 *temp; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ cur_driver_state = GET_DRIVER_STATE(); ++ ++ if (!IS_COLLECTING_STATE(cur_driver_state)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid driver state!"); ++ return; ++ } ++ ++ if (pma == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("pma is NULL!"); ++ return; ++ } ++ ++ if (gmch_current_data == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("gmch_current_data is NULL!"); ++ return; ++ } ++ ++ if (CHIPSET_CONFIG_gmch_chipset(pma) == 0) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "CHIPSET_CONFIG_gmch_chipset(pma) is NULL!"); ++ return; ++ } ++ ++ data = gmch_current_data; ++ data_index = 0; ++ ++ preempt_disable(); ++ SYS_Local_Irq_Disable(); ++ gmch_chipset_seg = 
&CHIPSET_CONFIG_gmch(pma); ++ chipset_events = CHIPSET_SEGMENT_events(gmch_chipset_seg); ++ ++ // Write GroupID ++ data[data_index] = 1; ++ // Increment the data index as the event id starts from zero ++ data_index++; ++ ++ // GMCH data will be written as gmch_data[0], gmch_data[1], ... ++ gmch_data = data + data_index; ++ ++ // read the GMCH counters and add them into the sample record ++ ++ // iterate through GMCH counters configured to collect on events ++ for (i = 0; i < CHIPSET_SEGMENT_total_events(gmch_chipset_seg); i++) { ++ U32 event_id = CHIPSET_EVENT_event_id(&chipset_events[i]); ++ // read count for fixed GMCH counter event ++ if (event_id == 0) { ++ cmd_register_low_read = ++ GMCH_PMON_FIXED_CTR0 + gmch_register_read; ++ data[data_index++] = ++ (U64)gmch_PCI_Read32(cmd_register_low_read); ++ overflow = GMCH_PMON_FIXED_CTR_OVF_VAL; ++ } else { ++ // read count for general GMCH counter event ++ switch (gp_counter_index) { ++ case 0: ++ default: ++ cmd_register_low_read = GMCH_PMON_GP_CTR0_L + ++ gmch_register_read; ++ cmd_register_high_read = GMCH_PMON_GP_CTR0_H + ++ gmch_register_read; ++ break; ++ ++ case 1: ++ cmd_register_low_read = GMCH_PMON_GP_CTR1_L + ++ gmch_register_read; ++ cmd_register_high_read = GMCH_PMON_GP_CTR1_H + ++ gmch_register_read; ++ break; ++ ++ case 2: ++ cmd_register_low_read = GMCH_PMON_GP_CTR2_L + ++ gmch_register_read; ++ cmd_register_high_read = GMCH_PMON_GP_CTR2_H + ++ gmch_register_read; ++ break; ++ ++ case 3: ++ cmd_register_low_read = GMCH_PMON_GP_CTR3_L + ++ gmch_register_read; ++ cmd_register_high_read = GMCH_PMON_GP_CTR3_H + ++ gmch_register_read; ++ break; ++ } ++ counter_data_low = ++ gmch_PCI_Read32(cmd_register_low_read); ++ counter_data_high = ++ gmch_PCI_Read32(cmd_register_high_read); ++ counter_data = (U64)counter_data_high; ++ data[data_index++] = ++ (counter_data << 32) + counter_data_low; ++ overflow = GMCH_PMON_GP_CTR_OVF_VAL; ++ gp_counter_index++; ++ } ++ ++ /* Compute the running count of the event. 
*/ ++ gmch_data[i] &= overflow; ++ val = gmch_data[i]; ++ if (gmch_data[i] < last_gmch_count[i]) { ++ gmch_overflow[i]++; ++ } ++ gmch_data[i] = gmch_data[i] + gmch_overflow[i] * overflow; ++ last_gmch_count[i] = val; ++ } ++ ++ temp = gmch_to_read_data; ++ gmch_to_read_data = gmch_current_data; ++ gmch_current_data = temp; ++ SYS_Local_Irq_Enable(); ++ preempt_enable(); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn gmch_Read_Counters() ++ * ++ * @brief Copy the GMCH data to the sampling data stream. ++ * ++ * @param param - pointer to data stream where samples are to be written ++ * ++ * @return None ++ * ++ */ ++static VOID gmch_Read_Counters(PVOID param) ++{ ++ U64 *data; ++ int i; ++ U32 cur_driver_state; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ cur_driver_state = GET_DRIVER_STATE(); ++ ++ if (!IS_COLLECTING_STATE(cur_driver_state)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid driver state!"); ++ return; ++ } ++ ++ if (pma == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("pma is NULL!"); ++ return; ++ } ++ ++ if (param == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("param is NULL!"); ++ return; ++ } ++ ++ if (gmch_to_read_data == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("gmch_to_read_data is NULL!"); ++ return; ++ } ++ ++ /* ++ * Account for the group id that is placed at start of chipset array ++ * Number of data elements to be transferred is number_of_events + 1. 
++ */ ++ data = param; ++ for (i = 0; i < number_of_events + 1; i++) { ++ data[i] = gmch_to_read_data[i]; ++ SEP_DRV_LOG_TRACE( ++ "Interrupt gmch read counters data %d is: 0x%llx.", i, ++ data[i]); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn gmch_Stop_Counters() ++ * ++ * @brief Stop the GMCH counters ++ * ++ * @param None ++ * ++ * @return None ++ * ++ */ ++static VOID gmch_Stop_Counters(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // stop and reset the chipset counters ++ number_of_events = 0; ++ if (pma == NULL) { ++ SEP_DRV_LOG_ERROR("gmch_Stop_Counters: pma=NULL."); ++ } ++ ++ // disable fixed and GP counters ++ gmch_PCI_Write32(GMCH_PMON_GLOBAL_CTRL + gmch_register_write, ++ 0x00000000); ++ gmch_PCI_Write32(GMCH_PMON_FIXED_CTR_CTRL + gmch_register_write, ++ 0x00000000); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn gmch_Fini_Chipset() ++ * ++ * @brief Reset GMCH to state where it can be used again. ++ * Called at cleanup phase. ++ * ++ * @param None ++ * ++ * @return None ++ * ++ */ ++static VOID gmch_Fini_Chipset(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (!gmch_Check_Enabled()) { ++ SEP_DRV_LOG_WARNING("GMCH is not enabled!"); ++ } ++ ++ gmch_current_data = CONTROL_Free_Memory(gmch_current_data); ++ gmch_to_read_data = CONTROL_Free_Memory(gmch_to_read_data); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++// ++// Initialize the GMCH chipset dispatch table ++// ++ ++CS_DISPATCH_NODE gmch_dispatch = { .init_chipset = gmch_Init_Chipset, ++ .start_chipset = gmch_Start_Counters, ++ .read_counters = gmch_Read_Counters, ++ .stop_chipset = gmch_Stop_Counters, ++ .fini_chipset = gmch_Fini_Chipset, ++ .Trigger_Read = gmch_Trigger_Read }; +diff --git a/drivers/platform/x86/sepdk/sep/linuxos.c b/drivers/platform/x86/sepdk/sep/linuxos.c +new file mode 100755 +index 000000000000..08da10e614d8 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/linuxos.c +@@ -0,0 +1,1477 @@ ++/* 
**************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) ++#include ++#include ++#else ++#include ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) ++#include ++#endif ++#include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++#include ++#endif ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++#include "inc/lwpmudrv.h" ++#include "inc/control.h" ++#include "inc/utility.h" ++#include "inc/cpumon.h" ++#include "inc/output.h" ++#include "inc/pebs.h" ++ ++#include "inc/linuxos.h" ++#include "inc/apic.h" ++ ++#include ++#include ++ ++ ++extern DRV_BOOL multi_pebs_enabled; ++extern DRV_BOOL sched_switch_enabled; ++extern uid_t uid; ++extern volatile pid_t control_pid; ++static volatile S32 hooks_installed; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) ++static struct tracepoint *tp_sched_switch; ++#endif ++ ++#define HOOK_FREE 0 ++#define HOOK_UNINSTALL -10000 ++static atomic_t hook_state = ATOMIC_INIT(HOOK_UNINSTALL); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) && defined(DRV_CPU_HOTPLUG) ++static enum cpuhp_state cpuhp_sepdrv_state; ++#endif ++extern wait_queue_head_t wait_exit; ++ ++static PVOID local_tasklist_lock; ++ ++#define MY_TASK PROFILE_TASK_EXIT ++#define MY_UNMAP PROFILE_MUNMAP ++#ifdef CONFIG_X86_64 ++#define MR_SEG_NUM 0 ++#else ++#define MR_SEG_NUM 2 ++#endif ++ ++#if !defined(KERNEL_IMAGE_SIZE) ++#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) ++#endif ++ ++#if defined(DRV_IA32) ++static U16 linuxos_Get_Exec_Mode(struct task_struct *p) ++{ ++ return (unsigned short)MODE_32BIT; ++} ++#endif ++ ++#if defined(DRV_EM64T) ++static U16 linuxos_Get_Exec_Mode(struct task_struct *p) ++{ ++ SEP_DRV_LOG_TRACE_IN("P: %p.", p); ++ ++ if (!p) { ++ 
SEP_DRV_LOG_ERROR_TRACE_OUT("MODE_UNKNOWN (p is NULL!)."); ++ return MODE_UNKNOWN; ++ } ++ ++ if (test_tsk_thread_flag(p, TIF_IA32)) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "Res: %u (test_tsk_thread_flag TIF_IA32).", ++ (U16)(unsigned short)MODE_32BIT); ++ return (unsigned short)MODE_32BIT; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U16)(unsigned short)MODE_64BIT); ++ return (unsigned short)MODE_64BIT; ++} ++#endif ++ ++static S32 linuxos_Load_Image_Notify_Routine(char *name, U64 base, U32 size, ++ U64 page_offset, U32 pid, ++ U32 parent_pid, U32 options, ++ unsigned short mode, ++ S32 load_event, U32 segment_num, ++ U32 kernel_modules, U32 osid) ++{ ++ char *raw_path; ++ ModuleRecord *mra; ++ char buf[sizeof(ModuleRecord) + MAXNAMELEN + 32]; ++ U64 tsc_read; ++ S32 local_load_event = (load_event == -1) ? 0 : load_event; ++ U64 page_offset_shift; ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_IN(load_event == 1, ++ "Name: '%s', pid: %d.", name, pid); ++ ++ mra = (ModuleRecord *)buf; ++ memset(mra, '\0', sizeof(buf)); ++ raw_path = (char *)mra + sizeof(ModuleRecord); ++ ++ page_offset_shift = page_offset << PAGE_SHIFT; ++ MR_page_offset_Set(mra, page_offset_shift); ++ MODULE_RECORD_segment_type(mra) = mode; ++ MODULE_RECORD_load_addr64(mra) = (U64)(size_t)base; ++ MODULE_RECORD_length64(mra) = size; ++ MODULE_RECORD_tsc_used(mra) = 1; ++ MODULE_RECORD_first_module_rec_in_process(mra) = ++ options & LOPTS_1ST_MODREC; ++ MODULE_RECORD_segment_number(mra) = segment_num; ++ MODULE_RECORD_exe(mra) = (LOPTS_EXE & options) ? 1 : 0; ++ MODULE_RECORD_global_module_tb5(mra) = ++ (options & LOPTS_GLOBAL_MODULE) ? 1 : 0; ++ MODULE_RECORD_global_module(mra) = ++ (options & LOPTS_GLOBAL_MODULE) ? 
1 : 0; ++ MODULE_RECORD_processed(mra) = 0; ++ MODULE_RECORD_parent_pid(mra) = parent_pid; ++ MODULE_RECORD_osid(mra) = osid; ++ MODULE_RECORD_pid_rec_index(mra) = pid; ++ ++ if (kernel_modules) { ++ MODULE_RECORD_tsc(mra) = 0; ++ MR_unloadTscSet(mra, (U64)(0xffffffffffffffffLL)); ++ } else { ++ UTILITY_Read_TSC(&tsc_read); ++ preempt_disable(); ++ tsc_read -= TSC_SKEW(CONTROL_THIS_CPU()); ++ preempt_enable(); ++ ++ if (local_load_event) { ++ MR_unloadTscSet(mra, tsc_read); ++ } else { ++ MR_unloadTscSet(mra, (U64)(-1)); ++ } ++ } ++ ++ MODULE_RECORD_pid_rec_index_raw(mra) = 1; // raw pid ++#if defined(DEBUG) ++ if (total_loads_init) { ++ SEP_DRV_LOG_NOTIFICATION_TRACE( ++ load_event == 1, ++ "Setting pid_rec_index_raw pid 0x%x %s.", pid, name); ++ } ++#endif ++ ++ strncpy(raw_path, name, MAXNAMELEN); ++ raw_path[MAXNAMELEN] = 0; ++ MODULE_RECORD_path_length(mra) = (U16)strlen(raw_path) + 1; ++ MODULE_RECORD_rec_length(mra) = (U16)ALIGN_8( ++ sizeof(ModuleRecord) + MODULE_RECORD_path_length(mra)); ++ ++#if defined(DRV_IA32) ++ MODULE_RECORD_selector(mra) = (pid == 0) ? __KERNEL_CS : __USER_CS; ++#endif ++#if defined(DRV_EM64T) ++ if (mode == MODE_64BIT) { ++ MODULE_RECORD_selector(mra) = ++ (pid == 0) ? __KERNEL_CS : __USER_CS; ++ } else if (mode == MODE_32BIT) { ++ MODULE_RECORD_selector(mra) = ++ (pid == 0) ? 
__KERNEL32_CS : __USER32_CS; ++ } ++#endif ++ ++ OUTPUT_Module_Fill((PVOID)mra, MODULE_RECORD_rec_length(mra), ++ load_event == 1); ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, "OS_SUCCESS"); ++ return OS_SUCCESS; ++} ++ ++#ifdef DRV_MM_EXE_FILE_PRESENT ++static DRV_BOOL linuxos_Equal_VM_Exe_File(struct vm_area_struct *vma) ++{ ++ S8 name_vm_file[MAXNAMELEN]; ++ S8 name_exe_file[MAXNAMELEN]; ++ S8 *pname_vm_file = NULL; ++ S8 *pname_exe_file = NULL; ++ DRV_BOOL res; ++ ++ SEP_DRV_LOG_TRACE_IN("FMA: %p.", vma); ++ ++ if (vma == NULL) { ++ SEP_DRV_LOG_TRACE_OUT("FALSE (!vma)."); ++ return FALSE; ++ } ++ ++ if (vma->vm_file == NULL) { ++ SEP_DRV_LOG_TRACE_OUT("FALSE (!vma->vm_file)."); ++ return FALSE; ++ } ++ ++ if (vma->vm_mm->exe_file == NULL) { ++ SEP_DRV_LOG_TRACE_OUT("FALSE (!vma->vm_mm->exe_file)."); ++ return FALSE; ++ } ++ ++ pname_vm_file = D_PATH(vma->vm_file, ++ name_vm_file, MAXNAMELEN); ++ pname_exe_file = D_PATH(vma->vm_mm->exe_file, ++ name_exe_file, MAXNAMELEN); ++ res = strcmp(pname_vm_file, pname_exe_file) == 0; ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); ++ return res; ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn linuxos_Map_Kernel_Modules (void) ++ * ++ * @brief Obtain kernel module details from modules list ++ * and map the details to the module record. 
++ * ++ * @return S32 VT_SUCCESS on success ++ */ ++static S32 linuxos_Map_Kernel_Modules(void) ++{ ++ struct module *current_module; ++ struct list_head *modules; ++ U16 exec_mode; ++ unsigned long long addr; ++ unsigned long long size; ++#if defined(CONFIG_RANDOMIZE_BASE) ++ unsigned long dyn_addr = 0; ++#endif ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++#if defined(CONFIG_MODULES) ++ mutex_lock(&module_mutex); ++ ++#if defined(DRV_EM64T) ++ addr = (unsigned long)__START_KERNEL_map; ++ exec_mode = MODE_64BIT; ++#elif defined(DRV_IA32) ++ addr = (unsigned long)PAGE_OFFSET; ++ exec_mode = MODE_32BIT; ++#else ++ exec_mode = MODE_UNKNOWN; ++#endif ++ ++ SEP_DRV_LOG_TRACE( ++ " kernel module address size"); ++ SEP_DRV_LOG_TRACE( ++ " ------------------- ------------------ -------"); ++ ++ addr += (CONFIG_PHYSICAL_START + (CONFIG_PHYSICAL_ALIGN - 1)) & ++ ~(CONFIG_PHYSICAL_ALIGN - 1); ++ size = (unsigned long)KERNEL_IMAGE_SIZE - ++ ((CONFIG_PHYSICAL_START + (CONFIG_PHYSICAL_ALIGN - 1)) & ++ ~(CONFIG_PHYSICAL_ALIGN - 1)) - ++ 1; ++ ++#if defined(CONFIG_RANDOMIZE_BASE) ++ if (!dyn_addr) { ++ dyn_addr = (unsigned long)UTILITY_Find_Symbol("_text"); ++ if (!dyn_addr) { ++ dyn_addr = (unsigned long)UTILITY_Find_Symbol("_stext"); ++ } ++ ++ if (dyn_addr && dyn_addr > addr) { ++ dyn_addr &= ~(PAGE_SIZE - 1); ++ size -= (dyn_addr - addr); ++ addr = dyn_addr; ++ } else { ++ SEP_DRV_LOG_WARNING_TRACE_OUT( ++ "Could not find the kernel start address!"); ++ } ++ } ++#endif ++ ++ linuxos_Load_Image_Notify_Routine( ++ "vmlinux", addr, size, 0, 0, 0, ++ LOPTS_1ST_MODREC | LOPTS_GLOBAL_MODULE | LOPTS_EXE, exec_mode, ++ -1, MR_SEG_NUM, 1, OS_ID_NATIVE); ++ ++ SEP_DRV_LOG_TRACE("kmodule: %20s 0x%llx 0x%llx.", "vmlinux", addr, ++ size); ++ ++#if defined(DRV_SEP_ACRN_ON) ++ linuxos_Load_Image_Notify_Routine( ++ "VMM", 0x0, (U32)0xffffffffffffffffLL, 0, 0, 0, ++ LOPTS_1ST_MODREC | LOPTS_GLOBAL_MODULE | LOPTS_EXE, exec_mode, ++ -1, MR_SEG_NUM, 1, OS_ID_ACORN); ++#endif ++ ++ for (modules = (struct 
list_head *)(THIS_MODULE->list.prev); ++ (unsigned long)modules > MODULES_VADDR; modules = modules->prev) ++ ; ++ list_for_each_entry (current_module, modules, list) { ++ char *name = current_module->name; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || \ ++ defined(SEP_CONFIG_MODULE_LAYOUT) ++ addr = (unsigned long)current_module->core_layout.base; ++ size = current_module->core_layout.size; ++#else ++ addr = (unsigned long)current_module->module_core; ++ size = current_module->core_size; ++#endif ++ ++ if (module_is_live(current_module)) { ++ SEP_DRV_LOG_TRACE("kmodule: %20s 0x%llx 0x%llx.", ++ name, addr, size); ++ linuxos_Load_Image_Notify_Routine( ++ name, addr, size, 0, 0, 0, LOPTS_GLOBAL_MODULE, ++ exec_mode, -1, 0, 1, OS_ID_NATIVE); ++ } ++ } ++ ++ mutex_unlock(&module_mutex); ++#endif ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS"); ++ return OS_SUCCESS; ++} ++ ++// ++// Register the module for a process. The task_struct and mm ++// should be locked if necessary to make sure they don't change while we're ++// iterating... 
++// Used as a service routine ++// ++static S32 linuxos_VMA_For_Process(struct task_struct *p, ++ struct vm_area_struct *vma, S32 load_event, ++ U32 *first) ++{ ++ U32 options = 0; ++ S8 name[MAXNAMELEN]; ++ S8 *pname = NULL; ++ U32 ppid = 0; ++ U16 exec_mode; ++ U64 page_offset = 0; ++ ++#if defined(DRV_ANDROID) ++ char andr_app[TASK_COMM_LEN]; ++#endif ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_IN( ++ load_event == 1, "P = %p, vma = %p, load_event: %d, first: %p.", ++ p, vma, load_event, first); ++ ++ if (p == NULL) { ++ SEP_DRV_LOG_NOTIFICATION_ERROR(load_event == 1, ++ "Skipped p=NULL."); ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, ++ "OS_SUCCESS (!p)."); ++ return OS_SUCCESS; ++ } ++ ++ if (vma->vm_file) ++ pname = D_PATH(vma->vm_file, name, MAXNAMELEN); ++ ++ page_offset = vma->vm_pgoff; ++ ++ if (!IS_ERR(pname) && pname != NULL) { ++ SEP_DRV_LOG_NOTIFICATION_TRACE(load_event == 1, ++ "enum: %s, %d, %lx, %lx %llu.", ++ pname, p->pid, vma->vm_start, ++ (vma->vm_end - vma->vm_start), ++ page_offset); ++ ++ // if the VM_EXECUTABLE flag is set then this is the module ++ // that is being used to name the module ++ if (DRV_VM_MOD_EXECUTABLE(vma)) { ++ options |= LOPTS_EXE; ++#if defined(DRV_ANDROID) ++ if (!strcmp(pname, "/system/bin/app_process") || ++ !strcmp(pname, "/system/bin/app_process32") || ++ !strcmp(pname, "/system/bin/app_process64")) { ++ memset(andr_app, '\0', TASK_COMM_LEN); ++ strncpy(andr_app, p->comm, TASK_COMM_LEN); ++ pname = andr_app; ++ } ++#endif ++ } ++ // mark the first of the bunch... 
++ if (*first == 1) { ++ options |= LOPTS_1ST_MODREC; ++ *first = 0; ++ } ++ } ++#if defined(DRV_ALLOW_VDSO) ++ else if ((vma->vm_mm != NULL) && ++ vma->vm_start == (long)vma->vm_mm->context.vdso) { ++ pname = "[vdso]"; ++ } ++#endif ++#if defined(DRV_ALLOW_SYSCALL) ++ else if (vma->vm_start == VSYSCALL_START) { ++ pname = "[vsyscall]"; ++ } ++#endif ++ ++ if (pname != NULL) { ++ options = 0; ++ if (DRV_VM_MOD_EXECUTABLE(vma)) { ++ options |= LOPTS_EXE; ++ } ++ ++ if (p && p->parent) { ++ ppid = p->parent->tgid; ++ } ++ exec_mode = linuxos_Get_Exec_Mode(p); ++ // record this module ++ linuxos_Load_Image_Notify_Routine( ++ pname, vma->vm_start, (vma->vm_end - vma->vm_start), ++ page_offset, p->pid, ppid, options, exec_mode, ++ load_event, 1, 0, OS_ID_NATIVE); ++ } ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, "OS_SUCCESS."); ++ return OS_SUCCESS; ++} ++ ++// ++// Common loop to enumerate all modules for a process. The task_struct and mm ++// should be locked if necessary to make sure they don't change while we're ++// iterating... ++// ++static S32 linuxos_Enum_Modules_For_Process(struct task_struct *p, ++ struct mm_struct *mm, ++ S32 load_event) ++{ ++ struct vm_area_struct *mmap; ++ U32 first = 1; ++ ++#if defined(SECURE_SEP) ++ uid_t l_uid; ++#endif ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_IN(load_event == 1, ++ "P: %p, mm: %p, load_event: %d.", p, ++ mm, load_event); ++ ++#if defined(SECURE_SEP) ++ l_uid = DRV_GET_UID(p); ++ /* ++ * Check for: same uid, or root uid ++ */ ++ if (l_uid != uid && l_uid != 0) { ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT( ++ load_event == 1, ++ "OS_SUCCESS (secure_sep && l_uid != uid && l_uid != 0)."); ++ return OS_SUCCESS; ++ } ++#endif ++ for (mmap = mm->mmap; mmap; mmap = mmap->vm_next) { ++ /* We have 3 distinct conditions here. ++ * 1) Is the page executable? ++ * 2) Is is a part of the vdso area? ++ * 3) Is it the vsyscall area? 
++ */ ++ if (((mmap->vm_flags & VM_EXEC) && mmap->vm_file && ++ mmap->vm_file->DRV_F_DENTRY) ++#if defined(DRV_ALLOW_VDSO) ++ || (mmap->vm_mm && ++ mmap->vm_start == (long)mmap->vm_mm->context.vdso) ++#endif ++#if defined(DRV_ALLOW_VSYSCALL) ++ || (mmap->vm_start == VSYSCALL_START) ++#endif ++ ) { ++ ++ linuxos_VMA_For_Process(p, mmap, load_event, &first); ++ } ++ } ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, "OS_SUCCESS"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static int linuxos_Exec_Unmap_Notify( ++ * struct notifier_block *self, ++ * unsigned long val, ++ * VOID *data) ++ * ++ * @brief this function is called whenever a task exits ++ * ++ * @param self IN - not used ++ * val IN - not used ++ * data IN - this is cast in the mm_struct of the task that is call unmap ++ * ++ * @return none ++ * ++ * Special Notes: ++ * ++ * This notification is called from do_munmap(mm/mmap.c). This is called when ever ++ * a module is loaded or unloaded. It looks like it is called right after a module is ++ * loaded or before its unloaded (if using dlopen, dlclose). ++ * However it is not called when a process is exiting instead exit_mmap is called ++ * (resulting in an EXIT_MMAP notification). 
++ */ ++static int linuxos_Exec_Unmap_Notify(struct notifier_block *self, ++ unsigned long val, PVOID data) ++{ ++ struct mm_struct *mm; ++ struct vm_area_struct *mmap = NULL; ++ U32 first = 1; ++ U32 cur_driver_state; ++ ++#if defined(SECURE_SEP) ++ uid_t l_uid; ++#endif ++ ++ SEP_DRV_LOG_NOTIFICATION_IN("Self: %p, val: %lu, data: %p.", self, val, ++ data); ++ SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION, ++ "enter: unmap: hook_state %d.", ++ atomic_read(&hook_state)); ++ ++ cur_driver_state = GET_DRIVER_STATE(); ++ ++#if defined(SECURE_SEP) ++ l_uid = DRV_GET_UID(current); ++ /* ++ * Check for: same uid, or root uid ++ */ ++ if (l_uid != uid && l_uid != 0) { ++ SEP_DRV_LOG_NOTIFICATION_OUT( ++ "Returns 0 (secure_sep && l_uid != uid && l_uid != 0)."); ++ return 0; ++ } ++#endif ++ ++ if (!IS_COLLECTING_STATE(cur_driver_state)) { ++ SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (driver state)."); ++ return 0; ++ } ++ if (!atomic_add_negative(1, &hook_state)) { ++ SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION, ++ "unmap: hook_state %d.", ++ atomic_read(&hook_state)); ++ mm = get_task_mm(current); ++ if (mm) { ++ UTILITY_down_read_mm(mm); ++ mmap = FIND_VMA(mm, data); ++ if (mmap && mmap->vm_file && ++ (mmap->vm_flags & VM_EXEC)) { ++ linuxos_VMA_For_Process(current, mmap, TRUE, ++ &first); ++ } ++ UTILITY_up_read_mm(mm); ++ mmput(mm); ++ } ++ } ++ atomic_dec(&hook_state); ++ SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION, ++ "exit: unmap done: hook_state %d.", ++ atomic_read(&hook_state)); ++ ++ SEP_DRV_LOG_NOTIFICATION_OUT("Returns 0."); ++ return 0; ++} ++ ++#if defined(DRV_CPU_HOTPLUG) ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID linuxos_Handle_Online_cpu( ++ * PVOID param) ++ * ++ * @param PVOID param ++ * ++ * @return None ++ * ++ * @brief Callback function to set the cpu online ++ * @brief and begin collection on it ++ */ ++static VOID linuxos_Handle_Online_cpu(PVOID param) ++{ ++ U32 this_cpu; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_IN(SEP_IN_NOTIFICATION, ++ "Dummy param: %p.", param); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ preempt_enable(); ++ CPUMON_Online_Cpu((PVOID)&this_cpu); ++ if (CPU_STATE_pmu_state(&pcb[this_cpu]) == NULL) { ++ if (dispatch && dispatch->init) { ++ dispatch->init(NULL); ++ } ++ } ++ if (dispatch && dispatch->write) { ++ dispatch->write(NULL); ++ } ++ CPU_STATE_group_swap(&pcb[this_cpu]) = 1; ++ if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { ++ // possible race conditions with notifications. ++ // cleanup should wait until all notifications are done, ++ // and new notifications should not proceed ++ if (dispatch && dispatch->restart) { ++ dispatch->restart(NULL); ++ } ++ } ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(SEP_IN_NOTIFICATION, ""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID linuxos_Handle_Offline_cpu( ++ * PVOID param) ++ * ++ * @param PVOID param ++ * ++ * @return None ++ * ++ * @brief Callback function to set the cpu offline ++ * @brief and stop collection on it ++ */ ++static VOID linuxos_Handle_Offline_cpu(PVOID param) ++{ ++ U32 this_cpu; ++ U32 apic_lvterr; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ SEP_DRV_LOG_NOTIFICATION_TRACE_IN(SEP_IN_NOTIFICATION, ++ "Dummy param: %p.", param); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ preempt_enable(); ++ CPUMON_Offline_Cpu((PVOID)&this_cpu); ++ if (dispatch && dispatch->freeze) { ++ dispatch->freeze(NULL); ++ } ++ apic_lvterr = apic_read(APIC_LVTERR); ++ apic_write(APIC_LVTERR, apic_lvterr | APIC_LVT_MASKED); ++ APIC_Restore_LVTPC(NULL); ++ apic_write(APIC_LVTERR, apic_lvterr); ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(SEP_IN_NOTIFICATION, ""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn int linuxos_online_cpu( ++ * unsigned int cpu) ++ * ++ * @param unsigned int cpu ++ * ++ * @return None ++ * ++ * @brief Invokes appropriate call back function when CPU is online ++ */ ++static int linuxos_online_cpu(unsigned int cpu) ++{ ++ SEP_DRV_LOG_NOTIFICATION_IN("Cpu %d coming online.", cpu); ++ ++ if (CPUMON_is_Online_Allowed()) { ++ CONTROL_Invoke_Cpu(cpu, linuxos_Handle_Online_cpu, NULL); ++ SEP_DRV_LOG_NOTIFICATION_OUT("Cpu %d came online.", cpu); ++ return 0; ++ } else { ++ SEP_DRV_LOG_WARNING_NOTIFICATION_OUT( ++ "Cpu %d is not allowed to come online!", cpu); ++ return 0; ++ } ++} ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn int linuxos_offline_cpu( ++ * unsigned int cpu) ++ * ++ * @param unsigned int cpu ++ * ++ * @return None ++ * ++ * @brief Invokes appropriate call back function when CPU is offline ++ */ ++static int linuxos_offline_cpu(unsigned int cpu) ++{ ++ SEP_DRV_LOG_NOTIFICATION_IN("Cpu %d going offline.", cpu); ++ ++ if (CPUMON_is_Offline_Allowed()) { ++ CONTROL_Invoke_Cpu(cpu, linuxos_Handle_Offline_cpu, NULL); ++ SEP_DRV_LOG_NOTIFICATION_OUT("Cpu %d went offline.", cpu); ++ return 0; ++ } else { ++ SEP_DRV_LOG_WARNING_NOTIFICATION_OUT( ++ "Cpu %d is not allowed to go offline!", cpu); ++ return 0; ++ } ++} ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS linuxos_Hotplug_Notifier( ++ * struct notifier_block *block, unsigned long action, void *pcpu) ++ * ++ * @param struct notifier_block *block - notifier block ++ * unsigned long action - notifier action ++ * void *pcpu - per cpu pcb ++ * ++ * @return NOTIFY_OK, if successful ++ * ++ * @brief Hotplug Notifier function that handles various cpu states ++ * @brief and invokes respective callback functions ++ */ ++static OS_STATUS linuxos_Hotplug_Notifier(struct notifier_block *block, ++ unsigned long action, void *pcpu) ++{ ++ U32 cpu = (unsigned int)(unsigned long)pcpu; ++ ++ SEP_DRV_LOG_NOTIFICATION_IN( ++ "Cpu: %u, action: %u.", cpu, ++ action); // nb: will overcount number of pending notifications ++ // when using this routine ++ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_DOWN_FAILED: ++ SEP_DRV_LOG_ERROR("SEP cpu %d offline failed!", cpu); ++ case CPU_ONLINE: ++ linuxos_online_cpu(cpu); ++ break; ++ case CPU_DOWN_PREPARE: ++ linuxos_offline_cpu(cpu); ++ break; ++ default: ++ SEP_DRV_LOG_WARNING( ++ "DEFAULT: cpu %d unhandled action value is %d.", cpu, ++ action); ++ break; ++ } ++ ++ SEP_DRV_LOG_NOTIFICATION_OUT(""); ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block cpu_hotplug_notifier 
= { ++ .notifier_call = &linuxos_Hotplug_Notifier, ++}; ++#endif ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID LINUXOS_Register_Hotplug( ++ * VOID) ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * @brief Registers the Hotplug Notifier ++ */ ++VOID LINUXOS_Register_Hotplug(void) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) ++ S32 err; ++ ++ SEP_DRV_LOG_INIT_IN( ++ "Kernel version >= 4.10.0: using direct notifications."); ++ ++ err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "ia64/sep5:online", ++ linuxos_online_cpu, ++ linuxos_offline_cpu); ++ cpuhp_sepdrv_state = (int)err; ++#else ++ SEP_DRV_LOG_INIT_IN("Kernel version < 4.10.0: using notification hub."); ++ register_cpu_notifier(&cpu_hotplug_notifier); ++#endif ++ SEP_DRV_LOG_INIT_OUT("Hotplug notifier registered."); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID LINUXOS_Unregister_Hotplug( ++ * VOID) ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * @brief Unregisters the Hotplug Notifier ++ */ ++VOID LINUXOS_Unregister_Hotplug(void) ++{ ++ SEP_DRV_LOG_INIT_IN("Unregistering hotplug notifier."); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) ++ cpuhp_remove_state_nocalls(cpuhp_sepdrv_state); ++#else ++ unregister_cpu_notifier(&cpu_hotplug_notifier); ++#endif ++ SEP_DRV_LOG_INIT_OUT("Hotplug notifier unregistered."); ++} ++#endif ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end) ++ * ++ * @brief gather all the process modules that are present. ++ * ++ * @param at_end - the collection happens at the end of the sampling run ++ * ++ * @return OS_SUCCESS ++ * ++ * Special Notes: ++ * This routine gathers all the process modules that are present ++ * in the system at this time. If at_end is set to be TRUE, then ++ * act as if all the modules are being unloaded. 
++ * ++ */ ++OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end) ++{ ++ int n = 0; ++ struct task_struct *p; ++ ++ SEP_DRV_LOG_TRACE_IN("At_end: %u.", at_end); ++ SEP_DRV_LOG_TRACE("Begin tasks."); ++ ++ if (GET_DRIVER_STATE() == DRV_STATE_TERMINATING) { ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS (TERMINATING)."); ++ return OS_SUCCESS; ++ } ++ ++ if (!local_tasklist_lock) { ++ local_tasklist_lock = ++ (PVOID)(UIOP)UTILITY_Find_Symbol("tasklist_lock"); ++ if (!local_tasklist_lock) { ++ SEP_DRV_LOG_WARNING("Could not find tasklist_lock."); ++ } ++ } ++ ++ // In some machines the tasklist_lock symbol does not exist. ++ // For temporary solution we skip the lock if there is no tasklist_lock ++ if (local_tasklist_lock) { ++#if defined( \ ++ DEFINE_QRWLOCK) // assuming that if DEFINE_QRWLOCK is defined, then tasklist_lock was defined using it ++ qread_lock(local_tasklist_lock); ++#else ++ read_lock(local_tasklist_lock); ++#endif ++ } ++ ++ FOR_EACH_TASK(p) ++ { ++ struct mm_struct *mm; ++ ++ SEP_DRV_LOG_TRACE("Looking at task %d.", n); ++ /* ++ * Call driver notification routine for each module ++ * that is mapped into the process created by the fork ++ */ ++ p->comm[TASK_COMM_LEN - 1] = 0; ++ // making sure there is a trailing 0 ++ mm = get_task_mm(p); ++ ++ if (!mm) { ++ SEP_DRV_LOG_TRACE( ++ "Skipped (p->mm=NULL). P=0x%p, pid=%d, p->comm=%s.", ++ p, p->pid, p->comm); ++ linuxos_Load_Image_Notify_Routine( ++ p->comm, 0, 0, 0, p->pid, ++ (p->parent) ? p->parent->tgid : 0, ++ LOPTS_EXE | LOPTS_1ST_MODREC, ++ linuxos_Get_Exec_Mode(p), ++ 2, // '2' to trigger 'if (load_event)' conditions ++ 1, 0, OS_ID_NATIVE); ++ continue; ++ } ++ ++ UTILITY_down_read_mm(mm); ++ linuxos_Enum_Modules_For_Process(p, mm, at_end ? 
-1 : 0); ++ UTILITY_up_read_mm(mm); ++ mmput(mm); ++ n++; ++ } ++ ++ if (local_tasklist_lock) { ++#if defined(DEFINE_QRWLOCK) ++ qread_unlock(local_tasklist_lock); ++#else ++ read_unlock(local_tasklist_lock); ++#endif ++ } ++ ++ SEP_DRV_LOG_TRACE("Enum_Process_Modules done with %d tasks.", n); ++ ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static int linuxos_Exit_Task_Notify(struct notifier_block * self, ++ * unsigned long val, PVOID data) ++ * @brief this function is called whenever a task exits ++ * ++ * @param self IN - not used ++ * val IN - not used ++ * data IN - this is cast into the task_struct of the exiting task ++ * ++ * @return none ++ * ++ * Special Notes: ++ * this function is called whenever a task exits. It is called right before ++ * the virtual memory areas are freed. We just enumerate through all the modules ++ * of the task and set the unload sample count and the load event flag to 1 to ++ * indicate this is a module unload ++ */ ++static int linuxos_Exit_Task_Notify(struct notifier_block *self, ++ unsigned long val, PVOID data) ++{ ++ struct task_struct *p = (struct task_struct *)data; ++ int status = OS_SUCCESS; ++ U32 cur_driver_state; ++ struct mm_struct *mm; ++ ++ SEP_DRV_LOG_NOTIFICATION_IN("Self: %p, val: %lu, data: %p.", self, val, ++ data); ++ ++ cur_driver_state = GET_DRIVER_STATE(); ++ ++ if (cur_driver_state == DRV_STATE_UNINITIALIZED || ++ cur_driver_state == DRV_STATE_TERMINATING) { ++ SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (driver state)."); ++ return status; ++ } ++ SEP_DRV_LOG_TRACE("Pid = %d tgid = %d.", p->pid, p->tgid); ++ if (p->pid == control_pid) { ++ SEP_DRV_LOG_NOTIFICATION_TRACE( ++ SEP_IN_NOTIFICATION, ++ "The collector task has been terminated via an uncatchable signal."); ++ SEP_DRV_LOG_NOTIFICATION_WARNING(SEP_IN_NOTIFICATION, ++ "Sep was killed!"); ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, 
DRV_STATE_TERMINATING); ++ wake_up_interruptible(&wait_exit); ++ ++ SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u (pid == control_pid).", ++ status); ++ return status; ++ } ++ ++ if (cur_driver_state != DRV_STATE_IDLE && ++ !IS_COLLECTING_STATE(cur_driver_state)) { ++ SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u (stopping collection).", ++ status); ++ return status; ++ } ++ ++ mm = get_task_mm(p); ++ if (!mm) { ++ SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u (!p->mm).", status); ++ return status; ++ } ++ UTILITY_down_read_mm(mm); ++ if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { ++ if (!atomic_add_negative(1, &hook_state)) { ++ linuxos_Enum_Modules_For_Process(p, mm, 1); ++ } ++ atomic_dec(&hook_state); ++ } ++ UTILITY_up_read_mm(mm); ++ mmput(mm); ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION, "Hook_state %d.", ++ atomic_read(&hook_state)); ++ ++ SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u.", status); ++ return status; ++} ++ ++/* ++ * The notifier block. All the static entries have been defined at this point ++ */ ++static struct notifier_block linuxos_exec_unmap_nb = { ++ .notifier_call = linuxos_Exec_Unmap_Notify, ++}; ++ ++static struct notifier_block linuxos_exit_task_nb = { ++ .notifier_call = linuxos_Exit_Task_Notify, ++}; ++ ++#if defined(CONFIG_TRACEPOINTS) ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn void capture_sched_switch(VOID *) ++ * @brief capture current pid/tid on all cpus ++ * ++ * @param p IN - not used ++ * ++ * @return none ++ * ++ * Special Notes: ++ * ++ * None ++ */ ++static void capture_sched_switch(void *p) ++{ ++ U32 this_cpu; ++ BUFFER_DESC bd; ++ SIDEBAND_INFO sideband_info; ++ U64 tsc; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ UTILITY_Read_TSC(&tsc); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ ++ bd = &cpu_sideband_buf[this_cpu]; ++ if (bd == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Bd is NULL!"); ++ return; ++ } ++ ++ sideband_info = (SIDEBAND_INFO)OUTPUT_Reserve_Buffer_Space( ++ bd, sizeof(SIDEBAND_INFO_NODE), FALSE, !SEP_IN_NOTIFICATION, ++ (S32)this_cpu); ++ if (sideband_info == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Sideband_info is NULL!"); ++ return; ++ } ++ ++ SIDEBAND_INFO_pid(sideband_info) = current->tgid; ++ SIDEBAND_INFO_tid(sideband_info) = current->pid; ++ SIDEBAND_INFO_tsc(sideband_info) = tsc; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void record_pebs_process_info(...) 
++ * @brief record all sched switch pid/tid info ++ * ++ * @param ignore IN - not used ++ * from IN ++ * to IN ++ * ++ * @return none ++ * ++ * Special Notes: ++ * ++ * None ++ */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) ++static void record_pebs_process_info(void *ignore, bool preempt, ++ struct task_struct *from, ++ struct task_struct *to) ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) ++static void record_pebs_process_info(void *ignore, struct task_struct *from, ++ struct task_struct *to) ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++static void record_pebs_process_info(struct rq *ignore, ++ struct task_struct *from, ++ struct task_struct *to) ++#endif ++{ ++ U32 this_cpu; ++ BUFFER_DESC bd; ++ SIDEBAND_INFO sideband_info; ++ U64 tsc; ++ U32 cur_driver_state; ++ ++ SEP_DRV_LOG_NOTIFICATION_IN("From: %p, to: %p.", from, to); ++ ++ cur_driver_state = GET_DRIVER_STATE(); ++ ++ if (cur_driver_state != DRV_STATE_IDLE && ++ !IS_COLLECTING_STATE(cur_driver_state)) { ++ SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (driver state)."); ++ return; ++ } ++ ++ UTILITY_Read_TSC(&tsc); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION, ++ "[OUT<%d:%d:%s>-IN<%d:%d:%s>].", ++ from->tgid, from->pid, from->comm, ++ to->tgid, to->pid, to->comm); ++ ++ bd = &cpu_sideband_buf[this_cpu]; ++ if (bd == NULL) { ++ SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (!bd)."); ++ return; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ sideband_info = (SIDEBAND_INFO)OUTPUT_Reserve_Buffer_Space( ++ bd, sizeof(SIDEBAND_INFO_NODE), TRUE, SEP_IN_NOTIFICATION, ++ (S32)this_cpu); ++#else ++ sideband_info = (SIDEBAND_INFO)OUTPUT_Reserve_Buffer_Space( ++ bd, sizeof(SIDEBAND_INFO_NODE), FALSE, SEP_IN_NOTIFICATION, ++ (S32)this_cpu); ++#endif ++ ++ if (sideband_info == NULL) { ++ SEP_DRV_LOG_NOTIFICATION_OUT("Early exit 
(!sideband_info)."); ++ return; ++ } ++ ++ SIDEBAND_INFO_pid(sideband_info) = to->tgid; ++ SIDEBAND_INFO_tid(sideband_info) = to->pid; ++ SIDEBAND_INFO_tsc(sideband_info) = tsc; ++ ++ SEP_DRV_LOG_NOTIFICATION_OUT(""); ++} ++#endif ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void find_sched_switch_tracepoint ++ * @brief find trace poing for sched_switch ++ * ++ * @param tp pass in by system ++ * param pointer of trace point ++ * ++ * @return none ++ * ++ * Special Notes: ++ * ++ * None ++ */ ++static void find_sched_switch_tracepoint(struct tracepoint *tp, VOID *param) ++{ ++ struct tracepoint **ptp = (struct tracepoint **)param; ++ ++ SEP_DRV_LOG_TRACE_IN("Tp: %p, param: %p.", tp, param); ++ ++ if (tp && ptp) { ++ SEP_DRV_LOG_TRACE("trace point name: %s.", tp->name); ++ if (!strcmp(tp->name, "sched_switch")) { ++ SEP_DRV_LOG_TRACE( ++ "Found trace point for sched_switch."); ++ *ptp = tp; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn int install_sched_switch_callback(void) ++ * @brief registers sched_switch callbacks for PEBS sideband ++ * ++ * @param none ++ * ++ * @return 0 success else error number ++ * ++ * Special Notes: ++ * ++ * None ++ */ ++static int install_sched_switch_callback(void) ++{ ++ int err = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ SEP_DRV_LOG_INIT("Installing PEBS linux OS Hooks."); ++ ++#if defined(CONFIG_TRACEPOINTS) ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) ++ if (!tp_sched_switch) { ++ for_each_kernel_tracepoint(&find_sched_switch_tracepoint, ++ &tp_sched_switch); ++ } ++ if (!tp_sched_switch) { ++ err = -EIO; ++ SEP_DRV_LOG_INIT( ++ "Please check Linux is built w/ CONFIG_CONTEXT_SWITCH_TRACER."); ++ } else { ++ err = tracepoint_probe_register( ++ tp_sched_switch, (void *)record_pebs_process_info, ++ NULL); ++ } ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) ++ err = register_trace_sched_switch(record_pebs_process_info, NULL); ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++ err = register_trace_sched_switch(record_pebs_process_info); ++#else ++ SEP_DRV_LOG_INIT( ++ "Please use Linux kernel version >= 2.6.28 to use multiple pebs."); ++ err = -1; ++#endif ++ CONTROL_Invoke_Parallel(capture_sched_switch, NULL); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %d.", err); ++ return err; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID LINUXOS_Install_Hooks(void) ++ * @brief registers the profiling callbacks ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * Special Notes: ++ * ++ * None ++ */ ++VOID LINUXOS_Install_Hooks(void) ++{ ++ int err = 0; ++ int err2 = 0; ++ ++ SEP_DRV_LOG_INIT_IN("Installing Linux OS Hooks."); ++ ++ if (hooks_installed == 1) { ++ SEP_DRV_LOG_INIT_OUT("The OS Hooks are already installed."); ++ return; ++ } ++ ++ linuxos_Map_Kernel_Modules(); ++ ++ err = profile_event_register(MY_UNMAP, &linuxos_exec_unmap_nb); ++ err2 = profile_event_register(MY_TASK, &linuxos_exit_task_nb); ++ if (err || err2) { ++ if (err == OS_NO_SYSCALL) { ++ SEP_DRV_LOG_WARNING( ++ "This kernel does not implement kernel profiling hooks..."); ++ SEP_DRV_LOG_WARNING( ++ "...task termination and image unloads will not be tracked..."); ++ SEP_DRV_LOG_WARNING("...during sampling session!"); ++ } ++ } ++ ++ if (multi_pebs_enabled || sched_switch_enabled) { ++ err = install_sched_switch_callback(); ++ if (err) { ++ SEP_DRV_LOG_WARNING( ++ "Failed to install sched_switch callback for multiple pebs."); ++ } ++ } ++ ++ hooks_installed = 1; ++ atomic_set(&hook_state, HOOK_FREE); ++ ++ SEP_DRV_LOG_INIT_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn int uninstall_sched_switch_callback(void) ++ * @brief unregisters sched_switch callbacks for PEBS sideband ++ * ++ * @param none ++ * ++ * @return 0 success else error number ++ * ++ * Special Notes: ++ * ++ * None ++ */ ++static int uninstall_sched_switch_callback(void) ++{ ++ int err = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ SEP_DRV_LOG_INIT("Uninstalling PEBS Linux OS Hooks."); ++ ++#if defined(CONFIG_TRACEPOINTS) ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) ++ if (!tp_sched_switch) { ++ err = -EIO; ++ SEP_DRV_LOG_INIT( ++ "Please check Linux is built w/ CONFIG_CONTEXT_SWITCH_TRACER."); ++ } else { ++ err = tracepoint_probe_unregister( ++ tp_sched_switch, (void *)record_pebs_process_info, ++ NULL); ++ } ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) ++ err = unregister_trace_sched_switch(record_pebs_process_info, NULL); ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++ err = unregister_trace_sched_switch(record_pebs_process_info); ++#else ++ SEP_DRV_LOG_INIT( ++ "Please use Linux kernel version >= 2.6.28 to use multiple pebs."); ++ err = -1; ++#endif ++ CONTROL_Invoke_Parallel(capture_sched_switch, NULL); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %d.", err); ++ return err; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID LINUXOS_Uninstall_Hooks(void) ++ * @brief unregisters the profiling callbacks ++ * ++ * @param none ++ * ++ * @return ++ * ++ * Special Notes: ++ * ++ * None ++ */ ++VOID LINUXOS_Uninstall_Hooks(void) ++{ ++ int err = 0; ++ int value = 0; ++ int tries = 10; ++ ++ SEP_DRV_LOG_INIT_IN("Uninstalling Linux OS Hooks."); ++ ++ if (hooks_installed == 0) { ++ SEP_DRV_LOG_INIT_OUT("Hooks are not installed!"); ++ return; ++ } ++ ++ hooks_installed = 0; ++ profile_event_unregister(MY_UNMAP, &linuxos_exec_unmap_nb); ++ profile_event_unregister(MY_TASK, &linuxos_exit_task_nb); ++ ++ if (multi_pebs_enabled || sched_switch_enabled) { ++ err = uninstall_sched_switch_callback(); ++ if (err) { ++ SEP_DRV_LOG_WARNING( ++ "Failed to uninstall sched_switch callback for multiple pebs."); ++ } ++ } ++ ++ value = atomic_cmpxchg(&hook_state, HOOK_FREE, HOOK_UNINSTALL); ++ if ((value == HOOK_FREE) || ++ (value == HOOK_UNINSTALL)) { // already in free or uninstall state ++ SEP_DRV_LOG_INIT_OUT( ++ "Uninstall hook done (already in state %d).", value); ++ return; ++ } ++ atomic_add(HOOK_UNINSTALL, &hook_state); ++ while (tries) { ++ SYS_IO_Delay(); ++ SYS_IO_Delay(); ++ value = atomic_read(&hook_state); ++ if (value == HOOK_UNINSTALL) { ++ break; ++ } ++ tries--; ++ } ++ ++ SEP_DRV_LOG_INIT_OUT("Done -- state %d, tries %d.", value, tries); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn DRV_BOOL LINUXOS_Check_KVM_Guest_Process() ++ * ++ * @brief check the presence of kvm guest process ++ * ++ * @param none ++ * ++ * @return TRUE if the kvm guest process is running, FALSE if not ++ */ ++DRV_BOOL LINUXOS_Check_KVM_Guest_Process(void) ++{ ++ struct task_struct *p; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (!local_tasklist_lock) { ++ local_tasklist_lock = ++ (PVOID)(UIOP)UTILITY_Find_Symbol("tasklist_lock"); ++ if (!local_tasklist_lock) { ++ SEP_DRV_LOG_WARNING("Could not find tasklist_lock."); ++ } ++ } ++ ++ // In some machines the tasklist_lock symbol does not exist. ++ // For temporary solution we skip the lock if there is no tasklist_lock ++ if (local_tasklist_lock) { ++#if defined(DEFINE_QRWLOCK) ++ qread_lock(local_tasklist_lock); ++#else ++ read_lock(local_tasklist_lock); ++#endif ++ } ++ ++ FOR_EACH_TASK(p) ++ { ++ // if (p == NULL) { ++ // continue; ++ // } ++ ++ p->comm[TASK_COMM_LEN - 1] = ++ 0; // making sure there is a trailing 0 ++ ++ if (!strncmp(p->comm, "qemu-kvm", 8)) { ++ if (local_tasklist_lock) { ++#if defined(DEFINE_QRWLOCK) ++ qread_unlock(local_tasklist_lock); ++#else ++ read_unlock(local_tasklist_lock); ++#endif ++ } ++ ++ SEP_DRV_LOG_INIT_TRACE_OUT("TRUE (found qemu-kvm!)."); ++ return TRUE; ++ } ++ } ++ ++ if (local_tasklist_lock) { ++#if defined(DEFINE_QRWLOCK) ++ qread_unlock(local_tasklist_lock); ++#else ++ read_unlock(local_tasklist_lock); ++#endif ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("FALSE"); ++ return FALSE; ++} +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +new file mode 100755 +index 000000000000..f13552c20774 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -0,0 +1,7537 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_version.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) ++#include ++#else ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#if defined(CONFIG_HYPERVISOR_GUEST) ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34) ++#include ++#endif ++#endif ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) ++#include ++#endif ++ ++#if defined(CONFIG_XEN_HAVE_VPMU) ++#include ++#include ++#include ++#endif ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_ioctl.h" ++#include "lwpmudrv_struct.h" ++#include "inc/ecb_iterators.h" ++#include "inc/unc_common.h" ++ ++#if defined(BUILD_GFX) ++#include "gfx.h" ++#endif ++ ++#if defined(BUILD_CHIPSET) ++#include "lwpmudrv_chipset.h" ++#endif ++#include "pci.h" ++ ++#include "apic.h" ++#include "cpumon.h" ++#include "lwpmudrv.h" ++#include "utility.h" ++#include "control.h" ++#include "core2.h" ++#include "pmi.h" ++ ++#include "output.h" ++#include "linuxos.h" ++#include "sys_info.h" ++#include "eventmux.h" ++#include "pebs.h" ++ ++MODULE_AUTHOR("Copyright(C) 2007-2018 Intel Corporation"); ++MODULE_VERSION(SEP_NAME "_" SEP_VERSION_STR); ++MODULE_LICENSE("Dual BSD/GPL"); ++ ++static struct task_struct *abnormal_handler; ++#if defined(DRV_SEP_ACRN_ON) ++static struct task_struct *acrn_buffer_handler[MAX_NR_PCPUS] = { NULL }; ++#endif ++ ++typedef struct LWPMU_DEV_NODE_S LWPMU_DEV_NODE; ++typedef LWPMU_DEV_NODE * LWPMU_DEV; ++ ++struct LWPMU_DEV_NODE_S { ++ long buffer; ++ struct semaphore sem; ++ struct cdev cdev; ++}; ++ ++#define LWPMU_DEV_buffer(dev) ((dev)->buffer) ++#define LWPMU_DEV_sem(dev) ((dev)->sem) ++#define LWPMU_DEV_cdev(dev) ((dev)->cdev) ++ ++/* Global variables 
of the driver */ ++static SEP_VERSION_NODE drv_version; ++U64 *read_counter_info; ++U64 *prev_counter_data; ++static U64 prev_counter_size; ++VOID **desc_data; ++U64 total_ram; ++U32 output_buffer_size = OUTPUT_LARGE_BUFFER; ++U32 saved_buffer_size; ++static S32 desc_count; ++uid_t uid; ++DRV_CONFIG drv_cfg; ++static DEV_CONFIG cur_pcfg; ++volatile pid_t control_pid; ++U64 *interrupt_counts; ++static LWPMU_DEV lwpmu_control; ++static LWPMU_DEV lwmod_control; ++static LWPMU_DEV lwsamp_control; ++static LWPMU_DEV lwsampunc_control; ++static LWPMU_DEV lwsideband_control; ++EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++ ++/* needed for multiple devices (core/uncore) */ ++U32 num_devices; ++static U32 num_core_devs; ++U32 cur_device; ++LWPMU_DEVICE devices; ++static U32 uncore_em_factor; ++static unsigned long unc_timer_interval; ++static struct timer_list *unc_read_timer; ++static S32 max_groups_unc; ++DRV_BOOL multi_pebs_enabled = FALSE; ++DRV_BOOL unc_buf_init = FALSE; ++DRV_BOOL NMI_mode = TRUE; ++DRV_BOOL KVM_guest_mode = FALSE; ++DRV_SETUP_INFO_NODE req_drv_setup_info; ++ ++/* needed for target agent support */ ++U32 osid = OS_ID_NATIVE; ++DRV_BOOL sched_switch_enabled = FALSE; ++ ++#if defined(DRV_SEP_ACRN_ON) ++struct profiling_vm_info_list *vm_info_list; ++shared_buf_t **samp_buf_per_cpu; ++#endif ++ ++#define UNCORE_EM_GROUP_SWAP_FACTOR 100 ++#define PMU_DEVICES 2 // pmu, mod ++ ++extern U32 *cpu_built_sysinfo; ++ ++#define DRV_DEVICE_DELIMITER "!" 
++ ++#if defined(DRV_USE_UNLOCKED_IOCTL) ++static struct mutex ioctl_lock; ++#endif ++ ++#if defined(BUILD_CHIPSET) ++CHIPSET_CONFIG pma; ++CS_DISPATCH cs_dispatch; ++#endif ++static S8 *cpu_mask_bits; ++ ++/* ++ * Global data: Buffer control structure ++ */ ++BUFFER_DESC cpu_buf; ++BUFFER_DESC unc_buf; ++BUFFER_DESC module_buf; ++BUFFER_DESC cpu_sideband_buf; ++ ++static dev_t lwpmu_DevNum; /* the major and minor parts for SEP3 base */ ++static dev_t lwsamp_DevNum; /* the major and minor parts for SEP3 percpu */ ++static dev_t lwsampunc_DevNum; ++/* the major and minor parts for SEP3 per package */ ++static dev_t lwsideband_DevNum; ++ ++static struct class *pmu_class; ++ ++//extern volatile int config_done; ++ ++CPU_STATE pcb; ++static size_t pcb_size; ++U32 *core_to_package_map; ++U32 *core_to_phys_core_map; ++U32 *core_to_thread_map; ++U32 *core_to_dev_map; ++U32 *threads_per_core; ++U32 num_packages; ++U64 *pmu_state; ++U64 *cpu_tsc; ++static U64 *prev_cpu_tsc; ++static U64 *diff_cpu_tsc; ++U64 *restore_bl_bypass; ++U32 **restore_ha_direct2core; ++U32 **restore_qpi_direct2core; ++U32 *occupied_core_ids; ++UNCORE_TOPOLOGY_INFO_NODE uncore_topology; ++PLATFORM_TOPOLOGY_PROG_NODE platform_topology_prog_node; ++static PLATFORM_TOPOLOGY_PROG_NODE req_platform_topology_prog_node; ++ ++#if !defined(DRV_SEP_ACRN_ON) ++static U8 *prev_set_CR4; ++#endif ++ ++wait_queue_head_t wait_exit; ++ ++// extern OS_STATUS SOCPERF_Switch_Group3 (void); ++ ++#if !defined(DRV_USE_UNLOCKED_IOCTL) ++#define MUTEX_INIT(lock) ++#define MUTEX_LOCK(lock) ++#define MUTEX_UNLOCK(lock) ++#else ++#define MUTEX_INIT(lock) mutex_init(&(lock)); ++#define MUTEX_LOCK(lock) mutex_lock(&(lock)) ++#define MUTEX_UNLOCK(lock) mutex_unlock(&(lock)) ++#endif ++ ++#if defined(CONFIG_XEN_HAVE_VPMU) ++typedef struct xen_pmu_params xen_pmu_params_t; ++typedef struct xen_pmu_data xen_pmu_data_t; ++ ++static DEFINE_PER_CPU(xen_pmu_data_t *, sep_xenpmu_shared); ++#endif ++ ++/* 
------------------------------------------------------------------------- */ ++/*! ++ * @fn void lwpmudrv_PWR_Info(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Make a copy of the Power control information that has been passed in. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_PWR_Info(IOCTL_ARGS arg) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ if (DEV_CONFIG_power_capture(cur_pcfg) == FALSE) { ++ SEP_DRV_LOG_WARNING_FLOW_OUT( ++ "'Success' (Power capture is disabled!)."); ++ return OS_SUCCESS; ++ } ++ ++ // make sure size of incoming arg is correct ++ if ((arg->len_usr_to_drv != sizeof(PWR_NODE)) || ++ (arg->buf_usr_to_drv == NULL)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "OS_FAULT (PWR capture has not been configured!)."); ++ return OS_FAULT; ++ } ++ ++ // ++ // First things first: Make a copy of the data for global use. ++ // ++ LWPMU_DEVICE_pwr(&devices[cur_device]) = ++ CONTROL_Allocate_Memory((int)arg->len_usr_to_drv); ++ if (!LWPMU_DEVICE_pwr(&devices[cur_device])) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure!"); ++ return OS_NO_MEM; ++ } ++ ++ if (copy_from_user(LWPMU_DEVICE_pwr(&devices[cur_device]), ++ (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ++ * @fn void lwpmudrv_Allocate_Restore_Buffer ++ * ++ * @param ++ * ++ * @return OS_STATUS ++ * ++ * @brief allocate buffer space to save/restore the data (for JKT, QPILL and HA register) before collection ++ */ ++static OS_STATUS lwpmudrv_Allocate_Restore_Buffer(void) ++{ ++ int i = 0; ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (!restore_ha_direct2core) { ++ restore_ha_direct2core = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32 *)); ++ if (!restore_ha_direct2core) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Memory allocation failure for 
restore_ha_direct2core!"); ++ return OS_NO_MEM; ++ } ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ restore_ha_direct2core[i] = CONTROL_Allocate_Memory( ++ MAX_BUSNO * sizeof(U32)); ++ } ++ } ++ if (!restore_qpi_direct2core) { ++ restore_qpi_direct2core = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32 *)); ++ if (!restore_qpi_direct2core) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Memory allocation failure for restore_qpi_direct2core!"); ++ return OS_NO_MEM; ++ } ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ restore_qpi_direct2core[i] = CONTROL_Allocate_Memory( ++ 2 * MAX_BUSNO * sizeof(U32)); ++ } ++ } ++ if (!restore_bl_bypass) { ++ restore_bl_bypass = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); ++ if (!restore_bl_bypass) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Memory allocation failure for restore_bl_bypass!"); ++ return OS_NO_MEM; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ++ * @fn void lwpmudrv_Allocate_Uncore_Buffer ++ * ++ * @param ++ * ++ * @return OS_STATUS ++ * ++ * @brief allocate buffer space for writing/reading uncore data ++ */ ++static OS_STATUS lwpmudrv_Allocate_Uncore_Buffer(void) ++{ ++ U32 i, j, k, l; ++ U32 max_entries = 0; ++ U32 num_entries; ++ ECB ecb; ++ ++ SEP_DRV_LOG_TRACE_IN( ++ ""); // this function is not checking memory allocations properly ++ ++ for (i = num_core_devs; i < num_devices; i++) { ++ if (!LWPMU_DEVICE_pcfg(&devices[i])) { ++ continue; ++ } ++ LWPMU_DEVICE_acc_value(&devices[i]) = ++ CONTROL_Allocate_Memory(num_packages * sizeof(U64 **)); ++ LWPMU_DEVICE_prev_value(&devices[i]) = ++ CONTROL_Allocate_Memory(num_packages * sizeof(U64 *)); ++ for (j = 0; j < num_packages; j++) { ++ // Allocate memory and zero out accumulator array (one per group) ++ LWPMU_DEVICE_acc_value(&devices[i])[j] = ++ CONTROL_Allocate_Memory( ++ LWPMU_DEVICE_em_groups_count( ++ &devices[i]) * ++ sizeof(U64 *)); ++ 
for (k = 0; ++ k < LWPMU_DEVICE_em_groups_count(&devices[i]); ++ k++) { ++ ecb = LWPMU_DEVICE_PMU_register_data( ++ &devices[i])[k]; ++ num_entries = ++ ECB_num_events(ecb) * ++ LWPMU_DEVICE_num_units(&devices[i]); ++ LWPMU_DEVICE_acc_value(&devices[i])[j][k] = ++ CONTROL_Allocate_Memory(num_entries * ++ sizeof(U64)); ++ for (l = 0; l < num_entries; l++) { ++ LWPMU_DEVICE_acc_value( ++ &devices[i])[j][k][l] = 0LL; ++ } ++ if (max_entries < num_entries) { ++ max_entries = num_entries; ++ } ++ } ++ // Allocate memory and zero out prev_value array (one across groups) ++ LWPMU_DEVICE_prev_value(&devices[i])[j] = ++ CONTROL_Allocate_Memory(max_entries * ++ sizeof(U64)); ++ for (k = 0; k < max_entries; k++) { ++ LWPMU_DEVICE_prev_value(&devices[i])[j][k] = ++ 0LL; ++ } ++ } ++ max_entries = 0; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ++ * @fn void lwpmudrv_Free_Uncore_Buffer ++ * ++ * @param ++ * ++ * @return OS_STATUS ++ * ++ * @brief Free uncore data buffers ++ */ ++static OS_STATUS lwpmudrv_Free_Uncore_Buffer(U32 i) ++{ ++ U32 j, k; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (LWPMU_DEVICE_prev_value(&devices[i])) { ++ for (j = 0; j < num_packages; j++) { ++ LWPMU_DEVICE_prev_value(&devices[i])[j] = ++ CONTROL_Free_Memory(LWPMU_DEVICE_prev_value( ++ &devices[i])[j]); ++ } ++ LWPMU_DEVICE_prev_value(&devices[i]) = CONTROL_Free_Memory( ++ LWPMU_DEVICE_prev_value(&devices[i])); ++ } ++ if (LWPMU_DEVICE_acc_value(&devices[i])) { ++ for (j = 0; j < num_packages; j++) { ++ if (LWPMU_DEVICE_acc_value(&devices[i])[j]) { ++ for (k = 0; k < LWPMU_DEVICE_em_groups_count( ++ &devices[i]); ++ k++) { ++ LWPMU_DEVICE_acc_value( ++ &devices[i])[j][k] = ++ CONTROL_Free_Memory( ++ LWPMU_DEVICE_acc_value( ++ &devices[i])[j] ++ [k]); ++ } ++ LWPMU_DEVICE_acc_value(&devices[i])[j] = ++ CONTROL_Free_Memory( ++ LWPMU_DEVICE_acc_value( ++ &devices[i])[j]); ++ } ++ } ++ LWPMU_DEVICE_acc_value(&devices[i]) = CONTROL_Free_Memory( ++ 
LWPMU_DEVICE_acc_value(&devices[i])); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ++ * @fn void lwpmudrv_Free_Restore_Buffer ++ * ++ * @param ++ * ++ * @return OS_STATUS ++ * ++ * @brief allocate buffer space to save/restore the data (for JKT, QPILL and HA register) before collection ++ */ ++static OS_STATUS lwpmudrv_Free_Restore_Buffer(void) ++{ ++ U32 i = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (restore_ha_direct2core) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ restore_ha_direct2core[i] = ++ CONTROL_Free_Memory(restore_ha_direct2core[i]); ++ } ++ restore_ha_direct2core = ++ CONTROL_Free_Memory(restore_ha_direct2core); ++ } ++ if (restore_qpi_direct2core) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ restore_qpi_direct2core[i] = ++ CONTROL_Free_Memory(restore_qpi_direct2core[i]); ++ } ++ restore_qpi_direct2core = ++ CONTROL_Free_Memory(restore_qpi_direct2core); ++ } ++ if (restore_bl_bypass) { ++ restore_bl_bypass = CONTROL_Free_Memory(restore_bl_bypass); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Initialize_State(void) ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Allocates the memory needed at load time. Initializes all the ++ * @brief necessary state variables with the default values. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Initialize_State(void) ++{ ++ S32 i, max_cpu_id = 0; ++ ++ SEP_DRV_LOG_INIT_IN(""); ++ ++ for_each_possible_cpu (i) { ++ if (cpu_present(i)) { ++ if (i > max_cpu_id) { ++ max_cpu_id = i; ++ } ++ } ++ } ++ max_cpu_id++; ++ ++ /* ++ * Machine Initializations ++ * Abstract this information away into a separate entry point ++ * ++ * Question: Should we allow for the use of Hot-cpu ++ * add/subtract functionality while the driver is executing? 
++ */ ++ if (max_cpu_id > num_present_cpus()) { ++ GLOBAL_STATE_num_cpus(driver_state) = max_cpu_id; ++ } else { ++ GLOBAL_STATE_num_cpus(driver_state) = num_present_cpus(); ++ } ++ GLOBAL_STATE_active_cpus(driver_state) = num_online_cpus(); ++ GLOBAL_STATE_cpu_count(driver_state) = 0; ++ GLOBAL_STATE_dpc_count(driver_state) = 0; ++ GLOBAL_STATE_num_em_groups(driver_state) = 0; ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_UNINITIALIZED); ++ ++ SEP_DRV_LOG_INIT_OUT("Success: num_cpus=%d, active_cpus=%d.", ++ GLOBAL_STATE_num_cpus(driver_state), ++ GLOBAL_STATE_active_cpus(driver_state)); ++ return OS_SUCCESS; ++} ++ ++#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static void lwpmudrv_Fill_TSC_Info (PVOID param) ++ * ++ * @param param - pointer the buffer to fill in. ++ * ++ * @return none ++ * ++ * @brief Read the TSC and write into the correct array slot. ++ * ++ * Special Notes ++ */ ++atomic_t read_now; ++static wait_queue_head_t read_tsc_now; ++static VOID lwpmudrv_Fill_TSC_Info(PVOID param) ++{ ++ U32 this_cpu; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ // ++ // Wait until all CPU's are ready to proceed ++ // This will serve as a synchronization point to compute tsc skews. 
++ // ++ ++ if (atomic_read(&read_now) >= 1) { ++ if (atomic_dec_and_test(&read_now) == FALSE) { ++ wait_event_interruptible(read_tsc_now, ++ (atomic_read(&read_now) >= 1)); ++ } ++ } else { ++ wake_up_interruptible_all(&read_tsc_now); ++ } ++ UTILITY_Read_TSC(&cpu_tsc[this_cpu]); ++ SEP_DRV_LOG_TRACE("This cpu %d --- tsc --- 0x%llx.", this_cpu, ++ cpu_tsc[this_cpu]); ++ ++ SEP_DRV_LOG_TRACE_OUT("Success"); ++} ++#endif ++ ++/********************************************************************* ++ * Internal Driver functions ++ * Should be called only from the lwpmudrv_DeviceControl routine ++ *********************************************************************/ ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static void lwpmudrv_Dump_Tracer(const char *) ++ * ++ * @param Name of the tracer ++ * ++ * @return void ++ * ++ * @brief Function that handles the generation of markers into the ftrace stream ++ * ++ * Special Notes ++ */ ++static void lwpmudrv_Dump_Tracer(const char *name, U64 tsc) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ if (tsc == 0) { ++ preempt_disable(); ++ UTILITY_Read_TSC(&tsc); ++ tsc -= TSC_SKEW(CONTROL_THIS_CPU()); ++ preempt_enable(); ++ } ++ SEP_DRV_LOG_TRACE_OUT("Success"); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMU_IOCTL_VERSION call. ++ * @brief Returns the version number of the kernel mode sampling. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg) ++{ ++ OS_STATUS status; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ // Check if enough space is provided for collecting the data ++ if ((arg->len_drv_to_usr != sizeof(U32)) || ++ (arg->buf_drv_to_usr == NULL)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_FAULT; ++ } ++ ++ status = put_user(SEP_VERSION_NODE_sep_version(&drv_version), ++ (U32 __user *)arg->buf_drv_to_usr); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Reserve(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief ++ * @brief Local function that handles the LWPMU_IOCTL_RESERVE call. ++ * @brief Sets the state to RESERVED if possible. Returns BUSY if unable ++ * @brief to reserve the PMU. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Reserve(IOCTL_ARGS arg) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ // Check if enough space is provided for collecting the data ++ if ((arg->len_drv_to_usr != sizeof(S32)) || ++ (arg->buf_drv_to_usr == NULL)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_FAULT; ++ } ++ ++ status = put_user(!CHANGE_DRIVER_STATE(STATE_BIT_UNINITIALIZED, ++ DRV_STATE_RESERVED), ++ (int __user*)arg->buf_drv_to_usr); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++#if !defined(DRV_SEP_ACRN_ON) ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Finish_Op(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Finalize PMU after collection ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Finish_Op(PVOID param) ++{ ++ U32 this_cpu = CONTROL_THIS_CPU(); ++ U32 dev_idx = core_to_dev_map[this_cpu]; ++ DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (dispatch != NULL && dispatch->fini != NULL) { ++ dispatch->fini(&dev_idx); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static VOID lwpmudrv_Clean_Up(DRV_BOOL) ++ * ++ * @param DRV_BOOL finish - Flag to call finish ++ * ++ * @return VOID ++ * ++ * @brief Cleans up the memory allocation. ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Clean_Up(DRV_BOOL finish) ++{ ++ U32 i; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { ++ drv_cfg = CONTROL_Free_Memory(drv_cfg); ++ goto signal_end; ++ } ++ ++ if (devices) { ++ U32 id; ++ U32 num_groups = 0; ++ EVENT_CONFIG ec; ++ DISPATCH dispatch_unc = NULL; ++ ++ for (id = 0; id < num_devices; id++) { ++ if (LWPMU_DEVICE_pcfg(&devices[id])) { ++ if (LWPMU_DEVICE_device_type(&devices[id]) == ++ DEVICE_INFO_UNCORE) { ++ dispatch_unc = LWPMU_DEVICE_dispatch( ++ &devices[id]); ++ if (dispatch_unc && ++ dispatch_unc->fini) { ++ SEP_DRV_LOG_TRACE( ++ "LWP: calling UNC Init."); ++ dispatch_unc->fini( ++ (PVOID *)&id); ++ } ++ lwpmudrv_Free_Uncore_Buffer(id); ++ } else if (finish) { ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel( ++ lwpmudrv_Finish_Op, NULL); ++#endif ++ } ++ } ++ ++ if (LWPMU_DEVICE_PMU_register_data(&devices[id])) { ++ ec = LWPMU_DEVICE_ec(&devices[id]); ++ if (LWPMU_DEVICE_device_type(&devices[id]) == ++ DEVICE_INFO_CORE) { ++ num_groups = ++ EVENT_CONFIG_num_groups(ec); ++ } else { ++ num_groups = ++ EVENT_CONFIG_num_groups_unc(ec); ++ } ++ for (i = 0; i 
< num_groups; i++) { ++ LWPMU_DEVICE_PMU_register_data( ++ &devices[id])[i] = ++ CONTROL_Free_Memory( ++ LWPMU_DEVICE_PMU_register_data( ++ &devices[id])[i]); ++ } ++ LWPMU_DEVICE_PMU_register_data(&devices[id]) = ++ CONTROL_Free_Memory( ++ LWPMU_DEVICE_PMU_register_data( ++ &devices[id])); ++ } ++ LWPMU_DEVICE_pcfg(&devices[id]) = CONTROL_Free_Memory( ++ LWPMU_DEVICE_pcfg(&devices[id])); ++ LWPMU_DEVICE_ec(&devices[id]) = CONTROL_Free_Memory( ++ LWPMU_DEVICE_ec(&devices[id])); ++ if (LWPMU_DEVICE_lbr(&devices[id])) { ++ LWPMU_DEVICE_lbr(&devices[id]) = ++ CONTROL_Free_Memory( ++ LWPMU_DEVICE_lbr(&devices[id])); ++ } ++ if (LWPMU_DEVICE_pwr(&devices[id])) { ++ LWPMU_DEVICE_pwr(&devices[id]) = ++ CONTROL_Free_Memory( ++ LWPMU_DEVICE_pwr(&devices[id])); ++ } ++ if (LWPMU_DEVICE_cur_group(&devices[id])) { ++ LWPMU_DEVICE_cur_group(&devices[id]) = ++ CONTROL_Free_Memory( ++ LWPMU_DEVICE_cur_group( ++ &devices[id])); ++ } ++ } ++ devices = CONTROL_Free_Memory(devices); ++ } ++ ++ if (desc_data) { ++ for (i = 0; i < GLOBAL_STATE_num_descriptors(driver_state); ++ i++) { ++ desc_data[i] = CONTROL_Free_Memory(desc_data[i]); ++ } ++ desc_data = CONTROL_Free_Memory(desc_data); ++ } ++ ++ if (restore_bl_bypass) { ++ restore_bl_bypass = CONTROL_Free_Memory(restore_bl_bypass); ++ } ++ ++ if (restore_qpi_direct2core) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ restore_qpi_direct2core[i] = ++ CONTROL_Free_Memory(restore_qpi_direct2core[i]); ++ } ++ restore_qpi_direct2core = ++ CONTROL_Free_Memory(restore_qpi_direct2core); ++ } ++ ++ if (restore_ha_direct2core) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ restore_ha_direct2core[i] = ++ CONTROL_Free_Memory(restore_ha_direct2core[i]); ++ } ++ restore_ha_direct2core = ++ CONTROL_Free_Memory(restore_ha_direct2core); ++ } ++ ++ drv_cfg = CONTROL_Free_Memory(drv_cfg); ++ pmu_state = CONTROL_Free_Memory(pmu_state); ++ cpu_mask_bits = CONTROL_Free_Memory(cpu_mask_bits); ++ core_to_dev_map = 
CONTROL_Free_Memory(core_to_dev_map); ++#if defined(BUILD_CHIPSET) ++ pma = CONTROL_Free_Memory(pma); ++#endif ++ ++signal_end: ++ GLOBAL_STATE_num_em_groups(driver_state) = 0; ++ GLOBAL_STATE_num_descriptors(driver_state) = 0; ++ num_devices = 0; ++ num_core_devs = 0; ++ max_groups_unc = 0; ++ control_pid = 0; ++ unc_buf_init = FALSE; ++ ++ OUTPUT_Cleanup(); ++ memset(pcb, 0, pcb_size); ++ ++ SEP_DRV_LOG_FLOW_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static NTSTATUS lwpmudrv_Initialize_Driver (PVOID buf_usr_to_drv, size_t len_usr_to_drv) ++ * ++ * @param buf_usr_to_drv - pointer to the input buffer ++ * @param len_usr_to_drv - size of the input buffer ++ * ++ * @return NTSTATUS ++ * ++ * @brief Local function that handles the LWPMU_IOCTL_INIT_DRIVER call. ++ * @brief Sets up the interrupt handler. ++ * @brief Set up the output buffers/files needed to make the driver ++ * @brief operational. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Initialize_Driver(PVOID buf_usr_to_drv, ++ size_t len_usr_to_drv) ++{ ++ S32 cpu_num; ++ int status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_FAULT; ++ } ++ ++ if (!CHANGE_DRIVER_STATE(STATE_BIT_RESERVED, DRV_STATE_IDLE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state!"); ++ return OS_FAULT; ++ } ++ ++ interrupt_counts = NULL; ++ pmu_state = NULL; ++ ++ drv_cfg = CONTROL_Allocate_Memory(len_usr_to_drv); ++ if (!drv_cfg) { ++ status = OS_NO_MEM; ++ SEP_DRV_LOG_ERROR("Memory allocation failure for drv_cfg!"); ++ goto clean_return; ++ } ++ ++ if (copy_from_user(drv_cfg, (void __user *)buf_usr_to_drv, ++ len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR("Memory copy failure for drv_cfg!"); ++ status = OS_FAULT; ++ goto clean_return; ++ } ++ ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++#if (defined(DRV_EM64T)) ++ if (output_buffer_size == 
OUTPUT_LARGE_BUFFER) { ++ output_buffer_size = OUTPUT_CP_BUFFER; ++ } ++#endif ++ interrupt_counts = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * ++ DRV_CONFIG_num_events(drv_cfg) * sizeof(U64)); ++ if (interrupt_counts == NULL) { ++ SEP_DRV_LOG_ERROR( ++ "Memory allocation failure for interrupt_counts!"); ++ status = OS_NO_MEM; ++ goto clean_return; ++ } ++ } else if (output_buffer_size == OUTPUT_CP_BUFFER) { ++ output_buffer_size = OUTPUT_LARGE_BUFFER; ++ } ++ ++ if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { ++ SEP_DRV_LOG_FLOW_OUT("Success, using PCL."); ++ return OS_SUCCESS; ++ } ++ ++ pmu_state = CONTROL_Allocate_KMemory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64) * 3); ++ if (!pmu_state) { ++ SEP_DRV_LOG_ERROR("Memory allocation failure for pmu_state!"); ++ status = OS_NO_MEM; ++ goto clean_return; ++ } ++ uncore_em_factor = 0; ++ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_num++) { ++ CPU_STATE_accept_interrupt(&pcb[cpu_num]) = 1; ++ CPU_STATE_initial_mask(&pcb[cpu_num]) = 1; ++ CPU_STATE_group_swap(&pcb[cpu_num]) = 1; ++ CPU_STATE_reset_mask(&pcb[cpu_num]) = 0; ++ CPU_STATE_num_samples(&pcb[cpu_num]) = 0; ++ CPU_STATE_last_p_state_valid(&pcb[cpu_num]) = FALSE; ++#if defined(DRV_CPU_HOTPLUG) ++ CPU_STATE_offlined(&pcb[cpu_num]) = TRUE; ++#else ++ CPU_STATE_offlined(&pcb[cpu_num]) = FALSE; ++#endif ++ CPU_STATE_nmi_handled(&pcb[cpu_num]) = 0; ++ } ++ ++ DRV_CONFIG_seed_name(drv_cfg) = NULL; ++ DRV_CONFIG_seed_name_len(drv_cfg) = 0; ++ ++ SEP_DRV_LOG_TRACE("Config : size = %d.", DRV_CONFIG_size(drv_cfg)); ++ SEP_DRV_LOG_TRACE("Config : counting_mode = %d.", ++ DRV_CONFIG_counting_mode(drv_cfg)); ++ ++ control_pid = current->pid; ++ SEP_DRV_LOG_TRACE("Control PID = %d.", control_pid); ++ ++ if (core_to_dev_map == NULL) { ++ core_to_dev_map = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); ++ } ++ ++ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ if (cpu_buf == NULL) { 
++ cpu_buf = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(BUFFER_DESC_NODE)); ++ if (!cpu_buf) { ++ SEP_DRV_LOG_ERROR( ++ "Memory allocation failure for cpu_buf!"); ++ status = OS_NO_MEM; ++ goto clean_return; ++ } ++ } ++ ++ if (module_buf == NULL) { ++ module_buf = CONTROL_Allocate_Memory( ++ sizeof(BUFFER_DESC_NODE)); ++ if (!module_buf) { ++ status = OS_NO_MEM; ++ goto clean_return; ++ } ++ } ++ ++#if defined(CONFIG_TRACEPOINTS) ++ multi_pebs_enabled = (DRV_CONFIG_multi_pebs_enabled(drv_cfg) && ++ (DRV_SETUP_INFO_page_table_isolation( ++ &req_drv_setup_info) == ++ DRV_SETUP_INFO_PTI_DISABLED)); ++#endif ++ if (multi_pebs_enabled || sched_switch_enabled) { ++ if (cpu_sideband_buf == NULL) { ++ cpu_sideband_buf = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(BUFFER_DESC_NODE)); ++ if (!cpu_sideband_buf) { ++ SEP_DRV_LOG_ERROR( ++ "Memory allocation failure for cpu_sideband_buf!"); ++ status = OS_NO_MEM; ++ goto clean_return; ++ } ++ } ++ } ++ ++#if defined(DRV_SEP_ACRN_ON) ++ if (samp_buf_per_cpu == NULL) { ++ samp_buf_per_cpu = ++ (shared_buf_t **)CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(shared_buf_t *)); ++ if (!samp_buf_per_cpu) { ++ SEP_PRINT_ERROR( ++ "lwpmudrv_Initialize: unable to allocate memory for samp_buf_per_cpu\n"); ++ goto clean_return; ++ } ++ } ++ ++ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_num++) { ++ samp_buf_per_cpu[cpu_num] = sbuf_allocate( ++ TRACE_ELEMENT_NUM, TRACE_ELEMENT_SIZE); ++ if (!samp_buf_per_cpu[cpu_num]) { ++ pr_err("Failed to allocate sampbuf on cpu%d\n", ++ cpu_num); ++ goto clean_return; ++ } ++ ++ status = sbuf_share_setup(cpu_num, ACRN_SEP, ++ samp_buf_per_cpu[cpu_num]); ++ if (status < 0) { ++ status = OS_FAULT; ++ pr_err("Failed to set up sampbuf on cpu%d\n", ++ cpu_num); ++ goto clean_return; ++ } ++ } ++#endif ++ ++ /* ++ * Allocate the output and control buffers for each CPU in the system ++ * 
Allocate and set up the temp output files for each CPU in the system ++ * Allocate and set up the temp outout file for detailing the Modules in the system ++ */ ++ status = OUTPUT_Initialize(); ++ if (status != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR("OUTPUT_Initialize failed!"); ++ goto clean_return; ++ } ++ ++ /* ++ * Program the APIC and set up the interrupt handler ++ */ ++#if !defined(DRV_SEP_ACRN_ON) ++ CPUMON_Install_Cpuhooks(); ++#endif ++ SEP_DRV_LOG_TRACE("Finished Installing cpu hooks."); ++#if defined(DRV_CPU_HOTPLUG) ++ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_num++) { ++ if (cpu_built_sysinfo && ++ cpu_built_sysinfo[cpu_num] == 0) { ++ cpu_tsc[cpu_num] = cpu_tsc[0]; ++ CONTROL_Invoke_Cpu(cpu_num, SYS_INFO_Build_Cpu, ++ NULL); ++ } ++ } ++#endif ++ ++#if defined(DRV_EM64T) ++ SYS_Get_GDT_Base((PVOID *)&gdt_desc); ++#endif ++ SEP_DRV_LOG_TRACE("About to install module notification."); ++ LINUXOS_Install_Hooks(); ++ } ++ ++clean_return: ++ if (status != OS_SUCCESS) { ++ drv_cfg = CONTROL_Free_Memory(drv_cfg); ++ interrupt_counts = CONTROL_Free_Memory(interrupt_counts); ++ pmu_state = CONTROL_Free_Memory(pmu_state); ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); ++ return status; ++} ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Initialize (PVOID buf_usr_to_drv, size_t len_usr_to_drv) ++ * ++ * @param buf_usr_to_drv - pointer to the input buffer ++ * @param len_usr_to_drv - size of the input buffer ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMU_IOCTL_INIT call. ++ * @brief Sets up the interrupt handler. ++ * @brief Set up the output buffers/files needed to make the driver ++ * @brief operational. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Initialize(PVOID buf_usr_to_drv, ++ size_t len_usr_to_drv) ++{ ++ int status = OS_SUCCESS; ++ S32 cpu_num; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_FAULT; ++ } ++ ++ if (cur_device >= num_devices) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "No more devices to allocate! Wrong lwpmudrv_Init_Num_Devices."); ++ return OS_FAULT; ++ } ++ ++ /* ++ * Program State Initializations ++ */ ++ LWPMU_DEVICE_pcfg(&devices[cur_device]) = ++ CONTROL_Allocate_Memory(len_usr_to_drv); ++ if (!LWPMU_DEVICE_pcfg(&devices[cur_device])) { ++ status = OS_NO_MEM; ++ SEP_DRV_LOG_ERROR("Memory allocation failure for pcfg!"); ++ goto clean_return; ++ } ++ ++ if (copy_from_user(LWPMU_DEVICE_pcfg(&devices[cur_device]), ++ (void __user *)buf_usr_to_drv, len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR("Memory copy failure for pcfg!"); ++ status = OS_FAULT; ++ goto clean_return; ++ } ++ cur_pcfg = (DEV_CONFIG)LWPMU_DEVICE_pcfg(&devices[cur_device]); ++ ++ if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { ++ SEP_DRV_LOG_FLOW_OUT("Success, using PCL."); ++ return OS_SUCCESS; ++ } ++ ++ LWPMU_DEVICE_dispatch(&devices[cur_device]) = ++ UTILITY_Configure_CPU(DEV_CONFIG_dispatch_id(cur_pcfg)); ++ if (LWPMU_DEVICE_dispatch(&devices[cur_device]) == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Dispatch pointer is NULL!"); ++ status = OS_INVALID; ++ goto clean_return; ++ } ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ status = PEBS_Initialize(cur_device); ++ if (status != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR("PEBS_Initialize failed!"); ++ goto clean_return; ++ } ++ } ++#endif ++ ++ /* Create core to device ID map */ ++ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_num++) { ++ if (CPU_STATE_core_type(&pcb[cpu_num]) == ++ DEV_CONFIG_core_type(cur_pcfg)) { ++ 
core_to_dev_map[cpu_num] = cur_device; ++ } ++ } ++ num_core_devs++; //New core device ++ LWPMU_DEVICE_device_type(&devices[cur_device]) = DEVICE_INFO_CORE; ++ ++clean_return: ++ if (status != OS_SUCCESS) { ++ // release all memory allocated in this function: ++ lwpmudrv_Clean_Up(FALSE); ++#if !defined(DRV_SEP_ACRN_ON) ++ PEBS_Destroy(); ++#endif ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Initialize_Num_Devices(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief ++ * @brief Local function that handles the LWPMU_IOCTL_INIT_NUM_DEV call. ++ * @brief Init # uncore devices. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Initialize_Num_Devices(IOCTL_ARGS arg) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ // Check if enough space is provided for collecting the data ++ if ((arg->len_usr_to_drv != sizeof(U32)) || ++ (arg->buf_usr_to_drv == NULL)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_FAULT; ++ } ++ ++ if (copy_from_user(&num_devices, (void __user *)arg->buf_usr_to_drv, ++ arg->len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); ++ return OS_FAULT; ++ } ++ /* ++ * Allocate memory for number of devices ++ */ ++ if (num_devices != 0) { ++ devices = CONTROL_Allocate_Memory(num_devices * ++ sizeof(LWPMU_DEVICE_NODE)); ++ if (!devices) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Unable to allocate memory for devices!"); ++ return OS_NO_MEM; ++ } ++ } ++ cur_device = 0; ++ ++ SEP_DRV_LOG_FLOW_OUT("Success: num_devices=%d, devices=0x%p.", ++ num_devices, devices); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Initialize_UNC(PVOID buf_usr_to_drv, U32 len_usr_to_drv) ++ * ++ * @param buf_usr_to_drv - pointer to the input buffer ++ * @param len_usr_to_drv - size of the input buffer ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMU_IOCTL_INIT call. ++ * @brief Sets up the interrupt handler. ++ * @brief Set up the output buffers/files needed to make the driver ++ * @brief operational. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Initialize_UNC(PVOID buf_usr_to_drv, ++ U32 len_usr_to_drv) ++{ ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 i; ++ int status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: current state is not IDLE."); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (!devices) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("No devices allocated!"); ++ return OS_INVALID; ++ } ++ ++ /* ++ * Program State Initializations: ++ * Foreach device, copy over pcfg and configure dispatch table ++ */ ++ if (cur_device >= num_devices) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "No more devices to allocate! 
Wrong lwpmudrv_Init_Num_Devices."); ++ return OS_FAULT; ++ } ++ if (buf_usr_to_drv == NULL) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_FAULT; ++ } ++ if (len_usr_to_drv != sizeof(DEV_UNC_CONFIG_NODE)) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Got len_usr_to_drv=%d, expecting size=%d", ++ len_usr_to_drv, (int)sizeof(DEV_UNC_CONFIG_NODE)); ++ return OS_FAULT; ++ } ++ // allocate memory ++ LWPMU_DEVICE_pcfg(&devices[cur_device]) = ++ CONTROL_Allocate_Memory(sizeof(DEV_UNC_CONFIG_NODE)); ++ // copy over pcfg ++ if (copy_from_user(LWPMU_DEVICE_pcfg(&devices[cur_device]), ++ (void __user *)buf_usr_to_drv, len_usr_to_drv)) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Failed to copy from user!"); ++ return OS_FAULT; ++ } ++ // configure dispatch from dispatch_id ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[cur_device]); ++ if (!pcfg_unc) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid pcfg_unc."); ++ return OS_INVALID; ++ } ++ ++ LWPMU_DEVICE_dispatch(&devices[cur_device]) = ++ UTILITY_Configure_CPU(DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); ++ if (LWPMU_DEVICE_dispatch(&devices[cur_device]) == NULL) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Unable to configure CPU!"); ++ return OS_FAULT; ++ } ++ ++ LWPMU_DEVICE_cur_group(&devices[cur_device]) = ++ CONTROL_Allocate_Memory(num_packages * sizeof(S32)); ++ if (LWPMU_DEVICE_cur_group(&devices[cur_device]) == NULL) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Cur_grp allocation failed for device %u!", cur_device); ++ return OS_NO_MEM; ++ } ++ for (i = 0; i < num_packages; i++) { ++ LWPMU_DEVICE_cur_group(&devices[cur_device])[i] = 0; ++ } ++ ++ 
LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = 0; ++ LWPMU_DEVICE_num_units(&devices[cur_device]) = 0; ++ LWPMU_DEVICE_device_type(&devices[cur_device]) = DEVICE_INFO_UNCORE; ++ ++ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ if (unc_buf == NULL) { ++ unc_buf = CONTROL_Allocate_Memory( ++ num_packages * sizeof(BUFFER_DESC_NODE)); ++ if (!unc_buf) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, ++ DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure."); ++ return OS_NO_MEM; ++ } ++ } ++ ++ if (!unc_buf_init) { ++ status = OUTPUT_Initialize_UNC(); ++ if (status != OS_SUCCESS) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, ++ DRV_STATE_TERMINATING); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "OUTPUT_Initialize failed!"); ++ return status; ++ } ++ unc_buf_init = TRUE; ++ } ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("unc dispatch id = %d.", ++ DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Terminate(void) ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the DRV_OPERATION_TERMINATE call. ++ * @brief Cleans up the interrupt handler and resets the PMU state. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Terminate(void) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() == DRV_STATE_UNINITIALIZED) { ++ SEP_DRV_LOG_FLOW_OUT("Success (already uninitialized)."); ++ return OS_SUCCESS; ++ } ++ ++ if (!CHANGE_DRIVER_STATE(STATE_BIT_STOPPED | STATE_BIT_TERMINATING, ++ DRV_STATE_UNINITIALIZED)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected state!"); ++ return OS_FAULT; ++ } ++ ++ if (drv_cfg && DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ LINUXOS_Uninstall_Hooks(); ++ } ++ ++ lwpmudrv_Clean_Up(TRUE); ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static void lwpmudrv_Switch_To_Next_Group(param) ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * @brief Switch to the next event group for both core and uncore. ++ * @brief This function assumes an active collection is frozen ++ * @brief or no collection is active. ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Switch_To_Next_Group(void) ++{ ++ S32 cpuid; ++ U32 i, j; ++ CPU_STATE pcpu; ++ EVENT_CONFIG ec; ++ DEV_UNC_CONFIG pcfg_unc; ++ DISPATCH dispatch_unc; ++ ECB pecb_unc = NULL; ++ U32 cur_grp = 0; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ for (cpuid = 0; cpuid < GLOBAL_STATE_num_cpus(driver_state); cpuid++) { ++ pcpu = &pcb[cpuid]; ++ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec( ++ &devices[core_to_dev_map[cpuid]]); ++ CPU_STATE_current_group(pcpu)++; ++ // make the event group list circular ++ CPU_STATE_current_group(pcpu) %= EVENT_CONFIG_num_groups(ec); ++ } ++ ++ if (num_devices) { ++ for (i = num_core_devs; i < num_devices; i++) { ++ pcfg_unc = LWPMU_DEVICE_pcfg(&devices[i]); ++ dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); ++ if (LWPMU_DEVICE_em_groups_count(&devices[i]) > 1) { ++ if (pcb && pcfg_unc && dispatch_unc && ++ DRV_CONFIG_emon_mode(drv_cfg)) { ++ for (j = 0; j < num_packages; j++) { ++ cur_grp = ++ LWPMU_DEVICE_cur_group( ++ &devices[i])[j]; ++ pecb_unc = ++ LWPMU_DEVICE_PMU_register_data( ++ &devices[i]) ++ [cur_grp]; ++ LWPMU_DEVICE_cur_group( ++ &devices[i])[j]++; ++ if (CPU_STATE_current_group( ++ &pcb[0]) == 0) { ++ LWPMU_DEVICE_cur_group( ++ &devices[i])[j] = ++ 0; ++ } ++ LWPMU_DEVICE_cur_group( ++ &devices[i])[j] %= ++ LWPMU_DEVICE_em_groups_count( ++ &devices[i]); ++ } ++ SEP_DRV_LOG_TRACE( ++ "Swap Group to %d for device %d.", ++ cur_grp, i); ++ if (pecb_unc && ++ ECB_device_type(pecb_unc) == ++ DEVICE_UNC_SOCPERF) { ++ // SOCPERF_Switch_Group3(); ++ } ++ } ++ } ++ } ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwmpudrv_Get_Driver_State(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMU_IOCTL_GET_Driver_State call. ++ * @brief Returns the current driver state. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_Driver_State(IOCTL_ARGS arg) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // Check if enough space is provided for collecting the data ++ if ((arg->len_drv_to_usr != sizeof(U32)) || ++ (arg->buf_drv_to_usr == NULL)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid arguments!"); ++ return OS_FAULT; ++ } ++ ++ status = put_user(GET_DRIVER_STATE(), (U32 __user*)arg->buf_drv_to_usr); ++ ++ SEP_DRV_LOG_TRACE_OUT("Return value: %d.", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Pause_Uncore(void) ++ * ++ * @param - 1 if switching group, 0 otherwise ++ * ++ * @return OS_STATUS ++ * ++ * @brief Pause the uncore collection ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Pause_Uncore(PVOID param) ++{ ++ U32 i; ++ U32 switch_grp; ++ DEV_UNC_CONFIG pcfg_unc = NULL; ++ DISPATCH dispatch_unc = NULL; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ switch_grp = *((U32 *)param); ++ ++ for (i = num_core_devs; i < num_devices; i++) { ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); ++ dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); ++ ++ if (pcfg_unc && dispatch_unc && dispatch_unc->freeze) { ++ SEP_DRV_LOG_TRACE("LWP: calling UNC Pause."); ++ if (switch_grp) { ++ if (LWPMU_DEVICE_em_groups_count(&devices[i]) > ++ 1) { ++ dispatch_unc->freeze(&i); ++ } ++ } else { ++ dispatch_unc->freeze(&i); ++ } ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++#if !defined(DRV_SEP_ACRN_ON) ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Pause_Op(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Pause the core/uncore collection ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Pause_Op(PVOID param) ++{ ++ U32 dev_idx; ++ DISPATCH dispatch; ++ U32 switch_grp = 0; ++ U32 this_cpu = CONTROL_THIS_CPU(); ++ ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (dispatch != NULL && dispatch->freeze != NULL && ++ DRV_CONFIG_use_pcl(drv_cfg) == FALSE) { ++ dispatch->freeze(param); ++ } ++ ++ lwpmudrv_Pause_Uncore((PVOID)&switch_grp); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Pause(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Pause the collection ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Pause(void) ++{ ++ int i; ++ int done = FALSE; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (!pcb || !drv_cfg) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!"); ++ return OS_INVALID; ++ } ++ ++ if (CHANGE_DRIVER_STATE(STATE_BIT_RUNNING, DRV_STATE_PAUSING)) { ++ if (DRV_CONFIG_use_pcl(drv_cfg) == FALSE) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); ++ i++) { ++ CPU_STATE_accept_interrupt(&pcb[i]) = 0; ++ } ++ while (!done) { ++ done = TRUE; ++ for (i = 0; ++ i < GLOBAL_STATE_num_cpus(driver_state); ++ i++) { ++ if (atomic_read(&CPU_STATE_in_interrupt( ++ &pcb[i]))) { ++ done = FALSE; ++ } ++ } ++ } ++ } ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Pause_Op, NULL); ++#endif ++ /* ++ * This means that the PAUSE state has been reached. ++ */ ++ CHANGE_DRIVER_STATE(STATE_BIT_PAUSING, DRV_STATE_PAUSED); ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Resume_Uncore(void) ++ * ++ * @param - 1 if switching group, 0 otherwise ++ * ++ * @return OS_STATUS ++ * ++ * @brief Resume the uncore collection ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Resume_Uncore(PVOID param) ++{ ++ U32 i; ++ U32 switch_grp; ++ DEV_UNC_CONFIG pcfg_unc = NULL; ++ DISPATCH dispatch_unc = NULL; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ switch_grp = *((U32 *)param); ++ ++ for (i = num_core_devs; i < num_devices; i++) { ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); ++ dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); ++ ++ if (pcfg_unc && dispatch_unc && dispatch_unc->restart) { ++ SEP_DRV_LOG_TRACE("LWP: calling UNC Resume."); ++ if (switch_grp) { ++ if (LWPMU_DEVICE_em_groups_count(&devices[i]) > ++ 1) { ++ dispatch_unc->restart(&i); ++ } ++ } else { ++ dispatch_unc->restart(&i); ++ } ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++#if !defined(DRV_SEP_ACRN_ON) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Resume_Op(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Resume the core/uncore collection ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Resume_Op(PVOID param) ++{ ++ U32 this_cpu = CONTROL_THIS_CPU(); ++ U32 dev_idx = core_to_dev_map[this_cpu]; ++ DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ U32 switch_grp = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (dispatch != NULL && dispatch->restart != NULL && ++ DRV_CONFIG_use_pcl(drv_cfg) == FALSE) { ++ dispatch->restart((VOID *)(size_t)0); ++ } ++ ++ lwpmudrv_Resume_Uncore((PVOID)&switch_grp); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Resume(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Resume the collection ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Resume(void) ++{ ++ int i; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (!pcb || !drv_cfg) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!"); ++ return OS_INVALID; ++ } ++ ++ /* ++ * If we are in the process of pausing sampling, wait until the pause has been ++ * completed. Then start the Resume process. ++ */ ++ while (GET_DRIVER_STATE() == DRV_STATE_PAUSING) { ++ /* ++ * This delay probably needs to be expanded a little bit more for large systems. ++ * For now, it is probably sufficient. ++ */ ++ SYS_IO_Delay(); ++ SYS_IO_Delay(); ++ } ++ ++ if (CHANGE_DRIVER_STATE(STATE_BIT_PAUSED, DRV_STATE_RUNNING)) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ if (cpu_mask_bits) { ++ CPU_STATE_accept_interrupt(&pcb[i]) = ++ cpu_mask_bits[i] ? 1 : 0; ++ CPU_STATE_group_swap(&pcb[i]) = 1; ++ } else { ++ CPU_STATE_accept_interrupt(&pcb[i]) = 1; ++ CPU_STATE_group_swap(&pcb[i]) = 1; ++ } ++ } ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Resume_Op, NULL); ++#endif ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Write_Uncore(void) ++ * ++ * @param - 1 if switching group, 0 otherwise ++ * ++ * @return OS_STATUS ++ * ++ * @brief Program the uncore collection ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Write_Uncore(PVOID param) ++{ ++ U32 i; ++ U32 switch_grp; ++ DEV_UNC_CONFIG pcfg_unc = NULL; ++ DISPATCH dispatch_unc = NULL; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ switch_grp = *((U32 *)param); ++ ++ for (i = num_core_devs; i < num_devices; i++) { ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); ++ dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); ++ ++ if (pcfg_unc && dispatch_unc && dispatch_unc->write) { ++ SEP_DRV_LOG_TRACE("LWP: calling UNC Write."); ++ if (switch_grp) { ++ if (LWPMU_DEVICE_em_groups_count(&devices[i]) > ++ 1) { ++ dispatch_unc->write(&i); ++ } ++ } else { ++ dispatch_unc->write(&i); ++ } ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Write_Op(void) ++ * ++ * @param - Do operation for Core only ++ * ++ * @return OS_STATUS ++ * ++ * @brief Program the core/uncore collection ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Write_Op(PVOID param) ++{ ++ U32 this_cpu = CONTROL_THIS_CPU(); ++ ++ U32 dev_idx = core_to_dev_map[this_cpu]; ++ DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ U32 switch_grp = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (dispatch != NULL && dispatch->write != NULL) { ++ dispatch->write((VOID *)(size_t)0); ++ } ++ ++ if (param == NULL) { ++ lwpmudrv_Write_Uncore((PVOID)&switch_grp); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Switch_Group(void) ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Switch the current group that is being collected. 
++ * ++ * Special Notes ++ * This routine is called from the user mode code to handle the multiple group ++ * situation. 4 distinct steps are taken: ++ * Step 1: Pause the sampling ++ * Step 2: Increment the current group count ++ * Step 3: Write the new group to the PMU ++ * Step 4: Resume sampling ++ */ ++static OS_STATUS lwpmudrv_Switch_Group(void) ++{ ++ S32 idx; ++ CPU_STATE pcpu; ++ EVENT_CONFIG ec; ++ OS_STATUS status = OS_SUCCESS; ++ U32 current_state = GET_DRIVER_STATE(); ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (!pcb || !drv_cfg) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!"); ++ return OS_INVALID; ++ } ++ ++ if (current_state != DRV_STATE_RUNNING && ++ current_state != DRV_STATE_PAUSED) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Return value: %d (invalid driver state!).", status); ++ return status; ++ } ++ ++ status = lwpmudrv_Pause(); ++ ++ for (idx = 0; idx < GLOBAL_STATE_num_cpus(driver_state); idx++) { ++ pcpu = &pcb[idx]; ++ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec( ++ &devices[core_to_dev_map[idx]]); ++ CPU_STATE_current_group(pcpu)++; ++ // make the event group list circular ++ CPU_STATE_current_group(pcpu) %= EVENT_CONFIG_num_groups(ec); ++ } ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, ++ (VOID *)(size_t)CONTROL_THIS_CPU()); ++#else ++ lwpmudrv_Write_Op((VOID *)(size_t)CONTROL_THIS_CPU()); ++#endif ++ if (drv_cfg && DRV_CONFIG_start_paused(drv_cfg) == FALSE) { ++ lwpmudrv_Resume(); ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Trigger_Read_Op(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Read uncore data ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Trigger_Read_Op(PVOID param) ++{ ++ DEV_UNC_CONFIG pcfg_unc = NULL; ++ DISPATCH dispatch_unc = NULL; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 package_num; ++ U64 tsc; ++ BUFFER_DESC bd; ++ EVENT_DESC evt_desc; ++ U32 cur_grp; ++ ECB pecb; ++ U32 sample_size = 0; ++ U32 offset = 0; ++ PVOID buf; ++ UncoreSampleRecordPC *psamp; ++ U32 i; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ package_num = core_to_package_map[this_cpu]; ++ ++ if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), ++ STATE_BIT_RUNNING | STATE_BIT_PAUSED)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("State is not RUNNING or PAUSED!"); ++ return; ++ } ++ ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Not socket master."); ++ return; ++ } ++ ++ UTILITY_Read_TSC(&tsc); ++ bd = &unc_buf[package_num]; ++ ++ for (i = num_core_devs; i < num_devices; i++) { ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); ++ if (pcfg_unc) { ++ cur_grp = LWPMU_DEVICE_cur_group( ++ &devices[i])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data( ++ &devices[i])[cur_grp]; ++ evt_desc = desc_data[ECB_descriptor_id(pecb)]; ++ sample_size += EVENT_DESC_sample_size(evt_desc); ++ } ++ } ++ ++ buf = OUTPUT_Reserve_Buffer_Space(bd, sample_size, FALSE, ++ !SEP_IN_NOTIFICATION, -1); ++ ++ if (buf) { ++ for (i = num_core_devs; i < num_devices; i++) { ++ pcfg_unc = ++ (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); ++ dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); ++ if (pcfg_unc && dispatch_unc && ++ dispatch_unc->trigger_read) { ++ cur_grp = LWPMU_DEVICE_cur_group( ++ &devices[i])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data( ++ &devices[i])[cur_grp]; ++ evt_desc = desc_data[ECB_descriptor_id(pecb)]; ++ ++ psamp = (UncoreSampleRecordPC *)(((S8 *)buf) + ++ 
offset); ++ UNCORE_SAMPLE_RECORD_descriptor_id(psamp) = ++ ECB_descriptor_id(pecb); ++ UNCORE_SAMPLE_RECORD_tsc(psamp) = tsc; ++ UNCORE_SAMPLE_RECORD_uncore_valid(psamp) = 1; ++ UNCORE_SAMPLE_RECORD_cpu_num(psamp) = ++ (U16)this_cpu; ++ UNCORE_SAMPLE_RECORD_pkg_num(psamp) = ++ (U16)package_num; ++ ++ dispatch_unc->trigger_read(psamp, i); ++ offset += EVENT_DESC_sample_size(evt_desc); ++ } ++ } ++ } else { ++ SEP_DRV_LOG_WARNING( ++ "Buffer space reservation failed; some samples will be dropped."); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Uncore_Switch_Group(void) ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Switch the current group that is being collected. ++ * ++ * Special Notes ++ * This routine is called from the user mode code to handle the multiple group ++ * situation. 4 distinct steps are taken: ++ * Step 1: Pause the sampling ++ * Step 2: Increment the current group count ++ * Step 3: Write the new group to the PMU ++ * Step 4: Resume sampling ++ */ ++static OS_STATUS lwpmudrv_Uncore_Switch_Group(void) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ U32 current_state = GET_DRIVER_STATE(); ++ U32 i = 0; ++ U32 j, k; ++ DEV_UNC_CONFIG pcfg_unc; ++ DISPATCH dispatch_unc; ++ ECB ecb_unc; ++ U32 cur_grp; ++ U32 num_units; ++ U32 switch_grp = 1; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (!devices || !drv_cfg) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Devices or drv_cfg pointer is NULL!"); ++ return OS_INVALID; ++ } ++ ++ if (current_state != DRV_STATE_RUNNING && ++ current_state != DRV_STATE_PAUSED) { ++ SEP_DRV_LOG_FLOW_OUT("Driver state is not RUNNING or PAUSED!"); ++ return OS_INVALID; ++ } ++ ++ if (max_groups_unc > 1) { ++ CONTROL_Invoke_Parallel(lwpmudrv_Pause_Uncore, ++ (PVOID)&switch_grp); ++ for (i = num_core_devs; i < num_devices; i++) { ++ pcfg_unc = LWPMU_DEVICE_pcfg(&devices[i]); ++ dispatch_unc = 
LWPMU_DEVICE_dispatch(&devices[i]); ++ num_units = LWPMU_DEVICE_num_units(&devices[i]); ++ if (!pcfg_unc || !dispatch_unc) { ++ continue; ++ } ++ if (LWPMU_DEVICE_em_groups_count(&devices[i]) > 1) { ++ for (j = 0; j < num_packages; j++) { ++ cur_grp = LWPMU_DEVICE_cur_group( ++ &devices[i])[j]; ++ ecb_unc = ++ LWPMU_DEVICE_PMU_register_data( ++ &devices[i])[cur_grp]; ++ // Switch group ++ LWPMU_DEVICE_cur_group( ++ &devices[i])[j]++; ++ LWPMU_DEVICE_cur_group( ++ &devices[i])[j] %= ++ LWPMU_DEVICE_em_groups_count( ++ &devices[i]); ++ if (ecb_unc && ++ (ECB_device_type(ecb_unc) == ++ DEVICE_UNC_SOCPERF) && ++ (j == 0)) { ++ // SOCPERF_Switch_Group3(); ++ } ++ // Post group switch ++ cur_grp = LWPMU_DEVICE_cur_group( ++ &devices[i])[j]; ++ ecb_unc = ++ LWPMU_DEVICE_PMU_register_data( ++ &devices[i])[cur_grp]; ++ for (k = 0; ++ k < (ECB_num_events(ecb_unc) * ++ num_units); ++ k++) { ++ LWPMU_DEVICE_prev_value( ++ &devices[i])[j][k] = ++ 0LL; //zero out prev_value for new collection ++ } ++ } ++ } ++ } ++ CONTROL_Invoke_Parallel(lwpmudrv_Write_Uncore, ++ (PVOID)&switch_grp); ++ CONTROL_Invoke_Parallel(lwpmudrv_Resume_Uncore, ++ (PVOID)&switch_grp); ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static VOID lwpmudrv_Trigger_Read(void) ++ * ++ * @param - none ++ * ++ * @return - OS_STATUS ++ * ++ * @brief Read the Counter Data. 
++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Trigger_Read( ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ struct timer_list *tl ++#else ++ unsigned long arg ++#endif ++) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { ++ SEP_DRV_LOG_TRACE_OUT("Success: driver state is not RUNNING"); ++ return; ++ } ++#if defined(BUILD_CHIPSET) ++ if (cs_dispatch && cs_dispatch->Trigger_Read) { ++ cs_dispatch->Trigger_Read(); ++ } ++#endif ++ ++ if (drv_cfg && DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { ++ SEP_DRV_LOG_TRACE_OUT("Success: Using PCL"); ++ return; ++ } ++ ++ CONTROL_Invoke_Parallel(lwpmudrv_Trigger_Read_Op, NULL); ++ ++ uncore_em_factor++; ++ if (uncore_em_factor == DRV_CONFIG_unc_em_factor(drv_cfg)) { ++ SEP_DRV_LOG_TRACE("Switching Uncore Group..."); ++ lwpmudrv_Uncore_Switch_Group(); ++ uncore_em_factor = 0; ++ } ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ mod_timer(unc_read_timer, jiffies + unc_timer_interval); ++#else ++ unc_read_timer->expires = jiffies + unc_timer_interval; ++ add_timer(unc_read_timer); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT("Success."); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static void lwmudrv_Read_Specific_TSC (PVOID param) ++ * ++ * @param param - pointer to the result ++ * ++ * @return none ++ * ++ * @brief Read the tsc value in the current processor and ++ * @brief write the result into param. ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Read_Specific_TSC(PVOID param) ++{ ++ U32 this_cpu; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ if (this_cpu == 0) { ++ UTILITY_Read_TSC((U64 *)param); ++ } ++ preempt_enable(); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID lwpmudrv_Uncore_Stop_Timer (void) ++ * ++ * @brief Stop the uncore read timer ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * Special Notes: ++ */ ++static VOID lwpmudrv_Uncore_Stop_Timer(void) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (unc_read_timer == NULL) { ++ return; ++ } ++ ++ del_timer_sync(unc_read_timer); ++ unc_read_timer = CONTROL_Free_Memory(unc_read_timer); ++ ++ SEP_DRV_LOG_FLOW_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS lwpmudrv_Uncore_Start_Timer (void) ++ * ++ * @brief Start the uncore read timer ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * Special Notes: ++ */ ++static VOID lwpmudrv_Uncore_Start_Timer(void) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ unc_timer_interval = ++ msecs_to_jiffies(DRV_CONFIG_unc_timer_interval(drv_cfg)); ++ unc_read_timer = CONTROL_Allocate_Memory(sizeof(struct timer_list)); ++ if (unc_read_timer == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for unc_read_timer!"); ++ return; ++ } ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ timer_setup(unc_read_timer, lwpmudrv_Trigger_Read, 0); ++ mod_timer(unc_read_timer, jiffies + unc_timer_interval); ++#else ++ init_timer(unc_read_timer); ++ unc_read_timer->function = lwpmudrv_Trigger_Read; ++ unc_read_timer->expires = jiffies + unc_timer_interval; ++ add_timer(unc_read_timer); ++#endif ++ ++ SEP_DRV_LOG_FLOW_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Init_Op(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Initialize PMU before collection ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Init_Op(PVOID param) ++{ ++ U32 this_cpu; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (dispatch != NULL && dispatch->init != NULL) { ++ dispatch->init(&dev_idx); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Init_PMU(void) ++ * ++ * @param - none ++ * ++ * @return - OS_STATUS ++ * ++ * @brief Initialize the PMU and the driver state in preparation for data collection. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Init_PMU(IOCTL_ARGS args) ++{ ++ DEV_UNC_CONFIG pcfg_unc = NULL; ++ DISPATCH dispatch_unc = NULL; ++ EVENT_CONFIG ec; ++ U32 i; ++ U32 emon_buffer_size = 0; ++ OS_STATUS status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ if (copy_from_user(&emon_buffer_size, (void __user *)args->buf_usr_to_drv, ++ sizeof(U32))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); ++ return OS_FAULT; ++ } ++ prev_counter_size = emon_buffer_size; ++ ++ if (!drv_cfg) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg not set!"); ++ return OS_FAULT; ++ } ++ if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { ++ SEP_DRV_LOG_FLOW_OUT("Success: using PCL."); ++ return OS_SUCCESS; ++ } ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Discarded: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { 
++ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec( ++ &devices[core_to_dev_map[i]]); ++ CPU_STATE_trigger_count(&pcb[i]) = EVENT_CONFIG_em_factor(ec); ++ CPU_STATE_trigger_event_num(&pcb[i]) = ++ EVENT_CONFIG_em_event_num(ec); ++ } ++ ++ // set cur_device's total groups to max groups of all devices ++ max_groups_unc = 0; ++ for (i = num_core_devs; i < num_devices; i++) { ++ if (max_groups_unc < ++ LWPMU_DEVICE_em_groups_count(&devices[i])) { ++ max_groups_unc = ++ LWPMU_DEVICE_em_groups_count(&devices[i]); ++ } ++ } ++ // now go back and up total groups for all devices ++ if (DRV_CONFIG_emon_mode(drv_cfg) == TRUE) { ++ for (i = num_core_devs; i < num_devices; i++) { ++ if (LWPMU_DEVICE_em_groups_count(&devices[i]) < ++ max_groups_unc) { ++ LWPMU_DEVICE_em_groups_count(&devices[i]) = ++ max_groups_unc; ++ } ++ } ++ } ++ ++ // allocate save/restore space before program the PMU ++ lwpmudrv_Allocate_Restore_Buffer(); ++ ++ // allocate uncore read buffers for SEP ++ if (unc_buf_init && !DRV_CONFIG_emon_mode(drv_cfg)) { ++ lwpmudrv_Allocate_Uncore_Buffer(); ++ } ++ ++ // must be done after pcb is created and before PMU is first written to ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Init_Op, NULL); ++#else ++ lwpmudrv_Init_Op(NULL); ++#endif ++ ++ for (i = num_core_devs; i < num_devices; i++) { ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); ++ dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); ++ if (pcfg_unc && dispatch_unc && dispatch_unc->init) { ++ dispatch_unc->init((VOID *)&i); ++ } ++ } ++ ++ // Allocate PEBS buffers ++ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ PEBS_Allocate(); ++ } ++ ++ // ++ // Transfer the data into the PMU registers ++ // ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, NULL); ++#else ++ lwpmudrv_Write_Op(NULL); ++#endif ++ ++ SEP_DRV_LOG_TRACE("IOCTL_Init_PMU - finished initial Write."); ++ ++ if (DRV_CONFIG_counting_mode(drv_cfg) == TRUE || ++ DRV_CONFIG_emon_mode(drv_cfg) == 
TRUE) { ++ if (!read_counter_info) { ++ read_counter_info = ++ CONTROL_Allocate_Memory(emon_buffer_size); ++ if (!read_counter_info) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure!"); ++ return OS_NO_MEM; ++ } ++ } ++ if (!prev_counter_data) { ++ prev_counter_data = ++ CONTROL_Allocate_Memory(emon_buffer_size); ++ if (!prev_counter_data) { ++ read_counter_info = ++ CONTROL_Free_Memory(read_counter_info); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure!"); ++ return OS_NO_MEM; ++ } ++ } ++ if (!emon_buffer_driver_helper) { ++ // allocate size = size of EMON_BUFFER_DRIVER_HELPER_NODE + the number of entries in core_index_to_thread_offset_map, which is num of cpu ++ emon_buffer_driver_helper = CONTROL_Allocate_Memory( ++ sizeof(EMON_BUFFER_DRIVER_HELPER_NODE) + ++ sizeof(U32) * ++ GLOBAL_STATE_num_cpus(driver_state)); ++ if (!emon_buffer_driver_helper) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure!"); ++ return OS_NO_MEM; ++ } ++ } ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static void lwpmudrv_Read_MSR(pvoid param) ++ * ++ * @param param - pointer to the buffer to store the MSR counts ++ * ++ * @return none ++ * ++ * @brief ++ * @brief Read the U64 value at address in buf_drv_to_usr and ++ * @brief write the result into buf_usr_to_drv. 
++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Read_MSR(PVOID param) ++{ ++ S32 cpu_idx; ++ MSR_DATA this_node; ++#if !defined(DRV_SEP_ACRN_ON) ++ S64 reg_num; ++#else ++ struct profiling_msr_ops_list *msr_list; ++#endif ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (param == NULL) { ++ preempt_disable(); ++ cpu_idx = (S32)CONTROL_THIS_CPU(); ++ preempt_enable(); ++ } else { ++ cpu_idx = *(S32 *)param; ++ } ++#if !defined(DRV_SEP_ACRN_ON) ++ this_node = &msr_data[cpu_idx]; ++ reg_num = MSR_DATA_addr(this_node); ++ ++ if (reg_num != 0) { ++ MSR_DATA_value(this_node) = ++ (U64)SYS_Read_MSR((U32)MSR_DATA_addr(this_node)); ++ } ++#else ++ msr_list = (struct profiling_msr_ops_list *)CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(struct profiling_msr_ops_list)); ++ memset(msr_list, 0, ++ GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(struct profiling_msr_ops_list)); ++ for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_idx++) { ++ this_node = &msr_data[cpu_idx]; ++ msr_list[cpu_idx].collector_id = COLLECTOR_SEP; ++ msr_list[cpu_idx].entries[0].msr_id = MSR_DATA_addr(this_node); ++ msr_list[cpu_idx].entries[0].op_type = MSR_OP_READ; ++ msr_list[cpu_idx].entries[0].value = 0LL; ++ msr_list[cpu_idx].num_entries = 1; ++ msr_list[cpu_idx].msr_op_state = MSR_OP_REQUESTED; ++ } ++ ++ BUG_ON(!virt_addr_valid(msr_list)); ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, ++ virt_to_phys(msr_list)); ++ ++ for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_idx++) { ++ this_node = &msr_data[cpu_idx]; ++ MSR_DATA_value(this_node) = msr_list[cpu_idx].entries[0].value; ++ } ++ ++ msr_list = CONTROL_Free_Memory(msr_list); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Read_MSR_All_Cores(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Read the U64 value at address into buf_drv_to_usr and write ++ * @brief the result into buf_usr_to_drv. ++ * @brief Returns OS_SUCCESS if the read across all cores succeed, ++ * @brief otherwise OS_FAULT. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Read_MSR_All_Cores(IOCTL_ARGS arg) ++{ ++ U64 *val; ++ S32 reg_num; ++ S32 i; ++ MSR_DATA node; ++#if defined(DRV_SEP_ACRN_ON) ++ S32 this_cpu = 0; ++#endif ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if ((arg->len_usr_to_drv != sizeof(U32)) || ++ (arg->buf_usr_to_drv == NULL) || (arg->buf_drv_to_usr == NULL)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); ++ return OS_FAULT; ++ } ++ ++ val = (U64 *)arg->buf_drv_to_usr; ++ if (val == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("NULL buf_usr_to_drv!"); ++ return OS_FAULT; ++ } ++ ++ if (copy_from_user(®_num, (void __user *)arg->buf_usr_to_drv, sizeof(U32))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ msr_data = CONTROL_Allocate_Memory(GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(MSR_DATA_NODE)); ++ if (!msr_data) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure!"); ++ return OS_NO_MEM; ++ } ++ ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ node = &msr_data[i]; ++ MSR_DATA_addr(node) = reg_num; ++ } ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Read_MSR, NULL); ++#else ++ lwpmudrv_Read_MSR(&this_cpu); ++#endif ++ ++ /* copy values to arg array? 
*/ ++ if (arg->len_drv_to_usr < GLOBAL_STATE_num_cpus(driver_state)) { ++ msr_data = CONTROL_Free_Memory(msr_data); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Not enough memory allocated in output buffer!"); ++ return OS_FAULT; ++ } ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ node = &msr_data[i]; ++ if (copy_to_user((void __user *)&val[i], (U64 *)&MSR_DATA_value(node), ++ sizeof(U64))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ } ++ ++ msr_data = CONTROL_Free_Memory(msr_data); ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static void lwpmudrv_Write_MSR(pvoid iaram) ++ * ++ * @param param - pointer to array containing the MSR address and the value to be written ++ * ++ * @return none ++ * ++ * @brief ++ * @brief Read the U64 value at address in buf_drv_to_usr and ++ * @brief write the result into buf_usr_to_drv. ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Write_MSR(PVOID param) ++{ ++ S32 cpu_idx; ++ MSR_DATA this_node; ++#if !defined(DRV_SEP_ACRN_ON) ++ U32 reg_num; ++ U64 val; ++#else ++ struct profiling_msr_ops_list *msr_list; ++#endif ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (param == NULL) { ++ preempt_disable(); ++ cpu_idx = (S32)CONTROL_THIS_CPU(); ++ preempt_enable(); ++ } else { ++ cpu_idx = *(S32 *)param; ++ } ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ this_node = &msr_data[cpu_idx]; ++ reg_num = (U32)MSR_DATA_addr(this_node); ++ val = (U64)MSR_DATA_value(this_node); ++ // don't attempt to write MSR 0 ++ if (reg_num == 0) { ++ preempt_enable(); ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Error: tried to write MSR 0!"); ++ return; ++ } ++ ++ SYS_Write_MSR(reg_num, val); ++ preempt_enable(); ++ ++#else ++ msr_list = (struct profiling_msr_ops_list *)CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(struct profiling_msr_ops_list)); ++ memset(msr_list, 0, ++ 
GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(struct profiling_msr_ops_list)); ++ for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_idx++) { ++ this_node = &msr_data[cpu_idx]; ++ msr_list[cpu_idx].collector_id = COLLECTOR_SEP; ++ msr_list[cpu_idx].entries[0].msr_id = MSR_DATA_addr(this_node); ++ msr_list[cpu_idx].entries[0].op_type = MSR_OP_WRITE; ++ msr_list[cpu_idx].entries[0].value = MSR_DATA_value(this_node); ++ msr_list[cpu_idx].num_entries = 1; ++ msr_list[cpu_idx].msr_op_state = MSR_OP_REQUESTED; ++ } ++ ++ BUG_ON(!virt_addr_valid(msr_list)); ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, ++ virt_to_phys(msr_list)); ++ ++ msr_list = CONTROL_Free_Memory(msr_list); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Write_MSR_All_Cores(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Read the U64 value at address into buf_usr_to_drv and write ++ * @brief the result into buf_usr_to_drv. ++ * @brief Returns OS_SUCCESS if the write across all cores succeed, ++ * @brief otherwise OS_FAULT. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Write_MSR_All_Cores(IOCTL_ARGS arg) ++{ ++ EVENT_REG_NODE buf; ++ EVENT_REG buf_usr_to_drv = &buf; ++ U32 reg_num; ++ U64 val; ++ S32 i; ++ MSR_DATA node; ++#if defined(DRV_SEP_ACRN_ON) ++ S32 this_cpu = 0; ++#endif ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->len_usr_to_drv < sizeof(EVENT_REG_NODE) || ++ arg->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); ++ return OS_FAULT; ++ } ++ ++ if (copy_from_user(buf_usr_to_drv, (void __user *)arg->buf_usr_to_drv, ++ sizeof(EVENT_REG_NODE))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ reg_num = (U32)EVENT_REG_reg_id(buf_usr_to_drv, 0); ++ val = (U64)EVENT_REG_reg_value(buf_usr_to_drv, 0); ++ ++ msr_data = CONTROL_Allocate_Memory(GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(MSR_DATA_NODE)); ++ if (!msr_data) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure"); ++ return OS_NO_MEM; ++ } ++ ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ node = &msr_data[i]; ++ MSR_DATA_addr(node) = reg_num; ++ MSR_DATA_value(node) = val; ++ } ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Write_MSR, NULL); ++#else ++ lwpmudrv_Write_MSR(&this_cpu); ++#endif ++ ++ msr_data = CONTROL_Free_Memory(msr_data); ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static void lwpmudrv_Read_Data_Op(PVOID param) ++ * ++ * @param param - dummy ++ * ++ * @return void ++ * ++ * @brief Read all the core/uncore data counters at one shot ++ * ++ * Special Notes ++ */ ++static void lwpmudrv_Read_Data_Op(VOID *param) ++{ ++ U32 this_cpu; ++ DISPATCH dispatch; ++ U32 dev_idx; ++ DEV_UNC_CONFIG pcfg_unc; ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (devices == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Devices is null!"); ++ return; ++ } ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ if (dispatch != NULL && dispatch->read_data != NULL) { ++ dispatch->read_data(param); ++ } ++ for (dev_idx = num_core_devs; dev_idx < num_devices; dev_idx++) { ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ if (pcfg_unc == NULL) { ++ continue; ++ } ++ if (!(DRV_CONFIG_emon_mode(drv_cfg) || ++ DRV_CONFIG_counting_mode(drv_cfg))) { ++ continue; ++ } ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ if (dispatch == NULL) { ++ continue; ++ } ++ if (dispatch->read_data == NULL) { ++ continue; ++ } ++ dispatch->read_data((VOID *)&dev_idx); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Read_MSRs(IOCTL_ARG arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Read all the programmed data counters and accumulate them ++ * @brief into a single buffer. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Read_MSRs(IOCTL_ARGS arg) ++{ ++#if defined(DRV_SEP_ACRN_ON) ++ S32 this_cpu = 0; ++#endif ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_SUCCESS; ++ } ++ // ++ // Transfer the data in the PMU registers to the output buffer ++ // ++ if (!read_counter_info) { ++ read_counter_info = ++ CONTROL_Allocate_Memory(arg->len_drv_to_usr); ++ if (!read_counter_info) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure"); ++ return OS_NO_MEM; ++ } ++ } ++ if (!prev_counter_data) { ++ prev_counter_data = ++ CONTROL_Allocate_Memory(arg->len_drv_to_usr); ++ if (!prev_counter_data) { ++ read_counter_info = ++ CONTROL_Free_Memory(read_counter_info); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure"); ++ return OS_NO_MEM; ++ } ++ } ++ memset(read_counter_info, 0, arg->len_drv_to_usr); ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Read_Data_Op, NULL); ++#else ++ lwpmudrv_Read_Data_Op(&this_cpu); ++#endif ++ ++ if (copy_to_user((void __user *)arg->buf_drv_to_usr, read_counter_info, ++ arg->len_drv_to_usr)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++#if defined(DRV_SEP_ACRN_ON) ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static void lwpmudrv_Read_Metrics_Op(PVOID param) ++ * ++ * @param param - dummy ++ * ++ * @return void ++ * ++ * @brief Read metrics register IA32_PERF_METRICS to collect topdown metrics ++ * from PMU ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Read_Metrics_Op(PVOID param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 offset; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ DISPATCH dispatch; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ // The pmu metric will be append after core event at thread level (basically treat them as extra core events). ++ // Move the pointer to the end of the core event for this cpu index. ++ offset = EMON_BUFFER_CORE_EVENT_OFFSET( ++ EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map( ++ emon_buffer_driver_helper)[this_cpu], ++ EMON_BUFFER_DRIVER_HELPER_core_num_events( ++ emon_buffer_driver_helper)); ++ ++ if (!DEV_CONFIG_enable_perf_metrics(pcfg) || ++ !DEV_CONFIG_emon_perf_metrics_offset(pcfg) || ++ (CPU_STATE_current_group(pcpu) != 0)) { ++ return; ++ } ++ ++ if (dispatch != NULL && dispatch->read_metrics != NULL) { ++ dispatch->read_metrics(read_counter_info + offset); ++ SEP_DRV_LOG_TRACE("Data read = %llu.", ++ *(read_counter_info + offset)); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Read_Metrics(IOCTL_ARGS args) ++ * ++ * @param args - pointer to IOCTL_ARGS_NODE structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Read metrics register on all cpus and accumulate them into the output ++ * buffer ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Read_Metrics(IOCTL_ARGS args) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 offset; ++ U64 *p_buffer; ++ DEV_CONFIG pcfg; ++ U32 idx; ++ ++ SEP_DRV_LOG_FLOW_IN("Args: %p.", args); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ p_buffer = (U64 *)(args->buf_drv_to_usr); ++ ++ if (args->len_drv_to_usr == 0 || args->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_FLOW_OUT("Invalid parameters!"); ++ return OS_SUCCESS; ++ } ++ ++ if (!read_counter_info) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Read_counter_info is NULL!"); ++ return OS_FAULT; ++ } ++ ++ CONTROL_Invoke_Parallel(lwpmudrv_Read_Metrics_Op, NULL); ++ for (idx = 0; idx < num_core_devs; idx++) { ++ pcfg = LWPMU_DEVICE_pcfg(&devices[idx]); ++ offset = DEV_CONFIG_emon_perf_metrics_offset(pcfg); ++ if (!DEV_CONFIG_enable_perf_metrics(pcfg) || !offset || ++ (CPU_STATE_current_group(pcpu) != 0)) { ++ continue; ++ } ++ p_buffer += offset; ++ if (copy_to_user((void __user *)p_buffer, read_counter_info + offset, ++ (sizeof(U64) * num_packages * ++ GLOBAL_STATE_num_cpus(driver_state) * ++ DEV_CONFIG_num_perf_metrics(pcfg)))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Failed copy_to_user for read_counter_info!"); ++ return OS_FAULT; ++ } ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success."); ++ return OS_SUCCESS; ++} ++ ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Read_Counters_And_Switch_Group(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Read / Store the counters and switch to the next valid group. 
++ * ++ * Special Notes ++ * This routine is called from the user mode code to handle the multiple group ++ * situation. 10 distinct steps are taken: ++ * Step 1: Save the previous cpu's tsc ++ * Step 2: Read the current cpu's tsc ++ * Step 3: Pause the counting PMUs ++ * Step 4: Calculate the difference between the current and previous cpu's tsc ++ * Step 5: Save original buffer ptr and copy cpu's tsc into the output buffer ++ * Increment the buffer position by number of CPU ++ * Step 6: Read the currently programmed data PMUs and copy the data into the output buffer ++ * Restore the original buffer ptr. ++ * Step 7: Write the new group to the PMU ++ * Step 8: Write the new group to the PMU ++ * Step 9: Read the current cpu's tsc for next collection (so read MSRs time not included in report) ++ * Step 10: Resume the counting PMUs ++ */ ++static OS_STATUS lwpmudrv_Read_Counters_And_Switch_Group(IOCTL_ARGS arg) ++{ ++ U64 *p_buffer = NULL; ++ char *orig_r_buf_ptr = NULL; ++ U64 orig_r_buf_len = 0; ++ OS_STATUS status = OS_SUCCESS; ++ DRV_BOOL enter_in_pause_state = 0; ++ U32 i = 0; ++#if !defined(CONFIG_PREEMPT_COUNT) ++ U64 *tmp = NULL; ++#endif ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->buf_drv_to_usr == NULL || arg->len_drv_to_usr == 0) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_FAULT; ++ } ++ ++ if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), ++ STATE_BIT_RUNNING | STATE_BIT_PAUSED)) { ++ SEP_DRV_LOG_FLOW_OUT( ++ "'Success'/error: driver state is not RUNNING or PAUSED!"); ++ return OS_SUCCESS; ++ } ++ ++ if (GET_DRIVER_STATE() == DRV_STATE_PAUSED) { ++ enter_in_pause_state = 1; ++ } ++ ++ // step 1 ++#if !defined(CONFIG_PREEMPT_COUNT) ++ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { ++ // swap cpu_tsc and prev_cpu_tsc, so that cpu_tsc is saved in prev_cpu_tsc. 
++ tmp = prev_cpu_tsc; ++ prev_cpu_tsc = cpu_tsc; ++ cpu_tsc = tmp; ++ } else ++#endif ++ prev_cpu_tsc[0] = cpu_tsc[0]; ++ ++ // step 2 ++ // if per_cpu_tsc is not defined, read cpu0's tsc and save in var cpu_tsc[0] ++ // if per_cpu_tsc is defined, read all cpu's tsc and save in var cpu_tsc by lwpmudrv_Fill_TSC_Info ++#if !defined(CONFIG_PREEMPT_COUNT) ++ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { ++ atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); ++ init_waitqueue_head(&read_tsc_now); ++ CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, ++ (PVOID)(size_t)0); ++ } else ++#endif ++ CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]); ++ ++ // step 3 ++ // Counters should be frozen right after time stamped. ++ if (!enter_in_pause_state) { ++ status = lwpmudrv_Pause(); ++ } ++ ++ // step 4 ++ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++#if !defined(CONFIG_PREEMPT_COUNT) ++ diff_cpu_tsc[i] = cpu_tsc[i] - prev_cpu_tsc[i]; ++#else ++ // if CONFIG_PREEMPT_COUNT is defined, means lwpmudrv_Fill_TSC_Info can not be run. 
++ // return all cpu's tsc difference with cpu0's tsc difference instead ++ diff_cpu_tsc[i] = cpu_tsc[0] - prev_cpu_tsc[0]; ++#endif ++ } ++ } else { ++ diff_cpu_tsc[0] = cpu_tsc[0] - prev_cpu_tsc[0]; ++ } ++ ++ // step 5 ++ orig_r_buf_ptr = arg->buf_drv_to_usr; ++ orig_r_buf_len = arg->len_drv_to_usr; ++ ++ if (copy_to_user((void __user *)arg->buf_drv_to_usr, diff_cpu_tsc, ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ p_buffer = (U64 *)(arg->buf_drv_to_usr); ++ p_buffer += GLOBAL_STATE_num_cpus(driver_state); ++ arg->buf_drv_to_usr = (char *)p_buffer; ++ arg->len_drv_to_usr -= ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64); ++ ++ // step 6 ++ status = lwpmudrv_Read_MSRs(arg); ++ ++#if defined(DRV_SEP_ACRN_ON) ++ status = lwpmudrv_Read_Metrics(arg); ++#endif ++ arg->buf_drv_to_usr = orig_r_buf_ptr; ++ arg->len_drv_to_usr = orig_r_buf_len; ++ ++ // step 7 ++ // for each processor, increment its current group number ++ lwpmudrv_Switch_To_Next_Group(); ++ ++ // step 8 ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, NULL); ++#else ++ lwpmudrv_Write_Op(NULL); ++#endif ++ ++ // step 9 ++ // if per_cpu_tsc is defined, read all cpu's tsc and save in cpu_tsc for next run ++#if !defined(CONFIG_PREEMPT_COUNT) ++ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { ++ atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); ++ init_waitqueue_head(&read_tsc_now); ++ CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, ++ (PVOID)(size_t)0); ++ } else ++#endif ++ CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]); ++ ++ // step 10 ++ if (!enter_in_pause_state) { ++ status = lwpmudrv_Resume(); ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ++ * @fn static OS_STATUS lwpmudrv_Read_And_Reset_Counters(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * 
@brief Read the current value of the counters, and reset them all to 0. ++ * ++ * Special Notes ++ * This routine is called from the user mode code to handle the multiple group ++ * situation. 9 distinct steps are taken: ++ * Step 1: Save the previous cpu's tsc ++ * Step 2: Read the current cpu's tsc ++ * Step 3: Pause the counting PMUs ++ * Step 4: Calculate the difference between the current and previous cpu's tsc ++ * Step 5: Save original buffer ptr and copy cpu's tsc into the output buffer ++ * Increment the buffer position by number of CPU ++ * Step 6: Read the currently programmed data PMUs and copy the data into the output buffer ++ * Restore the original buffer ptr. ++ * Step 7: Write the new group to the PMU ++ * Step 8: Read the current cpu's tsc for next collection (so read MSRs time not included in report) ++ * Step 9: Resume the counting PMUs ++ */ ++static OS_STATUS lwpmudrv_Read_And_Reset_Counters(IOCTL_ARGS arg) ++{ ++ U64 *p_buffer = NULL; ++ char *orig_r_buf_ptr = NULL; ++ U64 orig_r_buf_len = 0; ++ OS_STATUS status = OS_SUCCESS; ++ DRV_BOOL enter_in_pause_state = 0; ++ U32 i = 0; ++#if !defined(CONFIG_PREEMPT_COUNT) ++ U64 *tmp = NULL; ++#endif ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->buf_drv_to_usr == NULL || arg->len_drv_to_usr == 0) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_FAULT; ++ } ++ ++ if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), ++ STATE_BIT_RUNNING | STATE_BIT_PAUSED)) { ++ SEP_DRV_LOG_FLOW_OUT( ++ "'Success'/error: driver state is not RUNNING or PAUSED!"); ++ return OS_SUCCESS; ++ } ++ ++ if (GET_DRIVER_STATE() == DRV_STATE_PAUSED) { ++ enter_in_pause_state = 1; ++ } ++ ++ // step 1 ++#if !defined(CONFIG_PREEMPT_COUNT) ++ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { ++ // swap cpu_tsc and prev_cpu_tsc, so that cpu_tsc is saved in prev_cpu_tsc. 
++		tmp = prev_cpu_tsc;
++		prev_cpu_tsc = cpu_tsc;
++		cpu_tsc = tmp;
++	} else
++#endif
++		prev_cpu_tsc[0] = cpu_tsc[0];
++
++	// step 2
++	// if per_cpu_tsc is not defined, read cpu0's tsc into var cpu_tsc[0]
++	// if per_cpu_tsc is defined, read all cpu's tsc into var cpu_tsc by lwpmudrv_Fill_TSC_Info
++#if !defined(CONFIG_PREEMPT_COUNT)
++	if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) {
++		atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state));
++		init_waitqueue_head(&read_tsc_now);
++		CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info,
++					(PVOID)(size_t)0);
++	} else
++#endif
++		CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]);
++
++	// step 3
++	// Counters should be frozen right after time stamped.
++	if (!enter_in_pause_state) {
++		status = lwpmudrv_Pause();
++		// Bail out only when the pause actually failed; the original
++		// "!= OS_INVALID" test returned early on OS_SUCCESS as well,
++		// skipping the counter read entirely (cf. the "!= OS_SUCCESS"
++		// convention used for lwpmudrv_Resume in step 9 below).
++		if (status != OS_SUCCESS) {
++			return status;
++		}
++	}
++
++	// step 4
++	if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) {
++		for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
++#if !defined(CONFIG_PREEMPT_COUNT)
++			diff_cpu_tsc[i] = cpu_tsc[i] - prev_cpu_tsc[i];
++#else
++			// if CONFIG_PREEMPT_COUNT is defined, means lwpmudrv_Fill_TSC_Info can not be run.
++ // return all cpu's tsc difference with cpu0's tsc difference instead ++ diff_cpu_tsc[i] = cpu_tsc[0] - prev_cpu_tsc[0]; ++#endif ++ } ++ } else { ++ diff_cpu_tsc[0] = cpu_tsc[0] - prev_cpu_tsc[0]; ++ } ++ ++ // step 5 ++ orig_r_buf_ptr = arg->buf_drv_to_usr; ++ orig_r_buf_len = arg->len_drv_to_usr; ++ ++ if (copy_to_user((void __user *)arg->buf_drv_to_usr, diff_cpu_tsc, ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64))) { ++ return OS_FAULT; ++ } ++ ++ p_buffer = (U64 *)(arg->buf_drv_to_usr); ++ p_buffer += GLOBAL_STATE_num_cpus(driver_state); ++ arg->buf_drv_to_usr = (char *)p_buffer; ++ arg->len_drv_to_usr -= ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64); ++ ++ // step 6 ++ status = lwpmudrv_Read_MSRs(arg); ++#if defined(DRV_SEP_ACRN_ON) ++ status = lwpmudrv_Read_Metrics(arg); ++#endif ++ arg->buf_drv_to_usr = orig_r_buf_ptr; ++ arg->len_drv_to_usr = orig_r_buf_len; ++ ++ // step 7 ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, NULL); ++#else ++ lwpmudrv_Write_Op(NULL); ++#endif ++ ++ // step 8 ++ // if per_cpu_tsc is defined, read all cpu's tsc and save in cpu_tsc for next run ++#if !defined(CONFIG_PREEMPT_COUNT) ++ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { ++ atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); ++ init_waitqueue_head(&read_tsc_now); ++ CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, ++ (PVOID)(size_t)0); ++ } else ++#endif ++ CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]); ++ ++ // step 9 ++ if (!enter_in_pause_state) { ++ status = lwpmudrv_Resume(); ++ if (status != OS_SUCCESS) ++ return status; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Set_Num_EM_Groups(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Configure the event multiplexing group. 
++ * ++ * Special Notes ++ * None ++ */ ++static OS_STATUS lwpmudrv_Set_EM_Config(IOCTL_ARGS arg) ++{ ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: Driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (arg->buf_usr_to_drv == NULL || ++ arg->len_usr_to_drv != sizeof(EVENT_CONFIG_NODE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ LWPMU_DEVICE_ec(&devices[cur_device]) = ++ CONTROL_Allocate_Memory(sizeof(EVENT_CONFIG_NODE)); ++ if (!LWPMU_DEVICE_ec(&devices[cur_device])) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure for ec!"); ++ return OS_NO_MEM; ++ } ++ ++ if (copy_from_user(LWPMU_DEVICE_ec(&devices[cur_device]), ++ (void __user *)arg->buf_usr_to_drv, sizeof(EVENT_CONFIG_NODE))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure (event config)!"); ++ return OS_FAULT; ++ } ++ ++ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[cur_device]); ++ LWPMU_DEVICE_PMU_register_data(&devices[cur_device]) = ++ CONTROL_Allocate_Memory(EVENT_CONFIG_num_groups(ec) * ++ sizeof(VOID *)); ++ if (!LWPMU_DEVICE_PMU_register_data(&devices[cur_device])) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for PMU_register_data!"); ++ return OS_NO_MEM; ++ } ++ ++ EVENTMUX_Initialize(); ++ ++ SEP_DRV_LOG_FLOW_OUT("OS_SUCCESS."); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Set_EM_Config_UNC(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Set the number of em groups in the global state node. ++ * @brief Also, copy the EVENT_CONFIG struct that has been passed in, ++ * @brief into a global location for now. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Set_EM_Config_UNC(IOCTL_ARGS arg) ++{ ++ EVENT_CONFIG ec; ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ // allocate memory ++ LWPMU_DEVICE_ec(&devices[cur_device]) = ++ CONTROL_Allocate_Memory(sizeof(EVENT_CONFIG_NODE)); ++ if (copy_from_user(LWPMU_DEVICE_ec(&devices[cur_device]), ++ (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure for LWPMU_device_ec!"); ++ return OS_FAULT; ++ } ++ // configure num_groups from ec of the specific device ++ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[cur_device]); ++ SEP_DRV_LOG_TRACE("Num Groups UNCORE: %d.", ++ EVENT_CONFIG_num_groups_unc(ec)); ++ LWPMU_DEVICE_PMU_register_data(&devices[cur_device]) = ++ CONTROL_Allocate_Memory(EVENT_CONFIG_num_groups_unc(ec) * ++ sizeof(VOID *)); ++ if (!LWPMU_DEVICE_PMU_register_data(&devices[cur_device])) { ++ LWPMU_DEVICE_ec(&devices[cur_device]) = CONTROL_Free_Memory( ++ LWPMU_DEVICE_ec(&devices[cur_device])); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for LWPMU_DEVICE_PMU_register_data"); ++ return OS_NO_MEM; ++ } ++ LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = 0; ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Configure_events(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Copies one group of events into kernel space at ++ * @brief PMU_register_data[em_groups_count]. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Configure_Events(IOCTL_ARGS arg) ++{ ++ U32 group_id; ++ ECB ecb; ++ U32 em_groups_count; ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[cur_device]); ++ em_groups_count = LWPMU_DEVICE_em_groups_count(&devices[cur_device]); ++ ++ if (em_groups_count >= EVENT_CONFIG_num_groups(ec)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error: EM groups number exceeded initial configuration!"); ++ return OS_INVALID; ++ } ++ if (arg->buf_usr_to_drv == NULL || ++ arg->len_usr_to_drv < sizeof(ECB_NODE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ ecb = CONTROL_Allocate_Memory(arg->len_usr_to_drv); ++ if (!ecb) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for ecb!"); ++ return OS_NO_MEM; ++ } ++ if (copy_from_user(ecb, (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { ++ CONTROL_Free_Memory(ecb); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for ecb data!"); ++ return OS_FAULT; ++ } ++ group_id = ECB_group_id(ecb); ++ ++ if (group_id >= EVENT_CONFIG_num_groups(ec)) { ++ CONTROL_Free_Memory(ecb); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Group_id is larger than total number of groups!"); ++ return OS_INVALID; ++ } ++ ++ LWPMU_DEVICE_PMU_register_data(&devices[cur_device])[group_id] = ecb; ++ LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = group_id + 1; ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Configure_events_UNC(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Make a copy of the uncore registers that need to be programmed ++ * @brief for the next event set used for event multiplexing ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Configure_Events_UNC(IOCTL_ARGS arg) ++{ ++ VOID **PMU_register_data_unc; ++ S32 em_groups_count_unc; ++ ECB ecb; ++ EVENT_CONFIG ec_unc; ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 group_id = 0; ++ ECB in_ecb = NULL; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ em_groups_count_unc = ++ LWPMU_DEVICE_em_groups_count(&devices[cur_device]); ++ PMU_register_data_unc = ++ LWPMU_DEVICE_PMU_register_data(&devices[cur_device]); ++ ec_unc = LWPMU_DEVICE_ec(&devices[cur_device]); ++ pcfg_unc = LWPMU_DEVICE_pcfg(&devices[cur_device]); ++ ++ if (pcfg_unc == NULL || ec_unc == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Pcfg_unc or ec_unc NULL!"); ++ return OS_INVALID; ++ } ++ ++ if (em_groups_count_unc >= (S32)EVENT_CONFIG_num_groups_unc(ec_unc)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Uncore EM groups number exceeded initial configuration!"); ++ return OS_INVALID; ++ } ++ if (arg->buf_usr_to_drv == NULL || ++ arg->len_usr_to_drv < sizeof(ECB_NODE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ in_ecb = CONTROL_Allocate_Memory(arg->len_usr_to_drv); ++ if (!in_ecb) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for uncore ecb!"); ++ return OS_NO_MEM; ++ } ++ if (copy_from_user(in_ecb, (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { ++ CONTROL_Free_Memory(in_ecb); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure for uncore ecb data!"); ++ return OS_FAULT; ++ } ++ group_id = ECB_group_id(in_ecb); ++ ++ if (group_id >= 
EVENT_CONFIG_num_groups_unc(ec_unc)) { ++ CONTROL_Free_Memory(in_ecb); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Group_id is larger than total number of groups!"); ++ return OS_INVALID; ++ } ++ ++ PMU_register_data_unc[group_id] = in_ecb; ++ // at this point, we know the number of uncore events for this device, ++ // so allocate the results buffer per thread for uncore only for SEP event based uncore counting ++ ecb = PMU_register_data_unc[group_id]; ++ if (ecb == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Encountered NULL ECB!"); ++ return OS_INVALID; ++ } ++ LWPMU_DEVICE_num_events(&devices[cur_device]) = ECB_num_events(ecb); ++ LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = group_id + 1; ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Set_Sample_Descriptors(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Set the number of descriptor groups in the global state node. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Set_Sample_Descriptors(IOCTL_ARGS arg) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ if (arg->len_usr_to_drv != sizeof(U32) || arg->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (Unknown size of Sample Descriptors!)."); ++ return OS_INVALID; ++ } ++ ++ desc_count = 0; ++ if (copy_from_user(&GLOBAL_STATE_num_descriptors(driver_state), ++ (void __user *)arg->buf_usr_to_drv, sizeof(U32))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); ++ return OS_FAULT; ++ } ++ ++ desc_data = CONTROL_Allocate_Memory( ++ (size_t)GLOBAL_STATE_num_descriptors(driver_state) * ++ sizeof(VOID *)); ++ if (desc_data == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for desc_data!"); ++ return OS_NO_MEM; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Configure_Descriptors(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * @return OS_STATUS ++ * ++ * @brief Make a copy of the descriptors that need to be read in order ++ * @brief to configure a sample record. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Configure_Descriptors(IOCTL_ARGS arg) ++{ ++ U32 uncopied; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (desc_count >= GLOBAL_STATE_num_descriptors(driver_state)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Descriptor groups number exceeded initial configuration!"); ++ return OS_INVALID; ++ } ++ ++ if (arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arg value!"); ++ return OS_INVALID; ++ } ++ if (desc_data == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("NULL desc_data!"); ++ return OS_INVALID; ++ } ++ // ++ // First things first: Make a copy of the data for global use. ++ // ++ desc_data[desc_count] = CONTROL_Allocate_Memory(arg->len_usr_to_drv); ++ uncopied = copy_from_user(desc_data[desc_count], (void __user *)arg->buf_usr_to_drv, ++ arg->len_usr_to_drv); ++ if (uncopied > 0) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Unable to copy desc_data from user!"); ++ return OS_NO_MEM; ++ } ++ SEP_DRV_LOG_TRACE("Added descriptor # %d.", desc_count); ++ desc_count++; ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_LBR_Info(IOCTL_ARGS arg) ++ * ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * @return OS_STATUS ++ * ++ * @brief Make a copy of the LBR information that is passed in. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_LBR_Info(IOCTL_ARGS arg) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (cur_pcfg == NULL || DEV_CONFIG_collect_lbrs(cur_pcfg) == FALSE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "LBR capture has not been configured!"); ++ return OS_INVALID; ++ } ++ ++ if (arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); ++ return OS_INVALID; ++ } ++ ++ // ++ // First things first: Make a copy of the data for global use. ++ // ++ ++ LWPMU_DEVICE_lbr(&devices[cur_device]) = ++ CONTROL_Allocate_Memory((int)arg->len_usr_to_drv); ++ if (!LWPMU_DEVICE_lbr(&devices[cur_device])) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error: Memory allocation failure for lbr!"); ++ return OS_NO_MEM; ++ } ++ ++ if (copy_from_user(LWPMU_DEVICE_lbr(&devices[cur_device]), ++ (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure for lbr struct!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++#if !defined(DRV_SEP_ACRN_ON) ++#define CR4_PCE 0x00000100 //Performance-monitoring counter enable RDPMC ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static void lwpmudrv_Set_CR4_PCE_Bit(PVOID param) ++ * ++ * @param param - dummy parameter ++ * ++ * @return NONE ++ * ++ * @brief Set CR4's PCE bit on the logical processor ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Set_CR4_PCE_Bit(PVOID param) ++{ ++ U32 this_cpu; ++#if defined(DRV_IA32) ++ U32 prev_CR4_value = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // remember if RDPMC bit previously set ++ // and then enabled it ++ __asm__("movl %%cr4, %%eax\n\t" ++ "movl %%eax, %0\n\t" ++ "orl %1, %%eax\n\t" ++ "movl %%eax, %%cr4\n\t" ++ : "=irg"(prev_CR4_value) ++ : "irg"(CR4_PCE) ++ : "eax"); ++#endif ++#if defined(DRV_EM64T) ++ U64 prev_CR4_value = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // remember if RDPMC bit previously set ++ // and then enabled it ++ __asm__("movq %%cr4, %%rax\n\t" ++ "movq %%rax, %0\n\t" ++ "orq %1, %%rax\n\t" ++ "movq %%rax, %%cr4" ++ : "=irg"(prev_CR4_value) ++ : "irg"(CR4_PCE) ++ : "rax"); ++#endif ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ ++ // if bit RDPMC bit was set before, ++ // set flag for when we clear it ++ if (prev_CR4_value & CR4_PCE) { ++ prev_set_CR4[this_cpu] = 1; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static VOID lwpmudrv_Clear_CR4_PCE_Bit(PVOID param)
++ *
++ * @param param - dummy parameter
++ *
++ * @return NONE
++ *
++ * @brief Clear CR4's PCE bit on the logical processor
++ *
++ * Special Notes
++ */
++static VOID lwpmudrv_Clear_CR4_PCE_Bit(PVOID param)
++{
++	U32 this_cpu;
++
++	SEP_DRV_LOG_TRACE_IN("");
++
++	preempt_disable();
++	this_cpu = CONTROL_THIS_CPU();
++	preempt_enable();
++
++	// only clear the CR4 bit if it wasn't set
++	// before we started
++	if (prev_set_CR4 && !prev_set_CR4[this_cpu]) {
++#if defined(DRV_IA32)
++		__asm__("movl %%cr4, %%eax\n\t"
++			"andl %0, %%eax\n\t"
++			"movl %%eax, %%cr4\n"
++			:
++			: "irg"(~CR4_PCE)
++			: "eax");
++#endif
++#if defined(DRV_EM64T)
++		__asm__("movq %%cr4, %%rax\n\t"
++			"andq %0, %%rax\n\t"
++			"movq %%rax, %%cr4\n"
++			:
++			: "irg"(~CR4_PCE)
++			: "rax");
++#endif
++	}
++
++	SEP_DRV_LOG_TRACE_OUT("");
++}
++
++#endif
++
++/* ------------------------------------------------------------------------- */
++/*!
++ * @fn static OS_STATUS lwpmudrv_Start(void)
++ *
++ * @param none
++ *
++ * @return OS_STATUS
++ *
++ * @brief Local function that handles the LWPMU_IOCTL_START call.
++ * @brief Set up the OS hooks for process/thread/load notifications.
++ * @brief Write the initial set of MSRs.
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Start(void) ++{ ++ OS_STATUS status = OS_SUCCESS; ++#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) ++ U32 cpu_num; ++#endif ++#if defined(DRV_SEP_ACRN_ON) ++ struct profiling_control *control = NULL; ++ S32 i; ++#endif ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (!CHANGE_DRIVER_STATE(STATE_BIT_IDLE, DRV_STATE_RUNNING)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (drv_cfg == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("NULL drv_cfg!"); ++ return OS_INVALID; ++ } ++ ++ if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { ++ if (DRV_CONFIG_start_paused(drv_cfg)) { ++ CHANGE_DRIVER_STATE(STATE_BIT_RUNNING, ++ DRV_STATE_PAUSED); ++ } ++ SEP_DRV_LOG_FLOW_OUT("[PCL enabled] Early return value: %d", ++ status); ++ return status; ++ } ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ prev_set_CR4 = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U8)); ++ CONTROL_Invoke_Parallel(lwpmudrv_Set_CR4_PCE_Bit, (PVOID)(size_t)0); ++#endif ++ ++#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) ++ atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); ++ init_waitqueue_head(&read_tsc_now); ++ CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, (PVOID)(size_t)0); ++#endif ++ ++#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) ++ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_num++) { ++ if (CPU_STATE_offlined(&pcb[cpu_num])) { ++ cpu_tsc[cpu_num] = cpu_tsc[0]; ++ } ++ } ++#else ++ UTILITY_Read_TSC(&cpu_tsc[0]); ++#endif ++ ++ if (DRV_CONFIG_start_paused(drv_cfg)) { ++ CHANGE_DRIVER_STATE(STATE_BIT_RUNNING, DRV_STATE_PAUSED); ++ } else { ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Resume_Op, NULL); ++#else ++ control = (struct profiling_control *)CONTROL_Allocate_Memory( ++ sizeof(struct profiling_control)); ++ if (control == NULL) { ++ SEP_PRINT_ERROR( ++ "lwpmudrv_Start: 
Unable to allocate memory\n"); ++ return OS_NO_MEM; ++ } ++ memset(control, 0, sizeof(struct profiling_control)); ++ ++ BUG_ON(!virt_addr_valid(control)); ++ control->collector_id = COLLECTOR_SEP; ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_CONTROL_SWITCH, ++ virt_to_phys(control)); ++ ++ SEP_PRINT_DEBUG("ACRN profiling collection running 0x%llx\n", ++ control->switches); ++ ++ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ control->switches |= (1 << CORE_PMU_SAMPLING) | ++ (1 << VM_SWITCH_TRACING); ++ if (DEV_CONFIG_collect_lbrs(cur_pcfg)) { ++ control->switches |= (1 << LBR_PMU_SAMPLING); ++ } ++ } else { ++ control->switches |= (1 << CORE_PMU_COUNTING); ++ } ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, ++ virt_to_phys(control)); ++ control = CONTROL_Free_Memory(control); ++ ++ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ char kthread_name[MAXNAMELEN]; ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); ++ i++) { ++ snprintf(kthread_name, MAXNAMELEN, "%s_%d", ++ "SEPDRV_BUFFER_HANDLER", i); ++ acrn_buffer_handler[i] = ++ kthread_create(PMI_Buffer_Handler, ++ (VOID *)(size_t)i, ++ kthread_name); ++ if (acrn_buffer_handler[i]) { ++ wake_up_process(acrn_buffer_handler[i]); ++ } ++ } ++ SEP_PRINT_DEBUG( ++ "lwpmudrv_Prepare_Stop: flushed all the remaining buffer\n"); ++ } ++#endif ++ ++#if defined(BUILD_CHIPSET) ++ if (DRV_CONFIG_enable_chipset(drv_cfg) && cs_dispatch != NULL && ++ cs_dispatch->start_chipset != NULL) { ++ cs_dispatch->start_chipset(); ++ } ++#endif ++ ++ EVENTMUX_Start(); ++ lwpmudrv_Dump_Tracer("start", 0); ++ ++#if defined(BUILD_GFX) ++ SEP_DRV_LOG_TRACE("Enable_gfx=%d.", ++ (int)DRV_CONFIG_enable_gfx(drv_cfg)); ++ if (DRV_CONFIG_enable_gfx(drv_cfg)) { ++ GFX_Start(); ++ } ++#endif ++ if (unc_buf_init) { ++ lwpmudrv_Uncore_Start_Timer(); ++ } ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* 
------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Cleanup_Op(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Clean up registers after collection ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Cleanup_Op(PVOID param) ++{ ++ U32 this_cpu; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (dispatch != NULL && dispatch->cleanup != NULL) { ++ dispatch->cleanup(&dev_idx); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn lwpmudrv_Prepare_Stop(); ++ * ++ * @param NONE ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the DRV_OPERATION_STOP call. ++ * @brief Cleans up the interrupt handler. ++ */ ++static OS_STATUS lwpmudrv_Prepare_Stop(void) ++{ ++ S32 i; ++ S32 done = FALSE; ++ S32 cpu_num; ++#if defined(DRV_SEP_ACRN_ON) ++ struct profiling_control *control = NULL; ++#endif ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { ++ if (!CHANGE_DRIVER_STATE(STATE_BIT_RUNNING | STATE_BIT_PAUSED, ++ DRV_STATE_PREPARE_STOP)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state."); ++ return OS_INVALID; ++ } ++ } else { ++ SEP_DRV_LOG_WARNING("Abnormal termination path."); ++ } ++ ++ if (drv_cfg == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg is NULL!"); ++ return OS_INVALID; ++ } ++ ++ if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { ++ SEP_DRV_LOG_FLOW_OUT("Success: using PCL"); ++ return OS_SUCCESS; ++ } ++ ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ CPU_STATE_accept_interrupt(&pcb[i]) = 0; ++ } ++ while (!done) { ++ done = TRUE; ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ if (atomic_read(&CPU_STATE_in_interrupt(&pcb[i]))) { ++ done = FALSE; ++ } ++ } ++ } ++#if 
!defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Pause_Op, NULL); ++#else ++ control = (struct profiling_control *)CONTROL_Allocate_Memory( ++ sizeof(struct profiling_control)); ++ if (control == NULL) { ++ SEP_PRINT_ERROR("lwpmudrv_Start: Unable to allocate memory\n"); ++ return OS_NO_MEM; ++ } ++ memset(control, 0, sizeof(struct profiling_control)); ++ ++ BUG_ON(!virt_addr_valid(control)); ++ control->collector_id = COLLECTOR_SEP; ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_CONTROL_SWITCH, ++ virt_to_phys(control)); ++ ++ SEP_PRINT_DEBUG("ACRN profiling collection running 0x%llx\n", ++ control->switches); ++ ++ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ control->switches &= ++ ~((1 << CORE_PMU_SAMPLING) | (1 << VM_SWITCH_TRACING)); ++ } else { ++ control->switches &= ~(1 << CORE_PMU_COUNTING); ++ } ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, ++ virt_to_phys(control)); ++ control = CONTROL_Free_Memory(control); ++#endif ++ ++ SEP_DRV_LOG_TRACE("Outside of all interrupts."); ++ ++#if defined(BUILD_CHIPSET) ++ if (DRV_CONFIG_enable_chipset(drv_cfg) && cs_dispatch != NULL && ++ cs_dispatch->stop_chipset != NULL) { ++ cs_dispatch->stop_chipset(); ++ } ++#endif ++ ++#if defined(BUILD_GFX) ++ SEP_DRV_LOG_TRACE("Enable_gfx=%d.", ++ (int)DRV_CONFIG_enable_gfx(drv_cfg)); ++ if (DRV_CONFIG_enable_gfx(drv_cfg)) { ++ GFX_Stop(); ++ } ++#endif ++ ++ if (unc_buf_init) { ++ lwpmudrv_Uncore_Stop_Timer(); ++ } ++ ++ if (drv_cfg == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg is NULL!"); ++ return OS_INVALID; ++ } ++ ++ /* ++ * Clean up all the control registers ++ */ ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(lwpmudrv_Cleanup_Op, (VOID *)NULL); ++#else ++ lwpmudrv_Cleanup_Op(NULL); ++#endif ++ ++ SEP_DRV_LOG_TRACE("Cleanup finished."); ++ lwpmudrv_Free_Restore_Buffer(); ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ if (prev_set_CR4) { ++ CONTROL_Invoke_Parallel(lwpmudrv_Clear_CR4_PCE_Bit, ++ (VOID *)(size_t)0); 
++ prev_set_CR4 = CONTROL_Free_Memory(prev_set_CR4); ++ } ++#endif ++ ++#if defined(BUILD_CHIPSET) ++ if (DRV_CONFIG_enable_chipset(drv_cfg) && cs_dispatch && ++ cs_dispatch->fini_chipset) { ++ cs_dispatch->fini_chipset(); ++ } ++#endif ++ ++ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_num++) { ++ SEP_DRV_LOG_TRACE( ++ "# of PMU interrupts via NMI triggered on cpu%d: %u.", ++ cpu_num, CPU_STATE_nmi_handled(&pcb[cpu_num])); ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success."); ++ return OS_SUCCESS; ++} ++ ++/* ++ * @fn lwpmudrv_Finish_Stop(); ++ * ++ * @param NONE ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the DRV_OPERATION_STOP call. ++ * @brief Cleans up the interrupt handler. ++ */ ++static OS_STATUS lwpmudrv_Finish_Stop(void) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ S32 idx, cpu; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { ++ if (!CHANGE_DRIVER_STATE(STATE_BIT_PREPARE_STOP, ++ DRV_STATE_STOPPED)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state!"); ++ return OS_FAULT; ++ } ++ } else { ++ SEP_DRV_LOG_WARNING("Abnormal termination path."); ++ } ++ ++ if (drv_cfg == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg is NULL!"); ++ return OS_INVALID; ++ } ++ ++ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(PEBS_Flush_Buffer, NULL); ++#endif ++ /* ++ * Make sure that the module buffers are not deallocated and that the module flush ++ * thread has not been terminated. 
++ */ ++ if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { ++ status = LINUXOS_Enum_Process_Modules(TRUE); ++ } ++ OUTPUT_Flush(); ++ } ++ /* ++ * Clean up the interrupt handler via the IDT ++ */ ++#if !defined(DRV_SEP_ACRN_ON) ++ CPUMON_Remove_Cpuhooks(); ++ PEBS_Destroy(); ++#else ++ for (cpu = 0; cpu < GLOBAL_STATE_num_cpus(driver_state); ++ cpu++) { ++ sbuf_share_setup(cpu, ACRN_SEP, NULL); ++ sbuf_free(samp_buf_per_cpu[cpu]); ++ } ++ samp_buf_per_cpu = CONTROL_Free_Memory(samp_buf_per_cpu); ++#endif ++ EVENTMUX_Destroy(); ++ } ++ ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ if (interrupt_counts) { ++ for (cpu = 0; cpu < GLOBAL_STATE_num_cpus(driver_state); ++ cpu++) { ++ for (idx = 0; ++ idx < DRV_CONFIG_num_events(drv_cfg); ++ idx++) { ++ SEP_DRV_LOG_TRACE( ++ "Interrupt count: CPU %d, event %d = %lld.", ++ cpu, idx, ++ interrupt_counts ++ [cpu * DRV_CONFIG_num_events( ++ drv_cfg) + ++ idx]); ++ } ++ } ++ } ++ } ++ ++ read_counter_info = CONTROL_Free_Memory(read_counter_info); ++ prev_counter_data = CONTROL_Free_Memory(prev_counter_data); ++ emon_buffer_driver_helper = ++ CONTROL_Free_Memory(emon_buffer_driver_helper); ++ lwpmudrv_Dump_Tracer("stop", 0); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Get_Normalized_TSC(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Return the current value of the normalized TSC. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_Normalized_TSC(IOCTL_ARGS arg) ++{ ++ U64 tsc = 0; ++ U64 this_cpu = 0; ++ size_t size_to_copy = sizeof(U64); ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (arg->len_drv_to_usr != size_to_copy || ++ arg->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid arguments!"); ++ return OS_INVALID; ++ } ++ ++ preempt_disable(); ++ UTILITY_Read_TSC(&tsc); ++ this_cpu = CONTROL_THIS_CPU(); ++ tsc -= TSC_SKEW(CONTROL_THIS_CPU()); ++ preempt_enable(); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++ if (drv_cfg && DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { ++ preempt_disable(); ++ tsc = cpu_clock(this_cpu); ++ preempt_enable(); ++ } else { ++#endif ++ tsc -= TSC_SKEW(this_cpu); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++ } ++#endif ++ if (copy_to_user((void __user *)arg->buf_drv_to_usr, (VOID *)&tsc, size_to_copy)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ lwpmudrv_Dump_Tracer("marker", tsc); ++ ++ SEP_DRV_LOG_TRACE_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Get_Num_Cores(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Quickly return the (total) number of cpus in the system. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_Num_Cores(IOCTL_ARGS arg) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ S32 num = GLOBAL_STATE_num_cpus(driver_state); ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->len_drv_to_usr != sizeof(S32) || arg->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ SEP_DRV_LOG_TRACE("Num_Cores is %d, buf_usr_to_drv is 0x%p.", num, ++ arg->buf_drv_to_usr); ++ status = put_user(num, (S32 __user*)arg->buf_drv_to_usr); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Set_CPU_Mask(PVOID buf_usr_to_drv, U32 len_usr_to_drv) ++ * ++ * @param buf_usr_to_drv - pointer to the CPU mask buffer ++ * @param len_usr_to_drv - size of the CPU mask buffer ++ * ++ * @return OS_STATUS ++ * ++ * @brief process the CPU mask as requested by the user ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Set_CPU_Mask(PVOID buf_usr_to_drv, ++ size_t len_usr_to_drv) ++{ ++ U32 cpu_count = 0; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (len_usr_to_drv == 0 || buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "len_usr_to_drv == 0 or buf_usr_to_drv is NULL!"); ++ return OS_INVALID; ++ } ++ ++ cpu_mask_bits = CONTROL_Allocate_Memory((int)len_usr_to_drv); ++ if (!cpu_mask_bits) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for cpu_mask_bits!"); ++ return OS_NO_MEM; ++ } ++ ++ if (copy_from_user(cpu_mask_bits, (void __user *)buf_usr_to_drv, ++ (int)len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ for (cpu_count = 0; ++ cpu_count < (U32)GLOBAL_STATE_num_cpus(driver_state); ++ cpu_count++) { ++ 
CPU_STATE_accept_interrupt(&pcb[cpu_count]) = ++ cpu_mask_bits[cpu_count] ? 1 : 0; ++ CPU_STATE_initial_mask(&pcb[cpu_count]) = ++ cpu_mask_bits[cpu_count] ? 1 : 0; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Get_KERNEL_CS(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Return the value of the Kernel symbol KERNEL_CS. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_KERNEL_CS(IOCTL_ARGS arg) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ S32 num = __KERNEL_CS; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->len_drv_to_usr != sizeof(S32) || arg->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ SEP_DRV_LOG_TRACE("__KERNEL_CS is %d, buf_usr_to_drv is 0x%p.", num, ++ arg->buf_drv_to_usr); ++ status = put_user(num, (S32 __user *)arg->buf_drv_to_usr); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); ++ return status; ++} ++ ++/* ++ * @fn lwpmudrv_Set_UID ++ * ++ * @param IN arg - pointer to the output buffer ++ * @return OS_STATUS ++ * ++ * @brief Receive the value of the UID of the collector process. 
++ */ ++static OS_STATUS lwpmudrv_Set_UID(IOCTL_ARGS arg) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->len_usr_to_drv != sizeof(uid_t) || ++ arg->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ status = get_user(uid, (S32 __user *)arg->buf_usr_to_drv); ++ SEP_DRV_LOG_TRACE("Uid is %d.", uid); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Get_TSC_Skew_Info(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * @brief Return the current value of the TSC skew data ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_TSC_Skew_Info(IOCTL_ARGS arg) ++{ ++ S64 *skew_array; ++ size_t skew_array_len; ++ S32 i; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ skew_array_len = GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64); ++ if (arg->len_drv_to_usr < skew_array_len || ++ arg->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Input buffer too small or NULL!"); ++ return OS_INVALID; ++ } ++ ++ if (!DRV_CONFIG_enable_cp_mode(drv_cfg) && ++ GET_DRIVER_STATE() != DRV_STATE_STOPPED) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: cp_mode not enabled and driver is not STOPPED!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ SEP_DRV_LOG_TRACE("Dispatched with len_drv_to_usr=%lld.", ++ arg->len_drv_to_usr); ++ ++ skew_array = CONTROL_Allocate_Memory(skew_array_len); ++ if (skew_array == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for skew_array!"); ++ return OS_NO_MEM; ++ } ++ ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ skew_array[i] = TSC_SKEW(i); ++ } ++ ++ if 
(copy_to_user((void __user *)arg->buf_drv_to_usr, skew_array, skew_array_len)) { ++ skew_array = CONTROL_Free_Memory(skew_array); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure for skew_array!"); ++ return OS_FAULT; ++ } ++ ++ skew_array = CONTROL_Free_Memory(skew_array); ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Collect_Sys_Config(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the COLLECT_SYS_CONFIG call. ++ * @brief Builds and collects the SYS_INFO data needed. ++ * @brief Writes the result into the argument. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Collect_Sys_Config(IOCTL_ARGS arg) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ U32 num; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ num = SYS_INFO_Build(); ++ ++ if (arg->len_drv_to_usr < sizeof(S32) || arg->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ SEP_DRV_LOG_TRACE("Size of sys info is %d.", num); ++ status = put_user(num, (S32 __user *)arg->buf_drv_to_usr); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Sys_Config(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Return the current value of the normalized TSC. ++ * ++ * @brief Transfers the VTSA_SYS_INFO data back to the abstraction layer. ++ * @brief The buf_usr_to_drv should have enough space to handle the transfer. 
++ */ ++static OS_STATUS lwpmudrv_Sys_Config(IOCTL_ARGS arg) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ SYS_INFO_Transfer(arg->buf_drv_to_usr, arg->len_drv_to_usr); ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Samp_Read_Num_Of_Core_Counters(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Read memory mapped i/o physical location ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Samp_Read_Num_Of_Core_Counters(IOCTL_ARGS arg) ++{ ++ U64 rax, rbx, rcx, rdx, num_basic_functions; ++ U32 val = 0; ++ OS_STATUS status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ UTILITY_Read_Cpuid(0x0, &num_basic_functions, &rbx, &rcx, &rdx); ++ ++ if (num_basic_functions >= 0xA) { ++ UTILITY_Read_Cpuid(0xA, &rax, &rbx, &rcx, &rdx); ++ val = ((U32)(rax >> 8)) & 0xFF; ++ } ++ status = put_user(val, (U32 __user *)arg->buf_drv_to_usr); ++ SEP_DRV_LOG_TRACE("Num of counter is %d.", val); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); ++ return status; ++} ++ ++#if defined(BUILD_CHIPSET) ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static DRV_BOOL lwpmudrv_Is_Physical_Address_Free(U32 physical_address)
++ *
++ * @param physical_address - physical address to probe
++ *
++ * @return DRV_BOOL - TRUE only if the location reads back as all-ones
++ *                    and is not affected by a test write
++ *
++ * @brief Check if physical address is available
++ *
++ * Special Notes
++ *     Probes by reading, writing a single-bit-cleared test pattern,
++ *     and restoring the original value. Only runs while the driver
++ *     is IDLE. NOTE(review): writing to arbitrary physical addresses
++ *     is inherently unsafe (the inline comment below admits this);
++ *     confirm callers only pass candidate MMIO ranges.
++ */
++static DRV_BOOL lwpmudrv_Is_Physical_Address_Free(U32 physical_address)
++{
++	U32 new_value;
++	U32 test_value = 0;
++	U32 value = 0;
++
++	SEP_DRV_LOG_TRACE_IN("");
++
++	/* Probing is only permitted before a collection is configured. */
++	if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
++		SEP_DRV_LOG_WARNING_TRACE_OUT(
++			"FALSE: driver state is not IDLE!");
++		return FALSE;
++	}
++	if (physical_address == 0) {
++		SEP_DRV_LOG_WARNING_TRACE_OUT("FALSE: is NULL!");
++		return FALSE;
++	}
++
++	// First attempt read
++	//
++	PCI_Read_From_Memory_Address(physical_address, &value);
++
++	// Value must be 0xFFFFFFFF or there is NO chance
++	// that this memory location is available.
++	//
++	if (value != 0xFFFFFFFF) {
++		SEP_DRV_LOG_TRACE_OUT("FALSE: value is not 0xFFFFFFFF!");
++		return FALSE;
++	}
++
++	//
++	// Try to write a bit to a zero (this probably
++	// isn't too safe, but this is just for testing)
++	//
++	new_value = 0xFFFFFFFE;
++	PCI_Write_To_Memory_Address(physical_address, new_value);
++	PCI_Read_From_Memory_Address(physical_address, &test_value);
++
++	// Write back original
++	PCI_Write_To_Memory_Address(physical_address, value);
++
++	if (new_value == test_value) {
++		// The write appeared to change the
++		// memory, it must be mapped already
++		//
++		SEP_DRV_LOG_TRACE_OUT("FALSE: appears to be mapped already!");
++		return FALSE;
++	}
++
++	if (test_value == 0xFFFFFFFF) {
++		// The write did not change the bit, so
++		// apparently, this memory must not be mapped
++		// to anything.
++		//
++		SEP_DRV_LOG_TRACE_OUT("TRUE: appears not to be mapped!");
++		return TRUE;
++	}
++
++	// Readback was neither the test pattern nor all-ones: inconclusive.
++	SEP_DRV_LOG_TRACE_OUT("FALSE: Odd case!");
++	return FALSE;
++}
++
++/* ------------------------------------------------------------------------- */
++/*!
++ * @fn static OS_STATUS lwpmudrv_Samp_Find_Physical_Address(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Find a free physical address ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Samp_Find_Physical_Address(IOCTL_ARGS arg) ++{ ++ CHIPSET_PCI_SEARCH_ADDR_NODE user_addr; ++ CHIPSET_PCI_SEARCH_ADDR search_addr; ++ U32 addr; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ search_addr = (CHIPSET_PCI_SEARCH_ADDR)arg->buf_usr_to_drv; ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE."); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL || ++ arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); ++ return OS_INVALID; ++ } ++ ++ if (!search_addr) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Search_addr is NULL!"); ++ return OS_FAULT; ++ } ++ ++ if (!access_ok(VERIFY_WRITE, (void __user *)search_addr, ++ sizeof(CHIPSET_PCI_SEARCH_ADDR_NODE))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Access not OK!"); ++ return OS_FAULT; ++ } ++ ++ if (copy_from_user(&user_addr, (void __user *)search_addr, ++ sizeof(CHIPSET_PCI_SEARCH_ADDR_NODE))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error: Memory copy failure for search_addr!"); ++ return OS_FAULT; ++ } ++ ++ if (CHIPSET_PCI_SEARCH_ADDR_start(&user_addr) > ++ CHIPSET_PCI_SEARCH_ADDR_stop(&user_addr)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "SEARCH_ADDR_start > SEARCH_ADDR_stop!"); ++ return OS_INVALID; ++ } ++ ++ CHIPSET_PCI_SEARCH_ADDR_address(&user_addr) = 0; ++ ++ for (addr = CHIPSET_PCI_SEARCH_ADDR_start(&user_addr); ++ addr <= CHIPSET_PCI_SEARCH_ADDR_stop(&user_addr); ++ addr += CHIPSET_PCI_SEARCH_ADDR_increment(&user_addr)) { ++ SEP_DRV_LOG_TRACE("Addr=%x:", addr); ++ if (lwpmudrv_Is_Physical_Address_Free(addr)) { ++ CHIPSET_PCI_SEARCH_ADDR_address(&user_addr) = addr; ++ break; ++ } ++ } ++ ++ if (copy_to_user((void 
__user *)arg->buf_drv_to_usr, (VOID *)&user_addr, ++ sizeof(CHIPSET_PCI_SEARCH_ADDR_NODE))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error: Memory copy failure for user_addr!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Samp_Read_PCI_Config(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Read the PCI Configuration Space ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Samp_Read_PCI_Config(IOCTL_ARGS arg) ++{ ++ CHIPSET_PCI_CONFIG rd_pci = NULL; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); ++ return OS_FAULT; ++ } ++ ++ rd_pci = CONTROL_Allocate_Memory(arg->len_drv_to_usr); ++ if (rd_pci == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for rd_pci!"); ++ return OS_NO_MEM; ++ } ++ ++ if (copy_from_user(rd_pci, (void __user *)arg->buf_usr_to_drv, ++ sizeof(CHIPSET_PCI_CONFIG_NODE))) { ++ CONTROL_Free_Memory(rd_pci); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for rd_pci!"); ++ return OS_FAULT; ++ } ++ ++ CHIPSET_PCI_CONFIG_value(rd_pci) = ++ PCI_Read_U32(CHIPSET_PCI_CONFIG_bus(rd_pci), ++ CHIPSET_PCI_CONFIG_device(rd_pci), ++ CHIPSET_PCI_CONFIG_function(rd_pci), ++ CHIPSET_PCI_CONFIG_offset(rd_pci)); ++ ++ if (copy_to_user((void __user *)arg->buf_drv_to_usr, (VOID *)rd_pci, ++ sizeof(CHIPSET_PCI_CONFIG_NODE))) { ++ CONTROL_Free_Memory(rd_pci); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for rd_pci!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_TRACE("Value at this PCI address:0x%x.", ++ CHIPSET_PCI_CONFIG_value(rd_pci)); ++ ++ CONTROL_Free_Memory(rd_pci); ++ ++ SEP_DRV_LOG_FLOW_OUT("Success."); ++ return OS_SUCCESS; ++} ++ ++/* 
------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Samp_Write_PCI_Config(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Write to the PCI Configuration Space ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Samp_Write_PCI_Config(IOCTL_ARGS arg) ++{ ++ CHIPSET_PCI_CONFIG wr_pci = NULL; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ // the following allows "sep -el -pc" to work, since the command must access the ++ // the driver ioctls before driver is used for a collection ++ ++ if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), ++ STATE_BIT_UNINITIALIZED | STATE_BIT_IDLE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: Driver state is not IDLE or UNINITIALIZED!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!"); ++ return OS_INVALID; ++ } ++ ++ wr_pci = CONTROL_Allocate_Memory(arg->len_usr_to_drv); ++ if (wr_pci == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for wr_pci!"); ++ return OS_NO_MEM; ++ } ++ if (copy_from_user(wr_pci, (void __user *)arg->buf_usr_to_drv, ++ sizeof(CHIPSET_PCI_CONFIG_NODE))) { ++ CONTROL_Free_Memory(wr_pci); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for wr_pci!"); ++ return OS_FAULT; ++ } ++ ++ PCI_Write_U32(CHIPSET_PCI_CONFIG_bus(wr_pci), ++ CHIPSET_PCI_CONFIG_device(wr_pci), ++ CHIPSET_PCI_CONFIG_function(wr_pci), ++ CHIPSET_PCI_CONFIG_offset(wr_pci), ++ CHIPSET_PCI_CONFIG_value(wr_pci)); ++ ++ CONTROL_Free_Memory(wr_pci); ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Samp_Chipset_Init(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Initialize the chipset cnfiguration ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Samp_Chipset_Init(IOCTL_ARGS arg) ++{ ++ PVOID buf_usr_to_drv; ++ U32 len_usr_to_drv; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ buf_usr_to_drv = arg->buf_usr_to_drv; ++ len_usr_to_drv = arg->len_usr_to_drv; ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (buf_usr_to_drv == NULL || len_usr_to_drv == 0) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments!"); ++ return OS_INVALID; ++ } ++ ++ // First things first: Make a copy of the data for global use. ++ pma = CONTROL_Allocate_Memory(len_usr_to_drv); ++ ++ if (pma == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for pma!"); ++ return OS_NO_MEM; ++ } ++ ++ if (copy_from_user(pma, (void __user *)buf_usr_to_drv, len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure for pma!"); ++ return OS_FAULT; ++ } ++ ++#if defined(MY_DEBUG) ++ ++ SEP_DRV_LOG_TRACE("Chipset Configuration follows..."); ++ SEP_DRV_LOG_TRACE("pma->length=%d.", CHIPSET_CONFIG_length(pma)); ++ SEP_DRV_LOG_TRACE("pma->version=%d.", ++ CHIPSET_CONFIG_major_version(pma)); ++ SEP_DRV_LOG_TRACE("pma->processor=%d.", CHIPSET_CONFIG_processor(pma)); ++ SEP_DRV_LOG_TRACE("pma->mch_chipset=%d.", ++ CHIPSET_CONFIG_mch_chipset(pma)); ++ SEP_DRV_LOG_TRACE("pma->ich_chipset=%d.", ++ CHIPSET_CONFIG_ich_chipset(pma)); ++ SEP_DRV_LOG_TRACE("pma->gmch_chipset=%d.", ++ CHIPSET_CONFIG_gmch_chipset(pma)); ++ SEP_DRV_LOG_TRACE("pma->mother_board_time=%d.", ++ CHIPSET_CONFIG_motherboard_time(pma)); ++ SEP_DRV_LOG_TRACE("pma->host_proc_run=%d.", ++ CHIPSET_CONFIG_host_proc_run(pma)); ++ SEP_DRV_LOG_TRACE("pma->noa_chipset=%d.", ++ CHIPSET_CONFIG_noa_chipset(pma)); ++ 
SEP_DRV_LOG_TRACE("pma->bnb_chipset=%d.", ++ CHIPSET_CONFIG_bnb_chipset(pma)); ++ ++ if (CHIPSET_CONFIG_mch_chipset(pma)) { ++ SEP_DRV_LOG_TRACE("pma->mch->phys_add=0x%llx.", ++ CHIPSET_SEGMENT_physical_address( ++ &CHIPSET_CONFIG_mch(pma))); ++ SEP_DRV_LOG_TRACE( ++ "pma->mch->size=%d.", ++ CHIPSET_SEGMENT_size(&CHIPSET_CONFIG_mch(pma))); ++ SEP_DRV_LOG_TRACE( ++ "pma->mch->num_counters=%d.", ++ CHIPSET_SEGMENT_num_counters(&CHIPSET_CONFIG_mch(pma))); ++ SEP_DRV_LOG_TRACE( ++ "pma->mch->total_events=%d.", ++ CHIPSET_SEGMENT_total_events(&CHIPSET_CONFIG_mch(pma))); ++ } ++ ++ if (CHIPSET_CONFIG_ich_chipset(pma)) { ++ SEP_DRV_LOG_TRACE("pma->ich->phys_add=0x%llx.", ++ CHIPSET_SEGMENT_physical_address( ++ &CHIPSET_CONFIG_ich(pma))); ++ SEP_DRV_LOG_TRACE( ++ "pma->ich->size=%d.", ++ CHIPSET_SEGMENT_size(&CHIPSET_CONFIG_ich(pma))); ++ SEP_DRV_LOG_TRACE( ++ "pma->ich->num_counters=%d.", ++ CHIPSET_SEGMENT_num_counters(&CHIPSET_CONFIG_ich(pma))); ++ SEP_DRV_LOG_TRACE( ++ "pma->ich->total_events=%d.", ++ CHIPSET_SEGMENT_total_events(&CHIPSET_CONFIG_ich(pma))); ++ } ++ ++ if (CHIPSET_CONFIG_gmch_chipset(pma)) { ++ SEP_DRV_LOG_TRACE("pma->gmch->phys_add=0x%llx.", ++ CHIPSET_SEGMENT_physical_address( ++ &CHIPSET_CONFIG_gmch(pma))); ++ SEP_DRV_LOG_TRACE( ++ "pma->gmch->size=%d.", ++ CHIPSET_SEGMENT_size(&CHIPSET_CONFIG_gmch(pma))); ++ SEP_DRV_LOG_TRACE("pma->gmch->num_counters=%d.", ++ CHIPSET_SEGMENT_num_counters( ++ &CHIPSET_CONFIG_gmch(pma))); ++ SEP_DRV_LOG_TRACE("pma->gmch->total_events=%d.", ++ CHIPSET_SEGMENT_total_events( ++ &CHIPSET_CONFIG_gmch(pma))); ++ SEP_DRV_LOG_TRACE("pma->gmch->read_register=0x%x.", ++ CHIPSET_SEGMENT_read_register( ++ &CHIPSET_CONFIG_gmch(pma))); ++ SEP_DRV_LOG_TRACE("pma->gmch->write_register=0x%x.", ++ CHIPSET_SEGMENT_write_register( ++ &CHIPSET_CONFIG_gmch(pma))); ++ } ++ ++#endif ++ ++ // Set up the global cs_dispatch table ++ cs_dispatch = UTILITY_Configure_Chipset(); ++ if (cs_dispatch == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Unknown 
chipset family!"); ++ return OS_INVALID; ++ } ++ ++ // Initialize chipset configuration ++ if (cs_dispatch->init_chipset()) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Failed to initialize the chipset!"); ++ return OS_INVALID; ++ } ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Get_Platform_Info(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Reads the MSR_PLATFORM_INFO register if present ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_Platform_Info(IOCTL_ARGS args) ++{ ++ U32 size = sizeof(DRV_PLATFORM_INFO_NODE); ++ OS_STATUS status = OS_SUCCESS; ++ DRV_PLATFORM_INFO platform_data = NULL; ++ U32 *dispatch_ids = NULL; ++ DISPATCH dispatch_ptr = NULL; ++ U32 i = 0; ++ U32 num_entries; // # dispatch ids to process ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ num_entries = ++ args->len_usr_to_drv / sizeof(U32); // # dispatch ids to process ++ ++ platform_data = CONTROL_Allocate_Memory(sizeof(DRV_PLATFORM_INFO_NODE)); ++ if (!platform_data) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for platform_data!"); ++ return OS_NO_MEM; ++ } ++ ++ memset(platform_data, 0, sizeof(DRV_PLATFORM_INFO_NODE)); ++ if (args->len_usr_to_drv > 0 && args->buf_usr_to_drv != NULL) { ++ dispatch_ids = CONTROL_Allocate_Memory(args->len_usr_to_drv); ++ if (!dispatch_ids) { ++ platform_data = CONTROL_Free_Memory(platform_data); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for dispatch_ids!"); ++ return OS_NO_MEM; ++ } ++ ++ status = copy_from_user(dispatch_ids, (void __user *)args->buf_usr_to_drv, ++ args->len_usr_to_drv); ++ if (status) { ++ platform_data = CONTROL_Free_Memory(platform_data); ++ dispatch_ids = CONTROL_Free_Memory(dispatch_ids); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure for dispatch_ids!"); ++ return status; ++ } ++ for 
(i = 0; i < num_entries; i++) { ++ if (dispatch_ids[i] > 0) { ++ dispatch_ptr = ++ UTILITY_Configure_CPU(dispatch_ids[i]); ++ if (dispatch_ptr && ++ dispatch_ptr->platform_info) { ++ dispatch_ptr->platform_info( ++ (PVOID)platform_data); ++ } ++ } ++ } ++ dispatch_ids = CONTROL_Free_Memory(dispatch_ids); ++ } else if (devices) { ++ dispatch_ptr = LWPMU_DEVICE_dispatch( ++ &devices[0]); //placeholder, needs to be fixed ++ if (dispatch_ptr && dispatch_ptr->platform_info) { ++ dispatch_ptr->platform_info((PVOID)platform_data); ++ } ++ } ++ ++ if (args->len_drv_to_usr < size || args->buf_drv_to_usr == NULL) { ++ platform_data = CONTROL_Free_Memory(platform_data); ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments!"); ++ return OS_FAULT; ++ } ++ ++ status = copy_to_user((void __user *)args->buf_drv_to_usr, platform_data, size); ++ platform_data = CONTROL_Free_Memory(platform_data); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void lwpmudrv_Setup_Cpu_Topology (value) ++ * ++ * @brief Sets up the per CPU state structures ++ * ++ * @param IOCTL_ARGS args ++ * ++ * @return OS_STATUS ++ * ++ * Special Notes: ++ * This function was added to support abstract dll creation. 
++ */ ++static OS_STATUS lwpmudrv_Setup_Cpu_Topology(IOCTL_ARGS args) ++{ ++ S32 cpu_num; ++ S32 iter; ++ DRV_TOPOLOGY_INFO drv_topology, dt; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Skipped: driver state is not IDLE!"); ++ return OS_IN_PROGRESS; ++ } ++ if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Topology information has been misconfigured!"); ++ return OS_INVALID; ++ } ++ ++ drv_topology = CONTROL_Allocate_Memory(args->len_usr_to_drv); ++ if (drv_topology == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for drv_topology!"); ++ return OS_NO_MEM; ++ } ++ ++ if (copy_from_user(drv_topology, ++ (void __user *)(args->buf_usr_to_drv), ++ args->len_usr_to_drv)) { ++ drv_topology = CONTROL_Free_Memory(drv_topology); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure for drv_topology!"); ++ return OS_FAULT; ++ } ++ /* ++ * Topology Initializations ++ */ ++ num_packages = 0; ++ for (iter = 0; iter < GLOBAL_STATE_num_cpus(driver_state); iter++) { ++ dt = &drv_topology[iter]; ++ cpu_num = DRV_TOPOLOGY_INFO_cpu_number(dt); ++ CPU_STATE_socket_master(&pcb[cpu_num]) = ++ DRV_TOPOLOGY_INFO_socket_master(dt); ++ num_packages += CPU_STATE_socket_master(&pcb[cpu_num]); ++ CPU_STATE_core_master(&pcb[cpu_num]) = ++ DRV_TOPOLOGY_INFO_core_master(dt); ++ CPU_STATE_thr_master(&pcb[cpu_num]) = ++ DRV_TOPOLOGY_INFO_thr_master(dt); ++ CPU_STATE_core_type(&pcb[cpu_num]) = ++ DRV_TOPOLOGY_INFO_cpu_core_type(dt); ++ CPU_STATE_cpu_module_num(&pcb[cpu_num]) = ++ (U16)DRV_TOPOLOGY_INFO_cpu_module_num( ++ &drv_topology[iter]); ++ CPU_STATE_cpu_module_master(&pcb[cpu_num]) = ++ (U16)DRV_TOPOLOGY_INFO_cpu_module_master( ++ &drv_topology[iter]); ++ CPU_STATE_system_master(&pcb[cpu_num]) = (iter) ? 
0 : 1; ++ SEP_DRV_LOG_TRACE("Cpu %d sm = %d cm = %d tm = %d.", cpu_num, ++ CPU_STATE_socket_master(&pcb[cpu_num]), ++ CPU_STATE_core_master(&pcb[cpu_num]), ++ CPU_STATE_thr_master(&pcb[cpu_num])); ++ } ++ drv_topology = CONTROL_Free_Memory(drv_topology); ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Get_Num_Samples(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Returns the number of samples collected during the current ++ * @brief sampling run ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_Num_Samples(IOCTL_ARGS args) ++{ ++ S32 cpu_num; ++ U64 samples = 0; ++ OS_STATUS status; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (pcb == NULL) { ++ SEP_DRV_LOG_ERROR("PCB was not initialized."); ++ return OS_FAULT; ++ } ++ ++ if (args->len_drv_to_usr == 0 || args->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Topology information has been misconfigured!"); ++ return OS_INVALID; ++ } ++ ++ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_num++) { ++ samples += CPU_STATE_num_samples(&pcb[cpu_num]); ++ ++ SEP_DRV_LOG_TRACE("Samples for cpu %d = %lld.", cpu_num, ++ CPU_STATE_num_samples(&pcb[cpu_num])); ++ } ++ SEP_DRV_LOG_TRACE("Total number of samples %lld.", samples); ++ status = put_user(samples, (U64 __user *)args->buf_drv_to_usr); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Set_Device_Num_Units(IOCTL_ARGS arg)
++ *
++ * @param arg - Pointer to the IOCTL structure
++ *
++ * @return OS_STATUS
++ *
++ * @brief Set the number of devices for the sampling run
++ *
++ * Special Notes
++ *     Deliberately returns OS_SUCCESS (not an error) when the driver
++ *     is not IDLE, so the caller's configuration loop is skipped
++ *     rather than aborted. Advances the file-global cur_device cursor
++ *     on every successful call.
++ *     NOTE(review): cur_device is not bounds-checked against the
++ *     devices[] array size here -- presumably the caller issues at
++ *     most one call per configured device; verify against the ioctl
++ *     sequence. Also, len_usr_to_drv is only checked for non-zero
++ *     while sizeof(U32) bytes are copied -- confirm callers always
++ *     pass at least 4 bytes.
++ */
++static OS_STATUS lwpmudrv_Set_Device_Num_Units(IOCTL_ARGS args)
++{
++	SEP_DRV_LOG_FLOW_IN("");
++
++	if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
++		SEP_DRV_LOG_FLOW_OUT(
++			"'Success'/Skipped: driver state is not IDLE!");
++		return OS_SUCCESS;
++	}
++
++	if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) {
++		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments.");
++		return OS_INVALID;
++	}
++
++	/* Copy the unit count straight into the current device's node. */
++	if (copy_from_user(&(LWPMU_DEVICE_num_units(&devices[cur_device])),
++			   (void __user *)args->buf_usr_to_drv, sizeof(U32))) {
++		SEP_DRV_LOG_ERROR_FLOW_OUT(
++			"Memory copy failure for device num units!");
++		return OS_FAULT;
++	}
++	SEP_DRV_LOG_TRACE("LWP: num_units = %d cur_device = %d.",
++			  LWPMU_DEVICE_num_units(&devices[cur_device]),
++			  cur_device);
++	// on to the next device.
++	cur_device++;
++
++	SEP_DRV_LOG_FLOW_OUT("Success");
++	return OS_SUCCESS;
++}
++
++/* ------------------------------------------------------------------------- */
++/*!
++ * @fn static OS_STATUS lwpmudrv_Get_Interval_Counts(IOCTL_ARGS arg) ++ * ++ * @param arg - Pointer to the IOCTL structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Returns the number of samples collected during the current ++ * @brief sampling run ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_Interval_Counts(IOCTL_ARGS args) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (!DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Not in CP mode!"); ++ return OS_INVALID; ++ } ++ if (args->len_drv_to_usr == 0 || args->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Interval Counts information has been misconfigured!"); ++ return OS_INVALID; ++ } ++ if (!interrupt_counts) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Interrupt_counts is NULL!"); ++ return OS_INVALID; ++ } ++ ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, interrupt_counts, ++ args->len_drv_to_usr)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn U64 lwpmudrv_Set_Uncore_Topology_Info_And_Scan ++ * ++ * @brief Reads the MSR_PLATFORM_INFO register if present ++ * ++ * @param arg Pointer to the IOCTL structure ++ * ++ * @return status ++ * ++ * Special Notes: ++ * ++ */ ++static OS_STATUS lwpmudrv_Set_Uncore_Topology_Info_And_Scan(IOCTL_ARGS args) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ SEP_DRV_LOG_FLOW_OUT("Success [but did not do anything]"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 lwpmudrv_Get_Uncore_Topology ++ * ++ * @brief Reads the MSR_PLATFORM_INFO register if present ++ * ++ * @param arg Pointer to the IOCTL structure ++ * ++ * @return status ++ * ++ * Special Notes: ++ * ++ */ ++static OS_STATUS lwpmudrv_Get_Uncore_Topology(IOCTL_ARGS args) ++{ ++ U32 dev; ++ static UNCORE_TOPOLOGY_INFO_NODE req_uncore_topology; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (args->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (buf_usr_to_drv is NULL)!"); ++ return OS_INVALID; ++ } ++ if (args->len_usr_to_drv != sizeof(UNCORE_TOPOLOGY_INFO_NODE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (unexpected len_usr_to_drv value)!"); ++ return OS_INVALID; ++ } ++ if (args->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (buf_drv_to_usr is NULL)!"); ++ return OS_INVALID; ++ } ++ if (args->len_drv_to_usr != sizeof(UNCORE_TOPOLOGY_INFO_NODE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (unexpected len_drv_to_usr value)!"); ++ return OS_INVALID; ++ } ++ ++ memset((char *)&req_uncore_topology, 0, ++ sizeof(UNCORE_TOPOLOGY_INFO_NODE)); ++ if (copy_from_user(&req_uncore_topology, (void __user *)args->buf_usr_to_drv, ++ args->len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ for (dev = 0; dev < MAX_DEVICES; dev++) { ++ // skip if user does not require to scan this device ++ if (!UNCORE_TOPOLOGY_INFO_device_scan(&req_uncore_topology, ++ dev)) { ++ continue; ++ } ++ // skip if this device has been discovered ++ if (UNCORE_TOPOLOGY_INFO_device_scan(&uncore_topology, dev)) { ++ continue; ++ } ++ memcpy((U8 *)&(UNCORE_TOPOLOGY_INFO_device(&uncore_topology, ++ dev)), ++ (U8 *)&(UNCORE_TOPOLOGY_INFO_device(&req_uncore_topology, ++ dev)), ++ sizeof(UNCORE_PCIDEV_NODE)); ++ UNC_COMMON_PCI_Scan_For_Uncore((VOID *)&dev, dev, NULL); ++ } ++ ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, &uncore_topology, ++ 
args->len_drv_to_usr)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn U64 lwpmudrv_Get_Platform_Topology ++ * ++ * @brief Reads the MSR or PCI PLATFORM_INFO register if present ++ * ++ * @param arg Pointer to the IOCTL structure ++ * ++ * @return status ++ * ++ * Special Notes: ++ * ++ */ ++static OS_STATUS lwpmudrv_Get_Platform_Topology(IOCTL_ARGS args) ++{ ++ U32 dev; ++ U32 num_topology_devices = 0; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (args->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (buf_usr_to_drv is NULL)!"); ++ return OS_INVALID; ++ } ++ if (args->len_usr_to_drv != sizeof(PLATFORM_TOPOLOGY_PROG_NODE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (unexpected len_usr_to_drv value)!"); ++ return OS_INVALID; ++ } ++ if (args->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (buf_drv_to_usr is NULL)!"); ++ return OS_INVALID; ++ } ++ if (args->len_drv_to_usr != sizeof(PLATFORM_TOPOLOGY_PROG_NODE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (unexpected len_drv_to_usr value)!"); ++ return OS_INVALID; ++ } ++ ++ memset((char *)&req_platform_topology_prog_node, 0, ++ sizeof(PLATFORM_TOPOLOGY_PROG_NODE)); ++ if (copy_from_user(&req_platform_topology_prog_node, ++ (void __user *)args->buf_usr_to_drv, args->len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure for req_platform_topology_prog_node!"); ++ return OS_FAULT; ++ } ++ ++ num_topology_devices = PLATFORM_TOPOLOGY_PROG_num_devices( ++ &req_platform_topology_prog_node); ++ for (dev = 0; dev < num_topology_devices; dev++) { ++ //skip if we have populated the register values already ++ if (PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid( ++ &platform_topology_prog_node, dev)) { ++ continue; ++ } ++ memcpy((U8 
*)&(PLATFORM_TOPOLOGY_PROG_topology_device( ++ &platform_topology_prog_node, dev)), ++ (U8 *)&(PLATFORM_TOPOLOGY_PROG_topology_device( ++ &req_platform_topology_prog_node, dev)), ++ sizeof(PLATFORM_TOPOLOGY_DISCOVERY_NODE)); ++ UNC_COMMON_Get_Platform_Topology(dev); ++ } ++ ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, &platform_topology_prog_node, ++ args->len_drv_to_usr)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure for platform_topology_prog_node!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS lwpmudrv_Flush(void) ++ * ++ * @brief Flushes the current contents of sampling buffers ++ * ++ * @param - none ++ * ++ * @return status ++ * ++ * Special Notes: ++ */ ++static OS_STATUS lwpmudrv_Flush(void) ++{ ++ OS_STATUS status = OS_FAULT; ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (!DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ SEP_DRV_LOG_ERROR( ++ "The flush failed. Continuous profiling, -cp, is not enabled!"); ++ goto clean_return; ++ } ++ ++ if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), STATE_BIT_PAUSED)) { ++ SEP_DRV_LOG_ERROR( ++ "The flush failed. The driver should be paused!"); ++ goto clean_return; ++ } ++ ++ if (multi_pebs_enabled || sched_switch_enabled) { ++#if !defined(DRV_SEP_ACRN_ON) ++ CONTROL_Invoke_Parallel(PEBS_Flush_Buffer, NULL); ++#endif ++ } ++ ++ LINUXOS_Uninstall_Hooks(); ++ LINUXOS_Enum_Process_Modules(TRUE); ++ status = OUTPUT_Flush(); ++ LINUXOS_Install_Hooks(); ++ ++clean_return: ++ SEP_DRV_LOG_FLOW_OUT("Status: %d.", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 lwpmudrv_Get_Driver_log ++ * ++ * @brief Dumps the driver log ++ * ++ * @param arg Pointer to the IOCTL structure ++ * ++ * @return status ++ * ++ * Special Notes: ++ * ++ */ ++static OS_STATUS lwpmudrv_Get_Driver_Log(IOCTL_ARGS args) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (args->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (buf_drv_to_usr is NULL)!"); ++ return OS_INVALID; ++ } ++ if (args->len_drv_to_usr < sizeof(*DRV_LOG())) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (unexpected len_drv_to_usr value)!"); ++ return OS_INVALID; ++ } ++ ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, DRV_LOG(), sizeof(*DRV_LOG()))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_DISAMBIGUATE(); // keeps the driver log's footprint unique (has the highest disambiguator field) ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 lwpmudrv_Control_Driver_log ++ * ++ * @brief Sets or/and gets the driver log's configuration ++ * ++ * @param arg Pointer to the IOCTL structure ++ * ++ * @return status ++ * ++ * Special Notes: ++ * ++ */ ++static OS_STATUS lwpmudrv_Control_Driver_Log(IOCTL_ARGS args) ++{ ++ DRV_LOG_CONTROL_NODE log_control; ++ U32 i; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (args->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (buf_usr_to_drv is NULL)!"); ++ return OS_INVALID; ++ } ++ if (args->len_usr_to_drv < sizeof(log_control)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (unexpected len_usr_to_drv value)!"); ++ return OS_INVALID; ++ } ++ ++ if (copy_from_user(&log_control, (void __user *)args->buf_usr_to_drv, ++ sizeof(log_control))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ if (DRV_LOG_CONTROL_command(&log_control) == ++ DRV_LOG_CONTROL_COMMAND_ADJUST_VERBOSITY) { ++ for (i = 0; i < DRV_NB_LOG_CATEGORIES; i++) { ++ if (DRV_LOG_CONTROL_verbosities(&log_control)[i] == ++ LOG_VERBOSITY_UNSET) { ++ SEP_DRV_LOG_TRACE( ++ "Current verbosity mask for '%s' is 0x%x", ++ (UTILITY_Log_Category_Strings()[i]), ++ ((U32)DRV_LOG_VERBOSITY(i))); ++ DRV_LOG_CONTROL_verbosities(&log_control)[i] = ++ DRV_LOG_VERBOSITY(i); ++ } else if (DRV_LOG_CONTROL_verbosities( ++ &log_control)[i] == ++ LOG_VERBOSITY_DEFAULT) { ++ U32 verbosity; ++ switch (i) { ++ case DRV_LOG_CATEGORY_LOAD: ++ verbosity = ++ DRV_LOG_DEFAULT_LOAD_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_INIT: ++ verbosity = ++ DRV_LOG_DEFAULT_INIT_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_DETECTION: ++ verbosity = ++ DRV_LOG_DEFAULT_DETECTION_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_ERROR: ++ verbosity = ++ DRV_LOG_DEFAULT_ERROR_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_STATE_CHANGE: ++ verbosity = ++ DRV_LOG_DEFAULT_STATE_CHANGE_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_MARK: ++ verbosity = ++ 
DRV_LOG_DEFAULT_MARK_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_DEBUG: ++ verbosity = ++ DRV_LOG_DEFAULT_DEBUG_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_FLOW: ++ verbosity = ++ DRV_LOG_DEFAULT_FLOW_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_ALLOC: ++ verbosity = ++ DRV_LOG_DEFAULT_ALLOC_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_INTERRUPT: ++ verbosity = ++ DRV_LOG_DEFAULT_INTERRUPT_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_TRACE: ++ verbosity = ++ DRV_LOG_DEFAULT_TRACE_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_REGISTER: ++ verbosity = ++ DRV_LOG_DEFAULT_REGISTER_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_NOTIFICATION: ++ verbosity = ++ DRV_LOG_DEFAULT_NOTIFICATION_VERBOSITY; ++ break; ++ case DRV_LOG_CATEGORY_WARNING: ++ verbosity = ++ DRV_LOG_DEFAULT_WARNING_VERBOSITY; ++ break; ++ // default: ++ // SEP_DRV_LOG_ERROR( ++ // "Unspecified category '%s' when resetting to default!", ++ // UTILITY_Log_Category_Strings() ++ // [i]); ++ // verbosity = LOG_VERBOSITY_NONE; ++ // break; ++ } ++ SEP_DRV_LOG_INIT( ++ "Resetting verbosity mask for '%s' from 0x%x to 0x%x.", ++ UTILITY_Log_Category_Strings()[i], ++ (U32)DRV_LOG_VERBOSITY(i), verbosity); ++ DRV_LOG_VERBOSITY(i) = verbosity; ++ DRV_LOG_CONTROL_verbosities(&log_control)[i] = ++ verbosity; ++ } else { ++ SEP_DRV_LOG_INIT( ++ "Changing verbosity mask for '%s' from 0x%x to 0x%x.", ++ UTILITY_Log_Category_Strings()[i], ++ (U32)DRV_LOG_VERBOSITY(i), ++ (U32)DRV_LOG_CONTROL_verbosities( ++ &log_control)[i]); ++ DRV_LOG_VERBOSITY(i) = ++ DRV_LOG_CONTROL_verbosities( ++ &log_control)[i]; ++ } ++ } ++ ++ for (; i < DRV_MAX_NB_LOG_CATEGORIES; i++) { ++ DRV_LOG_CONTROL_verbosities(&log_control)[i] = ++ LOG_VERBOSITY_UNSET; ++ } ++ ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, &log_control, ++ sizeof(log_control))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ } else if (DRV_LOG_CONTROL_command(&log_control) == ++ DRV_LOG_CONTROL_COMMAND_MARK) { ++ 
DRV_LOG_CONTROL_message( ++ &log_control)[DRV_LOG_CONTROL_MAX_DATA_SIZE - 1] = 0; ++ SEP_DRV_LOG_MARK("Mark: '%s'.", ++ DRV_LOG_CONTROL_message(&log_control)); ++ } else if (DRV_LOG_CONTROL_command(&log_control) == ++ DRV_LOG_CONTROL_COMMAND_QUERY_SIZE) { ++ DRV_LOG_CONTROL_log_size(&log_control) = sizeof(*DRV_LOG()); ++ SEP_DRV_LOG_TRACE("Driver log size is %u bytes.", ++ DRV_LOG_CONTROL_log_size(&log_control)); ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, &log_control, ++ sizeof(log_control))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ } else if (DRV_LOG_CONTROL_command(&log_control) == ++ DRV_LOG_CONTROL_COMMAND_BENCHMARK) { ++ U32 nb_iterations = ++ *(U32 *)&DRV_LOG_CONTROL_message(&log_control); ++ ++ SEP_DRV_LOG_INIT_IN("Starting benchmark (%u iterations)...", ++ nb_iterations); ++ for (i = 0; i < nb_iterations; i++) { ++ (void)i; ++ } ++ SEP_DRV_LOG_INIT_OUT("Benchmark complete (%u/%u iterations).", ++ i, nb_iterations); ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 lwpmudrv_Get_Drv_Setup_Info ++ * ++ * @brief Get numerous information of driver ++ * ++ * @param arg Pointer to the IOCTL structure ++ * ++ * @return status ++ * ++ * Special Notes: ++ * ++ */ ++static OS_STATUS lwpmudrv_Get_Drv_Setup_Info(IOCTL_ARGS args) ++{ ++#define VMM_VENDOR_STR_LEN 12 ++ U32 pebs_unavailable = 0; ++ U64 rbx, rcx, rdx, num_basic_functions; ++ S8 vmm_vendor_name[VMM_VENDOR_STR_LEN + 1]; ++ S8 *vmm_vmware_str = "VMwareVMware"; ++ S8 *vmm_kvm_str = "KVMKVMKVM\0\0\0"; ++ S8 *vmm_mshyperv_str = "Microsoft Hv"; ++ S8 *vmm_acrn_str = "ACRNACRNACRN"; ++#if defined(DRV_USE_KAISER) ++ int *kaiser_enabled_ptr; ++ int *kaiser_pti_option; ++#endif ++ bool is_hypervisor = FALSE; ++ ++ SEP_DRV_LOG_FLOW_IN("Args: %p.", args); ++ ++ if (args->buf_drv_to_usr == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (buf_drv_to_usr is NULL)!"); ++ return OS_INVALID; ++ } ++ if (args->len_drv_to_usr != sizeof(DRV_SETUP_INFO_NODE)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Invalid arguments (unexpected len_drv_to_usr value)!"); ++ return OS_INVALID; ++ } ++ ++ memset((char *)&req_drv_setup_info, 0, sizeof(DRV_SETUP_INFO_NODE)); ++ ++ DRV_SETUP_INFO_nmi_mode(&req_drv_setup_info) = 1; ++ ++#if defined(DRV_SEP_ACRN_ON) ++ is_hypervisor = TRUE; ++#endif ++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR) || is_hypervisor) { ++ UTILITY_Read_Cpuid(0x40000000, &num_basic_functions, &rbx, &rcx, ++ &rdx); ++ memcpy(vmm_vendor_name, &rbx, 4); ++ memcpy(vmm_vendor_name + 4, &rcx, 4); ++ memcpy(vmm_vendor_name + 8, &rdx, 4); ++ memcpy(vmm_vendor_name + 12, "\0", 1); ++ ++ if (!strncmp(vmm_vendor_name, vmm_vmware_str, ++ VMM_VENDOR_STR_LEN)) { ++ DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; ++ DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = ++ DRV_VMM_VMWARE; ++ } else if (!strncmp(vmm_vendor_name, vmm_kvm_str, ++ VMM_VENDOR_STR_LEN)) { ++ DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; ++ DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = ++ DRV_VMM_KVM; ++ } 
else if (!strncmp(vmm_vendor_name, vmm_acrn_str, ++ VMM_VENDOR_STR_LEN)) { ++ DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; ++ DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = ++ DRV_VMM_ACRN; ++ } else if (!strncmp(vmm_vendor_name, vmm_mshyperv_str, ++ VMM_VENDOR_STR_LEN)) { ++ DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; ++ DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = ++ DRV_VMM_HYPERV; ++ if (num_basic_functions >= 0x40000003) { ++ UTILITY_Read_Cpuid(0x40000003, ++ &num_basic_functions, &rbx, ++ &rcx, &rdx); ++ if (rbx & 0x1) { ++ DRV_SETUP_INFO_vmm_guest_vm( ++ &req_drv_setup_info) = 0; ++ } else { ++ DRV_SETUP_INFO_vmm_guest_vm( ++ &req_drv_setup_info) = 1; ++ } ++ } ++ } ++ } ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) ++ else if (xen_domain()) { ++ DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; ++ DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = DRV_VMM_XEN; ++ ++ if (xen_initial_domain()) { ++ DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 0; ++ } else { ++ DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 1; ++ } ++ } ++#endif ++ else { ++ if (LINUXOS_Check_KVM_Guest_Process()) { ++ DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1; ++ DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = ++ DRV_VMM_KVM; ++ } ++ } ++ ++ pebs_unavailable = (SYS_Read_MSR(IA32_MISC_ENABLE) >> 12) & 0x1; ++ if (!pebs_unavailable) { ++ if (!wrmsr_safe(IA32_PEBS_ENABLE, 0, 0)) { ++ DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) = 1; ++ } ++ } ++ ++#if defined(DRV_USE_KAISER) ++ kaiser_enabled_ptr = (int *)UTILITY_Find_Symbol("kaiser_enabled"); ++ if (kaiser_enabled_ptr && *kaiser_enabled_ptr) { ++ SEP_DRV_LOG_INIT( ++ "KAISER is enabled! 
(&kaiser_enable=%p, val: %d).", ++ kaiser_enabled_ptr, *kaiser_enabled_ptr); ++ DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) = ++ DRV_SETUP_INFO_PTI_KAISER; ++ } else { ++ kaiser_pti_option = (int *)UTILITY_Find_Symbol("pti_option"); ++ if (kaiser_pti_option) { ++ SEP_DRV_LOG_INIT( ++ "KAISER pti_option=%p pti_option val=%d", ++ kaiser_pti_option, *kaiser_pti_option); ++#if defined(X86_FEATURE_PTI) ++ if (static_cpu_has(X86_FEATURE_PTI)) { ++ SEP_DRV_LOG_INIT( ++ "KAISER is Enabled or in Auto Enable!\n"); ++ DRV_SETUP_INFO_page_table_isolation( ++ &req_drv_setup_info) = ++ DRV_SETUP_INFO_PTI_KAISER; ++ } else { ++ SEP_DRV_LOG_INIT( ++ "KAISER is present but disabled!"); ++ } ++#endif ++ } ++ } ++ if (!kaiser_enabled_ptr && !kaiser_pti_option) { ++ SEP_DRV_LOG_ERROR( ++ "Could not find KAISER information. Assuming no KAISER!"); ++ } ++#elif defined(DRV_USE_PTI) ++ if (static_cpu_has(X86_FEATURE_PTI)) { ++ SEP_DRV_LOG_INIT("Kernel Page Table Isolation is enabled!"); ++ DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) = ++ DRV_SETUP_INFO_PTI_KPTI; ++ } ++#endif ++ ++ SEP_DRV_LOG_TRACE("DRV_SETUP_INFO nmi_mode %d.", ++ DRV_SETUP_INFO_nmi_mode(&req_drv_setup_info)); ++ SEP_DRV_LOG_TRACE("DRV_SETUP_INFO vmm_mode %d.", ++ DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info)); ++ SEP_DRV_LOG_TRACE("DRV_SETUP_INFO vmm_vendor %d.", ++ DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info)); ++ SEP_DRV_LOG_TRACE("DRV_SETUP_INFO vmm_guest_vm %d.", ++ DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info)); ++ SEP_DRV_LOG_TRACE("DRV_SETUP_INFO pebs_accessible %d.", ++ DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info)); ++ SEP_DRV_LOG_TRACE( ++ "DRV_SETUP_INFO page_table_isolation %d.", ++ DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info)); ++ ++#if defined(DRV_CPU_HOTPLUG) ++ DRV_SETUP_INFO_cpu_hotplug_mode(&req_drv_setup_info) = 1; ++#endif ++ ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, &req_drv_setup_info, ++ args->len_drv_to_usr)) { ++ 
SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success."); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn U64 lwpmudrv_Set_Emon_Buffer_Driver_Helper ++ * ++ * @brief Setup EMON buffer driver helper ++ * ++ * @param arg Pointer to the IOCTL structure ++ * ++ * @return status ++ * ++ * Special Notes: ++ * ++ */ ++static OS_STATUS lwpmudrv_Set_Emon_Buffer_Driver_Helper(IOCTL_ARGS args) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments."); ++ return OS_INVALID; ++ } ++ ++ if (!emon_buffer_driver_helper) { ++ emon_buffer_driver_helper = ++ CONTROL_Allocate_Memory(args->len_usr_to_drv); ++ if (emon_buffer_driver_helper == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for emon_buffer_driver_helper!"); ++ return OS_NO_MEM; ++ } ++ } ++ ++ if (copy_from_user(emon_buffer_driver_helper, (void __user *)args->buf_usr_to_drv, ++ args->len_usr_to_drv)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory copy failure for device num units!"); ++ return OS_FAULT; ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("Success"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 lwpmudrv_Set_OSID ++ * ++ * @brief Set OSID with specified value ++ * ++ * @param arg Pointer to the IOCTL structure ++ * ++ * @return status ++ * ++ * Special Notes: ++ * ++ */ ++ ++static OS_STATUS lwpmudrv_Set_OSID(IOCTL_ARGS args) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ ++ if (args->buf_usr_to_drv == NULL) { ++ SEP_PRINT_ERROR("Invalid arguments (buf_usr_to_drv is NULL)!"); ++ return OS_INVALID; ++ } ++ if (args->len_usr_to_drv != sizeof(U32)) { ++ SEP_PRINT_ERROR( ++ "Invalid arguments (unexpected len_usr_to_drv value)!"); ++ return OS_INVALID; ++ } ++ ++ status = get_user(osid, (U32 __user *)args->buf_usr_to_drv); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Get_Agent_Mode(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that copies agent mode from drv to usr code ++ * @brief Returns status. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_Agent_Mode(IOCTL_ARGS args) ++{ ++ OS_STATUS status; ++ ++ if (args->buf_drv_to_usr == NULL) { ++ SEP_PRINT_ERROR("Invalid arguments (buf_drv_to_usr is NULL)!"); ++ return OS_INVALID; ++ } ++ if (args->len_drv_to_usr != sizeof(U32)) { ++ SEP_PRINT_ERROR( ++ "Invalid arguments (unexpected len_drv_to_usr value)!"); ++ return OS_INVALID; ++ } ++ ++#if defined(DRV_SEP_ACRN_ON) ++ status = put_user(HOST_VM_AGENT, (U32 __user *)args->buf_drv_to_usr); ++ sched_switch_enabled = TRUE; ++#else ++ status = put_user(-1, (U32 __user *)args->buf_drv_to_usr); ++ SEP_PRINT_ERROR("Invalid agent mode..!"); ++ status = OS_INVALID; ++#endif ++ ++ return status; ++} ++ ++/******************************************************************************* ++ * External Driver functions - Open ++ * This function is common to all drivers ++ *******************************************************************************/ ++ ++static int lwpmu_Open(struct inode *inode, struct file *filp) ++{ ++ SEP_DRV_LOG_TRACE_IN("Maj:%d, min:%d", imajor(inode), iminor(inode)); ++ ++ filp->private_data = container_of(inode->i_cdev, LWPMU_DEV_NODE, cdev); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++ return 0; ++} ++ ++/******************************************************************************* ++ * External Driver functions ++ * These functions are registered into the file operations table that ++ * controls this device. 
++ * Open, Close, Read, Write, Release ++ *******************************************************************************/ ++ ++static ssize_t lwpmu_Read(struct file *filp, char __user *buf, size_t count, ++ loff_t *f_pos) ++{ ++ unsigned long retval; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ /* Transfering data to user space */ ++ SEP_DRV_LOG_TRACE("Dispatched with count=%d.", (S32)count); ++ if (copy_to_user((void __user *)buf, &LWPMU_DEV_buffer(lwpmu_control), 1)) { ++ retval = OS_FAULT; ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!"); ++ return retval; ++ } ++ /* Changing reading position as best suits */ ++ if (*f_pos == 0) { ++ *f_pos += 1; ++ SEP_DRV_LOG_TRACE_OUT("Return value: 1."); ++ return 1; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Return value: 0."); ++ return 0; ++} ++ ++static ssize_t lwpmu_Write(struct file *filp, const char __user *buf, size_t count, ++ loff_t *f_pos) ++{ ++ unsigned long retval; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ SEP_DRV_LOG_TRACE("Dispatched with count=%d.", (S32)count); ++ if (copy_from_user(&LWPMU_DEV_buffer(lwpmu_control), (void __user *)(buf + count - 1), ++ 1)) { ++ retval = OS_FAULT; ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!"); ++ return retval; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Return value: 1."); ++ return 1; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_NODE, filp, cmd, arg) ++ * ++ * @param IOCTL_USE_INODE - Used for pre 2.6.32 kernels ++ * @param struct file *filp - file pointer ++ * @param unsigned int cmd - IOCTL command ++ * @param unsigned long arg - args to the IOCTL command ++ * ++ * @return OS_STATUS ++ * ++ * @brief SEP Worker function that handles IOCTL requests from the user mode. 
++ * ++ * Special Notes ++ */ ++static IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_INODE struct file *filp, ++ unsigned int cmd, ++ IOCTL_ARGS_NODE local_args) ++{ ++ int status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_TRACE_IN("Command: %d.", cmd); ++ ++ if (cmd == DRV_OPERATION_GET_DRIVER_STATE) { ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_DRIVER_STATE."); ++ status = lwpmudrv_Get_Driver_State(&local_args); ++ SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd, ++ status); ++ return status; ++ } ++ if (cmd == DRV_OPERATION_GET_DRIVER_LOG) { ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_DRIVER_LOG."); ++ status = lwpmudrv_Get_Driver_Log(&local_args); ++ SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd, ++ status); ++ return status; ++ } ++ if (cmd == DRV_OPERATION_CONTROL_DRIVER_LOG) { ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_CONTROL_DRIVER_LOG."); ++ status = lwpmudrv_Control_Driver_Log(&local_args); ++ SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd, ++ status); ++ return status; ++ } ++ if (GET_DRIVER_STATE() == DRV_STATE_PREPARE_STOP) { ++ SEP_DRV_LOG_TRACE("skipping ioctl -- processing stop."); ++ SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd, ++ status); ++ return status; ++ } ++ ++ MUTEX_LOCK(ioctl_lock); ++ UTILITY_Driver_Set_Active_Ioctl(cmd); ++ ++ switch (cmd) { ++ /* ++ * Common IOCTL commands ++ */ ++ ++ case DRV_OPERATION_VERSION: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_VERSION."); ++ status = lwpmudrv_Version(&local_args); ++ break; ++ ++ case DRV_OPERATION_RESERVE: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_RESERVE."); ++ status = lwpmudrv_Reserve(&local_args); ++ break; ++ ++ case DRV_OPERATION_INIT_DRIVER: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_DRIVER."); ++ status = lwpmudrv_Initialize_Driver(local_args.buf_usr_to_drv, ++ local_args.len_usr_to_drv); ++ break; ++ ++ case DRV_OPERATION_INIT: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT."); ++ status = lwpmudrv_Initialize(local_args.buf_usr_to_drv, ++ local_args.len_usr_to_drv); ++ 
break; ++ ++ case DRV_OPERATION_INIT_PMU: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_PMU."); ++ status = lwpmudrv_Init_PMU(&local_args); ++ break; ++ ++ case DRV_OPERATION_SET_CPU_MASK: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_CPU_MASK."); ++ status = lwpmudrv_Set_CPU_Mask(local_args.buf_usr_to_drv, ++ local_args.len_usr_to_drv); ++ break; ++ ++ case DRV_OPERATION_START: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_START."); ++ status = lwpmudrv_Start(); ++ break; ++ ++ case DRV_OPERATION_STOP: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_STOP."); ++ status = lwpmudrv_Prepare_Stop(); ++ UTILITY_Driver_Set_Active_Ioctl(0); ++ MUTEX_UNLOCK(ioctl_lock); ++ ++ MUTEX_LOCK(ioctl_lock); ++ UTILITY_Driver_Set_Active_Ioctl(cmd); ++ if (GET_DRIVER_STATE() == DRV_STATE_PREPARE_STOP) { ++ status = lwpmudrv_Finish_Stop(); ++ if (status == OS_SUCCESS) { ++ // if stop was successful, relevant memory should have been freed, ++ // so try to compact the memory tracker ++ CONTROL_Memory_Tracker_Compaction(); ++ } ++ } ++ break; ++ ++ case DRV_OPERATION_PAUSE: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_PAUSE."); ++ status = lwpmudrv_Pause(); ++ break; ++ ++ case DRV_OPERATION_RESUME: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_RESUME."); ++ status = lwpmudrv_Resume(); ++ break; ++ ++ case DRV_OPERATION_EM_GROUPS: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_GROUPS."); ++ status = lwpmudrv_Set_EM_Config(&local_args); ++ break; ++ ++ case DRV_OPERATION_EM_CONFIG_NEXT: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_CONFIG_NEXT."); ++ status = lwpmudrv_Configure_Events(&local_args); ++ break; ++ ++ case DRV_OPERATION_NUM_DESCRIPTOR: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_NUM_DESCRIPTOR."); ++ status = lwpmudrv_Set_Sample_Descriptors(&local_args); ++ break; ++ ++ case DRV_OPERATION_DESC_NEXT: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_DESC_NEXT."); ++ status = lwpmudrv_Configure_Descriptors(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_NORMALIZED_TSC: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NORMALIZED_TSC."); ++ status = 
lwpmudrv_Get_Normalized_TSC(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_NORMALIZED_TSC_STANDALONE: ++ SEP_DRV_LOG_TRACE( ++ "DRV_OPERATION_GET_NORMALIZED_TSC_STANDALONE."); ++ status = lwpmudrv_Get_Normalized_TSC(&local_args); ++ break; ++ ++ case DRV_OPERATION_NUM_CORES: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_NUM_CORES."); ++ status = lwpmudrv_Get_Num_Cores(&local_args); ++ break; ++ ++ case DRV_OPERATION_KERNEL_CS: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_KERNEL_CS."); ++ status = lwpmudrv_Get_KERNEL_CS(&local_args); ++ break; ++ ++ case DRV_OPERATION_SET_UID: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_UID."); ++ status = lwpmudrv_Set_UID(&local_args); ++ break; ++ ++ case DRV_OPERATION_TSC_SKEW_INFO: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_TSC_SKEW_INFO."); ++ status = lwpmudrv_Get_TSC_Skew_Info(&local_args); ++ break; ++ ++ case DRV_OPERATION_COLLECT_SYS_CONFIG: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_COLLECT_SYS_CONFIG."); ++ status = lwpmudrv_Collect_Sys_Config(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_SYS_CONFIG: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_SYS_CONFIG."); ++ status = lwpmudrv_Sys_Config(&local_args); ++ break; ++ ++ case DRV_OPERATION_TERMINATE: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_TERMINATE."); ++ status = lwpmudrv_Terminate(); ++ break; ++ ++ case DRV_OPERATION_SET_CPU_TOPOLOGY: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_CPU_TOPOLOGY."); ++ status = lwpmudrv_Setup_Cpu_Topology(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_NUM_CORE_CTRS: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NUM_CORE_CTRS."); ++ status = lwpmudrv_Samp_Read_Num_Of_Core_Counters(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_PLATFORM_INFO: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_PLATFORM_INFO."); ++ status = lwpmudrv_Get_Platform_Info(&local_args); ++ break; ++ ++ case DRV_OPERATION_READ_MSRS: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_MSRs."); ++ status = lwpmudrv_Read_MSRs(&local_args); ++ break; ++ ++ case DRV_OPERATION_SWITCH_GROUP: ++ 
SEP_DRV_LOG_TRACE("DRV_OPERATION_SWITCH_GROUP."); ++ status = lwpmudrv_Switch_Group(); ++ break; ++ ++ case DRV_OPERATION_SET_OSID: ++ SEP_DRV_LOG_TRACE("LWPMUDRV_IOCTL_SET_OSID\n"); ++ status = lwpmudrv_Set_OSID(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_AGENT_MODE: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_AGENT_MODE\n"); ++ status = lwpmudrv_Get_Agent_Mode(&local_args); ++ break; ++ ++ /* ++ * EMON-specific IOCTL commands ++ */ ++ case DRV_OPERATION_READ_MSR: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_MSR."); ++ status = lwpmudrv_Read_MSR_All_Cores(&local_args); ++ break; ++ ++ case DRV_OPERATION_WRITE_MSR: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_WRITE_MSR."); ++ status = lwpmudrv_Write_MSR_All_Cores(&local_args); ++ break; ++ ++ case DRV_OPERATION_READ_SWITCH_GROUP: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_SWITCH_GROUP."); ++ status = lwpmudrv_Read_Counters_And_Switch_Group(&local_args); ++ break; ++ ++ case DRV_OPERATION_READ_AND_RESET: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_AND_RESET."); ++ status = lwpmudrv_Read_And_Reset_Counters(&local_args); ++ break; ++ ++ /* ++ * Platform-specific IOCTL commands (IA32 and Intel64) ++ */ ++ ++ case DRV_OPERATION_INIT_UNC: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_UNC."); ++ status = lwpmudrv_Initialize_UNC(local_args.buf_usr_to_drv, ++ local_args.len_usr_to_drv); ++ break; ++ ++ case DRV_OPERATION_EM_GROUPS_UNC: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_GROUPS_UNC."); ++ status = lwpmudrv_Set_EM_Config_UNC(&local_args); ++ break; ++ ++ case DRV_OPERATION_EM_CONFIG_NEXT_UNC: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_CONFIG_NEXT_UNC."); ++ status = lwpmudrv_Configure_Events_UNC(&local_args); ++ break; ++ ++ case DRV_OPERATION_LBR_INFO: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_LBR_INFO."); ++ status = lwpmudrv_LBR_Info(&local_args); ++ break; ++ ++ case DRV_OPERATION_PWR_INFO: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_PWR_INFO."); ++ status = lwpmudrv_PWR_Info(&local_args); ++ break; ++ ++ case DRV_OPERATION_INIT_NUM_DEV: ++ 
SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_NUM_DEV."); ++ status = lwpmudrv_Initialize_Num_Devices(&local_args); ++ break; ++ case DRV_OPERATION_GET_NUM_SAMPLES: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NUM_SAMPLES."); ++ status = lwpmudrv_Get_Num_Samples(&local_args); ++ break; ++ ++ case DRV_OPERATION_SET_DEVICE_NUM_UNITS: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_DEVICE_NUM_UNITS."); ++ status = lwpmudrv_Set_Device_Num_Units(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_INTERVAL_COUNTS: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_INTERVAL_COUNTS."); ++ lwpmudrv_Get_Interval_Counts(&local_args); ++ break; ++ ++ case DRV_OPERATION_SET_SCAN_UNCORE_TOPOLOGY_INFO: ++ SEP_DRV_LOG_TRACE( ++ "DRV_OPERATION_SET_SCAN_UNCORE_TOPOLOGY_INFO."); ++ status = ++ lwpmudrv_Set_Uncore_Topology_Info_And_Scan(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_UNCORE_TOPOLOGY: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_UNCORE_TOPOLOGY."); ++ status = lwpmudrv_Get_Uncore_Topology(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_PLATFORM_TOPOLOGY: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_PLATFORM_TOPOLOGY."); ++ status = lwpmudrv_Get_Platform_Topology(&local_args); ++ break; ++ ++ case DRV_OPERATION_FLUSH: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_FLUSH."); ++ status = lwpmudrv_Flush(); ++ break; ++ ++ case DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER: ++ SEP_DRV_LOG_TRACE( ++ "DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER."); ++ status = lwpmudrv_Set_Emon_Buffer_Driver_Helper(&local_args); ++ break; ++ ++ /* ++ * Graphics IOCTL commands ++ */ ++ ++#if defined(BUILD_GFX) ++ case DRV_OPERATION_SET_GFX_EVENT: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_GFX_EVENT."); ++ SEP_DRV_LOG_TRACE("lwpmudrv_Device_Control: enable_gfx=%d.", ++ (int)DRV_CONFIG_enable_gfx(drv_cfg)); ++ status = GFX_Set_Event_Code(&local_args); ++ break; ++#endif ++ ++ /* ++ * Chipset IOCTL commands ++ */ ++ ++#if defined(BUILD_CHIPSET) ++ case DRV_OPERATION_PCI_READ: { ++ CHIPSET_PCI_ARG_NODE pci_data; ++ ++ 
SEP_DRV_LOG_TRACE("DRV_OPERATION_PCI_READ."); ++ ++ if (local_args.buf_usr_to_drv == NULL || ++ local_args.len_usr_to_drv != sizeof(CHIPSET_PCI_ARG_NODE) || ++ local_args.buf_drv_to_usr == NULL || ++ local_args.len_drv_to_usr != sizeof(CHIPSET_PCI_ARG_NODE)) { ++ status = OS_FAULT; ++ goto cleanup; ++ } ++ ++ if (copy_from_user(&pci_data, ++ (void __user *)local_args.buf_usr_to_drv, ++ sizeof(CHIPSET_PCI_ARG_NODE))) { ++ status = OS_FAULT; ++ goto cleanup; ++ } ++ ++ status = PCI_Read_From_Memory_Address( ++ CHIPSET_PCI_ARG_address(&pci_data), ++ &CHIPSET_PCI_ARG_value(&pci_data)); ++ ++ if (copy_to_user((void __user *)local_args.buf_drv_to_usr, &pci_data, ++ sizeof(CHIPSET_PCI_ARG_NODE))) { ++ status = OS_FAULT; ++ goto cleanup; ++ } ++ ++ break; ++ } ++ ++ case DRV_OPERATION_PCI_WRITE: { ++ CHIPSET_PCI_ARG_NODE pci_data; ++ ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_PCI_WRITE."); ++ ++ if (local_args.buf_usr_to_drv == NULL || ++ local_args.len_usr_to_drv != sizeof(CHIPSET_PCI_ARG_NODE)) { ++ status = OS_FAULT; ++ goto cleanup; ++ } ++ ++ if (copy_from_user(&pci_data, ++ (void __user *)local_args.buf_usr_to_drv, ++ sizeof(CHIPSET_PCI_ARG_NODE))) { ++ status = OS_FAULT; ++ goto cleanup; ++ } ++ ++ status = PCI_Write_To_Memory_Address( ++ CHIPSET_PCI_ARG_address(&pci_data), ++ CHIPSET_PCI_ARG_value(&pci_data)); ++ break; ++ } ++ ++ case DRV_OPERATION_FD_PHYS: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_FD_PHYS."); ++ status = lwpmudrv_Samp_Find_Physical_Address(&local_args); ++ break; ++ ++ case DRV_OPERATION_READ_PCI_CONFIG: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_PCI_CONFIG."); ++ status = lwpmudrv_Samp_Read_PCI_Config(&local_args); ++ break; ++ ++ case DRV_OPERATION_WRITE_PCI_CONFIG: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_WRITE_PCI_CONFIG."); ++ status = lwpmudrv_Samp_Write_PCI_Config(&local_args); ++ break; ++ ++ case DRV_OPERATION_CHIPSET_INIT: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_CHIPSET_INIT."); ++ SEP_DRV_LOG_TRACE("Enable_chipset=%d.", ++ 
(int)DRV_CONFIG_enable_chipset(drv_cfg)); ++ status = lwpmudrv_Samp_Chipset_Init(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_CHIPSET_DEVICE_ID: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_CHIPSET_DEVICE_ID."); ++ status = lwpmudrv_Samp_Read_PCI_Config(&local_args); ++ break; ++#endif ++ ++ case DRV_OPERATION_GET_DRV_SETUP_INFO: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_DRV_SETUP_INFO."); ++ status = lwpmudrv_Get_Drv_Setup_Info(&local_args); ++ break; ++ ++ /* ++ * if none of the above, treat as unknown/illegal IOCTL command ++ */ ++ ++ default: ++ SEP_DRV_LOG_ERROR("Unknown IOCTL number: %d!", cmd); ++ status = OS_ILLEGAL_IOCTL; ++ break; ++ } ++#if defined(BUILD_CHIPSET) ++cleanup: ++#endif ++ UTILITY_Driver_Set_Active_Ioctl(0); ++ MUTEX_UNLOCK(ioctl_lock); ++ ++ SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d.", cmd, status); ++ return status; ++} ++ ++static long lwpmu_Device_Control(IOCTL_USE_INODE struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ int status = OS_SUCCESS; ++ IOCTL_ARGS_NODE local_args; ++ ++ SEP_DRV_LOG_TRACE_IN("Cmd type: %d, subcommand: %d.", _IOC_TYPE(cmd), ++ _IOC_NR(cmd)); ++ ++#if !defined(DRV_USE_UNLOCKED_IOCTL) ++ SEP_DRV_LOG_TRACE("Cmd: 0x%x, called on inode maj:%d, min:%d.", cmd, ++ imajor(inode), iminor(inode)); ++#endif ++ SEP_DRV_LOG_TRACE("Type: %d, subcommand: %d.", _IOC_TYPE(cmd), ++ _IOC_NR(cmd)); ++ ++ if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Unknown IOCTL magic: %d!", ++ _IOC_TYPE(cmd)); ++ return OS_ILLEGAL_IOCTL; ++ } ++ ++ memset(&local_args, 0, sizeof(IOCTL_ARGS_NODE)); ++ ++ if (arg) { ++ status = copy_from_user(&local_args, (void __user *)arg, ++ sizeof(IOCTL_ARGS_NODE)); ++ } ++ ++ status = lwpmu_Service_IOCTL(IOCTL_USE_INODE filp, _IOC_NR(cmd), ++ local_args); ++ ++ SEP_DRV_LOG_TRACE_OUT("Return value: %d.", status); ++ return status; ++} ++ ++#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) ++static long lwpmu_Device_Control_Compat(struct file *filp, unsigned 
int cmd, ++ unsigned long arg) ++{ ++ int status = OS_SUCCESS; ++ IOCTL_COMPAT_ARGS_NODE local_args_compat; ++ IOCTL_ARGS_NODE local_args; ++ ++ SEP_DRV_LOG_TRACE_IN("Compat: type: %d, subcommand: %d.", ++ _IOC_TYPE(cmd), _IOC_NR(cmd)); ++ ++ memset(&local_args_compat, 0, sizeof(IOCTL_COMPAT_ARGS_NODE)); ++ SEP_DRV_LOG_TRACE("Compat: type: %d, subcommand: %d.", _IOC_TYPE(cmd), ++ _IOC_NR(cmd)); ++ ++ if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Unknown IOCTL magic: %d!", ++ _IOC_TYPE(cmd)); ++ return OS_ILLEGAL_IOCTL; ++ } ++ ++ if (arg) { ++ status = copy_from_user(&local_args_compat, ++ (void __user *)arg, ++ sizeof(IOCTL_COMPAT_ARGS_NODE)); ++ } // NB: status defined above is not being used... ++ local_args.len_drv_to_usr = local_args_compat.len_drv_to_usr; ++ local_args.len_usr_to_drv = local_args_compat.len_usr_to_drv; ++ local_args.buf_drv_to_usr = ++ (char *)compat_ptr(local_args_compat.buf_drv_to_usr); ++ local_args.buf_usr_to_drv = ++ (char *)compat_ptr(local_args_compat.buf_usr_to_drv); ++ local_args.command = _IOC_NR(cmd); ++ ++ status = lwpmu_Service_IOCTL(filp, _IOC_NR(cmd), local_args); ++ ++ SEP_DRV_LOG_TRACE_OUT("Return value: %d", status); ++ return status; ++} ++#endif ++ ++/* ++ * @fn LWPMUDRV_Abnormal_Terminate(void) ++ * ++ * @brief This routine is called from linuxos_Exit_Task_Notify if the user process has ++ * been killed by an uncatchable signal (example kill -9). The state variable ++ * abormal_terminate is set to 1 and the clean up routines are called. In this ++ * code path the OS notifier hooks should not be unloaded. 
++ * ++ * @param None ++ * ++ * @return OS_STATUS ++ * ++ * Special Notes: ++ * ++ */ ++static int LWPMUDRV_Abnormal_Terminate(void) ++{ ++ int status; ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ SEP_DRV_LOG_TRACE("Calling lwpmudrv_Prepare_Stop."); ++ status = lwpmudrv_Prepare_Stop(); ++ if (status != OS_SUCCESS) ++ return status; ++ ++ SEP_DRV_LOG_TRACE("Calling lwpmudrv_Finish_Stop."); ++ status = lwpmudrv_Finish_Stop(); ++ if (status != OS_SUCCESS) ++ return status; ++ ++ SEP_DRV_LOG_TRACE("Calling lwpmudrv_Terminate."); ++ status = lwpmudrv_Terminate(); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); ++ return status; ++} ++ ++static int lwpmudrv_Abnormal_Handler(void *data) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ while (!kthread_should_stop()) { ++ if (wait_event_interruptible_timeout( ++ wait_exit, ++ GET_DRIVER_STATE() == DRV_STATE_TERMINATING, ++ msecs_to_jiffies(350))) { ++ SEP_DRV_LOG_WARNING( ++ "Processing abnormal termination..."); ++ MUTEX_LOCK(ioctl_lock); ++ SEP_DRV_LOG_TRACE("Locked ioctl_lock..."); ++ LWPMUDRV_Abnormal_Terminate(); ++ SEP_DRV_LOG_TRACE("Unlocking ioctl_lock..."); ++ MUTEX_UNLOCK(ioctl_lock); ++ } ++ } ++ ++ SEP_DRV_LOG_FLOW_OUT("End of thread."); ++ return 0; ++} ++ ++/***************************************************************************************** ++ * ++ * Driver Entry / Exit functions that will be called on when the driver is loaded and ++ * unloaded ++ * ++ ****************************************************************************************/ ++ ++/* ++ * Structure that declares the usual file access functions ++ * First one is for lwpmu_c, the control functions ++ */ ++static struct file_operations lwpmu_Fops = { ++ .owner = THIS_MODULE, ++ IOCTL_OP = lwpmu_Device_Control, ++#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) ++ .compat_ioctl = lwpmu_Device_Control_Compat, ++#endif ++ .read = lwpmu_Read, ++ .write = lwpmu_Write, ++ .open = lwpmu_Open, ++ .release = NULL, ++ .llseek = NULL, ++}; ++ ++/* ++ * Second one 
is for lwpmu_m, the module notification functions ++ */ ++static struct file_operations lwmod_Fops = { ++ .owner = THIS_MODULE, ++ IOCTL_OP = NULL, //None needed ++ .read = OUTPUT_Module_Read, ++ .write = NULL, //No writing accepted ++ .open = lwpmu_Open, ++ .release = NULL, ++ .llseek = NULL, ++}; ++ ++/* ++ * Third one is for lwsamp_nn, the sampling functions ++ */ ++static struct file_operations lwsamp_Fops = { ++ .owner = THIS_MODULE, ++ IOCTL_OP = NULL, //None needed ++ .read = OUTPUT_Sample_Read, ++ .write = NULL, //No writing accepted ++ .open = lwpmu_Open, ++ .release = NULL, ++ .llseek = NULL, ++}; ++ ++/* ++ * Fourth one is for lwsamp_sideband, the pebs process info functions ++ */ ++static struct file_operations lwsideband_Fops = { ++ .owner = THIS_MODULE, ++ IOCTL_OP = NULL, //None needed ++ .read = OUTPUT_SidebandInfo_Read, ++ .write = NULL, //No writing accepted ++ .open = lwpmu_Open, ++ .release = NULL, ++ .llseek = NULL, ++}; ++ ++/* ++ * Fifth one is for lwsampunc_nn, the uncore sampling functions ++ */ ++static struct file_operations lwsampunc_Fops = { ++ .owner = THIS_MODULE, ++ IOCTL_OP = NULL, //None needed ++ .read = OUTPUT_UncSample_Read, ++ .write = NULL, //No writing accepted ++ .open = lwpmu_Open, ++ .release = NULL, ++ .llseek = NULL, ++}; ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static int lwpmudrv_setup_cdev(dev, fops, dev_number) ++ * ++ * @param LWPMU_DEV dev - pointer to the device object ++ * @param struct file_operations *fops - pointer to the file operations struct ++ * @param dev_t dev_number - major/monor device number ++ * ++ * @return OS_STATUS ++ * ++ * @brief Set up the device object. 
++ * ++ * Special Notes ++ */ ++static int lwpmu_setup_cdev(LWPMU_DEV dev, struct file_operations *fops, ++ dev_t dev_number) ++{ ++ int res; ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ cdev_init(&LWPMU_DEV_cdev(dev), fops); ++ LWPMU_DEV_cdev(dev).owner = THIS_MODULE; ++ LWPMU_DEV_cdev(dev).ops = fops; ++ ++ res = cdev_add(&LWPMU_DEV_cdev(dev), dev_number, 1); ++ ++ SEP_DRV_LOG_TRACE_OUT("Return value: %d", res); ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static int lwpmu_Load(void) ++ * ++ * @param none ++ * ++ * @return STATUS ++ * ++ * @brief Load the driver module into the kernel. Set up the driver object. ++ * @brief Set up the initial state of the driver and allocate the memory ++ * @brief needed to keep basic state information. ++ */ ++static int lwpmu_Load(void) ++{ ++ int i, j, num_cpus; ++ dev_t lwmod_DevNum; ++ OS_STATUS status = OS_INVALID; ++ char dev_name[MAXNAMELEN]; ++ struct device *sep_device; ++#if defined(CONFIG_XEN_HAVE_VPMU) ++ xen_pmu_params_t xenpmu_param; ++ xen_pmu_data_t *xenpmu_data; ++ unsigned long pfn; ++#endif ++ ++ SEP_DRV_LOG_LOAD("Driver loading..."); ++ if (UTILITY_Driver_Log_Init() != ++ OS_SUCCESS) { // Do not use SEP_DRV_LOG_X (where X != LOAD) before this, or if this fails ++ SEP_DRV_LOG_LOAD("Error: could not allocate log buffer."); ++ return OS_NO_MEM; ++ } ++ SEP_DRV_LOG_FLOW_IN("Starting internal log monitoring."); ++ ++ CONTROL_Memory_Tracker_Init(); ++ ++#if defined(DRV_SEP_ACRN_ON) ++ SEP_DRV_LOG_FLOW_IN("Starting internal log monitoring."); ++ vm_info_list = ++ CONTROL_Allocate_Memory(sizeof(struct profiling_vm_info_list)); ++ memset(vm_info_list, 0, sizeof(struct profiling_vm_info_list)); ++ ++ BUG_ON(!virt_addr_valid(vm_info_list)); ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, ++ virt_to_phys(vm_info_list)); ++#endif ++ ++#if !defined(CONFIG_XEN_HAVE_VPMU) ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) ++ if 
(xen_initial_domain()) { ++ SEP_DRV_LOG_LOAD( ++ "PMU virtualization is not enabled on XEN dom0!"); ++ } ++#endif ++#endif ++ ++ /* Get one major device number and two minor numbers. */ ++ /* The result is formatted as major+minor(0) */ ++ /* One minor number is for control (lwpmu_c), */ ++ /* the other (lwpmu_m) is for modules */ ++ SEP_DRV_LOG_INIT("About to register chrdev..."); ++ ++ lwpmu_DevNum = MKDEV(0, 0); ++ status = alloc_chrdev_region(&lwpmu_DevNum, 0, PMU_DEVICES, ++ SEP_DRIVER_NAME); ++ SEP_DRV_LOG_INIT("Result of alloc_chrdev_region is %d.", status); ++ if (status < 0) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error: Failed to alloc chrdev_region (return = %d).", ++ status); ++ return status; ++ } ++ SEP_DRV_LOG_LOAD("Major number is %d", MAJOR(lwpmu_DevNum)); ++ status = lwpmudrv_Initialize_State(); ++ if (status < 0) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Failed to initialize state (return = %d)!", status); ++ return status; ++ } ++ num_cpus = GLOBAL_STATE_num_cpus(driver_state); ++ SEP_DRV_LOG_LOAD("Detected %d total CPUs and %d active CPUs.", num_cpus, ++ GLOBAL_STATE_active_cpus(driver_state)); ++ ++#if defined(CONFIG_XEN_HAVE_VPMU) ++ if (xen_initial_domain()) { ++ xenpmu_param.version.maj = XENPMU_VER_MAJ; ++ xenpmu_param.version.min = XENPMU_VER_MIN; ++ ++ for (i = 0; i < num_cpus; i++) { ++ xenpmu_data = ++ (xen_pmu_data_t *)get_zeroed_page(GFP_KERNEL); ++ ; ++ if (!xenpmu_data) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for xenpmu_data!"); ++ return OS_NO_MEM; ++ } ++ pfn = vmalloc_to_pfn((char *)xenpmu_data); ++ ++ xenpmu_param.val = pfn_to_mfn(pfn); ++ xenpmu_param.vcpu = i; ++ status = HYPERVISOR_xenpmu_op(XENPMU_init, ++ (PVOID)&xenpmu_param); ++ ++ per_cpu(sep_xenpmu_shared, i) = xenpmu_data; ++ } ++ SEP_DRV_LOG_LOAD("VPMU is initialized on XEN Dom0."); ++ } ++#endif ++ ++ PCI_Initialize(); ++ ++ /* Allocate memory for the control structures */ ++ lwpmu_control = CONTROL_Allocate_Memory(sizeof(LWPMU_DEV_NODE)); ++ 
lwmod_control = CONTROL_Allocate_Memory(sizeof(LWPMU_DEV_NODE)); ++ lwsamp_control = ++ CONTROL_Allocate_Memory(num_cpus * sizeof(LWPMU_DEV_NODE)); ++ lwsideband_control = ++ CONTROL_Allocate_Memory(num_cpus * sizeof(LWPMU_DEV_NODE)); ++ ++ if (!lwsideband_control || !lwsamp_control || !lwpmu_control || ++ !lwmod_control) { ++ CONTROL_Free_Memory(lwpmu_control); ++ CONTROL_Free_Memory(lwmod_control); ++ CONTROL_Free_Memory(lwsamp_control); ++ CONTROL_Free_Memory(lwsideband_control); ++ ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for control structures!"); ++ return OS_NO_MEM; ++ } ++ ++ /* Register the file operations with the OS */ ++ ++ pmu_class = class_create(THIS_MODULE, SEP_DRIVER_NAME); ++ if (IS_ERR(pmu_class)) { ++ SEP_DRV_LOG_ERROR("Error registering SEP control class!"); ++ } ++ sep_device = device_create(pmu_class, NULL, lwpmu_DevNum, NULL, ++ SEP_DRIVER_NAME DRV_DEVICE_DELIMITER "c"); ++ if (IS_ERR(sep_device)) { ++ SEP_DRV_LOG_ERROR("Error creating SEP PMU device!"); ++ } ++ ++ status = lwpmu_setup_cdev(lwpmu_control, &lwpmu_Fops, lwpmu_DevNum); ++ if (status) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error %d when adding lwpmu as char device!", status); ++ unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ class_destroy(pmu_class); ++ return status; ++ } ++ /* _c init was fine, now try _m */ ++ lwmod_DevNum = MKDEV(MAJOR(lwpmu_DevNum), MINOR(lwpmu_DevNum) + 1); ++ ++ sep_device = device_create(pmu_class, NULL, lwmod_DevNum, NULL, ++ SEP_DRIVER_NAME DRV_DEVICE_DELIMITER "m"); ++ if (IS_ERR(sep_device)) { ++ SEP_DRV_LOG_ERROR("Error creating SEP PMU device!"); ++ } ++ ++ status = lwpmu_setup_cdev(lwmod_control, &lwmod_Fops, lwmod_DevNum); ++ if (status) { ++ cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error %d when adding lwpmu as char device!", status); ++ unregister_chrdev(MAJOR(lwpmu_DevNum), 
SEP_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ device_destroy(pmu_class, lwpmu_DevNum + 1); ++ cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ class_destroy(pmu_class); ++ return status; ++ } ++ ++ /* allocate one sampling device per cpu */ ++ lwsamp_DevNum = MKDEV(0, 0); ++ status = alloc_chrdev_region(&lwsamp_DevNum, 0, num_cpus, ++ SEP_SAMPLES_NAME); ++ ++ if (status < 0) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error: Failed to alloc chrdev_region (return = %d).", ++ status); ++ unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ device_destroy(pmu_class, lwpmu_DevNum + 1); ++ cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); ++ cdev_del(&LWPMU_DEV_cdev(lwmod_control)); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ class_destroy(pmu_class); ++ return status; ++ } ++ ++ /* Register the file operations with the OS */ ++ for (i = 0; i < num_cpus; i++) { ++ snprintf(dev_name, MAXNAMELEN, "%s%ss%d", SEP_DRIVER_NAME, ++ DRV_DEVICE_DELIMITER, i); ++ ++ sep_device = device_create(pmu_class, NULL, lwsamp_DevNum + i, NULL, ++ dev_name); ++ if (IS_ERR(sep_device)) { ++ SEP_DRV_LOG_ERROR("Error creating SEP PMU device !"); ++ } ++ status = lwpmu_setup_cdev(lwsamp_control + i, &lwsamp_Fops, ++ lwsamp_DevNum + i); ++ if (status) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error %d when adding lwpmu as char device!", ++ status); ++ unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ device_destroy(pmu_class, lwpmu_DevNum + 1); ++ cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); ++ cdev_del(&LWPMU_DEV_cdev(lwmod_control)); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); ++ for (j = i; j > 0; j--) { ++ device_destroy(pmu_class, lwsamp_DevNum + j); ++ cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); ++ } ++ ++ class_destroy(pmu_class); ++ return status; 
++ } else { ++ SEP_DRV_LOG_INIT("Added sampling device %d.", i); ++ } ++ } ++ ++ lwsideband_DevNum = MKDEV(0, 0); ++ status = alloc_chrdev_region(&lwsideband_DevNum, 0, num_cpus, ++ SEP_SIDEBAND_NAME); ++ ++ if (status < 0) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for chrdev_region for sideband!"); ++ unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ device_destroy(pmu_class, lwpmu_DevNum + 1); ++ cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); ++ cdev_del(&LWPMU_DEV_cdev(lwmod_control)); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); ++ for (j = 0; j < num_cpus; j++) { ++ device_destroy(pmu_class, lwsamp_DevNum + j); ++ cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); ++ } ++ ++ class_destroy(pmu_class); ++ return status; ++ } ++ ++ for (i = 0; i < num_cpus; i++) { ++ snprintf(dev_name, MAXNAMELEN, "%s%sb%d", SEP_DRIVER_NAME, ++ DRV_DEVICE_DELIMITER, i); ++ sep_device = device_create(pmu_class, NULL, lwsideband_DevNum + i, NULL, ++ dev_name); ++ if (IS_ERR(sep_device)) { ++ SEP_DRV_LOG_ERROR("Error creating SEP PMU device!"); ++ } ++ status = lwpmu_setup_cdev(lwsideband_control + i, ++ &lwsideband_Fops, ++ lwsideband_DevNum + i); ++ if (status) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error %d when adding lwsideband as char device!", ++ status); ++ unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ device_destroy(pmu_class, lwpmu_DevNum + 1); ++ cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); ++ cdev_del(&LWPMU_DEV_cdev(lwmod_control)); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); ++ unregister_chrdev(MAJOR(lwsideband_DevNum), SEP_SIDEBAND_NAME); ++ for (j = 0; j < num_cpus; j++) { ++ device_destroy(pmu_class, lwsamp_DevNum + j); ++ cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); ++ } ++ for (j = i; j > 0; j--) { 
++ device_destroy(pmu_class, lwsideband_DevNum + j); ++ cdev_del(&LWPMU_DEV_cdev(&lwsideband_control[j])); ++ } ++ ++ class_destroy(pmu_class); ++ return status; ++ } else { ++ SEP_DRV_LOG_INIT("Added sampling sideband device %d.", ++ i); ++ } ++ } ++ ++ cpu_tsc = (U64 *)CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); ++ prev_cpu_tsc = (U64 *)CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); ++ diff_cpu_tsc = (U64 *)CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); ++ ++#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) ++ atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); ++ init_waitqueue_head(&read_tsc_now); ++ CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, (PVOID)(size_t)0); ++#endif ++ ++ pcb_size = GLOBAL_STATE_num_cpus(driver_state) * sizeof(CPU_STATE_NODE); ++ pcb = CONTROL_Allocate_Memory(pcb_size); ++ if (!pcb) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for PCB!"); ++ return OS_NO_MEM; ++ } ++ ++ core_to_package_map = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); ++ if (!core_to_package_map) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for core_to_package_map!"); ++ return OS_NO_MEM; ++ } ++ ++ core_to_phys_core_map = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); ++ if (!core_to_phys_core_map) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for core_to_phys_core_map!"); ++ return OS_NO_MEM; ++ } ++ ++ core_to_thread_map = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); ++ if (!core_to_thread_map) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for core_to_thread_map!"); ++ return OS_NO_MEM; ++ } ++ ++ threads_per_core = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); ++ if (!threads_per_core) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation 
failure for threads_per_core!"); ++ return OS_NO_MEM; ++ } ++ ++ occupied_core_ids = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); ++ if (!occupied_core_ids) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for occupied_core_ids!"); ++ return OS_NO_MEM; ++ } ++ SYS_INFO_Build(); ++ memset(pcb, 0, pcb_size); ++ ++ if (total_ram <= OUTPUT_MEMORY_THRESHOLD) { ++ output_buffer_size = OUTPUT_SMALL_BUFFER; ++ } ++ ++ MUTEX_INIT(ioctl_lock); ++ ++ status = UNC_COMMON_Init(); ++ if (status) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Error %d when init uncore struct!", ++ status); ++ return status; ++ } ++ ++ /* allocate one sampling device per package (for uncore)*/ ++ lwsampunc_control = ++ CONTROL_Allocate_Memory(num_packages * sizeof(LWPMU_DEV_NODE)); ++ if (!lwsampunc_control) { ++ CONTROL_Free_Memory(lwsampunc_control); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "lwpmu driver failed to alloc space!\n"); ++ return OS_NO_MEM; ++ } ++ ++ lwsampunc_DevNum = MKDEV(0, 0); ++ status = alloc_chrdev_region(&lwsampunc_DevNum, 0, num_packages, ++ SEP_UNCORE_NAME); ++ ++ if (status < 0) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error: Failed to alloc chrdev_region (return = %d).", ++ status); ++ unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ device_destroy(pmu_class, lwpmu_DevNum + 1); ++ cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); ++ cdev_del(&LWPMU_DEV_cdev(lwmod_control)); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); ++ unregister_chrdev(MAJOR(lwsampunc_DevNum), SEP_UNCORE_NAME); ++ unregister_chrdev(MAJOR(lwsideband_DevNum), SEP_SIDEBAND_NAME); ++ for (j = 0; j < num_cpus; j++) { ++ device_destroy(pmu_class, lwsamp_DevNum + j); ++ device_destroy(pmu_class, lwsideband_DevNum + j); ++ cdev_del(&LWPMU_DEV_cdev(&lwsideband_control[j])); ++ cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); ++ } ++ ++ class_destroy(pmu_class); ++ 
unregister_chrdev_region(lwsamp_DevNum, num_cpus); ++ unregister_chrdev_region(lwsideband_DevNum, num_cpus); ++ return status; ++ } ++ ++ /* Register the file operations with the OS */ ++ for (i = 0; i < num_packages; i++) { ++ snprintf(dev_name, MAXNAMELEN, "%s%su%d", SEP_DRIVER_NAME, ++ DRV_DEVICE_DELIMITER, i); ++ sep_device = device_create(pmu_class, NULL, ++ lwsampunc_DevNum + i, NULL, dev_name); ++ if (IS_ERR(sep_device)) { ++ SEP_DRV_LOG_ERROR("Error creating SEP PMU device!"); ++ } ++ status = lwpmu_setup_cdev(lwsampunc_control + i, ++ &lwsampunc_Fops, ++ lwsampunc_DevNum + i); ++ if (status) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Error %d when adding lwpmu as char device!", ++ status); ++ unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ device_destroy(pmu_class, lwpmu_DevNum + 1); ++ cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); ++ cdev_del(&LWPMU_DEV_cdev(lwmod_control)); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); ++ unregister_chrdev(MAJOR(lwsampunc_DevNum), SEP_UNCORE_NAME); ++ unregister_chrdev(MAJOR(lwsideband_DevNum), SEP_SIDEBAND_NAME); ++ for (j = 0; j < num_cpus; j++) { ++ device_destroy(pmu_class, lwsamp_DevNum + j); ++ device_destroy(pmu_class, lwsideband_DevNum + j); ++ cdev_del(&LWPMU_DEV_cdev(&lwsideband_control[j])); ++ cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[j])); ++ } ++ ++ for (j = i; j > 0; j--) { ++ device_destroy(pmu_class, lwsampunc_DevNum + i); ++ cdev_del(&LWPMU_DEV_cdev(&lwsampunc_control[j])); ++ } ++ ++ class_destroy(pmu_class); ++ unregister_chrdev_region(lwsamp_DevNum, num_cpus); ++ unregister_chrdev_region(lwsampunc_DevNum, num_packages); ++ unregister_chrdev_region(lwsideband_DevNum, num_cpus); ++ ++ return status; ++ } else { ++ SEP_DRV_LOG_INIT("Added sampling device %d.", i); ++ } ++ } ++ ++ init_waitqueue_head(&wait_exit); ++ abnormal_handler = kthread_create(lwpmudrv_Abnormal_Handler, NULL, ++ 
"SEPDRV_ABNORMAL_HANDLER"); ++ if (abnormal_handler) { ++ wake_up_process(abnormal_handler); ++ } ++ ++#if defined(DRV_CPU_HOTPLUG) ++ /* Register CPU hotplug notifier */ ++ LINUXOS_Register_Hotplug(); ++#endif ++ /* ++ * Initialize the SEP driver version (done once at driver load time) ++ */ ++ SEP_VERSION_NODE_major(&drv_version) = SEP_MAJOR_VERSION; ++ SEP_VERSION_NODE_minor(&drv_version) = SEP_MINOR_VERSION; ++ SEP_VERSION_NODE_api(&drv_version) = SEP_API_VERSION; ++ SEP_VERSION_NODE_update(&drv_version) = SEP_UPDATE_VERSION; ++ ++ // ++ // Display driver version information ++ // ++ SEP_DRV_LOG_LOAD("PMU collection driver v%d.%d.%d %s has been loaded.", ++ SEP_VERSION_NODE_major(&drv_version), ++ SEP_VERSION_NODE_minor(&drv_version), ++ SEP_VERSION_NODE_api(&drv_version), ++ SEP_RELEASE_STRING); ++ ++#if defined(BUILD_CHIPSET) ++ SEP_DRV_LOG_LOAD("Chipset support is enabled."); ++#endif ++ ++#if defined(BUILD_GFX) ++ SEP_DRV_LOG_LOAD("Graphics support is enabled."); ++#endif ++ ++ SEP_DRV_LOG_LOAD("NMI will be used for handling PMU interrupts."); ++ ++ SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static int lwpmu_Unload(void) ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * @brief Remove the driver module from the kernel. 
++ */ ++static VOID lwpmu_Unload(void) ++{ ++ int i = 0; ++ int num_cpus; ++#if defined(CONFIG_XEN_HAVE_VPMU) ++ xen_pmu_params_t xenpmu_param; ++#endif ++ PVOID tmp_pcb; ++ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ SEP_DRV_LOG_LOAD("Driver unloading."); ++ ++ num_cpus = GLOBAL_STATE_num_cpus(driver_state); ++ ++ if (abnormal_handler) { ++ if (GET_DRIVER_STATE() != DRV_STATE_UNINITIALIZED) { ++ CHANGE_DRIVER_STATE(STATE_BIT_ANY, ++ DRV_STATE_TERMINATING); ++ } ++ wake_up_interruptible_all(&wait_exit); ++ kthread_stop(abnormal_handler); ++ abnormal_handler = NULL; ++ } ++ ++#if defined(CONFIG_XEN_HAVE_VPMU) ++ if (xen_initial_domain()) { ++ xenpmu_param.version.maj = XENPMU_VER_MAJ; ++ xenpmu_param.version.min = XENPMU_VER_MIN; ++ ++ for (i = 0; i < num_cpus; i++) { ++ xenpmu_param.vcpu = i; ++ HYPERVISOR_xenpmu_op(XENPMU_finish, &xenpmu_param); ++ ++ vfree(per_cpu(sep_xenpmu_shared, i)); ++ per_cpu(sep_xenpmu_shared, i) = NULL; ++ } ++ SEP_DRV_LOG_LOAD("VPMU was disabled on XEN Dom0."); ++ } ++#endif ++ ++ LINUXOS_Uninstall_Hooks(); ++ SYS_INFO_Destroy(); ++ OUTPUT_Destroy(); ++ cpu_buf = CONTROL_Free_Memory(cpu_buf); ++ unc_buf = CONTROL_Free_Memory(unc_buf); ++ cpu_sideband_buf = CONTROL_Free_Memory(cpu_sideband_buf); ++ module_buf = CONTROL_Free_Memory(module_buf); ++ cpu_tsc = CONTROL_Free_Memory(cpu_tsc); ++ prev_cpu_tsc = CONTROL_Free_Memory(prev_cpu_tsc); ++ diff_cpu_tsc = CONTROL_Free_Memory(diff_cpu_tsc); ++ core_to_package_map = CONTROL_Free_Memory(core_to_package_map); ++ core_to_phys_core_map = CONTROL_Free_Memory(core_to_phys_core_map); ++ core_to_thread_map = CONTROL_Free_Memory(core_to_thread_map); ++ threads_per_core = CONTROL_Free_Memory(threads_per_core); ++ occupied_core_ids = CONTROL_Free_Memory(occupied_core_ids); ++#if defined(DRV_SEP_ACRN_ON) ++ vm_info_list = CONTROL_Free_Memory(vm_info_list); ++#endif ++ ++ tmp_pcb = pcb; ++ // Ensures there is no log message written (ERROR, ALLOC, ...) ++ pcb = NULL; // between pcb being freed and pcb being NULL. 
++ CONTROL_Free_Memory(tmp_pcb); ++ pcb_size = 0; ++ ++ UNC_COMMON_Clean_Up(); ++ ++ unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ device_destroy(pmu_class, lwpmu_DevNum + 1); ++ ++ cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); ++ cdev_del(&LWPMU_DEV_cdev(lwmod_control)); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ ++ unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); ++ unregister_chrdev(MAJOR(lwsampunc_DevNum), SEP_UNCORE_NAME); ++ unregister_chrdev(MAJOR(lwsideband_DevNum), SEP_SIDEBAND_NAME); ++ ++ for (i = 0; i < num_cpus; i++) { ++ device_destroy(pmu_class, lwsamp_DevNum + i); ++ device_destroy(pmu_class, lwsideband_DevNum + i); ++ cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[i])); ++ cdev_del(&LWPMU_DEV_cdev(&lwsideband_control[i])); ++ } ++ ++ for (i = 0; i < num_packages; i++) { ++ device_destroy(pmu_class, lwsampunc_DevNum + i); ++ cdev_del(&LWPMU_DEV_cdev(&lwsampunc_control[i])); ++ } ++ ++ class_destroy(pmu_class); ++ ++ unregister_chrdev_region(lwsamp_DevNum, num_cpus); ++ unregister_chrdev_region(lwsampunc_DevNum, num_packages); ++ unregister_chrdev_region(lwsideband_DevNum, num_cpus); ++ lwpmu_control = CONTROL_Free_Memory(lwpmu_control); ++ lwmod_control = CONTROL_Free_Memory(lwmod_control); ++ lwsamp_control = CONTROL_Free_Memory(lwsamp_control); ++ lwsampunc_control = CONTROL_Free_Memory(lwsampunc_control); ++ lwsideband_control = CONTROL_Free_Memory(lwsideband_control); ++ ++ CONTROL_Memory_Tracker_Free(); ++ ++#if defined(DRV_CPU_HOTPLUG) ++ /* Unregister CPU hotplug notifier */ ++ LINUXOS_Unregister_Hotplug(); ++#endif ++ ++ SEP_DRV_LOG_FLOW_OUT( ++ "Log deallocation. 
Cannot track further in internal log."); ++ UTILITY_Driver_Log_Free(); // Do not use SEP_DRV_LOG_X (where X != LOAD) after this ++ ++ SEP_DRV_LOG_LOAD( ++ "PMU collection driver v%d.%d.%d %s has been unloaded.", ++ SEP_VERSION_NODE_major(&drv_version), ++ SEP_VERSION_NODE_minor(&drv_version), ++ SEP_VERSION_NODE_api(&drv_version), SEP_RELEASE_STRING); ++} ++ ++/* Declaration of the init and exit functions */ ++module_init(lwpmu_Load); ++module_exit(lwpmu_Unload); +diff --git a/drivers/platform/x86/sepdk/sep/output.c b/drivers/platform/x86/sepdk/sep/output.c +new file mode 100755 +index 000000000000..5fe2d28b4174 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/output.c +@@ -0,0 +1,1177 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "control.h" ++#include "output.h" ++#include "utility.h" ++#include "inc/linuxos.h" ++#define OTHER_C_DEVICES 1 // one for module ++ ++/* ++ * Global data: Buffer control structure ++ */ ++static wait_queue_head_t flush_queue; ++static atomic_t flush_writers; ++static volatile int flush; ++extern DRV_CONFIG drv_cfg; ++extern DRV_BOOL multi_pebs_enabled; ++extern DRV_BOOL sched_switch_enabled; ++extern DRV_BOOL unc_buf_init; ++ ++static void output_NMI_Sample_Buffer(unsigned long data); ++ ++/* ++ * @fn output_Free_Buffers(output, size) ++ * ++ * @param IN outbuf - The output buffer to manipulate ++ * ++ * @brief Deallocate the memory associated with the buffer descriptor ++ * ++ */ ++static VOID output_Free_Buffers(BUFFER_DESC buffer, size_t size) ++{ ++ int j; ++ OUTPUT outbuf; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p, size: %u.", buffer, size); ++ ++ if (buffer == NULL) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!buffer)."); ++ return; ++ } ++ outbuf = &BUFFER_DESC_outbuf(buffer); ++ for (j = 0; j < OUTPUT_NUM_BUFFERS; j++) { ++ CONTROL_Free_Memory(OUTPUT_buffer(outbuf, j)); ++ OUTPUT_buffer(outbuf, j) = NULL; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn int OUTPUT_Reserve_Buffer_Space (OUTPUT outbuf, ++ * U32 size, ++ * U8 in_notification) ++ * ++ * @param outbuf IN output buffer to manipulate ++ * @param size IN The size of data to reserve ++ * @param defer IN wake up directly if FALSE. ++ * Otherwise, see below. 
++ * @param in_notification IN 1 if in notification, 0 if not ++ * ++ * @result outloc - to the location where data is to be written ++ * ++ * Reserve space in the output buffers for data. The behavior of this function ++ * when a buffer is full will vary depending on the 'defer' and 'in_notification' ++ * parameters, as described in the special notes section. ++ * ++ * Special Notes: ++ * ----------------------------------------------------------------------------------------------------------------------- ++ * defer | in_notification | description ++ * ----------------------------------------------------------------------------------------------------------------------- ++ * FALSE | FALSE/TRUE | directly signals the buffer's consumer with wake_up_interruptible_sync ++ * ----------------------------------------------------------------------------------------------------------------------- ++ * TRUE | FALSE | defers the call to wake_up_interruptible_sync using tasklet_schedule [needed because calling ++ * | | it directly is not safe from an NMI] ++ * ----------------------------------------------------------------------------------------------------------------------- ++ * | | do not signal -or explicitly schedule the signaling of- the buffer's consumer [needed because ++ * TRUE | TRUE | neither operation is safe from the sched_switch tracepoint callback in kernel version 4.13]. ++ * | | Instead relies on the interrupt handler to do it next time there is an interrupt. 
++ * ----------------------------------------------------------------------------------------------------------------------- ++ */ ++void *OUTPUT_Reserve_Buffer_Space(BUFFER_DESC bd, U32 size, ++ DRV_BOOL defer, U8 in_notification, ++ S32 cpu_idx) ++{ ++ char *outloc = NULL; ++ OUTPUT outbuf = &BUFFER_DESC_outbuf(bd); ++ S32 this_cpu; ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_IN( ++ in_notification, "Bd: %p, size: %u, defer: %u, notif: %u.", bd, ++ size, defer, in_notification); ++ ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg) && flush) { ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT( ++ in_notification, "Res: NULL (cp_mode && flush)."); ++ return NULL; ++ } ++ ++ if (OUTPUT_remaining_buffer_size(outbuf) >= size) { ++ outloc = (char *) ++ (OUTPUT_buffer(outbuf, OUTPUT_current_buffer(outbuf)) + ++ (OUTPUT_total_buffer_size(outbuf) - ++ OUTPUT_remaining_buffer_size(outbuf))); ++ } else { ++ U32 i, j, start; ++ OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) = ++ OUTPUT_total_buffer_size(outbuf) - ++ OUTPUT_remaining_buffer_size(outbuf); ++ ++ // ++ // Massive Naive assumption: Must find a way to fix it. ++ // In spite of the loop. ++ // The next buffer to fill are monotonically increasing ++ // indicies. 
++ // ++ if (!DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ OUTPUT_signal_full(outbuf) = TRUE; ++ } ++ ++ start = OUTPUT_current_buffer(outbuf); ++ for (i = start + 1; i < start + OUTPUT_NUM_BUFFERS; i++) { ++ j = i % OUTPUT_NUM_BUFFERS; ++ ++ //don't check if buffer has data when doing CP ++ if (!OUTPUT_buffer_full(outbuf, j) || ++ (DRV_CONFIG_enable_cp_mode(drv_cfg))) { ++ OUTPUT_current_buffer(outbuf) = j; ++ OUTPUT_remaining_buffer_size(outbuf) = ++ OUTPUT_total_buffer_size(outbuf); ++ outloc = (char *)OUTPUT_buffer(outbuf, j); ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ // discarding all the information in the new buffer in CP mode ++ OUTPUT_buffer_full(outbuf, j) = 0; ++ break; ++ } ++ } ++#if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)) ++ else { ++ if (!defer) { ++ OUTPUT_signal_full(outbuf) = FALSE; ++ SEP_DRV_LOG_NOTIFICATION_WARNING( ++ in_notification, ++ "Output buffers are full. Might be dropping some samples!"); ++ break; ++ } ++ } ++#endif ++ } ++ } ++ ++ if (outloc) { ++ OUTPUT_remaining_buffer_size(outbuf) -= size; ++ memset(outloc, 0, size); ++ } ++ ++ if (OUTPUT_signal_full(outbuf)) { ++ if (!defer) { ++#if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)) ++ SEP_DRV_LOG_NOTIFICATION_TRACE( ++ in_notification, ++ "Choosing direct wakeup approach."); ++#if !defined(DRV_SEP_ACRN_ON) ++ wake_up_interruptible_sync(&BUFFER_DESC_queue(bd)); ++#endif ++ OUTPUT_signal_full(outbuf) = FALSE; ++#endif ++ } else { ++ if (!OUTPUT_tasklet_queued(outbuf)) { ++ if (cpu_idx == -1) { ++ this_cpu = CONTROL_THIS_CPU(); ++ } else { ++ this_cpu = cpu_idx; ++ } ++ if (!in_notification) { ++ SEP_DRV_LOG_NOTIFICATION_TRACE( ++ in_notification, ++ "Scheduling the tasklet on cpu %u.", ++ this_cpu); ++ OUTPUT_tasklet_queued(outbuf) = TRUE; ++#if !defined(DRV_SEP_ACRN_ON) ++ tasklet_schedule(&CPU_STATE_nmi_tasklet( ++ &pcb[this_cpu])); ++#endif ++ } else { ++ static U32 cpt; ++ ++ if (!cpt) { ++ SEP_DRV_LOG_WARNING( ++ "Using 
interrupt-driven sideband buffer flushes for extra safety."); ++ SEP_DRV_LOG_WARNING( ++ "This may result in fewer context switches being recorded."); ++ } ++ SEP_DRV_LOG_TRACE( ++ "Lost context switch information (for the %uth time).", ++ ++cpt); ++ } ++ } ++ } ++ } ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, "Res: %p.", outloc); ++ return outloc; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * ++ * @fn int OUTPUT_Buffer_Fill (BUFFER_DESC buf, ++ * PVOID data, ++ * U16 size, ++ * U8 in_notification) ++ * ++ * @brief Place a record (can be module, marker, etc) in a buffer ++ * ++ * @param data - pointer to a buffer to copy ++ * @param size - size of the buffer to cpu ++ * @param in_notification - 1 if in notification, 0 if not ++ * ++ * @return number of bytes copied into buffer ++ * ++ * Start by ensuring that output buffer space is available. ++ * If so, then copy the input data to the output buffer and make the necessary ++ * adjustments to manage the output buffers. ++ * If not, signal the read event for this buffer and get another buffer. ++ * ++ * Special Notes: ++ * ++ */ ++static int output_Buffer_Fill(BUFFER_DESC bd, PVOID data, U16 size, ++ U8 in_notification) ++{ ++ char *outloc; ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_IN( ++ in_notification, "Bd: %p, data: %p, size: %u.", bd, data, size); ++ ++ outloc = (char *)OUTPUT_Reserve_Buffer_Space(bd, size, ++ FALSE, in_notification, -1); ++ if (outloc) { ++ memcpy(outloc, data, size); ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, ++ "Res: %d (outloc).", size); ++ return size; ++ } ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, ++ "Res: 0 (!outloc)."); ++ return 0; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn int OUTPUT_Module_Fill (PVOID data, ++ * U16 size, ++ * U8 in_notification) ++ * ++ * @brief Place a module record in a buffer ++ * ++ * @param data - pointer to a buffer to copy ++ * @param size - size of the buffer to cpu ++ * @param in_notification - 1 if in notification, 0 if not ++ * ++ * @return number of bytes copied into buffer ++ * ++ * ++ */ ++int OUTPUT_Module_Fill(PVOID data, U16 size, U8 in_notification) ++{ ++ int ret_size; ++ OUTPUT outbuf = &BUFFER_DESC_outbuf(module_buf); ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_IN(in_notification, ++ "Data: %p, size: %u.", data, size); ++ ++ spin_lock(&OUTPUT_buffer_lock(outbuf)); ++ ret_size = output_Buffer_Fill(module_buf, data, size, in_notification); ++ spin_unlock(&OUTPUT_buffer_lock(outbuf)); ++ ++ SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, "Res: %d.", ++ ret_size); ++ return ret_size; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn ssize_t output_Read(struct file *filp, ++ * char *buf, ++ * size_t count, ++ * loff_t *f_pos, ++ * BUFFER_DESC kernel_buf) ++ * ++ * @brief Return a sample buffer to user-mode. If not full or flush, wait ++ * ++ * @param *filp a file pointer ++ * @param *buf a sampling buffer ++ * @param count size of the user's buffer ++ * @param f_pos file pointer (current offset in bytes) ++ * @param kernel_buf the kernel output buffer structure ++ * ++ * @return number of bytes read. zero indicates end of file. Neg means error ++ * ++ * Place no more than count bytes into the user's buffer. ++ * Block if unavailable on "BUFFER_DESC_queue(buf)" ++ * ++ * Special Notes: ++ * ++ */ ++static ssize_t output_Read(struct file *filp, char __user *buf, size_t count, ++ loff_t *f_pos, BUFFER_DESC kernel_buf) ++{ ++ ssize_t to_copy = 0; ++ ssize_t uncopied; ++ OUTPUT outbuf = &BUFFER_DESC_outbuf(kernel_buf); ++ U32 cur_buf, i; ++ /* Buffer is filled by output_fill_modules. 
*/ ++ ++ SEP_DRV_LOG_TRACE_IN( ++ "Filp: %p, buf: %p, count: %u, f_pos: %p, kernel_buf: %p.", ++ filp, buf, (U32)count, f_pos, kernel_buf); ++ ++ cur_buf = OUTPUT_current_buffer(outbuf); ++ if (!DRV_CONFIG_enable_cp_mode(drv_cfg) || flush) { ++ for (i = 0; i < OUTPUT_NUM_BUFFERS; i++) { ++ //iterate through all buffers ++ cur_buf++; ++ if (cur_buf >= OUTPUT_NUM_BUFFERS) { ++ cur_buf = 0; ++ } //circularly ++ to_copy = OUTPUT_buffer_full(outbuf, cur_buf); ++ if (to_copy != 0) { ++ if (flush && ++ DRV_CONFIG_enable_cp_mode(drv_cfg) && ++ cur_buf == OUTPUT_current_buffer(outbuf)) { ++ OUTPUT_current_buffer(outbuf)++; ++ if (OUTPUT_current_buffer(outbuf) >= ++ OUTPUT_NUM_BUFFERS) { ++ OUTPUT_current_buffer(outbuf) = ++ 0; ++ } ++ OUTPUT_remaining_buffer_size(outbuf) = ++ OUTPUT_total_buffer_size( ++ outbuf); ++ } ++ break; ++ } ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE("buffer %d has %d bytes ready.", (S32)cur_buf, ++ (S32)to_copy); ++ if (!flush && to_copy == 0) { ++ unsigned long delay = msecs_to_jiffies(1000); ++ ++ while (1) { ++ U32 res = wait_event_interruptible_timeout( ++ BUFFER_DESC_queue(kernel_buf), ++ flush || (OUTPUT_buffer_full(outbuf, cur_buf) && ++ !DRV_CONFIG_enable_cp_mode(drv_cfg)), ++ delay); ++ ++ if (GET_DRIVER_STATE() == DRV_STATE_TERMINATING) { ++ SEP_DRV_LOG_INIT( ++ "Switched to TERMINATING while waiting for BUFFER_DESC_queue!"); ++ break; ++ } ++ ++ if (res == ERESTARTSYS || res == 0) { ++ SEP_DRV_LOG_TRACE( ++ "Wait_event_interruptible_timeout(BUFFER_DESC_queue): %u.", ++ res); ++ continue; ++ } ++ ++ break; ++ } ++ ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ // reset the current buffer index if in CP mode ++ cur_buf = OUTPUT_current_buffer(outbuf); ++ for (i = 0; i < OUTPUT_NUM_BUFFERS; ++ i++) { //iterate through all buffers ++ cur_buf++; ++ if (cur_buf >= OUTPUT_NUM_BUFFERS) { ++ cur_buf = 0; ++ } //circularly ++ to_copy = OUTPUT_buffer_full(outbuf, cur_buf); ++ if (to_copy != 0) { ++ if (flush && ++ DRV_CONFIG_enable_cp_mode( ++ drv_cfg) && 
++ cur_buf == OUTPUT_current_buffer( ++ outbuf)) { ++ OUTPUT_current_buffer(outbuf)++; ++ if (OUTPUT_current_buffer( ++ outbuf) >= ++ OUTPUT_NUM_BUFFERS) { ++ OUTPUT_current_buffer( ++ outbuf) = 0; ++ } ++ OUTPUT_remaining_buffer_size( ++ outbuf) = ++ OUTPUT_total_buffer_size( ++ outbuf); ++ } ++ break; ++ } ++ } ++ } ++ SEP_DRV_LOG_TRACE("Get to copy %d.", (S32)cur_buf); ++ to_copy = OUTPUT_buffer_full(outbuf, cur_buf); ++ SEP_DRV_LOG_TRACE( ++ "output_Read awakened, buffer %d has %d bytes.", ++ cur_buf, (int)to_copy); ++ } ++ ++ /* Ensure that the user's buffer is large enough */ ++ if (to_copy > count) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "OS_NO_MEM (user buffer is too small!)."); ++ return OS_NO_MEM; ++ } ++ ++ /* Copy data to user space. Note that we use cur_buf as the source */ ++ if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { ++ uncopied = copy_to_user(buf, OUTPUT_buffer(outbuf, cur_buf), ++ to_copy); ++ /* Mark the buffer empty */ ++ OUTPUT_buffer_full(outbuf, cur_buf) = 0; ++ *f_pos += to_copy - uncopied; ++ if (uncopied) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Res: %u (only copied %u of %u bytes!).", ++ (U32)(to_copy - uncopied), (U32)to_copy, ++ (U32)uncopied); ++ return (to_copy - uncopied); ++ } ++ } else { ++ to_copy = 0; ++ SEP_DRV_LOG_TRACE("To copy set to 0."); ++ } ++ ++ // At end-of-file, decrement the count of active buffer writers ++ ++ if (to_copy == 0) { ++ DRV_BOOL flush_val = atomic_dec_and_test(&flush_writers); ++ SEP_DRV_LOG_TRACE("Decremented flush_writers."); ++ if (flush_val == TRUE) { ++ wake_up_interruptible_sync(&flush_queue); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)to_copy); ++ return to_copy; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn ssize_t OUTPUT_Module_Read(struct file *filp, ++ * char *buf, ++ * size_t count, ++ * loff_t *f_pos) ++ * ++ * @brief Return a module buffer to user-mode. 
If not full or flush, wait ++ * ++ * @param *filp a file pointer ++ * @param *buf a sampling buffer ++ * @param count size of the user's buffer ++ * @param f_pos file pointer (current offset in bytes) ++ * @param buf the kernel output buffer structure ++ * ++ * @return number of bytes read. zero indicates end of file. Neg means error ++ * ++ * Place no more than count bytes into the user's buffer. ++ * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full. ++ * ++ * Special Notes: ++ * ++ */ ++ssize_t OUTPUT_Module_Read(struct file *filp, char __user *buf, size_t count, ++ loff_t *f_pos) ++{ ++ ssize_t res; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ SEP_DRV_LOG_TRACE("Read request for modules on minor."); ++ ++ res = output_Read(filp, buf, count, f_pos, module_buf); ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res); ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn ssize_t OUTPUT_Sample_Read(struct file *filp, ++ * char *buf, ++ * size_t count, ++ * loff_t *f_pos) ++ * ++ * @brief Return a sample buffer to user-mode. If not full or flush, wait ++ * ++ * @param *filp a file pointer ++ * @param *buf a sampling buffer ++ * @param count size of the user's buffer ++ * @param f_pos file pointer (current offset in bytes) ++ * @param buf the kernel output buffer structure ++ * ++ * @return number of bytes read. zero indicates end of file. Neg means error ++ * ++ * Place no more than count bytes into the user's buffer. ++ * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full. 
++ * ++ * Special Notes: ++ * ++ */ ++ssize_t OUTPUT_Sample_Read(struct file *filp, char __user *buf, size_t count, ++ loff_t *f_pos) ++{ ++ int i; ++ ssize_t res; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ i = iminor(filp->DRV_F_DENTRY ++ ->d_inode); // kernel pointer - not user pointer ++ SEP_DRV_LOG_TRACE("Read request for samples on minor %d.", i); ++ res = output_Read(filp, buf, count, f_pos, &(cpu_buf[i])); ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res); ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn ssize_t OUTPUT_Sample_Read(struct file *filp, ++ * char *buf, ++ * size_t count, ++ * loff_t *f_pos) ++ * ++ * @brief Return a sample buffer to user-mode. If not full or flush, wait ++ * ++ * @param *filp a file pointer ++ * @param *buf a sampling buffer ++ * @param count size of the user's buffer ++ * @param f_pos file pointer (current offset in bytes) ++ * @param buf the kernel output buffer structure ++ * ++ * @return number of bytes read. zero indicates end of file. Neg means error ++ * ++ * Place no more than count bytes into the user's buffer. ++ * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full. ++ * ++ * Special Notes: ++ * ++ */ ++ssize_t OUTPUT_UncSample_Read(struct file *filp, char __user *buf, ++ size_t count, loff_t *f_pos) ++{ ++ int i; ++ ssize_t res = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ i = iminor(filp->DRV_F_DENTRY ++ ->d_inode); // kernel pointer - not user pointer ++ SEP_DRV_LOG_TRACE("Read request for samples on minor %d.", i); ++ if (unc_buf_init) { ++ res = output_Read(filp, buf, count, f_pos, &(unc_buf[i])); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res); ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn ssize_t OUTPUT_SidebandInfo_Read(struct file *filp, ++ * char *buf, ++ * size_t count, ++ * loff_t *f_pos) ++ * ++ * @brief Return a sideband info buffer to user-mode. 
If not full or flush, wait ++ * ++ * @param *filp a file pointer ++ * @param *buf a sideband info buffer ++ * @param count size of the user's buffer ++ * @param f_pos file pointer (current offset in bytes) ++ * @param buf the kernel output buffer structure ++ * ++ * @return number of bytes read. zero indicates end of file. Neg means error ++ * ++ * Place no more than count bytes into the user's buffer. ++ * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full. ++ * ++ * Special Notes: ++ * ++ */ ++ssize_t OUTPUT_SidebandInfo_Read(struct file *filp, char __user *buf, ++ size_t count, loff_t *f_pos) ++{ ++ int i; ++ ssize_t res = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ i = iminor(filp->DRV_F_DENTRY ++ ->d_inode); // kernel pointer - not user pointer ++ SEP_DRV_LOG_TRACE("Read request for pebs process info on minor %d.", i); ++ if (multi_pebs_enabled || sched_switch_enabled) { ++ res = output_Read(filp, buf, count, f_pos, ++ &(cpu_sideband_buf[i])); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res); ++ return res; ++} ++ ++/* ++ * @fn output_Initialized_Buffers() ++ * ++ * @result OUTPUT ++ * @param BUFFER_DESC desc - descriptor for the buffer being initialized ++ * @param U32 factor - multiplier for OUTPUT_BUFFER_SIZE. ++ * 1 for cpu buffers, 2 for module buffers. 
++ * ++ * @brief Allocate, initialize, and return an output data structure ++ * ++ * Special Notes: ++ * Multiple (OUTPUT_NUM_BUFFERS) buffers will be allocated ++ * Each buffer is of size (OUTPUT_BUFFER_SIZE) ++ * Each field in the buffer is initialized ++ * The event queue for the OUTPUT is initialized ++ * ++ */ ++static BUFFER_DESC output_Initialized_Buffers(BUFFER_DESC desc, U32 factor) ++{ ++ OUTPUT outbuf; ++ int j; ++ ++ SEP_DRV_LOG_TRACE_IN("Desc: %p, factor: %u.", desc, factor); ++ ++ /* ++ * Allocate the BUFFER_DESC, then allocate its buffers ++ */ ++ if (desc == NULL) { ++ desc = (BUFFER_DESC)CONTROL_Allocate_Memory( ++ sizeof(BUFFER_DESC_NODE)); ++ if (desc == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Res: NULL (failed allocation for desc!)."); ++ return NULL; ++ } ++ } ++ outbuf = &(BUFFER_DESC_outbuf(desc)); ++ spin_lock_init(&OUTPUT_buffer_lock(outbuf)); ++ for (j = 0; j < OUTPUT_NUM_BUFFERS; j++) { ++ if (OUTPUT_buffer(outbuf, j) == NULL) { ++ OUTPUT_buffer(outbuf, j) = CONTROL_Allocate_Memory( ++ (size_t)OUTPUT_BUFFER_SIZE * factor); ++ } ++ OUTPUT_buffer_full(outbuf, j) = 0; ++ if (!OUTPUT_buffer(outbuf, j)) { ++ /*return NULL to tell the caller that allocation failed*/ ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Res: NULL (failed alloc for OUTPUT_buffer(output, %d)!).", ++ j); ++ CONTROL_Free_Memory(desc); ++ return NULL; ++ } ++ } ++ /* ++ * Initialize the remaining fields in the BUFFER_DESC ++ */ ++ OUTPUT_current_buffer(outbuf) = 0; ++ OUTPUT_signal_full(outbuf) = FALSE; ++ OUTPUT_remaining_buffer_size(outbuf) = OUTPUT_BUFFER_SIZE * factor; ++ OUTPUT_total_buffer_size(outbuf) = OUTPUT_BUFFER_SIZE * factor; ++ OUTPUT_tasklet_queued(outbuf) = FALSE; ++ init_waitqueue_head(&BUFFER_DESC_queue(desc)); ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %p.", desc); ++ return desc; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID output_NMI_Sample_Buffer ( ++ * ) ++ * ++ * @brief Callback from NMI tasklet. 
The function checks if any buffers ++ * are full, and if full, signals the reader threads. ++ * ++ * @param none ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * This callback was added to handle out-of-band event delivery ++ * when running in NMI mode ++ */ ++static void output_NMI_Sample_Buffer(unsigned long data) ++{ ++ U32 cpu_id; ++ OUTPUT outbuf; ++ ++ SEP_DRV_LOG_NOTIFICATION_IN("Data: %u.", (U32)data); ++ ++ if (data == (unsigned long)-1) { ++ cpu_id = CONTROL_THIS_CPU(); ++ } else { ++ cpu_id = data; ++ } ++ ++ if (cpu_buf) { ++ outbuf = &BUFFER_DESC_outbuf(&cpu_buf[cpu_id]); ++ if (outbuf && OUTPUT_signal_full(outbuf)) { ++ wake_up_interruptible_sync( ++ &BUFFER_DESC_queue(&cpu_buf[cpu_id])); ++ OUTPUT_signal_full(outbuf) = FALSE; ++ OUTPUT_tasklet_queued(outbuf) = FALSE; ++ } ++ } ++ ++ if (cpu_sideband_buf) { ++ outbuf = &BUFFER_DESC_outbuf(&cpu_sideband_buf[cpu_id]); ++ if (outbuf && OUTPUT_signal_full(outbuf)) { ++ wake_up_interruptible_sync( ++ &BUFFER_DESC_queue(&cpu_sideband_buf[cpu_id])); ++ OUTPUT_signal_full(outbuf) = FALSE; ++ OUTPUT_tasklet_queued(outbuf) = FALSE; ++ } ++ } ++ ++ SEP_DRV_LOG_NOTIFICATION_OUT(""); ++} ++ ++/* ++ * @fn extern void OUTPUT_Initialize(void) ++ * ++ * @returns OS_STATUS ++ * @brief Allocate, initialize, and return all output data structure ++ * ++ * Special Notes: ++ * Initialize the output structures. ++ * For each CPU in the system, allocate the output buffers. 
++ * Initialize a module buffer and temp file to hold module information ++ * Initialize the read queues for each sample buffer ++ * ++ */ ++OS_STATUS OUTPUT_Initialize(void) ++{ ++ BUFFER_DESC unused; ++ S32 i; ++ OS_STATUS status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ flush = 0; ++ if (saved_buffer_size != OUTPUT_BUFFER_SIZE) { ++ if (saved_buffer_size > 0) { ++ OUTPUT_Destroy(); ++ } ++ saved_buffer_size = OUTPUT_BUFFER_SIZE; ++ } ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ unused = output_Initialized_Buffers(&cpu_buf[i], 1); ++ if (!unused) { ++ OUTPUT_Destroy(); ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "OS_NO_MEM (failed to allocate cpu output buffers!)."); ++ return OS_NO_MEM; ++ } ++ } ++ ++ if (multi_pebs_enabled || sched_switch_enabled) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ unused = output_Initialized_Buffers( ++ &cpu_sideband_buf[i], 1); ++ if (!unused) { ++ OUTPUT_Destroy(); ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "OS_NO_MEM (failed to allocate pebs process info output buffers!)."); ++ return OS_NO_MEM; ++ } ++ } ++ } ++ ++ /* ++ * Just need one module buffer ++ */ ++ unused = output_Initialized_Buffers(module_buf, MODULE_BUFF_SIZE); ++ if (!unused) { ++ OUTPUT_Destroy(); ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "OS_NO_MEM (failed to create module output buffers!)."); ++ return OS_NO_MEM; ++ } ++ ++ SEP_DRV_LOG_TRACE("Set up the tasklet for NMI."); ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++#if !defined(DRV_SEP_ACRN_ON) ++ tasklet_init(&CPU_STATE_nmi_tasklet(&pcb[i]), ++ output_NMI_Sample_Buffer, (unsigned long)-1); ++#else ++ tasklet_init(&CPU_STATE_nmi_tasklet(&pcb[i]), ++ output_NMI_Sample_Buffer, (unsigned long)i); ++#endif ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)status); ++ return status; ++} ++ ++#if defined(DRV_USE_TASKLET_WORKAROUND) ++static struct tasklet_struct dummy_tasklet; ++ ++/* ++ * @fn extern void output_tasklet_waker (PVOID ptr) ++ * ++ * @returns None ++ * 
@brief Schedules a dummy tasklet to wake up the tasklet handler on the current core ++ * ++ * Special Notes: ++ * Workaround for a rare situation where some tasklets are scheduled, but the core's TASKLET softirq bit was reset. ++ * [NB: this may be caused by a kernel bug; so far, this issue was only observed on kernel version 3.10.0-123.el7] ++ * Scheduling a (new) tasklet raises a new softirq, and gives 'forgotten' tasklets another chance to be processed. ++ * This workaround is not fool-proof: if this new tasklet gets 'forgotten' too, the driver will get stuck in the ++ * Clean Up routine until it gets processed (thanks to an external event raising the TASKLET softirq on this core), ++ * which might never happen. ++ * ++ */ ++static void output_tasklet_waker(PVOID ptr) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ tasklet_schedule(&dummy_tasklet); ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn extern void output_dummy_tasklet_handler (unsigned long dummy) ++ * ++ * @returns None ++ * @brief Dummy tasklet handler. ++ * ++ * Special Notes: ++ * If this gets executed, the aforementioned workaround was successful. ++ * ++ */ ++static void output_dummy_tasklet_handler(unsigned long dummy) ++{ ++ SEP_DRV_LOG_NOTIFICATION_IN("Workaround was successful!"); ++ SEP_DRV_LOG_NOTIFICATION_OUT(""); ++} ++#endif ++ ++/* ++ * @fn extern void OUTPUT_Cleanup (void) ++ * ++ * @returns None ++ * @brief Cleans up NMI tasklets if needed ++ * ++ * Special Notes: ++ * Waits until all NMI tasklets are complete. ++ * ++ */ ++void OUTPUT_Cleanup(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (!pcb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb)."); ++ return; ++ } else { ++ int i; ++ SEP_DRV_LOG_TRACE("Killing all NMI tasklets..."); ++ ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ SEP_DRV_LOG_TRACE("Killing NMI tasklet %d...", i); ++ ++ if (CPU_STATE_nmi_tasklet(&pcb[i]).state) { ++#if defined(DRV_USE_TASKLET_WORKAROUND) ++ SEP_DRV_LOG_ERROR( ++ "Tasklet %d is probably stuck! 
Trying workaround...", ++ i); ++ tasklet_init(&dummy_tasklet, ++ output_dummy_tasklet_handler, 0); ++ CONTROL_Invoke_Cpu(i, output_tasklet_waker, ++ NULL); ++ tasklet_kill(&dummy_tasklet); ++ SEP_DRV_LOG_ERROR( ++ "Workaround was successful for tasklet %d.", ++ i); ++#else ++ SEP_DRV_LOG_ERROR( ++ "Tasklet %d may be stuck. Try to set USE_TASKLET_WORKAROUND=YES in the Makefile if you observe unexpected behavior (e.g. cannot terminate a collection or initiate a new one).", ++ i); ++#endif ++ } ++ ++ tasklet_kill(&CPU_STATE_nmi_tasklet(&pcb[i])); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn extern void OUTPUT_Initialize_UNC() ++ * ++ * @returns OS_STATUS ++ * @brief Allocate, initialize, and return all output data structure ++ * ++ * Special Notes: ++ * Initialize the output structures. ++ * For each CPU in the system, allocate the output buffers. ++ * Initialize a module buffer and temp file to hold module information ++ * Initialize the read queues for each sample buffer ++ * ++ */ ++OS_STATUS OUTPUT_Initialize_UNC(void) ++{ ++ BUFFER_DESC unused; ++ int i; ++ OS_STATUS status = OS_SUCCESS; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ for (i = 0; i < num_packages; i++) { ++ unused = output_Initialized_Buffers(&unc_buf[i], 1); ++ if (!unused) { ++ OUTPUT_Destroy(); ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Failed to allocate package output buffers!"); ++ return OS_NO_MEM; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)status); ++ return status; ++} ++ ++/* ++ * @fn OS_STATUS OUTPUT_Flush() ++ * ++ * @brief Flush the module buffers and sample buffers ++ * ++ * @return OS_STATUS ++ * ++ * For each CPU in the system, set buffer full to the byte count to flush. ++ * Flush the modules buffer, as well. 
++ * ++ */ ++int OUTPUT_Flush(void) ++{ ++ int i; ++ int writers = 0; ++ OUTPUT outbuf; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ /* ++ * Flush all remaining data to files ++ * set up a flush event ++ */ ++ init_waitqueue_head(&flush_queue); ++ SEP_DRV_LOG_TRACE( ++ "Waiting for %d writers.", ++ (GLOBAL_STATE_num_cpus(driver_state) + OTHER_C_DEVICES)); ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ if (CPU_STATE_initial_mask(&pcb[i]) == 0) { ++ continue; ++ } ++ outbuf = &(cpu_buf[i].outbuf); ++ writers += 1; ++ ++ OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) = ++ OUTPUT_total_buffer_size(outbuf) - ++ OUTPUT_remaining_buffer_size(outbuf); ++ } ++ ++ if (unc_buf_init) { ++ for (i = 0; i < num_packages; i++) { ++ outbuf = &(unc_buf[i].outbuf); ++ writers += 1; ++ ++ OUTPUT_buffer_full(outbuf, ++ OUTPUT_current_buffer(outbuf)) = ++ OUTPUT_total_buffer_size(outbuf) - ++ OUTPUT_remaining_buffer_size(outbuf); ++ } ++ } ++ ++ if (multi_pebs_enabled || sched_switch_enabled) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ if (CPU_STATE_initial_mask(&pcb[i]) == 0) { ++ continue; ++ } ++ outbuf = &(cpu_sideband_buf[i].outbuf); ++ writers += 1; ++ ++ OUTPUT_buffer_full(outbuf, ++ OUTPUT_current_buffer(outbuf)) = ++ OUTPUT_total_buffer_size(outbuf) - ++ OUTPUT_remaining_buffer_size(outbuf); ++ } ++ } ++ ++ atomic_set(&flush_writers, writers + OTHER_C_DEVICES); ++ // Flip the switch to terminate the output threads ++ // Do not do this earlier, as threads may terminate before all the data is flushed ++ flush = 1; ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ if (CPU_STATE_initial_mask(&pcb[i]) == 0) { ++ continue; ++ } ++ outbuf = &BUFFER_DESC_outbuf(&cpu_buf[i]); ++ OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) = ++ OUTPUT_total_buffer_size(outbuf) - ++ OUTPUT_remaining_buffer_size(outbuf); ++ wake_up_interruptible_sync(&BUFFER_DESC_queue(&cpu_buf[i])); ++ } ++ ++ if (unc_buf_init) { ++ for (i = 0; i 
< num_packages; i++) { ++ outbuf = &BUFFER_DESC_outbuf(&unc_buf[i]); ++ OUTPUT_buffer_full(outbuf, ++ OUTPUT_current_buffer(outbuf)) = ++ OUTPUT_total_buffer_size(outbuf) - ++ OUTPUT_remaining_buffer_size(outbuf); ++ wake_up_interruptible_sync( ++ &BUFFER_DESC_queue(&unc_buf[i])); ++ } ++ } ++ ++ if (multi_pebs_enabled || sched_switch_enabled) { ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ if (CPU_STATE_initial_mask(&pcb[i]) == 0) { ++ continue; ++ } ++ outbuf = &BUFFER_DESC_outbuf(&cpu_sideband_buf[i]); ++ OUTPUT_buffer_full(outbuf, ++ OUTPUT_current_buffer(outbuf)) = ++ OUTPUT_total_buffer_size(outbuf) - ++ OUTPUT_remaining_buffer_size(outbuf); ++ wake_up_interruptible_sync( ++ &BUFFER_DESC_queue(&cpu_sideband_buf[i])); ++ } ++ } ++ // Flush all data from the module buffers ++ ++ outbuf = &BUFFER_DESC_outbuf(module_buf); ++ ++ OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) = ++ OUTPUT_total_buffer_size(outbuf) - ++ OUTPUT_remaining_buffer_size(outbuf); ++ ++ SEP_DRV_LOG_TRACE("Waking up module_queue."); ++ wake_up_interruptible_sync(&BUFFER_DESC_queue(module_buf)); ++ ++ //Wait for buffers to empty ++ while (atomic_read(&flush_writers) != 0) { ++ unsigned long delay; ++ U32 res; ++ delay = msecs_to_jiffies(1000); ++ res = wait_event_interruptible_timeout( ++ flush_queue, atomic_read(&flush_writers) == 0, delay); ++ ++ if (res == ERESTARTSYS || res == 0) { ++ SEP_DRV_LOG_TRACE( ++ "Wait_event_interruptible_timeout(flush_queue): %u, %u writers.", ++ res, atomic_read(&flush_writers)); ++ continue; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE("Awakened from flush_queue."); ++ flush = 0; ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: 0."); ++ return 0; ++} ++ ++/* ++ * @fn extern void OUTPUT_Destroy() ++ * ++ * @param buffer - seed name of the output file ++ * @param len - length of the seed name ++ * @returns OS_STATUS ++ * @brief Deallocate output structures ++ * ++ * Special Notes: ++ * Free the module buffers ++ * For each CPU in the system, free the 
sampling buffers ++ */ ++int OUTPUT_Destroy(void) ++{ ++ int i, n; ++ OUTPUT outbuf; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (module_buf) { ++ outbuf = &BUFFER_DESC_outbuf(module_buf); ++ output_Free_Buffers(module_buf, ++ OUTPUT_total_buffer_size(outbuf)); ++ } ++ ++ if (cpu_buf != NULL) { ++ n = GLOBAL_STATE_num_cpus(driver_state); ++ for (i = 0; i < n; i++) { ++ outbuf = &BUFFER_DESC_outbuf(&cpu_buf[i]); ++ output_Free_Buffers(&cpu_buf[i], ++ OUTPUT_total_buffer_size(outbuf)); ++ } ++ } ++ ++ if (unc_buf != NULL) { ++ n = num_packages; ++ for (i = 0; i < n; i++) { ++ outbuf = &BUFFER_DESC_outbuf(&unc_buf[i]); ++ output_Free_Buffers(&unc_buf[i], ++ OUTPUT_total_buffer_size(outbuf)); ++ } ++ } ++ ++ if (cpu_sideband_buf != NULL) { ++ n = GLOBAL_STATE_num_cpus(driver_state); ++ for (i = 0; i < n; i++) { ++ outbuf = &BUFFER_DESC_outbuf(&cpu_sideband_buf[i]); ++ output_Free_Buffers(&cpu_sideband_buf[i], ++ OUTPUT_total_buffer_size(outbuf)); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: 0."); ++ return 0; ++} +diff --git a/drivers/platform/x86/sepdk/sep/pci.c b/drivers/platform/x86/sepdk/sep/pci.c +new file mode 100755 +index 000000000000..12a93804975c +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/pci.c +@@ -0,0 +1,661 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "rise_errors.h" ++#include "lwpmudrv_ecb.h" ++ ++#if defined(BUILD_CHIPSET) ++#include "lwpmudrv_chipset.h" ++#endif ++ ++#include "inc/lwpmudrv.h" ++#include "inc/pci.h" ++#include "inc/utility.h" ++ ++static struct pci_bus *pci_buses[MAX_BUSNO]; ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern VOID PCI_Initialize(void) ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * @brief Initializes the pci_buses array. ++ * ++ */ ++VOID PCI_Initialize(void) ++{ ++ U32 i; ++ U32 num_found_buses = 0; ++ ++ SEP_DRV_LOG_INIT_IN("Initializing pci_buses..."); ++ ++ for (i = 0; i < MAX_BUSNO; i++) { ++ pci_buses[i] = pci_find_bus(0, i); ++ if (pci_buses[i]) { ++ SEP_DRV_LOG_DETECTION("Found PCI bus 0x%x at %p.", i, ++ pci_buses[i]); ++ num_found_buses++; ++ } ++ SEP_DRV_LOG_TRACE("pci_buses[%u]: %p.", i, pci_buses[i]); ++ } ++ ++ SEP_DRV_LOG_INIT_OUT("Found %u buses.", num_found_buses); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern U32 PCI_Read_U32(bus, device, function, offset) ++ * ++ * @param bus - target bus ++ * @param device - target device ++ * @param function - target function ++ * @param offset - target register offset ++ * ++ * @return Value at this location ++ * ++ * @brief Reads a U32 from PCI configuration space ++ * ++ */ ++U32 PCI_Read_U32(U32 bus, U32 device, U32 function, U32 offset) ++{ ++ U32 res = 0; ++ U32 devfn = (device << 3) | (function & 0x7); ++ ++ SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](4B)...", bus, ++ device, function, offset); ++ ++ if (bus < MAX_BUSNO && pci_buses[bus]) { ++ pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4, ++ &res); ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "Could not read BDF(%x:%x:%x)[0x%x]: bus not found!", ++ bus, device, function, offset); ++ } ++ ++ SEP_DRV_LOG_REGISTER_OUT("Has read BDF(%x:%x:%x)[0x%x](4B): 0x%x.", bus, ++ device, function, offset, res); ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern U32 PCI_Read_U32_Valid(bus,device,function,offset,invalid_value) ++ * ++ * @param bus - target bus ++ * @param device - target device ++ * @param function - target function ++ * @param offset - target register offset ++ * @param invalid_value - value against which to compare PCI-obtained value ++ * ++ * @return Value at this location (if value != invalid_value), 0 otherwise ++ * ++ * @brief Reads a U32 from PCI configuration space ++ * ++ */ ++U32 PCI_Read_U32_Valid(U32 bus, U32 device, U32 function, U32 offset, ++ U32 invalid_value) ++{ ++ U32 res = 0; ++ U32 devfn = (device << 3) | (function & 0x7); ++ ++ SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](4B)...", bus, ++ device, function, offset); ++ ++ if (bus < MAX_BUSNO && pci_buses[bus]) { ++ pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4, ++ &res); ++ if (res == invalid_value) { ++ res = 0; ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Has read BDF(%x:%x:%x)[0x%x](4B): 0x%x(invalid value)", ++ bus, device, function, offset, res); ++ } else { ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Has read BDF(%x:%x:%x)[0x%x](4B): 0x%x.", bus, ++ device, function, offset, res); ++ } ++ } else { ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Could not read BDF(%x:%x:%x)[0x%x]: bus not found!", ++ bus, device, function, offset); ++ } ++ ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern U32 PCI_Read_U64(bus, device, function, offset) ++ * ++ * @param bus - target bus ++ * @param device - target device ++ * @param function - target function ++ * @param offset - target register offset ++ * ++ * @return Value at this location ++ * ++ * @brief Reads a U64 from PCI configuration space ++ * ++ */ ++U64 PCI_Read_U64(U32 bus, U32 device, U32 function, U32 offset) ++{ ++ U64 res = 0; ++ U32 devfn = (device << 3) | (function & 0x7); ++ ++ SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](8B)...", bus, ++ device, function, offset); ++ ++ if (bus < MAX_BUSNO && pci_buses[bus]) { ++ pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4, ++ (U32 *)&res); ++ pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset + 4, 4, ++ ((U32 *)&res) + 1); ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "Could not read BDF(%x:%x:%x)[0x%x]: bus not found!", ++ bus, device, function, offset); ++ } ++ ++ SEP_DRV_LOG_REGISTER_OUT("Has read BDF(%x:%x:%x)[0x%x](8B): 0x%llx.", ++ bus, device, function, offset, res); ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern U32 PCI_Read_U64_Valid(bus,device,function,offset,invalid_value) ++ * ++ * @param bus - target bus ++ * @param device - target device ++ * @param function - target function ++ * @param offset - target register offset ++ * @param invalid_value - value against which to compare PCI-obtained value ++ * ++ * @return Value at this location (if value != invalid_value), 0 otherwise ++ * ++ * @brief Reads a U64 from PCI configuration space ++ * ++ */ ++U64 PCI_Read_U64_Valid(U32 bus, U32 device, U32 function, U32 offset, ++ U64 invalid_value) ++{ ++ U64 res = 0; ++ U32 devfn = (device << 3) | (function & 0x7); ++ ++ SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](8B)...", bus, ++ device, function, offset); ++ ++ if (bus < MAX_BUSNO && pci_buses[bus]) { ++ pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4, ++ (U32 *)&res); ++ pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset + 4, 4, ++ ((U32 *)&res) + 1); ++ ++ if (res == invalid_value) { ++ res = 0; ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Has read BDF(%x:%x:%x)[0x%x](8B): 0x%llx(invalid val)", ++ bus, device, function, offset, res); ++ } else { ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Has read BDF(%x:%x:%x)[0x%x](8B): 0x%llx.", ++ bus, device, function, offset, res); ++ } ++ } else { ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Could not read BDF(%x:%x:%x)[0x%x]: bus not found!", ++ bus, device, function, offset); ++ } ++ ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern U32 PCI_Write_U32(bus, device, function, offset, value) ++ * ++ * @param bus - target bus ++ * @param device - target device ++ * @param function - target function ++ * @param offset - target register offset ++ * @param value - value to write ++ * ++ * @return 0 in case of success, 1 otherwise ++ * ++ * @brief Writes a U32 to PCI configuration space ++ * ++ */ ++U32 PCI_Write_U32(U32 bus, U32 device, U32 function, U32 offset, ++ U32 value) ++{ ++ U32 res = 0; ++ U32 devfn = (device << 3) | (function & 0x7); ++ ++ SEP_DRV_LOG_REGISTER_IN("Will write BDF(%x:%x:%x)[0x%x](4B): 0x%x...", ++ bus, device, function, offset, value); ++ ++ if (bus < MAX_BUSNO && pci_buses[bus]) { ++ pci_buses[bus]->ops->write(pci_buses[bus], devfn, offset, 4, ++ value); ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Has written BDF(%x:%x:%x)[0x%x](4B): 0x%x.", bus, ++ device, function, offset, value); ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "Could not write BDF(%x:%x:%x)[0x%x]: bus not found!", ++ bus, device, function, offset); ++ res = 1; ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Failed to write BDF(%x:%x:%x)[0x%x](4B): 0x%x.", bus, ++ device, function, offset, value); ++ } ++ ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern U32 PCI_Write_U64(bus, device, function, offset, value) ++ * ++ * @param bus - target bus ++ * @param device - target device ++ * @param function - target function ++ * @param offset - target register offset ++ * @param value - value to write ++ * ++ * @return 0 in case of success, 1 otherwise ++ * ++ * @brief Writes a U64 to PCI configuration space ++ * ++ */ ++U32 PCI_Write_U64(U32 bus, U32 device, U32 function, U32 offset, ++ U64 value) ++{ ++ U32 res = 0; ++ U32 devfn = (device << 3) | (function & 0x7); ++ ++ SEP_DRV_LOG_REGISTER_IN("Will write BDF(%x:%x:%x)[0x%x](8B): 0x%llx...", ++ bus, device, function, offset, value); ++ ++ if (bus < MAX_BUSNO && pci_buses[bus]) { ++ pci_buses[bus]->ops->write(pci_buses[bus], devfn, offset, 4, ++ (U32)value); ++ pci_buses[bus]->ops->write(pci_buses[bus], devfn, offset + 4, 4, ++ (U32)(value >> 32)); ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Has written BDF(%x:%x:%x)[0x%x](8B): 0x%llx.", bus, ++ device, function, offset, value); ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "Could not write BDF(%x:%x:%x)[0x%x]: bus not found!", ++ bus, device, function, offset); ++ res = 1; ++ SEP_DRV_LOG_REGISTER_OUT( ++ "Failed to write BDF(%x:%x:%x)[0x%x](8B): 0x%llx.", bus, ++ device, function, offset, value); ++ } ++ ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern int PCI_Read_From_Memory_Address(addr, val) ++ * ++ * @param addr - physical address in mmio ++ * @param *value - value at this address ++ * ++ * @return status ++ * ++ * @brief Read memory mapped i/o physical location ++ * ++ */ ++int PCI_Read_From_Memory_Address(U32 addr, U32 *val) ++{ ++ U32 aligned_addr, offset, value; ++ PVOID base; ++ ++ SEP_DRV_LOG_TRACE_IN("Addr: %x, val_pointer: %p.", addr, val); ++ ++ if (addr <= 0) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (addr <= 0!)."); ++ return OS_INVALID; ++ } ++ ++ SEP_DRV_LOG_TRACE("Preparing to reading physical address: %x.", addr); ++ offset = addr & ~PAGE_MASK; ++ aligned_addr = addr & PAGE_MASK; ++ SEP_DRV_LOG_TRACE("Aligned physical address: %x, offset: %x.", ++ aligned_addr, offset); ++ ++ base = (PVOID)ioremap_nocache(aligned_addr, PAGE_SIZE); ++ if (base == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (mapping failed!)."); ++ return OS_INVALID; ++ } ++ ++ SEP_DRV_LOG_REGISTER_IN("Will read PCI address %u (mapped at %p).", ++ addr, base + offset); ++ value = readl((void __iomem *)(base + offset)); ++ SEP_DRV_LOG_REGISTER_OUT("Read PCI address %u (mapped at %p): %x.", ++ addr, base + offset, value); ++ ++ *val = value; ++ ++ iounmap((void __iomem *)base); ++ ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern int PCI_Write_To_Memory_Address(addr, val) ++ * ++ * @param addr - physical address in mmio ++ * @param value - value to be written ++ * ++ * @return status ++ * ++ * @brief Write to memory mapped i/o physical location ++ * ++ */ ++int PCI_Write_To_Memory_Address(U32 addr, U32 val) ++{ ++ U32 aligned_addr, offset; ++ PVOID base; ++ ++ SEP_DRV_LOG_TRACE_IN("Addr: %x, val: %x.", addr, val); ++ ++ if (addr <= 0) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (addr <= 0!)."); ++ return OS_INVALID; ++ } ++ ++ SEP_DRV_LOG_TRACE( ++ "Preparing to writing physical address: %x (val: %x).", addr, ++ val); ++ offset = addr & ~PAGE_MASK; ++ aligned_addr = addr & PAGE_MASK; ++ SEP_DRV_LOG_TRACE("Aligned physical address: %x, offset: %x (val: %x).", ++ aligned_addr, offset, val); ++ ++ base = (PVOID)ioremap_nocache(aligned_addr, PAGE_SIZE); ++ if (base == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (mapping failed!)."); ++ return OS_INVALID; ++ } ++ ++ SEP_DRV_LOG_REGISTER_IN("Will write PCI address %u (mapped at %p): %x.", ++ addr, base + offset, val); ++ writel(val, (void __iomem *)(base + offset)); ++ SEP_DRV_LOG_REGISTER_OUT("Wrote PCI address %u (mapped at %p): %x.", ++ addr, base + offset, val); ++ ++ iounmap((void __iomem *)base); ++ ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern U32 PCI_Map_Memory(SEP_MMIO_NODE *node, U64 phy_address, ++ * U64 map_size) ++ * ++ * @param node - MAP NODE to use ++ * @param phy_address - Address to be mapped ++ * @param map_size - Amount of memory to map (has to be a multiple of 4k) ++ * ++ * @return OS_SUCCESS or OS_INVALID ++ * ++ * @brief Maps a physical address to a virtual address ++ * ++ */ ++OS_STATUS PCI_Map_Memory(SEP_MMIO_NODE *node, U64 phy_address, ++ U32 map_size) ++{ ++ U8 *res; ++ ++ SEP_DRV_LOG_INIT_IN("Node: %p, phy_address: %llx, map_size: %u.", node, ++ phy_address, map_size); ++ ++ if (!node || !phy_address || !map_size || (phy_address & 4095)) { ++ SEP_DRV_LOG_ERROR_INIT_OUT("Invalid parameters, aborting!"); ++ return OS_INVALID; ++ } ++ ++ res = (U8 *)ioremap_nocache(phy_address, map_size); ++ if (!res) { ++ SEP_DRV_LOG_ERROR_INIT_OUT("Map operation failed!"); ++ return OS_INVALID; ++ } ++ ++ SEP_MMIO_NODE_physical_address(node) = (UIOP)phy_address; ++ SEP_MMIO_NODE_virtual_address(node) = (UIOP)res; ++ SEP_MMIO_NODE_map_token(node) = (UIOP)res; ++ SEP_MMIO_NODE_size(node) = map_size; ++ ++ SEP_DRV_LOG_INIT_OUT("Addr:0x%llx->0x%llx, tok:0x%llx, sz:%u.", ++ SEP_MMIO_NODE_physical_address(node), ++ SEP_MMIO_NODE_virtual_address(node), ++ SEP_MMIO_NODE_map_token(node), ++ SEP_MMIO_NODE_size(node)); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern void PCI_Unmap_Memory(SEP_MMIO_NODE *node) ++ * ++ * @param node - memory map node to clean up ++ * ++ * @return none ++ * ++ * @brief Unmaps previously mapped memory ++ * ++ */ ++void PCI_Unmap_Memory(SEP_MMIO_NODE *node) ++{ ++ SEP_DRV_LOG_INIT_IN("Unmapping node %p.", node); ++ ++ if (node) { ++ if (SEP_MMIO_NODE_size(node)) { ++ SEP_DRV_LOG_TRACE( ++ "Unmapping token 0x%llx (0x%llx->0x%llx)[%uB].", ++ SEP_MMIO_NODE_map_token(node), ++ SEP_MMIO_NODE_physical_address(node), ++ SEP_MMIO_NODE_virtual_address(node), ++ SEP_MMIO_NODE_size(node)); ++ iounmap((void __iomem *)(UIOP)SEP_MMIO_NODE_map_token(node)); ++ SEP_MMIO_NODE_size(node) = 0; ++ SEP_MMIO_NODE_map_token(node) = 0; ++ SEP_MMIO_NODE_virtual_address(node) = 0; ++ SEP_MMIO_NODE_physical_address(node) = 0; ++ } else { ++ SEP_DRV_LOG_TRACE("Already unmapped."); ++ } ++ } ++ ++ SEP_DRV_LOG_INIT_OUT("Unmapped node %p.", node); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U32 PCI_MMIO_Read_U32(virtual_address_base, offset) ++ * ++ * @param virtual_address_base - Virtual address base ++ * @param offset - Register offset ++ * ++ * @return U32 read from an MMIO register ++ * ++ * @brief Reads U32 value from MMIO ++ * ++ */ ++U32 PCI_MMIO_Read_U32(U64 virtual_address_base, U32 offset) ++{ ++ U32 temp_u32 = 0LL; ++ U32 *computed_address; ++ ++ computed_address = ++ (U32 *)(((char *)(UIOP)virtual_address_base) + offset); ++ ++ SEP_DRV_LOG_REGISTER_IN("Will read U32(0x%llx + 0x%x = 0x%p).", ++ virtual_address_base, offset, computed_address); ++ ++ if (!virtual_address_base) { ++ SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!", ++ virtual_address_base, offset, ++ computed_address); ++ temp_u32 = 0; ++ } else { ++ temp_u32 = *computed_address; ++ } ++ ++ SEP_DRV_LOG_REGISTER_OUT("Has read U32(0x%llx + 0x%x): 0x%x.", ++ virtual_address_base, offset, temp_u32); ++ return temp_u32; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 PCI_MMIO_Read_U64(virtual_address_base, offset) ++ * ++ * @param virtual_address_base - Virtual address base ++ * @param offset - Register offset ++ * ++ * @return U64 read from an MMIO register ++ * ++ * @brief Reads U64 value from MMIO ++ * ++ */ ++U64 PCI_MMIO_Read_U64(U64 virtual_address_base, U32 offset) ++{ ++ U64 temp_u64 = 0LL; ++ U64 *computed_address; ++ ++ computed_address = ++ (U64 *)(((char *)(UIOP)virtual_address_base) + offset); ++ ++ SEP_DRV_LOG_REGISTER_IN("Will read U64(0x%llx + 0x%x = 0x%p).", ++ virtual_address_base, offset, computed_address); ++ ++ if (!virtual_address_base) { ++ SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!", ++ virtual_address_base, offset, ++ computed_address); ++ temp_u64 = 0; ++ } else { ++ temp_u64 = *computed_address; ++ } ++ ++ SEP_DRV_LOG_REGISTER_OUT("Has read U64(0x%llx + 0x%x): 0x%llx.", ++ virtual_address_base, offset, temp_u64); ++ return temp_u64; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn void PCI_MMIO_Write_U32(virtual_address_base, offset, value) ++ * ++ * @param virtual_address_base - Virtual address base ++ * @param offset - Register offset ++ * @param value - Value to write ++ * ++ * @return U32 write into an MMIO register ++ * ++ * @brief Writes U32 value to MMIO ++ * ++ */ ++void PCI_MMIO_Write_U32(U64 virtual_address_base, U32 offset, U32 value) ++{ ++ U32 *computed_address; ++ ++ computed_address = ++ (U32 *)(((char *)(UIOP)virtual_address_base) + offset); ++ ++ SEP_DRV_LOG_REGISTER_IN("Writing 0x%x to U32(0x%llx + 0x%x = 0x%p).", ++ value, virtual_address_base, offset, ++ computed_address); ++ ++ if (!virtual_address_base) { ++ SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!", ++ virtual_address_base, offset, ++ computed_address); ++ } else { ++ *computed_address = value; ++ } ++ ++ SEP_DRV_LOG_REGISTER_OUT("Has written 0x%x to U32(0x%llx + 0x%x).", ++ value, virtual_address_base, offset); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn void PCI_MMIO_Write_U64(virtual_address_base, offset, value) ++ * ++ * @param virtual_address_base - Virtual address base ++ * @param offset - Register offset ++ * @param value - Value to write ++ * ++ * @return U64 write into an MMIO register ++ * ++ * @brief Writes U64 value to MMIO ++ * ++ */ ++void PCI_MMIO_Write_U64(U64 virtual_address_base, U32 offset, U64 value) ++{ ++ U64 *computed_address; ++ ++ computed_address = ++ (U64 *)(((char *)(UIOP)virtual_address_base) + offset); ++ ++ SEP_DRV_LOG_REGISTER_IN("Writing 0x%llx to U64(0x%llx + 0x%x = 0x%p).", ++ value, virtual_address_base, offset, ++ computed_address); ++ ++ if (!virtual_address_base) { ++ SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!", ++ virtual_address_base, offset, ++ computed_address); ++ } else { ++ *computed_address = value; ++ } ++ ++ SEP_DRV_LOG_REGISTER_OUT("Has written 0x%llx to U64(0x%llx + 0x%x).", ++ value, virtual_address_base, offset); ++} +diff --git a/drivers/platform/x86/sepdk/sep/pebs.c b/drivers/platform/x86/sepdk/sep/pebs.c +new file mode 100755 +index 000000000000..7537c1136ec4 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/pebs.c +@@ -0,0 +1,1957 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++#include "lwpmudrv.h" ++#include "control.h" ++#include "core2.h" ++#include "utility.h" ++#include "output.h" ++#include "ecb_iterators.h" ++#include "pebs.h" ++ ++#if defined(DRV_USE_KAISER) ++#include ++#include ++int (*local_kaiser_add_mapping)(unsigned long, unsigned long, ++ unsigned long) = NULL; ++void (*local_kaiser_remove_mapping)(unsigned long, unsigned long) = NULL; ++#elif defined(DRV_USE_PTI) ++#include ++#include ++#include ++#include ++#include ++static void (*local_cea_set_pte)(void *cea_vaddr, phys_addr_t pa, ++ pgprot_t flags) = NULL; ++static void (*local_do_kernel_range_flush)(void *info) = NULL; ++static DEFINE_PER_CPU(PVOID, dts_buffer_cea); ++#endif ++ ++static PVOID pebs_global_memory; ++static size_t pebs_global_memory_size; ++ ++extern DRV_CONFIG drv_cfg; ++extern DRV_SETUP_INFO_NODE req_drv_setup_info; ++ ++#if defined(DRV_USE_PTI) ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID pebs_Update_CEA (S32) ++ * ++ * @brief Flush the TLB entries related to PEBS buffer in cpu entry area ++ * ++ * @param this_cpu current cpu ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ */ ++static VOID pebs_Update_CEA(S32 this_cpu) ++{ ++ unsigned long cea_start_addr; ++ unsigned long cea_end_addr; ++ ++ SEP_DRV_LOG_TRACE_IN("This_cpu: %d.", this_cpu); ++ ++ if (per_cpu(dts_buffer_cea, this_cpu) != 0) { ++ cea_start_addr = ++ (unsigned long)per_cpu(dts_buffer_cea, this_cpu); ++ cea_end_addr = cea_start_addr + ++ (unsigned long)CPU_STATE_dts_buffer_size( ++ &pcb[this_cpu]); ++ if (local_do_kernel_range_flush) { ++ struct flush_tlb_info info; ++ info.start = cea_start_addr; ++ info.end = cea_end_addr; ++ local_do_kernel_range_flush(&info); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID pebs_Corei7_Initialize_Threshold ++ * (dts, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) ++ * ++ * @brief The nehalem specific initialization ++ * ++ * @param dts - dts description ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ */ ++static VOID pebs_Corei7_Initialize_Threshold(DTS_BUFFER_EXT dts) ++{ ++ U32 this_cpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Dts: %p.", dts); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ DTS_BUFFER_EXT_pebs_threshold(dts) = ++ DTS_BUFFER_EXT_pebs_base(dts) + ++ (LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) * ++ (U64)DEV_CONFIG_pebs_record_num(pcfg)); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID pebs_Corei7_Overflow () ++ * ++ * @brief The Nehalem specific overflow check ++ * ++ * @param this_cpu - cpu id ++ * overflow_status - overflow status ++ * rec_index - record index ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Check the global overflow field of the buffer descriptor. ++ * Precise events can be allocated on any of the 4 general purpose ++ * registers. ++ */ ++static U64 pebs_Corei7_Overflow(S32 this_cpu, U64 overflow_status, ++ U32 rec_index) ++{ ++ DTS_BUFFER_EXT dtes; ++ S8 *pebs_base, *pebs_index, *pebs_ptr; ++ PEBS_REC_EXT pb; ++ U8 pebs_ptr_check = FALSE; ++ U32 dev_idx = core_to_dev_map[this_cpu]; ++ ++ SEP_DRV_LOG_TRACE_IN( ++ "This_cpu: %d, overflow_status: %llx, rec_index: %u.", this_cpu, ++ overflow_status, rec_index); ++ ++ dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ SEP_DRV_LOG_TRACE("This_cpu: %d, dtes %p.", this_cpu, dtes); ++ ++ if (!dtes) { ++ return overflow_status; ++ } ++ pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); ++ SEP_DRV_LOG_TRACE("This_cpu: %d, pebs_base %p.", this_cpu, pebs_base); ++ pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); ++ pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + ++ ((UIOP)rec_index * ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); ++ pebs_ptr_check = ++ (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); ++ if (pebs_ptr_check) { ++ pb = (PEBS_REC_EXT)pebs_ptr; ++ overflow_status |= PEBS_REC_EXT_glob_perf_overflow(pb); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status); ++ return overflow_status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID pebs_Corei7_Overflow_APEBS () ++ * ++ * @brief Overflow check ++ * ++ * @param this_cpu - cpu id ++ * overflow_status - overflow status ++ * rec_index - record index ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Check the global overflow field of the buffer descriptor. 
++ * Precise events can be allocated on any of the 8 general purpose ++ * registers or 4 fixed registers. ++ */ ++static U64 pebs_Corei7_Overflow_APEBS(S32 this_cpu, U64 overflow_status, ++ U32 rec_index) ++{ ++ S8 *pebs_base, *pebs_index, *pebs_ptr; ++ ADAPTIVE_PEBS_BASIC_INFO pb; ++ DTS_BUFFER_EXT1 dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ U8 pebs_ptr_check = FALSE; ++ U32 dev_idx = core_to_dev_map[this_cpu]; ++ DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!dtes) { ++ return overflow_status; ++ } ++ pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_base(dtes); ++ pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_index(dtes); ++ pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT1_pebs_base(dtes) + ++ ((UIOP)rec_index * ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); ++ pebs_ptr_check = ++ (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); ++ ++ if (pebs_ptr_check && DEV_CONFIG_enable_adaptive_pebs(pcfg)) { ++ pb = (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr; ++ overflow_status |= ++ ADAPTIVE_PEBS_BASIC_INFO_applicable_counters(pb); ++ } ++ ++ return overflow_status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID pebs_Core2_Initialize_Threshold ++ * (dts, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) ++ * ++ * @brief The Core2 specific initialization ++ * ++ * @param dts - dts description ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ */ ++static VOID pebs_Core2_Initialize_Threshold(DTS_BUFFER_EXT dts) ++{ ++ SEP_DRV_LOG_TRACE_IN("Dts: %p.", dts); ++ ++ DTS_BUFFER_EXT_pebs_threshold(dts) = DTS_BUFFER_EXT_pebs_base(dts); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID pebs_Core2_Overflow ++ * (dts, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) ++ * ++ * @brief The Core2 specific overflow check ++ * ++ * @param this_cpu - cpu id ++ * overflow_status - overflow status ++ * rec_index - record index ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Check the base and the index fields of the circular buffer, if they are ++ * not the same, then a precise event has overflowed. Precise events are ++ * allocated only on register#0. ++ */ ++static U64 pebs_Core2_Overflow(S32 this_cpu, U64 overflow_status, U32 rec_index) ++{ ++ DTS_BUFFER_EXT dtes; ++ U8 status = FALSE; ++ ++ SEP_DRV_LOG_TRACE_IN( ++ "This_cpu: %d, overflow_status: %llx, rec_index: %u.", this_cpu, ++ overflow_status, rec_index); ++ ++ dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ if (!dtes) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Res: %llx (dtes is NULL!).", ++ overflow_status); ++ return overflow_status; ++ } ++ status = (U8)((dtes) && (DTS_BUFFER_EXT_pebs_index(dtes) != ++ DTS_BUFFER_EXT_pebs_base(dtes))); ++ if (status) { ++ // Merom allows only for GP register 0 to be precise capable ++ overflow_status |= 0x1; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status); ++ return overflow_status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID pebs_Modify_IP (sample, is_64bit_addr) ++ * ++ * @brief Change the IP field in the sample to that in the PEBS record ++ * ++ * @param sample - sample buffer ++ * @param is_64bit_addr - are we in a 64 bit module ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * ++ */ ++static VOID pebs_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index) ++{ ++ SampleRecordPC *psamp = sample; ++ DTS_BUFFER_EXT dtes; ++ S8 *pebs_base, *pebs_index, *pebs_ptr; ++ PEBS_REC_EXT pb; ++ U8 pebs_ptr_check = FALSE; ++ U32 this_cpu; ++ U32 dev_idx; ++ ++ SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", ++ sample, is_64bit_addr, rec_index); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ if (!dtes || !psamp) { ++ return; ++ } ++ SEP_DRV_LOG_TRACE("In PEBS Fill Buffer: cpu %d.", CONTROL_THIS_CPU()); ++ pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); ++ pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); ++ pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + ++ ((UIOP)rec_index * ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); ++ pebs_ptr_check = ++ (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); ++ if (pebs_ptr_check) { ++ pb = (PEBS_REC_EXT)pebs_ptr; ++ if (is_64bit_addr) { ++ SAMPLE_RECORD_iip(psamp) = PEBS_REC_EXT_linear_ip(pb); ++ SAMPLE_RECORD_ipsr(psamp) = PEBS_REC_EXT_r_flags(pb); ++ } else { ++ SAMPLE_RECORD_eip(psamp) = ++ PEBS_REC_EXT_linear_ip(pb) & 0xFFFFFFFF; ++ SAMPLE_RECORD_eflags(psamp) = ++ PEBS_REC_EXT_r_flags(pb) & 0xFFFFFFFF; ++ } ++ } ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID pebs_Modify_IP_With_Eventing_IP (sample, is_64bit_addr) ++ * ++ * @brief Change the IP field in the sample to that in the PEBS record ++ * ++ * @param sample - sample buffer ++ * @param is_64bit_addr - are we in a 64 bit module ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * ++ */ ++static VOID pebs_Modify_IP_With_Eventing_IP(void *sample, ++ DRV_BOOL is_64bit_addr, ++ U32 rec_index) ++{ ++ SampleRecordPC *psamp = sample; ++ DTS_BUFFER_EXT dtes; ++ S8 *pebs_ptr, *pebs_base, *pebs_index; ++ U64 ip = 0, flags = 0; ++ U8 pebs_ptr_check = FALSE; ++ U32 this_cpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", ++ sample, is_64bit_addr, rec_index); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ if (!dtes || !psamp) { ++ return; ++ } ++ ++ pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); ++ pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); ++ pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + ++ ((UIOP)rec_index * ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); ++ pebs_ptr_check = ++ (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); ++ ++ if (!pebs_ptr_check) { ++ return; ++ } ++ if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { ++ ip = ADAPTIVE_PEBS_BASIC_INFO_eventing_ip( ++ (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr); ++ if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { ++ flags = ADAPTIVE_PEBS_GPR_INFO_rflags(( ++ ADAPTIVE_PEBS_GPR_INFO)( ++ pebs_ptr + LWPMU_DEVICE_apebs_gpr_offset( ++ &devices[dev_idx]))); ++ } ++ } else { ++ ip = PEBS_REC_EXT1_eventing_ip((PEBS_REC_EXT1)pebs_ptr); ++ flags = PEBS_REC_EXT1_r_flags((PEBS_REC_EXT1)pebs_ptr); ++ } ++ if (is_64bit_addr) { ++ SAMPLE_RECORD_iip(psamp) = ip; ++ SAMPLE_RECORD_ipsr(psamp) = flags; ++ } else { ++ SAMPLE_RECORD_eip(psamp) = ip & 0xFFFFFFFF; ++ SAMPLE_RECORD_eflags(psamp) = flags 
& 0xFFFFFFFF; ++ } ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID pebs_Modify_TSC (sample) ++ * ++ * @brief Change the TSC field in the sample to that in the PEBS record ++ * ++ * @param sample - sample buffer ++ * rec_index - record index ++ * @return NONE ++ * ++ * Special Notes: ++ * ++ */ ++static VOID pebs_Modify_TSC(void *sample, U32 rec_index) ++{ ++ SampleRecordPC *psamp = sample; ++ DTS_BUFFER_EXT dtes; ++ S8 *pebs_base, *pebs_index, *pebs_ptr; ++ U64 tsc; ++ U8 pebs_ptr_check = FALSE; ++ U32 this_cpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Sample: %p, rec_index: %u.", sample, rec_index); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ if (!dtes || !psamp) { ++ return; ++ } ++ pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); ++ pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); ++ pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + ++ ((UIOP)rec_index * ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); ++ pebs_ptr_check = ++ (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); ++ if (!pebs_ptr_check) { ++ return; ++ } ++ ++ if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { ++ tsc = ADAPTIVE_PEBS_BASIC_INFO_tsc( ++ (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr); ++ } else { ++ tsc = PEBS_REC_EXT2_tsc((PEBS_REC_EXT2)pebs_ptr); ++ } ++ SAMPLE_RECORD_tsc(psamp) = tsc; ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U32 pebs_Get_Num_Records_Filled () ++ * ++ * @brief get number of PEBS records filled in PEBS buffer ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * ++ */ ++static U32 pebs_Get_Num_Records_Filled(VOID) ++{ ++ U32 num = 0; ++ DTS_BUFFER_EXT dtes; ++ S8 *pebs_base, *pebs_index; ++ U32 this_cpu; ++ U32 dev_idx; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ if (!dtes) { ++ return num; ++ } ++ pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); ++ pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); ++ if (pebs_base != pebs_index) { ++ num = (U32)(pebs_index - pebs_base) / ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", num); ++ return num; ++} ++ ++/* ++ * Initialize the pebs micro dispatch tables ++ */ ++PEBS_DISPATCH_NODE core2_pebs = { ++ .initialize_threshold = pebs_Core2_Initialize_Threshold, ++ .overflow = pebs_Core2_Overflow, ++ .modify_ip = pebs_Modify_IP, ++ .modify_tsc = NULL, ++ .get_num_records_filled = pebs_Get_Num_Records_Filled ++}; ++ ++PEBS_DISPATCH_NODE core2p_pebs = { ++ .initialize_threshold = pebs_Corei7_Initialize_Threshold, ++ .overflow = pebs_Core2_Overflow, ++ .modify_ip = pebs_Modify_IP, ++ .modify_tsc = NULL, ++ .get_num_records_filled = pebs_Get_Num_Records_Filled ++}; ++ ++PEBS_DISPATCH_NODE corei7_pebs = { ++ .initialize_threshold = pebs_Corei7_Initialize_Threshold, ++ .overflow = pebs_Corei7_Overflow, ++ .modify_ip = pebs_Modify_IP, ++ .modify_tsc = NULL, ++ .get_num_records_filled = pebs_Get_Num_Records_Filled ++}; ++ ++PEBS_DISPATCH_NODE haswell_pebs = { ++ .initialize_threshold = pebs_Corei7_Initialize_Threshold, ++ .overflow = pebs_Corei7_Overflow, ++ .modify_ip = pebs_Modify_IP_With_Eventing_IP, ++ .modify_tsc = NULL, ++ .get_num_records_filled = pebs_Get_Num_Records_Filled ++}; ++ ++PEBS_DISPATCH_NODE perfver4_pebs = { ++ 
.initialize_threshold = pebs_Corei7_Initialize_Threshold, ++ .overflow = pebs_Corei7_Overflow, ++ .modify_ip = pebs_Modify_IP_With_Eventing_IP, ++ .modify_tsc = pebs_Modify_TSC, ++ .get_num_records_filled = pebs_Get_Num_Records_Filled ++}; ++ ++// adaptive PEBS ++PEBS_DISPATCH_NODE perfver4_apebs = { ++ .initialize_threshold = pebs_Corei7_Initialize_Threshold, ++ .overflow = pebs_Corei7_Overflow_APEBS, ++ .modify_ip = pebs_Modify_IP_With_Eventing_IP, ++ .modify_tsc = pebs_Modify_TSC, ++ .get_num_records_filled = pebs_Get_Num_Records_Filled ++}; ++ ++#define PER_CORE_BUFFER_SIZE(dts_size, record_size, record_num) \ ++ (dts_size + (record_num + 1) * (record_size) + 64) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID* pebs_Alloc_DTS_Buffer (VOID) ++ * ++ * @brief Allocate buffers used for latency and pebs sampling ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Allocate the memory needed to hold the DTS and PEBS records buffer. ++ * This routine is called by a thread that corresponds to a single core ++ */ ++static VOID *pebs_Alloc_DTS_Buffer(VOID) ++{ ++ UIOP pebs_base; ++ U32 dts_size; ++ PVOID dts_buffer = NULL; ++ DTS_BUFFER_EXT dts; ++ int this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ PEBS_DISPATCH pebs_dispatch; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ /* ++ * one PEBS record... 
need 2 records so that ++ * threshold can be less than absolute max ++ */ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ dts_size = sizeof(DTS_BUFFER_EXT_NODE); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); ++ ++ if (DEV_CONFIG_enable_adaptive_pebs(pcfg) || ++ DEV_CONFIG_collect_fixed_counter_pebs(pcfg)) { ++ dts_size = sizeof(DTS_BUFFER_EXT1_NODE); ++ } ++ ++ /* ++ * account for extra bytes to align PEBS base to cache line boundary ++ */ ++ if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == ++ DRV_SETUP_INFO_PTI_KPTI) { ++#if defined(DRV_USE_PTI) && defined(CONFIG_CPU_SUP_INTEL) ++ struct page *page; ++ U32 buffer_size; ++ ++ SEP_DRV_LOG_INIT("Allocating PEBS buffer using KPTI approach."); ++ buffer_size = (PER_CORE_BUFFER_SIZE( ++ dts_size, ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]), ++ DEV_CONFIG_pebs_record_num(pcfg)) / ++ PAGE_SIZE + ++ 1) * ++ PAGE_SIZE; ++ if (buffer_size > PEBS_BUFFER_SIZE) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Can't allocate more buffer than CEA allows!"); ++ return NULL; ++ } ++ ++ page = __alloc_pages_node(cpu_to_node(this_cpu), ++ GFP_ATOMIC | __GFP_ZERO, ++ get_order(buffer_size)); ++ if (!page) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "NULL (failed to allocate space for DTS buffer!)."); ++ return NULL; ++ } ++ dts_buffer = page_address(page); ++ per_cpu(dts_buffer_cea, this_cpu) = ++ &get_cpu_entry_area(this_cpu) ++ ->cpu_debug_buffers.pebs_buffer; ++ if (!per_cpu(dts_buffer_cea, this_cpu)) { ++ if (dts_buffer) { ++ free_pages((unsigned long)dts_buffer, ++ get_order(buffer_size)); ++ } ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "CEA pebs_buffer ptr is NULL!"); ++ return NULL; ++ } ++ ++ CPU_STATE_dts_buffer(pcpu) = dts_buffer; ++ CPU_STATE_dts_buffer_size(pcpu) = buffer_size; ++ ++ if (local_cea_set_pte) { ++ size_t idx; ++ phys_addr_t phys_addr; ++ PVOID 
cea_ptr = per_cpu(dts_buffer_cea, this_cpu); ++ ++ phys_addr = virt_to_phys(dts_buffer); ++ ++ preempt_disable(); ++ for (idx = 0; idx < buffer_size; idx += PAGE_SIZE, ++ phys_addr += PAGE_SIZE, cea_ptr += PAGE_SIZE) { ++ local_cea_set_pte(cea_ptr, phys_addr, ++ PAGE_KERNEL); ++ } ++ pebs_Update_CEA(this_cpu); ++ preempt_enable(); ++ } ++ pebs_base = ++ (UIOP)(per_cpu(dts_buffer_cea, this_cpu)) + dts_size; ++ SEP_DRV_LOG_TRACE("This_cpu: %d, pebs_base %p.", this_cpu, ++ pebs_base); ++ ++ dts = (DTS_BUFFER_EXT)(per_cpu(dts_buffer_cea, this_cpu)); ++#else ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "KPTI is enabled without PAGE_TABLE_ISOLATION kernel configuration!"); ++ return NULL; ++#endif ++ } else { ++ dts_buffer = (char *)pebs_global_memory + ++ CPU_STATE_dts_buffer_offset(pcpu); ++ if (!dts_buffer) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "NULL (failed to allocate space for DTS buffer!)."); ++ return NULL; ++ } ++ pebs_base = (UIOP)(dts_buffer) + dts_size; ++ ++ CPU_STATE_dts_buffer(pcpu) = dts_buffer; ++ CPU_STATE_dts_buffer_size(pcpu) = PER_CORE_BUFFER_SIZE( ++ dts_size, ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]), ++ DEV_CONFIG_pebs_record_num(pcfg)); ++ ++ // Make 32 byte aligned ++ if ((pebs_base & 0x000001F) != 0x0) { ++ pebs_base = ALIGN_32(pebs_base); ++ } ++ ++ dts = (DTS_BUFFER_EXT)dts_buffer; ++ } ++ ++ /* ++ * Program the DTES Buffer for Precise EBS. 
++ * Set PEBS buffer for one PEBS record ++ */ ++ DTS_BUFFER_EXT_base(dts) = 0; ++ DTS_BUFFER_EXT_index(dts) = 0; ++ DTS_BUFFER_EXT_max(dts) = 0; ++ DTS_BUFFER_EXT_threshold(dts) = 0; ++ DTS_BUFFER_EXT_pebs_base(dts) = pebs_base; ++ DTS_BUFFER_EXT_pebs_index(dts) = pebs_base; ++ DTS_BUFFER_EXT_pebs_max(dts) = pebs_base + ++ ((UIOP)DEV_CONFIG_pebs_record_num(pcfg) + 1) * ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]); ++ ++ pebs_dispatch->initialize_threshold(dts); ++ ++ SEP_DRV_LOG_TRACE("base --- %llx.", DTS_BUFFER_EXT_pebs_base(dts)); ++ SEP_DRV_LOG_TRACE("index --- %llu.", DTS_BUFFER_EXT_pebs_index(dts)); ++ SEP_DRV_LOG_TRACE("max --- %llu.", DTS_BUFFER_EXT_pebs_max(dts)); ++ SEP_DRV_LOG_TRACE("threahold --- %llu.", ++ DTS_BUFFER_EXT_pebs_threshold(dts)); ++ SEP_DRV_LOG_TRACE("DTES buffer allocated for PEBS: %p.", dts_buffer); ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %p.", dts_buffer); ++ return dts; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID* pebs_Allocate_Buffers (VOID *params) ++ * ++ * @brief Allocate memory and set up MSRs in preparation for PEBS ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Set up the DS area and program the DS_AREA msrs in preparation ++ * for a PEBS run. Save away the old value in the DS_AREA. ++ * This routine is called via the parallel thread call. 
++ */ ++static VOID pebs_Allocate_Buffers(VOID *params) ++{ ++ U64 value; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ PVOID dts_ptr = NULL; ++ ++ SEP_DRV_LOG_TRACE_IN("Params: %p.", params); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!DEV_CONFIG_pebs_mode(pcfg)) { ++ return; ++ } ++ ++ SYS_Write_MSR(IA32_PEBS_ENABLE, 0LL); ++ value = SYS_Read_MSR(IA32_MISC_ENABLE); ++ if ((value & 0x80) && !(value & 0x1000)) { ++ CPU_STATE_old_dts_buffer(pcpu) = ++ (PVOID)(UIOP)SYS_Read_MSR(IA32_DS_AREA); ++ dts_ptr = pebs_Alloc_DTS_Buffer(); ++ if (!dts_ptr) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("dts_ptr is NULL!"); ++ return; ++ } ++ SEP_DRV_LOG_TRACE("Old dts buffer - %p.", ++ CPU_STATE_old_dts_buffer(pcpu)); ++ SEP_DRV_LOG_TRACE("New dts buffer - %p.", dts_ptr); ++ SYS_Write_MSR(IA32_DS_AREA, (U64)(UIOP)dts_ptr); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID pebs_Dellocate_Buffers (VOID *params) ++ * ++ * @brief Clean up PEBS buffers and restore older values into the DS_AREA ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Clean up the DS area and all restore state prior to the sampling run ++ * This routine is called via the parallel thread call. 
++ */ ++static VOID pebs_Deallocate_Buffers(VOID *params) ++{ ++ CPU_STATE pcpu; ++ U32 this_cpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Params: %p.", params); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!DEV_CONFIG_pebs_mode(pcfg)) { ++ return; ++ } ++ ++ SEP_DRV_LOG_TRACE("Entered deallocate buffers."); ++ SYS_Write_MSR(IA32_DS_AREA, (U64)(UIOP)CPU_STATE_old_dts_buffer(pcpu)); ++ ++ if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == ++ DRV_SETUP_INFO_PTI_KPTI) { ++#if defined(DRV_USE_PTI) ++ SEP_DRV_LOG_INIT("Freeing PEBS buffer using KPTI approach."); ++ ++ if (local_cea_set_pte) { ++ size_t idx; ++ PVOID cea_ptr = per_cpu(dts_buffer_cea, this_cpu); ++ preempt_disable(); ++ for (idx = 0; idx < CPU_STATE_dts_buffer_size(pcpu); ++ idx += PAGE_SIZE, cea_ptr += PAGE_SIZE) { ++ local_cea_set_pte(cea_ptr, 0, PAGE_KERNEL); ++ } ++ pebs_Update_CEA(this_cpu); ++ preempt_enable(); ++ } ++ ++ if (CPU_STATE_dts_buffer(pcpu)) { ++ free_pages((unsigned long)CPU_STATE_dts_buffer(pcpu), ++ get_order(CPU_STATE_dts_buffer_size(pcpu))); ++ CPU_STATE_dts_buffer(pcpu) = NULL; ++ } ++#endif ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn U64 PEBS_Overflowed (this_cpu, overflow_status) ++ * ++ * @brief Figure out if the PEBS event caused an overflow ++ * ++ * @param this_cpu -- the current cpu ++ * overflow_status -- current value of the global overflow status ++ * ++ * @return updated overflow_status ++ * ++ * Special Notes: ++ * Figure out if the PEBS area has data that need to be transferred ++ * to the output sample. ++ * Update the overflow_status that is passed and return this value. 
++ * The overflow_status defines the events/status to be read ++ */ ++U64 PEBS_Overflowed(S32 this_cpu, U64 overflow_status, U32 rec_index) ++{ ++ U64 res; ++ U32 dev_idx; ++ PEBS_DISPATCH pebs_dispatch; ++ ++ SEP_DRV_LOG_TRACE_IN( ++ "This_cpu: %d, overflow_status: %llx, rec_index: %u.", this_cpu, ++ overflow_status, rec_index); ++ ++ dev_idx = core_to_dev_map[this_cpu]; ++ pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); ++ ++ res = pebs_dispatch->overflow(this_cpu, overflow_status, rec_index); ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status); ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID PEBS_Reset_Index (this_cpu) ++ * ++ * @brief Reset the PEBS index pointer ++ * ++ * @param this_cpu -- the current cpu ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * reset index to next PEBS record to base of buffer ++ */ ++VOID PEBS_Reset_Index(S32 this_cpu) ++{ ++ DTS_BUFFER_EXT dtes; ++ ++ SEP_DRV_LOG_TRACE_IN("This_cpu: %d.", this_cpu); ++ ++ dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ if (!dtes) { ++ return; ++ } ++ SEP_DRV_LOG_TRACE("PEBS Reset Index: %d.", this_cpu); ++ DTS_BUFFER_EXT_pebs_index(dtes) = DTS_BUFFER_EXT_pebs_base(dtes); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++extern U32 pmi_Get_CSD(U32, U32 *, U32 *); ++#define EFLAGS_V86_MASK 0x00020000L ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID PEBS_Flush_Buffer (VOID * param) ++ * ++ * @brief generate sampling records from PEBS records in PEBS buffer ++ * ++ * @param param -- not used ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ */ ++VOID PEBS_Flush_Buffer(VOID *param) ++{ ++ U32 i, this_cpu, index, desc_id; ++ U64 pebs_overflow_status = 0; ++ U64 lbr_tos_from_ip = 0ULL; ++ DRV_BOOL counter_overflowed = FALSE; ++ // ECB pecb; ++ CPU_STATE pcpu; ++ EVENT_DESC evt_desc; ++ BUFFER_DESC bd; ++ SampleRecordPC *psamp_pebs; ++ U32 is_64bit_addr = FALSE; ++ U32 u32PebsRecordNumFilled; ++#if defined(DRV_IA32) ++ U32 seg_cs; ++ U32 csdlo; ++ U32 csdhi; ++#endif ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ U32 cur_grp; ++ DRV_BOOL multi_pebs_enabled; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ bd = &cpu_buf[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ cur_grp = CPU_STATE_current_group(pcpu); ++ multi_pebs_enabled = ++ (DEV_CONFIG_pebs_mode(pcfg) && ++ (DEV_CONFIG_pebs_record_num(pcfg) > 1) && ++ (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == ++ DRV_SETUP_INFO_PTI_DISABLED)); ++ ++ if (!multi_pebs_enabled) { ++ SEP_DRV_LOG_TRACE_OUT("PEBS_Flush_Buffer is not supported."); ++ return; ++ } ++ ++ u32PebsRecordNumFilled = PEBS_Get_Num_Records_Filled(); ++ for (i = 0; i < u32PebsRecordNumFilled; i++) { ++ pebs_overflow_status = PEBS_Overflowed(this_cpu, 0, i); ++ SEP_DRV_LOG_TRACE("Pebs_overflow_status = 0x%llx, i=%d.", ++ pebs_overflow_status, i); ++ ++ // pecb = LWPMU_DEVICE_PMU_register_data( ++ // &devices[dev_idx])[cur_grp]; ++ FOR_EACH_DATA_REG(pecb, j) ++ { ++ if ((!DEV_CONFIG_enable_adaptive_pebs(pcfg) && ++ !ECB_entries_is_gp_reg_get(pecb, j)) || ++ !ECB_entries_precise_get(pecb, j)) { ++ continue; ++ } ++ if (ECB_entries_fixed_reg_get(pecb, j)) { ++ index = ECB_entries_reg_id(pecb, j) - ++ IA32_FIXED_CTR0; ++ if (pebs_overflow_status & ++ ((U64)1 << (32 + 
index))) { ++ counter_overflowed = TRUE; ++ } ++ } else { ++ index = ECB_entries_reg_id(pecb, j) - IA32_PMC0; ++ if (pebs_overflow_status & (U64)1 << index) { ++ counter_overflowed = TRUE; ++ } ++ } ++ if (counter_overflowed) { ++ desc_id = ECB_entries_event_id_index(pecb, j); ++ evt_desc = desc_data[desc_id]; ++ SEP_DRV_LOG_TRACE( ++ "Event_id_index=%u, desc_id=%u.", ++ ECB_entries_event_id_index(pecb, j), ++ desc_id); ++ psamp_pebs = (SampleRecordPC *) ++ OUTPUT_Reserve_Buffer_Space( ++ bd, ++ EVENT_DESC_sample_size( ++ evt_desc), ++ (NMI_mode) ? TRUE : FALSE, ++ !SEP_IN_NOTIFICATION, ++ (S32)this_cpu); ++ if (!psamp_pebs) { ++ SEP_DRV_LOG_ERROR( ++ "Could not generate samples from PEBS records."); ++ continue; ++ } ++ ++ lbr_tos_from_ip = 0ULL; ++ CPU_STATE_num_samples(&pcb[this_cpu]) += 1; ++ SAMPLE_RECORD_descriptor_id(psamp_pebs) = ++ desc_id; ++ SAMPLE_RECORD_event_index(psamp_pebs) = ++ ECB_entries_event_id_index(pecb, j); ++ SAMPLE_RECORD_pid_rec_index(psamp_pebs) = ++ (U32)-1; ++ SAMPLE_RECORD_pid_rec_index_raw(psamp_pebs) = 1; ++ SAMPLE_RECORD_tid(psamp_pebs) = (U32)-1; ++ SAMPLE_RECORD_cpu_num(psamp_pebs) = ++ (U16)this_cpu; ++ SAMPLE_RECORD_osid(psamp_pebs) = 0; ++ ++#if defined(DRV_IA32) ++ PEBS_Modify_IP((S8 *)psamp_pebs, is_64bit_addr, ++ i); ++ SAMPLE_RECORD_cs(psamp_pebs) = __KERNEL_CS; ++ if (SAMPLE_RECORD_eflags(psamp_pebs) & ++ EFLAGS_V86_MASK) { ++ csdlo = 0; ++ csdhi = 0; ++ } else { ++ seg_cs = SAMPLE_RECORD_cs(psamp_pebs); ++ SYS_Get_CSD(seg_cs, &csdlo, &csdhi); ++ } ++ SAMPLE_RECORD_csd(psamp_pebs).u1.lowWord = ++ csdlo; ++ SAMPLE_RECORD_csd(psamp_pebs).u2.highWord = ++ csdhi; ++#elif defined(DRV_EM64T) ++ SAMPLE_RECORD_cs(psamp_pebs) = __KERNEL_CS; ++ pmi_Get_CSD(SAMPLE_RECORD_cs(psamp_pebs), ++ &SAMPLE_RECORD_csd(psamp_pebs) ++ .u1.lowWord, ++ &SAMPLE_RECORD_csd(psamp_pebs) ++ .u2.highWord); ++ is_64bit_addr = ++ (SAMPLE_RECORD_csd(psamp_pebs) ++ .u2.s2.reserved_0 == 1); ++ if (is_64bit_addr) { ++ SAMPLE_RECORD_ia64_pc(psamp_pebs) = ++ 
TRUE; ++ } else { ++ SAMPLE_RECORD_ia64_pc(psamp_pebs) = ++ FALSE; ++ ++ SEP_DRV_LOG_TRACE( ++ "SAMPLE_RECORD_eip(psamp_pebs) 0x%x.", ++ SAMPLE_RECORD_eip(psamp_pebs)); ++ SEP_DRV_LOG_TRACE( ++ "SAMPLE_RECORD_eflags(psamp_pebs) %x.", ++ SAMPLE_RECORD_eflags( ++ psamp_pebs)); ++ } ++#endif ++ if (EVENT_DESC_pebs_offset(evt_desc) || ++ EVENT_DESC_latency_offset_in_sample( ++ evt_desc)) { ++ lbr_tos_from_ip = PEBS_Fill_Buffer( ++ (S8 *)psamp_pebs, evt_desc, i); ++ } ++ PEBS_Modify_IP((S8 *)psamp_pebs, is_64bit_addr, ++ i); ++ PEBS_Modify_TSC((S8 *)psamp_pebs, i); ++ if (ECB_entries_branch_evt_get(pecb, j) && ++ DEV_CONFIG_precise_ip_lbrs(pcfg) && ++ lbr_tos_from_ip) { ++ if (is_64bit_addr) { ++ SAMPLE_RECORD_iip(psamp_pebs) = ++ lbr_tos_from_ip; ++ SEP_DRV_LOG_TRACE( ++ "UPDATED SAMPLE_RECORD_iip(psamp) 0x%llx.", ++ SAMPLE_RECORD_iip( ++ psamp_pebs)); ++ } else { ++ SAMPLE_RECORD_eip(psamp_pebs) = ++ (U32)lbr_tos_from_ip; ++ SEP_DRV_LOG_TRACE( ++ "UPDATED SAMPLE_RECORD_eip(psamp) 0x%x.", ++ SAMPLE_RECORD_eip( ++ psamp_pebs)); ++ } ++ } ++ } ++ } ++ END_FOR_EACH_DATA_REG; ++ } ++ PEBS_Reset_Index(this_cpu); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID PEBS_Reset_Counter (this_cpu, index, value) ++ * ++ * @brief set reset value for PMC after overflow ++ * ++ * @param this_cpu -- the current cpu ++ * index -- PMC register index ++ * value -- reset value for PMC after overflow ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ */ ++VOID PEBS_Reset_Counter(S32 this_cpu, U32 index, U64 value) ++{ ++ DTS_BUFFER_EXT dts; ++ DTS_BUFFER_EXT1 dts_ext = NULL; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("This_cpu: %d, index: %u, value: %llx.", this_cpu, ++ index, value); ++ ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ dts = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ if (!dts) { ++ return; ++ } ++ SEP_DRV_LOG_TRACE( ++ "PEBS Reset GP Counters[0:4]: cpu %d, index=%u, value=%llx.", ++ this_cpu, index, value); ++ switch (index) { ++ case 0: ++ DTS_BUFFER_EXT_counter_reset0(dts) = value; ++ break; ++ case 1: ++ DTS_BUFFER_EXT_counter_reset1(dts) = value; ++ break; ++ case 2: ++ DTS_BUFFER_EXT_counter_reset2(dts) = value; ++ break; ++ case 3: ++ DTS_BUFFER_EXT_counter_reset3(dts) = value; ++ break; ++ } ++ ++ if (DEV_CONFIG_enable_adaptive_pebs(pcfg) || ++ DEV_CONFIG_collect_fixed_counter_pebs(pcfg)) { ++ dts_ext = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ } ++ if (!dts_ext) { ++ return; ++ } ++ SEP_DRV_LOG_TRACE("PEBS Reset Fixed Counters and GP Counters[4:7]: \ ++ cpu %d, index=%u, value=%llx.", ++ this_cpu, index, value); ++ switch (index) { ++ case 4: ++ DTS_BUFFER_EXT1_counter_reset4(dts_ext) = value; ++ break; ++ case 5: ++ DTS_BUFFER_EXT1_counter_reset5(dts_ext) = value; ++ break; ++ case 6: ++ DTS_BUFFER_EXT1_counter_reset6(dts_ext) = value; ++ break; ++ case 7: ++ DTS_BUFFER_EXT1_counter_reset7(dts_ext) = value; ++ break; ++ case 8: ++ DTS_BUFFER_EXT1_fixed_counter_reset0(dts_ext) = value; ++ break; ++ case 9: ++ DTS_BUFFER_EXT1_fixed_counter_reset1(dts_ext) = value; ++ break; ++ case 10: ++ DTS_BUFFER_EXT1_fixed_counter_reset2(dts_ext) = 
value; ++ break; ++ case 11: ++ DTS_BUFFER_EXT1_fixed_counter_reset3(dts_ext) = value; ++ break; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID PEBS_Modify_IP (sample, is_64bit_addr) ++ * ++ * @brief Change the IP field in the sample to that in the PEBS record ++ * ++ * @param sample - sample buffer ++ * @param is_64bit_addr - are we in a 64 bit module ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * ++ */ ++VOID PEBS_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index) ++{ ++ U32 this_cpu; ++ U32 dev_idx; ++ PEBS_DISPATCH pebs_dispatch; ++ ++ SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", ++ sample, is_64bit_addr, rec_index); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); ++ ++ pebs_dispatch->modify_ip(sample, is_64bit_addr, rec_index); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID PEBS_Modify_TSC (sample) ++ * ++ * @brief Change the TSC field in the sample to that in the PEBS record ++ * ++ * @param sample - sample buffer ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * ++ */ ++VOID PEBS_Modify_TSC(void *sample, U32 rec_index) ++{ ++ U32 this_cpu; ++ U32 dev_idx; ++ PEBS_DISPATCH pebs_dispatch; ++ ++ SEP_DRV_LOG_TRACE_IN("Sample: %p, rec_index: %u.", sample, rec_index); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); ++ ++ if (pebs_dispatch->modify_tsc != NULL) { ++ pebs_dispatch->modify_tsc(sample, rec_index); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++U32 PEBS_Get_Num_Records_Filled(VOID) ++{ ++ U32 this_cpu; ++ U32 dev_idx; ++ PEBS_DISPATCH pebs_dispatch; ++ U32 num = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); ++ ++ if (pebs_dispatch->get_num_records_filled != NULL) { ++ num = pebs_dispatch->get_num_records_filled(); ++ SEP_DRV_LOG_TRACE("Num=%u.", num); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", num); ++ return num; ++} ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID PEBS_Fill_Phy_Addr (LATENCY_INFO latency_info) ++ * ++ * @brief Fill latency node with phy addr when applicable ++ * ++ * @param latency_info - pointer to LATENCY_INFO struct ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * ++ */ ++ ++static VOID PEBS_Fill_Phy_Addr(LATENCY_INFO latency_info) ++{ ++#if defined(DRV_EM64T) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) ++ U64 lin_addr; ++ U64 offset; ++ struct page *page = NULL; ++ ++ if (!DRV_CONFIG_virt_phys_translation(drv_cfg)) { ++ return; ++ } ++ lin_addr = (U64)LATENCY_INFO_linear_address(latency_info); ++ if (lin_addr != 0) { ++ offset = (U64)(lin_addr & 0x0FFF); ++ if (__virt_addr_valid(lin_addr)) { ++ LATENCY_INFO_phys_addr(latency_info) = ++ (U64)__pa(lin_addr); ++ } else if (lin_addr < __PAGE_OFFSET) { ++ pagefault_disable(); ++ if (__get_user_pages_fast(lin_addr, 1, 1, &page)) { ++ LATENCY_INFO_phys_addr(latency_info) = ++ (U64)page_to_phys(page) + offset; ++ put_page(page); ++ } ++ pagefault_enable(); ++ } ++ } ++#endif ++} ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 PEBS_Fill_Buffer (S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) ++ * ++ * @brief Fill the buffer with the pebs data ++ * ++ * @param buffer - area to write the data into ++ * event_desc - event descriptor of the pebs event ++ rec_index - current pebs record index ++ * ++ * @return if APEBS return LBR_TOS_FROM_IP else return 0 ++ * ++ * Special Notes: ++ * ++ */ ++U64 PEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) ++{ ++ DTS_BUFFER_EXT dtes; ++ LATENCY_INFO_NODE latency_info = { 0 }; ++ PEBS_REC_EXT1 pebs_base_ext1; ++ PEBS_REC_EXT2 pebs_base_ext2; ++ S8 *pebs_base, *pebs_index, *pebs_ptr; ++ U8 pebs_ptr_check = FALSE; ++ U64 lbr_tos_from_ip = 0ULL; ++ U32 this_cpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p, evt_desc: %p, rec_index: %u.", buffer, ++ evt_desc, rec_index); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { ++ lbr_tos_from_ip = ++ APEBS_Fill_Buffer(buffer, evt_desc, rec_index); ++ return lbr_tos_from_ip; ++ } ++ ++ SEP_DRV_LOG_TRACE("In PEBS Fill Buffer: cpu %d.", CONTROL_THIS_CPU()); ++ ++ if (!dtes) { ++ return lbr_tos_from_ip; ++ } ++ pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); ++ pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); ++ pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + ++ ((UIOP)rec_index * ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); ++ pebs_ptr_check = ++ (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); ++ if (!pebs_ptr_check) { ++ return lbr_tos_from_ip; ++ } ++ pebs_base = pebs_ptr; ++ if (EVENT_DESC_pebs_offset(evt_desc)) { ++ SEP_DRV_LOG_TRACE("PEBS buffer has data available."); ++ memcpy(buffer + EVENT_DESC_pebs_offset(evt_desc), pebs_base, ++ EVENT_DESC_pebs_size(evt_desc)); ++ } ++ if (EVENT_DESC_eventing_ip_offset(evt_desc)) { ++ 
pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; ++ *(U64 *)(buffer + EVENT_DESC_eventing_ip_offset(evt_desc)) = ++ PEBS_REC_EXT1_eventing_ip(pebs_base_ext1); ++ } ++ if (EVENT_DESC_hle_offset(evt_desc)) { ++ pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; ++ *(U64 *)(buffer + EVENT_DESC_hle_offset(evt_desc)) = ++ PEBS_REC_EXT1_hle_info(pebs_base_ext1); ++ } ++ if (EVENT_DESC_latency_offset_in_sample(evt_desc)) { ++ pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; ++ memcpy(&latency_info, ++ pebs_base + EVENT_DESC_latency_offset_in_pebs_record( ++ evt_desc), ++ EVENT_DESC_latency_size_from_pebs_record(evt_desc)); ++ memcpy(&LATENCY_INFO_stack_pointer(&latency_info), ++ &PEBS_REC_EXT1_rsp(pebs_base_ext1), sizeof(U64)); ++ ++ LATENCY_INFO_phys_addr(&latency_info) = 0; ++ PEBS_Fill_Phy_Addr(&latency_info); ++ ++ memcpy(buffer + EVENT_DESC_latency_offset_in_sample(evt_desc), ++ &latency_info, sizeof(LATENCY_INFO_NODE)); ++ } ++ if (EVENT_DESC_pebs_tsc_offset(evt_desc)) { ++ pebs_base_ext2 = (PEBS_REC_EXT2)pebs_base; ++ *(U64 *)(buffer + EVENT_DESC_pebs_tsc_offset(evt_desc)) = ++ PEBS_REC_EXT2_tsc(pebs_base_ext2); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++ return lbr_tos_from_ip; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 APEBS_Fill_Buffer (S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) ++ * ++ * @brief Fill the buffer with the pebs data ++ * ++ * @param buffer - area to write the data into ++ * event_desc - event descriptor of the pebs event ++ * rec_index - current pebs record index ++ * ++ * @return LBR_TOS_FROM_IP ++ * ++ * Special Notes: ++ * ++ */ ++U64 APEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) ++{ ++ DTS_BUFFER_EXT1 dtes; ++ LATENCY_INFO_NODE latency_info = { 0 }; ++ U64 dtes_record_size = 0; ++ U64 dtes_record_format = 0; ++ ADAPTIVE_PEBS_MEM_INFO apebs_mem = NULL; ++ ADAPTIVE_PEBS_GPR_INFO apebs_gpr = NULL; ++ ADAPTIVE_PEBS_BASIC_INFO apebs_basic = NULL; ++ S8 *pebs_base, *pebs_index, *pebs_ptr; ++ U8 pebs_ptr_check = FALSE; ++ U64 lbr_tos_from_ip = 0ULL; ++ U32 this_cpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p, evt_desc: %p, rec_index: %u.", buffer, ++ evt_desc, rec_index); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); ++ ++ SEP_DRV_LOG_TRACE("In APEBS Fill Buffer: cpu %d.", this_cpu); ++ ++ if (!dtes || !DEV_CONFIG_enable_adaptive_pebs(pcfg)) { ++ return lbr_tos_from_ip; ++ } ++ ++ pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_base(dtes); ++ pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_index(dtes); ++ pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT1_pebs_base(dtes) + ++ ((UIOP)rec_index * ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); ++ pebs_ptr_check = ++ (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); ++ if (!pebs_ptr_check) { ++ return lbr_tos_from_ip; ++ } ++ ++ pebs_base = pebs_ptr; ++ apebs_basic = (ADAPTIVE_PEBS_BASIC_INFO)( ++ pebs_base + LWPMU_DEVICE_apebs_basic_offset(&devices[dev_idx])); ++ dtes_record_size = (ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic) & ++ APEBS_RECORD_SIZE_MASK) >> 48; // [63:48] ++ dtes_record_format = ++ 
(ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic) & ++ (U64)APEBS_RECORD_FORMAT_MASK); // [47:0] ++ ++ if (dtes_record_size != ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) { ++ SEP_DRV_LOG_TRACE( ++ "PEBS record size does not match with ucode\n"); ++ } ++ if (EVENT_DESC_pebs_offset(evt_desc)) { ++ *(U64 *)(buffer + EVENT_DESC_pebs_offset(evt_desc)) = ++ ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic); ++ } ++ if (EVENT_DESC_eventing_ip_offset(evt_desc)) { ++ *(U64 *)(buffer + EVENT_DESC_eventing_ip_offset(evt_desc)) = ++ ADAPTIVE_PEBS_BASIC_INFO_eventing_ip(apebs_basic); ++ } ++ if (EVENT_DESC_pebs_tsc_offset(evt_desc)) { ++ *(U64 *)(buffer + EVENT_DESC_pebs_tsc_offset(evt_desc)) = ++ ADAPTIVE_PEBS_BASIC_INFO_tsc(apebs_basic); ++ } ++ if (EVENT_DESC_applicable_counters_offset(evt_desc)) { ++ *(U64 *)(buffer + ++ EVENT_DESC_applicable_counters_offset(evt_desc)) = ++ ADAPTIVE_PEBS_BASIC_INFO_applicable_counters( ++ apebs_basic); ++ } ++ if (DEV_CONFIG_apebs_collect_gpr(pcfg) && ++ EVENT_DESC_gpr_info_offset(evt_desc)) { ++ if (!(dtes_record_format & APEBS_GPR_RECORD_FORMAT_MASK)) { ++ SEP_DRV_LOG_WARNING( ++ "GPR info not found in DS PEBS record."); ++ } ++ memcpy(buffer + EVENT_DESC_gpr_info_offset(evt_desc), ++ pebs_base + ++ LWPMU_DEVICE_apebs_gpr_offset(&devices[dev_idx]), ++ EVENT_DESC_gpr_info_size(evt_desc)); ++ } ++ if (DEV_CONFIG_apebs_collect_mem_info(pcfg) && ++ EVENT_DESC_latency_offset_in_sample(evt_desc)) { ++ if (!(dtes_record_format & APEBS_MEM_RECORD_FORMAT_MASK)) { ++ SEP_DRV_LOG_WARNING( ++ "MEM info not found in DS PEBS record."); ++ } ++ apebs_mem = (ADAPTIVE_PEBS_MEM_INFO)( ++ pebs_base + ++ LWPMU_DEVICE_apebs_mem_offset(&devices[dev_idx])); ++ memcpy(&LATENCY_INFO_linear_address(&latency_info), ++ &ADAPTIVE_PEBS_MEM_INFO_data_linear_address(apebs_mem), ++ sizeof(U64)); ++ memcpy(&LATENCY_INFO_data_source(&latency_info), ++ &ADAPTIVE_PEBS_MEM_INFO_data_source(apebs_mem), ++ sizeof(U64)); ++ memcpy(&LATENCY_INFO_latency(&latency_info), 
++ &ADAPTIVE_PEBS_MEM_INFO_latency(apebs_mem), sizeof(U64)); ++ LATENCY_INFO_stack_pointer(&latency_info) = 0; ++ if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { ++ apebs_gpr = (ADAPTIVE_PEBS_GPR_INFO)( ++ pebs_base + LWPMU_DEVICE_apebs_gpr_offset( ++ &devices[dev_idx])); ++ memcpy(&LATENCY_INFO_stack_pointer(&latency_info), ++ &ADAPTIVE_PEBS_GPR_INFO_rsp(apebs_gpr), ++ sizeof(U64)); ++ } ++ ++ LATENCY_INFO_phys_addr(&latency_info) = 0; ++ PEBS_Fill_Phy_Addr(&latency_info); ++ memcpy(buffer + EVENT_DESC_latency_offset_in_sample(evt_desc), ++ &latency_info, sizeof(LATENCY_INFO_NODE)); ++ } ++ if (DEV_CONFIG_apebs_collect_mem_info(pcfg) && ++ EVENT_DESC_hle_offset(evt_desc)) { ++ *(U64 *)(buffer + EVENT_DESC_hle_offset(evt_desc)) = ++ ADAPTIVE_PEBS_MEM_INFO_hle_info(( ++ ADAPTIVE_PEBS_MEM_INFO)( ++ pebs_base + LWPMU_DEVICE_apebs_mem_offset( ++ &devices[dev_idx]))); ++ } ++ if (DEV_CONFIG_apebs_collect_xmm(pcfg) && ++ EVENT_DESC_xmm_info_offset(evt_desc)) { ++ if (!(dtes_record_format & APEBS_XMM_RECORD_FORMAT_MASK)) { ++ SEP_DRV_LOG_WARNING( ++ "XMM info not found in DS PEBS record."); ++ } ++ memcpy(buffer + EVENT_DESC_xmm_info_offset(evt_desc), ++ pebs_base + ++ LWPMU_DEVICE_apebs_xmm_offset(&devices[dev_idx]), ++ EVENT_DESC_xmm_info_size(evt_desc)); ++ } ++ if (DEV_CONFIG_apebs_collect_lbrs(pcfg) && ++ EVENT_DESC_lbr_offset(evt_desc)) { ++ if (!(dtes_record_format & APEBS_LBR_RECORD_FORMAT_MASK)) { ++ SEP_DRV_LOG_WARNING( ++ "LBR info not found in DS PEBS record\n"); ++ } ++ if ((dtes_record_format >> 24) != ++ (DEV_CONFIG_apebs_num_lbr_entries(pcfg) - 1)) { ++ SEP_DRV_LOG_WARNING( ++ "DRV_CONFIG_apebs_num_lbr_entries does not match with PEBS record\n"); ++ } ++ *(U64 *)(buffer + EVENT_DESC_lbr_offset(evt_desc)) = ++ DEV_CONFIG_apebs_num_lbr_entries(pcfg) - 1; ++ //Top-of-Stack(TOS) pointing to last entry ++ //Populating lbr callstack as SST_ENTRY_N to SST_ENTRY_0 in ++ // tb util, hence setting TOS to SST_ENTRY_N ++ memcpy(buffer + EVENT_DESC_lbr_offset(evt_desc) + 
sizeof(U64), ++ pebs_base + ++ LWPMU_DEVICE_apebs_lbr_offset(&devices[dev_idx]), ++ EVENT_DESC_lbr_info_size(evt_desc) - sizeof(U64)); ++ lbr_tos_from_ip = ADAPTIVE_PEBS_LBR_INFO_lbr_from( ++ (ADAPTIVE_PEBS_LBR_INFO)(pebs_base + ++ LWPMU_DEVICE_apebs_lbr_offset( ++ &devices[dev_idx]))); ++ } ++ return lbr_tos_from_ip; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS PEBS_Initialize (DEV_CONFIG pcfg) ++ * ++ * @brief Initialize the pebs buffers ++ * ++ * @param dev_idx - Device index ++ * ++ * @return status ++ * ++ * Special Notes: ++ * If the user is asking for PEBS information. Allocate the DS area ++ */ ++OS_STATUS PEBS_Initialize(U32 dev_idx) ++{ ++ DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ SEP_DRV_LOG_TRACE_IN("Pcfg: %p.", pcfg); ++ ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ switch (DEV_CONFIG_pebs_mode(pcfg)) { ++ case 1: ++ SEP_DRV_LOG_INIT("Set up the Core2 dispatch table."); ++ LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = ++ &core2_pebs; ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = ++ sizeof(PEBS_REC_NODE); ++ break; ++ case 2: ++ SEP_DRV_LOG_INIT("Set up the Nehalem dispatch."); ++ LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = ++ &corei7_pebs; ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = ++ sizeof(PEBS_REC_EXT_NODE); ++ break; ++ case 3: ++ SEP_DRV_LOG_INIT( ++ "Set up the Core2 (PNR) dispatch table."); ++ LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = ++ &core2p_pebs; ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = ++ sizeof(PEBS_REC_NODE); ++ break; ++ case 4: ++ SEP_DRV_LOG_INIT("Set up the Haswell dispatch table."); ++ LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = ++ &haswell_pebs; ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = ++ sizeof(PEBS_REC_EXT1_NODE); ++ break; ++ case 5: ++ SEP_DRV_LOG_INIT( ++ "Set up the Perf version4 dispatch table."); ++ LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = ++ &perfver4_pebs; ++ 
LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = ++ sizeof(PEBS_REC_EXT2_NODE); ++ break; ++ case 6: ++ if (!DEV_CONFIG_enable_adaptive_pebs(pcfg)) { ++ SEP_DRV_LOG_TRACE( ++ "APEBS need to be enabled in perf version4 SNC dispatch mode."); ++ } ++ LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = ++ &perfver4_apebs; ++ LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = ++ sizeof(ADAPTIVE_PEBS_BASIC_INFO_NODE); ++ if (DEV_CONFIG_apebs_collect_mem_info(pcfg)) { ++ LWPMU_DEVICE_apebs_mem_offset( ++ &devices[dev_idx]) = ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]); ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]) += ++ sizeof(ADAPTIVE_PEBS_MEM_INFO_NODE); ++ } ++ if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { ++ LWPMU_DEVICE_apebs_gpr_offset( ++ &devices[dev_idx]) = ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]); ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]) += ++ sizeof(ADAPTIVE_PEBS_GPR_INFO_NODE); ++ } ++ if (DEV_CONFIG_apebs_collect_xmm(pcfg)) { ++ LWPMU_DEVICE_apebs_xmm_offset( ++ &devices[dev_idx]) = ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]); ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]) += ++ sizeof(ADAPTIVE_PEBS_XMM_INFO_NODE); ++ } ++ if (DEV_CONFIG_apebs_collect_lbrs(pcfg)) { ++ LWPMU_DEVICE_apebs_lbr_offset( ++ &devices[dev_idx]) = ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]); ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]) += ++ (sizeof(ADAPTIVE_PEBS_LBR_INFO_NODE) * ++ DEV_CONFIG_apebs_num_lbr_entries( ++ pcfg)); ++ } ++ SEP_DRV_LOG_TRACE("Size of adaptive pebs record - %d.", ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx])); ++ break; ++ default: ++ SEP_DRV_LOG_INIT( ++ "Unknown PEBS type. 
Will not collect PEBS information."); ++ break; ++ } ++ } ++ if (LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) && ++ !DEV_CONFIG_pebs_record_num(pcfg)) { ++ DEV_CONFIG_pebs_record_num(pcfg) = 1; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS"); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS PEBS_Allocate (void) ++ * ++ * @brief Allocate the pebs related buffers ++ * ++ * @param NONE ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Allocated the DS area used for PEBS capture ++ */ ++OS_STATUS PEBS_Allocate(VOID) ++{ ++ S32 cpu_num; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ U32 dts_size; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_INIT_IN(""); ++ ++ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_num++) { ++ pcpu = &pcb[cpu_num]; ++ dev_idx = core_to_dev_map[cpu_num]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ if (LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx])) { ++ dts_size = sizeof(DTS_BUFFER_EXT_NODE); ++ if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { ++ dts_size = sizeof(DTS_BUFFER_EXT1_NODE); ++ } ++ CPU_STATE_dts_buffer_offset(pcpu) = ++ pebs_global_memory_size; ++ pebs_global_memory_size += PER_CORE_BUFFER_SIZE( ++ dts_size, ++ LWPMU_DEVICE_pebs_record_size( ++ &devices[dev_idx]), ++ DEV_CONFIG_pebs_record_num(pcfg)); ++ } ++ } ++ if (pebs_global_memory_size) { ++ if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == ++ DRV_SETUP_INFO_PTI_DISABLED) { ++ SEP_DRV_LOG_INIT( ++ "Allocating global PEBS buffer using regular control routine."); ++ pebs_global_memory = (PVOID)CONTROL_Allocate_KMemory( ++ pebs_global_memory_size); ++ if (!pebs_global_memory) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Failed to allocate PEBS buffer!"); ++ return OS_NO_MEM; ++ } ++ memset(pebs_global_memory, 0, pebs_global_memory_size); ++ } else { ++#if defined(DRV_USE_KAISER) ++ SEP_DRV_LOG_INIT( ++ "Allocating PEBS buffer using KAISER-compatible approach."); ++ ++ if 
(!local_kaiser_add_mapping) { ++ local_kaiser_add_mapping = ++ (PVOID)UTILITY_Find_Symbol( ++ "kaiser_add_mapping"); ++ if (!local_kaiser_add_mapping) { ++ SEP_DRV_LOG_ERROR( ++ "Could not find 'kaiser_add_mapping'!"); ++ goto kaiser_error_handling; ++ } ++ } ++ ++ if (!local_kaiser_remove_mapping) { ++ local_kaiser_remove_mapping = ++ (PVOID)UTILITY_Find_Symbol( ++ "kaiser_remove_mapping"); ++ if (!local_kaiser_remove_mapping) { ++ SEP_DRV_LOG_ERROR( ++ "Could not find 'kaiser_remove_mapping'!"); ++ goto kaiser_error_handling; ++ } ++ } ++ ++ pebs_global_memory = (PVOID)__get_free_pages( ++ GFP_KERNEL | __GFP_ZERO, ++ get_order(pebs_global_memory_size)); ++ ++ if (pebs_global_memory) { ++ SEP_DRV_LOG_TRACE( ++ "Successful memory allocation for pebs_global_memory."); ++ ++ if (local_kaiser_add_mapping( ++ (unsigned long)pebs_global_memory, ++ pebs_global_memory_size, ++ __PAGE_KERNEL) >= 0) { ++ SEP_DRV_LOG_TRACE( ++ "Successful kaiser_add_mapping."); ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "KAISER mapping failed!"); ++ free_pages( ++ (unsigned long) ++ pebs_global_memory, ++ get_order( ++ pebs_global_memory_size)); ++ pebs_global_memory = NULL; ++ goto kaiser_error_handling; ++ } ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "Failed memory allocation for pebs_global_memory!"); ++ } ++ ++ kaiser_error_handling: ++ if (!pebs_global_memory) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Failed to setup PEBS buffer!"); ++ return OS_NO_MEM; ++ } ++#elif defined(DRV_USE_PTI) ++ if (!local_cea_set_pte) { ++ local_cea_set_pte = (PVOID)UTILITY_Find_Symbol( ++ "cea_set_pte"); ++ if (!local_cea_set_pte) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Could not find 'cea_set_pte'!"); ++ return OS_FAULT; ++ } ++ } ++ if (!local_do_kernel_range_flush) { ++ local_do_kernel_range_flush = ++ (PVOID)UTILITY_Find_Symbol( ++ "do_kernel_range_flush"); ++ if (!local_do_kernel_range_flush) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Could not find 'do_kernel_range_flush'!"); ++ return OS_FAULT; ++ } ++ } ++#endif // 
DRV_USE_PTI ++ } ++ } ++ ++ CONTROL_Invoke_Parallel(pebs_Allocate_Buffers, (VOID *)NULL); ++ ++ SEP_DRV_LOG_INIT_OUT(""); ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID PEBS_Destroy (void) ++ * ++ * @brief Clean up the pebs related buffers ++ * ++ * @param pcfg - Driver Configuration ++ * ++ * @return NONE ++ * ++ * Special Notes: ++ * Deallocated the DS area used for PEBS capture ++ */ ++VOID PEBS_Destroy(VOID) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ CONTROL_Invoke_Parallel(pebs_Deallocate_Buffers, (VOID *)(size_t)0); ++ if (pebs_global_memory) { ++ if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == ++ DRV_SETUP_INFO_PTI_DISABLED) { ++ SEP_DRV_LOG_INIT( ++ "Freeing PEBS buffer using regular control routine."); ++ pebs_global_memory = ++ CONTROL_Free_Memory(pebs_global_memory); ++ } ++#if defined(DRV_USE_KAISER) ++ else if (DRV_SETUP_INFO_page_table_isolation( ++ &req_drv_setup_info) == ++ DRV_SETUP_INFO_PTI_KAISER) { ++ SEP_DRV_LOG_INIT( ++ "Freeing PEBS buffer using KAISER-compatible approach."); ++ if (local_kaiser_remove_mapping) { ++ local_kaiser_remove_mapping( ++ (unsigned long)pebs_global_memory, ++ pebs_global_memory_size); ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "Could not call 'kaiser_remove_mapping'!"); ++ } ++ free_pages((unsigned long)pebs_global_memory, ++ get_order(pebs_global_memory_size)); ++ pebs_global_memory = NULL; ++ } ++#endif // DRV_USE_KAISER ++ ++ pebs_global_memory_size = 0; ++ SEP_DRV_LOG_INIT("PEBS buffer successfully freed."); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} +diff --git a/drivers/platform/x86/sepdk/sep/perfver4.c b/drivers/platform/x86/sepdk/sep/perfver4.c +new file mode 100755 +index 000000000000..ae8fa717f4bf +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/perfver4.c +@@ -0,0 +1,1972 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. 
All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "lwpmudrv.h" ++#include "utility.h" ++#include "control.h" ++#include "output.h" ++#include "perfver4.h" ++#include "ecb_iterators.h" ++#include "pebs.h" ++#include "apic.h" ++ ++extern U64 *read_counter_info; ++extern DRV_CONFIG drv_cfg; ++extern U64 *interrupt_counts; ++extern DRV_SETUP_INFO_NODE req_drv_setup_info; ++extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++static U64 perf_metrics_counter_reload_value; ++ ++typedef struct SADDR_S { ++ S64 addr : PERFVER4_LBR_DATA_BITS; ++} SADDR; ++ ++static U32 restore_reg_addr[3]; ++ ++#define SADDR_addr(x) ((x).addr) ++#define MSR_ENERGY_MULTIPLIER 0x606 // Energy Multiplier MSR ++ ++#define IS_FIXED_CTR_ENABLED(ia32_perf_global_ctrl_reg_val) \ ++ ((ia32_perf_global_ctrl_reg_val)&0x700000000ULL) ++#define IS_FOUR_FIXED_CTR_ENABLED(ia32_perf_global_ctrl_reg_val) \ ++ ((ia32_perf_global_ctrl_reg_val)&0xF00000000ULL) ++#define IS_PMC_PEBS_ENABLED_GP(ia32_perf_global_ctrl_reg_val, \ ++ ia32_pebs_enable_reg_val) \ ++ (((ia32_perf_global_ctrl_reg_val)&0xfULL) == \ ++ ((ia32_pebs_enable_reg_val)&0xfULL)) ++#define IS_PMC_PEBS_ENABLED_FP_AND_GP(ia32_perf_global_ctrl_reg_val, \ ++ ia32_pebs_enable_reg_val) \ ++ (((ia32_perf_global_ctrl_reg_val)&0xf000000ffULL) == \ ++ ((ia32_pebs_enable_reg_val)&0xf000000ffULL)) ++ ++#define DISABLE_FRZ_ON_PMI(ia32_debug_ctrl_reg_val) \ ++ (0xefff & (ia32_debug_ctrl_reg_val)) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void perfver4_Write_PMU(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Initial set up of the PMU registers ++ * ++ * Special Notes ++ * Initial write of PMU registers. 
++ * Walk through the enties and write the value of the register accordingly. ++ * Assumption: For CCCR registers the enable bit is set to value 0. ++ * When current_group = 0, then this is the first time this routine is called, ++ * initialize the locks and set up EM tables. ++ */ ++static VOID perfver4_Write_PMU(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ EVENT_CONFIG ec; ++ DISPATCH dispatch; ++ DEV_CONFIG pcfg; ++#if defined(DRV_SEP_ACRN_ON) ++ struct profiling_pmi_config *pmi_config; ++ U32 index; ++ S32 msr_idx; ++#else ++ U32 counter_index; ++#endif ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ if (param == NULL) { ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ } else { ++ this_cpu = *(S32 *)param; ++ } ++ ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ counter_index = 0; ++ if (CPU_STATE_current_group(pcpu) == 0) { ++ if (EVENT_CONFIG_mode(ec) != EM_DISABLED) { ++ U32 index; ++ U32 st_index; ++ U32 j; ++ ++ /* Save all the initialization values away into an array for Event Multiplexing. 
*/ ++ for (j = 0; j < EVENT_CONFIG_num_groups(ec); j++) { ++ CPU_STATE_current_group(pcpu) = j; ++ st_index = CPU_STATE_current_group(pcpu) * ++ EVENT_CONFIG_max_gp_events(ec); ++ FOR_EACH_REG_CORE_OPERATION( ++ pecb, i, PMU_OPERATION_DATA_GP) ++ { ++ index = st_index + i - ++ ECB_operations_register_start( ++ pecb, ++ PMU_OPERATION_DATA_GP); ++ CPU_STATE_em_tables(pcpu)[index] = ++ ECB_entries_reg_value(pecb, i); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ /* Reset the current group to the very first one. */ ++ CPU_STATE_current_group(pcpu) = ++ this_cpu % EVENT_CONFIG_num_groups(ec); ++ } ++ } ++ ++ if (dispatch->hw_errata) { ++ dispatch->hw_errata(); ++ } ++ ++ /* Clear outstanding frozen bits */ ++ SYS_Write_MSR(IA32_PERF_GLOBAL_OVF_CTRL, PERFVER4_FROZEN_BIT_MASK); ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) ++ { ++ /* ++ * Writing the GLOBAL Control register enables the PMU to start counting. ++ * So write 0 into the register to prevent any counting from starting. 
++ */ ++ if (i == ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ continue; ++ } ++ /* ++ * PEBS is enabled for this collection session ++ */ ++ if (DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) && ++ i == ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS) && ++ ECB_entries_reg_value(pecb, i)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ continue; ++ } ++ ++ if (DEV_CONFIG_pebs_mode(pcfg) && ++ (ECB_entries_precise_get(pecb, i) == 1)) { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ counter_index = (ECB_entries_reg_id(pecb, i) - ++ IA32_FIXED_CTR0 + 8); ++ } else { ++ counter_index = (ECB_entries_reg_id(pecb, i) - ++ IA32_PMC0); ++ } ++ PEBS_Reset_Counter(this_cpu, counter_index, ++ ECB_entries_reg_value(pecb, i)); ++ } ++ ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++#if defined(MYDEBUG) ++ { ++ U64 val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SEP_DRV_LOG_TRACE( ++ "Write reg 0x%x --- value 0x%llx -- read 0x%llx.", ++ ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i), val); ++ } ++#endif ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++#else ++ pmi_config = (struct profiling_pmi_config *)CONTROL_Allocate_Memory( ++ sizeof(struct profiling_pmi_config)); ++ if (pmi_config == NULL) { ++ SEP_PRINT_ERROR("pmi_config memory allocation failed\n"); ++ return; ++ } ++ memset(pmi_config, 0, sizeof(struct profiling_pmi_config)); ++ ++ msr_idx = 0; ++ pmi_config->num_groups = 1; ++ ++ pmi_config->initial_list[0][msr_idx].msr_id = IA32_PERF_GLOBAL_CTRL; ++ pmi_config->initial_list[0][msr_idx].op_type = MSR_OP_WRITE; ++ pmi_config->initial_list[0][msr_idx].reg_type = PMU_MSR_CCCR; ++ pmi_config->initial_list[0][msr_idx].value = 0x0; ++ pmi_config->initial_list[0][msr_idx].param = 0x0; ++ msr_idx++; ++ ++ FOR_EACH_CCCR_REG_CPU(pecb, i, this_cpu) ++ { ++ if ((ECB_entries_reg_id(pecb, i) == 
IA32_PERF_GLOBAL_CTRL) || ++ (ECB_entries_reg_id(pecb, i) == IA32_PEBS_ENABLE)) { ++ continue; ++ } ++ ++ pmi_config->initial_list[0][msr_idx].msr_id = ++ ECB_entries_reg_id(pecb, i); ++ pmi_config->initial_list[0][msr_idx].op_type = MSR_OP_WRITE; ++ pmi_config->initial_list[0][msr_idx].reg_type = PMU_MSR_CCCR; ++ pmi_config->initial_list[0][msr_idx].value = ++ ECB_entries_reg_value(pecb, i); ++ pmi_config->initial_list[0][msr_idx].param = 0x0; ++ msr_idx++; ++ BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); ++ } ++ END_FOR_EACH_CCCR_REG_CPU; ++ ++ FOR_EACH_ESCR_REG_CPU(pecb, i, this_cpu) ++ { ++ pmi_config->initial_list[0][msr_idx].msr_id = ++ ECB_entries_reg_id(pecb, i); ++ pmi_config->initial_list[0][msr_idx].op_type = MSR_OP_WRITE; ++ pmi_config->initial_list[0][msr_idx].reg_type = PMU_MSR_ESCR; ++ pmi_config->initial_list[0][msr_idx].value = ++ ECB_entries_reg_value(pecb, i); ++ pmi_config->initial_list[0][msr_idx].param = 0x0; ++ msr_idx++; ++ BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); ++ } ++ END_FOR_EACH_ESCR_REG_CPU; ++ ++ FOR_EACH_DATA_REG_CPU(pecb, i, this_cpu) ++ { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ index = ECB_entries_reg_id(pecb, i) - IA32_FIXED_CTR0 + ++ 0x20; ++ } else if (ECB_entries_is_gp_reg_get(pecb, i)) { ++ index = ECB_entries_reg_id(pecb, i) - IA32_PMC0; ++ } else { ++ continue; ++ } ++ pmi_config->initial_list[0][msr_idx].msr_id = ++ ECB_entries_reg_id(pecb, i); ++ pmi_config->initial_list[0][msr_idx].op_type = MSR_OP_WRITE; ++ pmi_config->initial_list[0][msr_idx].reg_type = PMU_MSR_DATA; ++ pmi_config->initial_list[0][msr_idx].value = ++ ECB_entries_reg_value(pecb, i); ++ pmi_config->initial_list[0][msr_idx].param = index; ++ msr_idx++; ++ BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); ++ } ++ END_FOR_EACH_DATA_REG_CPU; ++ pmi_config->initial_list[0][msr_idx].msr_id = -1; ++ ++ FOR_EACH_CCCR_REG_CPU(pecb, i, this_cpu) ++ { ++ if (ECB_entries_reg_id(pecb, i) == IA32_PERF_GLOBAL_CTRL) { ++ pmi_config->start_list[0][0].msr_id = ++ IA32_PERF_GLOBAL_CTRL; ++ 
pmi_config->start_list[0][0].op_type = MSR_OP_WRITE; ++ pmi_config->start_list[0][0].reg_type = PMU_MSR_CCCR; ++ pmi_config->start_list[0][0].value = ++ ECB_entries_reg_value(pecb, i); ++ pmi_config->start_list[0][0].param = 0x0; ++ pmi_config->start_list[0][1].msr_id = -1; ++ break; ++ } ++ } ++ END_FOR_EACH_CCCR_REG_CPU; ++ ++ pmi_config->stop_list[0][0].msr_id = IA32_PERF_GLOBAL_CTRL; ++ pmi_config->stop_list[0][0].op_type = MSR_OP_WRITE; ++ pmi_config->stop_list[0][0].reg_type = PMU_MSR_CCCR; ++ pmi_config->stop_list[0][0].value = 0x0; ++ pmi_config->stop_list[0][0].param = 0x0; ++ pmi_config->stop_list[0][1].msr_id = -1; ++ ++ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { ++ pmi_config->entry_list[0][0].msr_id = IA32_PERF_GLOBAL_CTRL; ++ pmi_config->entry_list[0][0].op_type = MSR_OP_WRITE; ++ pmi_config->entry_list[0][0].reg_type = PMU_MSR_CCCR; ++ pmi_config->entry_list[0][0].value = 0x0; ++ pmi_config->entry_list[0][0].param = 0x0; ++ pmi_config->entry_list[0][1].msr_id = -1; ++ ++ msr_idx = 0; ++ FOR_EACH_CCCR_REG_CPU(pecb, i, this_cpu) ++ { ++ if ((ECB_entries_reg_id(pecb, i) == ++ IA32_PERF_GLOBAL_CTRL) || ++ (ECB_entries_reg_id(pecb, i) == IA32_PEBS_ENABLE)) { ++ continue; ++ } ++ ++ pmi_config->exit_list[0][msr_idx].msr_id = ++ ECB_entries_reg_id(pecb, i); ++ pmi_config->exit_list[0][msr_idx].op_type = ++ MSR_OP_WRITE; ++ pmi_config->exit_list[0][msr_idx].reg_type = ++ PMU_MSR_CCCR; ++ pmi_config->exit_list[0][msr_idx].value = ++ ECB_entries_reg_value(pecb, i); ++ pmi_config->exit_list[0][msr_idx].param = 0x0; ++ msr_idx++; ++ BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); ++ } ++ END_FOR_EACH_CCCR_REG_CPU; ++ ++ FOR_EACH_ESCR_REG_CPU(pecb, i, this_cpu) ++ { ++ pmi_config->exit_list[0][msr_idx].msr_id = ++ ECB_entries_reg_id(pecb, i); ++ pmi_config->exit_list[0][msr_idx].op_type = ++ MSR_OP_WRITE; ++ pmi_config->exit_list[0][msr_idx].reg_type = ++ PMU_MSR_ESCR; ++ pmi_config->exit_list[0][msr_idx].value = ++ ECB_entries_reg_value(pecb, i); ++ 
pmi_config->exit_list[0][msr_idx].param = 0x0; ++ msr_idx++; ++ BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); ++ } ++ END_FOR_EACH_ESCR_REG_CPU; ++ ++ FOR_EACH_DATA_REG_CPU(pecb, i, this_cpu) ++ { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ index = ECB_entries_reg_id(pecb, i) - ++ IA32_FIXED_CTR0 + 0x20; ++ } else if (ECB_entries_is_gp_reg_get(pecb, i)) { ++ index = ECB_entries_reg_id(pecb, i) - IA32_PMC0; ++ } else { ++ continue; ++ } ++ pmi_config->exit_list[0][msr_idx].msr_id = ++ ECB_entries_reg_id(pecb, i); ++ pmi_config->exit_list[0][msr_idx].op_type = ++ MSR_OP_WRITE; ++ pmi_config->exit_list[0][msr_idx].reg_type = ++ PMU_MSR_DATA; ++ pmi_config->exit_list[0][msr_idx].value = ++ ECB_entries_reg_value(pecb, i); ++ pmi_config->exit_list[0][msr_idx].param = index; ++ msr_idx++; ++ BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); ++ } ++ END_FOR_EACH_DATA_REG_CPU; ++ ++ FOR_EACH_CCCR_REG_CPU(pecb, i, this_cpu) ++ { ++ if (ECB_entries_reg_id(pecb, i) == ++ IA32_PERF_GLOBAL_CTRL) { ++ pmi_config->exit_list[0][msr_idx].msr_id = ++ IA32_PERF_GLOBAL_CTRL; ++ pmi_config->exit_list[0][msr_idx].op_type = ++ MSR_OP_WRITE; ++ pmi_config->exit_list[0][msr_idx].reg_type = ++ PMU_MSR_CCCR; ++ pmi_config->exit_list[0][msr_idx].value = ++ ECB_entries_reg_value(pecb, i); ++ pmi_config->exit_list[0][msr_idx].param = 0x0; ++ msr_idx++; ++ BUG_ON(msr_idx >= MAX_MSR_LIST_NUM); ++ break; ++ } ++ } ++ END_FOR_EACH_CCCR_REG_CPU; ++ pmi_config->exit_list[0][msr_idx].msr_id = -1; ++ } ++ ++ BUG_ON(!virt_addr_valid(pmi_config)); ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_CONFIG_PMI, ++ virt_to_phys(pmi_config)); ++ ++ pmi_config = CONTROL_Free_Memory(pmi_config); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void perfver4_Disable_PMU(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Zero out the global control register. 
This automatically disables the PMU counters. ++ * ++ */ ++static VOID perfver4_Disable_PMU(PVOID param) ++{ ++#if !defined(DRV_SEP_ACRN_ON) ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!pecb) { ++ // no programming for this device for this group ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { ++ SEP_DRV_LOG_TRACE("Driver state = %d.", GET_DRIVER_STATE()); ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++#endif ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void perfver4_Enable_PMU(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Set the enable bit for all the Control registers ++ * ++ */ ++static VOID perfver4_Enable_PMU(PVOID param) ++{ ++#if !defined(DRV_SEP_ACRN_ON) ++ /* ++ * Get the value from the event block ++ * 0 == location of the global control reg for this block. 
++ * Generalize this location awareness when possible ++ */ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ U64 global_control_val; ++ U64 pebs_enable_val; ++ DRV_BOOL multi_pebs_enabled; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!pecb) { ++ // no programming for this device for this group ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ if (KVM_guest_mode) { ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { ++ APIC_Enable_Pmi(); ++ ++ /* Clear outstanding frozen bits */ ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_OVF_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ PERFVER4_FROZEN_BIT_MASK); ++ ++ if (CPU_STATE_reset_mask(pcpu)) { ++ SEP_DRV_LOG_TRACE("Overflow reset mask %llx.", ++ CPU_STATE_reset_mask(pcpu)); ++ // Reinitialize the global overflow control register ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ CPU_STATE_reset_mask(pcpu) = 0LL; ++ } ++ if (CPU_STATE_group_swap(pcpu)) { ++ CPU_STATE_group_swap(pcpu) = 0; ++ SYS_Write_MSR( 
++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ if (DEV_CONFIG_pebs_mode(pcfg) || ++ DEV_CONFIG_latency_capture(pcfg)) { ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ } ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++#if defined(MYDEBUG) ++ { ++ U64 val; ++ val = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ SEP_DRV_LOG_TRACE( ++ "Write reg 0x%x--- read 0x%llx.", ++ ECB_entries_reg_id(pecb, 0), val); ++ } ++#endif ++ } ++ ++ multi_pebs_enabled = (DEV_CONFIG_pebs_mode(pcfg) && ++ (DEV_CONFIG_pebs_record_num(pcfg) > 1) && ++ (DRV_SETUP_INFO_page_table_isolation( ++ &req_drv_setup_info) == ++ DRV_SETUP_INFO_PTI_DISABLED)); ++ ++ // FIXME: workaround for sampling both pebs event and non-pebs event ++ // with pebs buffer size > 1 ++ if (multi_pebs_enabled) { ++ global_control_val = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ pebs_enable_val = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ if (IS_FIXED_CTR_ENABLED(global_control_val) || ++ !IS_PMC_PEBS_ENABLED_GP(global_control_val, ++ pebs_enable_val)) { ++ SEP_DRV_LOG_TRACE( ++ "Global_control_val = 0x%llx pebs_enable_val 
= 0x%llx.", ++ global_control_val, pebs_enable_val); ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ DISABLE_FRZ_ON_PMI(ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)))); ++ } ++ } ++ } ++ SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.", ++ ECB_entries_reg_value(pecb, 0)); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++#endif ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn perfver4_Read_PMU_Data(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. ++ * ++ */ ++static void perfver4_Read_PMU_Data(PVOID param) ++{ ++ U32 j; ++ U64 *buffer = read_counter_info; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++#if defined(DRV_SEP_ACRN_ON) ++ S32 start_index, cpu_idx, msr_idx; ++ struct profiling_msr_ops_list *msr_list; ++#endif ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ if (param == NULL) { ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ } else { ++ this_cpu = *(S32 *)param; ++ } ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ SEP_DRV_LOG_TRACE("PMU control_data 0x%p, buffer 0x%p.", ++ LWPMU_DEVICE_PMU_register_data(&devices[dev_idx]), ++ buffer); ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ j = EMON_BUFFER_CORE_EVENT_OFFSET( ++ EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map( ++ emon_buffer_driver_helper)[this_cpu], ++ 
ECB_entries_core_event_id(pecb, i)); ++ ++ buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j, ++ buffer[j], this_cpu, ++ ECB_entries_core_event_id(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++#else ++ if (DRV_CONFIG_counting_mode(drv_cfg) == TRUE) { ++ msr_list = (struct profiling_msr_ops_list *) ++ CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(struct profiling_msr_ops_list)); ++ memset(msr_list, 0, ++ GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(struct profiling_msr_ops_list)); ++ for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_idx++) { ++ pcpu = &pcb[cpu_idx]; ++ dev_idx = core_to_dev_map[cpu_idx]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data( ++ &devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ continue; ++ } ++ ++ msr_idx = 0; ++ FOR_EACH_DATA_REG_CPU(pecb, i, cpu_idx) ++ { ++ msr_list[cpu_idx].entries[msr_idx].msr_id = ++ ECB_entries_reg_id(pecb, i); ++ msr_list[cpu_idx].entries[msr_idx].op_type = ++ MSR_OP_READ_CLEAR; ++ msr_list[cpu_idx].entries[msr_idx].value = 0LL; ++ msr_idx++; ++ } ++ END_FOR_EACH_DATA_REG_CPU; ++ msr_list[cpu_idx].num_entries = msr_idx; ++ msr_list[cpu_idx].msr_op_state = MSR_OP_REQUESTED; ++ } ++ ++ BUG_ON(!virt_addr_valid(msr_list)); ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, ++ virt_to_phys(msr_list)); ++ ++ for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); ++ cpu_idx++) { ++ pcpu = &pcb[cpu_idx]; ++ dev_idx = core_to_dev_map[cpu_idx]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data( ++ &devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ continue; ++ } ++ ++ start_index = ECB_num_events(pecb) * cpu_idx; ++ msr_idx = 0; ++ FOR_EACH_DATA_REG_CPU(pecb, i, cpu_idx) ++ { ++ j = start_index + ++ ECB_entries_event_id_index(pecb, i); ++ buffer[j] = ++ msr_list[cpu_idx].entries[msr_idx].value; ++ 
msr_idx++; ++ } ++ END_FOR_EACH_DATA_REG_CPU; ++ } ++ ++ msr_list = CONTROL_Free_Memory(msr_list); ++ } ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void perfver4_Check_Overflow(masks) ++ * ++ * @param masks the mask structure to populate ++ * ++ * @return None No return needed ++ * ++ * @brief Called by the data processing method to figure out which registers have overflowed. ++ * ++ */ ++static void perfver4_Check_Overflow(DRV_MASKS masks) ++{ ++ U32 index; ++ U64 overflow_status = 0; ++ U32 this_cpu; ++ BUFFER_DESC bd; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ DISPATCH dispatch; ++ U64 overflow_status_clr = 0; ++ DRV_EVENT_MASK_NODE event_flag; ++ ++ SEP_DRV_LOG_TRACE_IN("Masks: %p.", masks); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ bd = &cpu_buf[this_cpu]; ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ // initialize masks ++ DRV_MASKS_masks_num(masks) = 0; ++ ++ overflow_status = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, ++ PMU_OPERATION_GLOBAL_STATUS))); ++ ++ if (DEV_CONFIG_pebs_mode(pcfg) && ++ (DEV_CONFIG_pebs_record_num(pcfg) == 1)) { ++ overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); ++ } ++ overflow_status_clr = overflow_status; ++ ++ if (dispatch->check_overflow_gp_errata) { ++ overflow_status = dispatch->check_overflow_gp_errata( ++ pecb, &overflow_status_clr); ++ } ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, ++ overflow_status); ++ index = 0; ++ BUFFER_DESC_sample_count(bd) = 0; ++ 
FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ index = i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_FIXED) + ++ 0x20; ++ if (dispatch->check_overflow_errata) { ++ overflow_status = ++ dispatch->check_overflow_errata( ++ pecb, i, overflow_status); ++ } ++ } else if (ECB_entries_is_gp_reg_get(pecb, i)) { ++ index = i - ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ } else { ++ continue; ++ } ++ if (overflow_status & ((U64)1 << index)) { ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", ++ this_cpu, index); ++ SEP_DRV_LOG_TRACE( ++ "Register 0x%x --- val 0%llx.", ++ ECB_entries_reg_id(pecb, i), ++ SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ /* Increment the interrupt count. */ ++ if (interrupt_counts) { ++ interrupt_counts ++ [this_cpu * ++ DRV_CONFIG_num_events( ++ drv_cfg) + ++ ECB_entries_event_id_index( ++ pecb, i)] += 1; ++ } ++ } ++ ++ DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; ++ if (ECB_entries_precise_get(pecb, i)) { ++ DRV_EVENT_MASK_precise(&event_flag) = 1; ++ } ++ if (ECB_entries_lbr_value_get(pecb, i)) { ++ DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; ++ } ++ if (ECB_entries_uncore_get(pecb, i)) { ++ DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; ++ } ++ if (ECB_entries_branch_evt_get(pecb, i)) { ++ DRV_EVENT_MASK_branch(&event_flag) = 1; ++ } ++ ++ if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { ++ DRV_EVENT_MASK_bitFields1( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ DRV_EVENT_MASK_bitFields1(&event_flag); ++ DRV_EVENT_MASK_event_idx( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ ECB_entries_event_id_index(pecb, i); ++ DRV_MASKS_masks_num(masks)++; ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "The array for event masks is full."); ++ } ++ ++ 
SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", ++ overflow_status, (U64)1 << index); ++ SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, ++ ECB_entries_reg_id(pecb, i), index); ++ if (ECB_entries_event_id_index(pecb, i) == ++ CPU_STATE_trigger_event_num(pcpu)) { ++ CPU_STATE_trigger_count(pcpu)--; ++ } ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ CPU_STATE_reset_mask(pcpu) = overflow_status_clr; ++ /* Clear outstanding overflow bits */ ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_OVF_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ overflow_status_clr & PERFVER4_OVERFLOW_BIT_MASK_HT_ON); ++ ++ SEP_DRV_LOG_TRACE("Check overflow completed %d.", this_cpu); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn perfver4_Swap_Group(restart) ++ * ++ * @param restart dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Perform the mechanics of swapping the event groups for event mux operations ++ * ++ * Special Notes ++ * Swap function for event multiplexing. ++ * Freeze the counting. ++ * Swap the groups. ++ * Enable the counting. 
++ * Reset the event trigger count ++ * ++ */ ++static VOID perfver4_Swap_Group(DRV_BOOL restart) ++{ ++ U32 index; ++ U32 next_group; ++ U32 st_index; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ DEV_CONFIG pcfg; ++ EVENT_CONFIG ec; ++ U32 counter_index; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy restart: %u.", restart); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ counter_index = 0; ++ ++ st_index = ++ CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); ++ next_group = (CPU_STATE_current_group(pcpu) + 1); ++ if (next_group >= EVENT_CONFIG_num_groups(ec)) { ++ next_group = 0; ++ } ++ ++ SEP_DRV_LOG_TRACE("Current group : 0x%x.", ++ CPU_STATE_current_group(pcpu)); ++ SEP_DRV_LOG_TRACE("Next group : 0x%x.", next_group); ++ ++ // Save the counters for the current group ++ if (!DRV_CONFIG_event_based_counts(drv_cfg)) { ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) ++ { ++ index = st_index + i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ CPU_STATE_em_tables(pcpu)[index] = ++ SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SEP_DRV_LOG_TRACE("Saved value for reg 0x%x : 0x%llx.", ++ ECB_entries_reg_id(pecb, i), ++ CPU_STATE_em_tables(pcpu)[index]); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ ++ CPU_STATE_current_group(pcpu) = next_group; ++ ++ if (dispatch->hw_errata) { ++ dispatch->hw_errata(); ++ } ++ ++ // First write the GP control registers (eventsel) ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_CTRL_GP) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ if (DRV_CONFIG_event_based_counts(drv_cfg)) { ++ // In EBC mode, reset the counts for all events except for trigger event ++ 
FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_event_id_index(pecb, i) != ++ CPU_STATE_trigger_event_num(pcpu)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } else { ++ // Then write the gp count registers ++ st_index = CPU_STATE_current_group(pcpu) * ++ EVENT_CONFIG_max_gp_events(ec); ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) ++ { ++ index = st_index + i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ CPU_STATE_em_tables(pcpu)[index]); ++ SEP_DRV_LOG_TRACE( ++ "Restore value for reg 0x%x : 0x%llx.", ++ ECB_entries_reg_id(pecb, i), ++ CPU_STATE_em_tables(pcpu)[index]); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_OCR) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ if (DEV_CONFIG_pebs_record_num(pcfg)) { ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_precise_get(pecb, i) == 1) { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ counter_index = ++ i - ++ ECB_operations_register_start( ++ pecb, ++ PMU_OPERATION_DATA_FIXED) + ++ 8; ++ } else { ++ counter_index = ++ i - ++ ECB_operations_register_start( ++ pecb, ++ PMU_OPERATION_DATA_GP); ++ } ++ PEBS_Reset_Counter(this_cpu, counter_index, ++ ECB_entries_reg_value(pecb, ++ i)); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ ++ /* ++ * reset the em factor when a group is swapped ++ */ ++ CPU_STATE_trigger_count(pcpu) = EVENT_CONFIG_em_factor(ec); ++ ++ /* ++ * The enable routine needs to rewrite the control registers ++ */ ++ CPU_STATE_reset_mask(pcpu) = 0LL; ++ CPU_STATE_group_swap(pcpu) = 1; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn perfver4_Initialize(params) ++ * ++ * @param params dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Initialize the PMU setting up for collection ++ * ++ * Special Notes ++ * Saves the relevant PMU state (minimal set of MSRs required ++ * to avoid conflicts with other Linux tools, such as Oprofile). ++ * This function should be called in parallel across all CPUs ++ * prior to the start of sampling, before PMU state is changed. ++ * ++ */ ++static VOID perfver4_Initialize(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ U32 cur_grp; ++ ECB pecb = NULL; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ if (pcb == NULL) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb)."); ++ return; ++ } ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ CPU_STATE_pmu_state(pcpu) = pmu_state + (this_cpu * 3); ++ if (CPU_STATE_pmu_state(pcpu) == NULL) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT( ++ "Unable to save PMU state on CPU %d.", this_cpu); ++ return; ++ } ++ ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ restore_reg_addr[0] = ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)); ++ restore_reg_addr[1] = ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)); ++ restore_reg_addr[2] = ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, FIXED_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)); ++ ++ // save the original PMU state on this CPU (NOTE: must only be called ONCE per collection) ++ CPU_STATE_pmu_state(pcpu)[0] = SYS_Read_MSR(restore_reg_addr[0]); ++ CPU_STATE_pmu_state(pcpu)[1] = SYS_Read_MSR(restore_reg_addr[1]); ++ CPU_STATE_pmu_state(pcpu)[2] = SYS_Read_MSR(restore_reg_addr[2]); ++ ++ if 
(DRV_CONFIG_ds_area_available(drv_cfg) && ++ DEV_CONFIG_pebs_mode(pcfg)) { ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ ++ SEP_DRV_LOG_TRACE("Saving PMU state on CPU %d:", this_cpu); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x%llx.", ++ CPU_STATE_pmu_state(pcpu)[0]); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x%llx.", ++ CPU_STATE_pmu_state(pcpu)[1]); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x%llx.", ++ CPU_STATE_pmu_state(pcpu)[2]); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn perfver4_Destroy(params) ++ * ++ * @param params dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Reset the PMU setting up after collection ++ * ++ * Special Notes ++ * Restores the previously saved PMU state done in pmv_v4_Initialize. ++ * This function should be called in parallel across all CPUs ++ * after sampling collection ends/terminates. 
++ * ++ */ ++static VOID perfver4_Destroy(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ if (pcb == NULL) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb)."); ++ return; ++ } ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (CPU_STATE_pmu_state(pcpu) == NULL) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT( ++ "Unable to restore PMU state on CPU %d.", this_cpu); ++ return; ++ } ++ ++ SEP_DRV_LOG_TRACE("Clearing PMU state on CPU %d:", this_cpu); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x0."); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x0."); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x0."); ++ ++ SYS_Write_MSR(restore_reg_addr[0], 0); ++ SYS_Write_MSR(restore_reg_addr[1], 0); ++ SYS_Write_MSR(restore_reg_addr[2], 0); ++ ++ CPU_STATE_pmu_state(pcpu) = NULL; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn perfver4_Read_LBRs(buffer) ++ * ++ * @param IN buffer - pointer to the buffer to write the data into ++ * @return Last branch source IP address ++ * ++ * @brief Read all the LBR registers into the buffer provided and return ++ * ++ */ ++static U64 perfver4_Read_LBRs(VOID *buffer, PVOID data) ++{ ++ U32 i, count = 0; ++ U64 *lbr_buf = NULL; ++ U64 value = 0; ++ U64 tos_ip_addr = 0; ++ U64 tos_ptr = 0; ++ SADDR saddr; ++ U32 pairs = 0; ++ U32 this_cpu; ++ U32 dev_idx; ++ LBR lbr; ++ DEV_CONFIG pcfg; ++#if defined(DRV_SEP_ACRN_ON) ++ struct lbr_pmu_sample *lbr_data = NULL; ++#endif ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); ++ ++ if (lbr == NULL) { ++ return 0; ++ } ++ ++#if defined(DRV_SEP_ACRN_ON) ++ if (data == NULL) { ++ return 0; ++ } ++ lbr_data = (struct lbr_pmu_sample *)data; ++#endif ++ ++ if 
(buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ lbr_buf = (U64 *)buffer; ++ } ++ ++ if (LBR_num_entries(lbr) > 0) { ++ pairs = (LBR_num_entries(lbr) - 1) / 3; ++ } ++ for (i = 0; i < LBR_num_entries(lbr); i++) { ++#if !defined(DRV_SEP_ACRN_ON) ++ value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); ++#else ++ if (i == 0) { ++ value = lbr_data->lbr_tos; ++ } else { ++ if (LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP) { ++ value = lbr_data->lbr_from_ip[i - 1]; ++ } else if (LBR_entries_etype(lbr, i) == ++ LBR_ENTRY_TO_IP) { ++ value = lbr_data->lbr_to_ip[i - pairs - 1]; ++ } else { ++ value = lbr_data->lbr_info[i - 2 * pairs - 1]; ++ } ++ } ++#endif ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ *lbr_buf = value; ++ } ++ if (DEV_CONFIG_collect_callstacks(pcfg)) { ++ if ((LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP && ++ i > tos_ptr + 1) || ++ (LBR_entries_etype(lbr, i) == LBR_ENTRY_TO_IP && ++ i > tos_ptr + pairs + 1) || ++ (LBR_entries_etype(lbr, i) == LBR_ENTRY_INFO && ++ i > tos_ptr + 2 * pairs + 1)) { ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ *lbr_buf = 0x0ULL; ++ lbr_buf++; ++ } ++ continue; ++ } ++ } ++ SEP_DRV_LOG_TRACE("LBR %u, 0x%llx.", i, value); ++ if (i == 0) { ++ tos_ptr = value; ++ } else { ++ if (LBR_entries_etype(lbr, i) == ++ LBR_ENTRY_FROM_IP) { // LBR from register ++ if (tos_ptr == count) { ++ SADDR_addr(saddr) = ++ value & PERFVER4_LBR_BITMASK; ++ tos_ip_addr = (U64)SADDR_addr( ++ saddr); // Add signed extension ++ SEP_DRV_LOG_TRACE( ++ "Tos_ip_addr %llu, 0x%llx.", ++ tos_ptr, value); ++ } ++ count++; ++ } ++ } ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ lbr_buf++; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); ++ return tos_ip_addr; ++} ++ ++/* ++ * @fn perfver4_Clean_Up(param) ++ * ++ * @param IN param - currently not used ++ * ++ * @brief Clean up registers in ECB ++ * ++ */ ++static VOID perfver4_Clean_Up(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb = NULL; ++ U32 dev_idx; ++ U32 
cur_grp; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) ++ { ++ if (ECB_entries_clean_up_get(pecb, i)) { ++ SEP_DRV_LOG_TRACE("Clean up set --- RegId --- %x.", ++ ECB_entries_reg_id(pecb, i)); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ /* Clear outstanding frozen bits */ ++ if (pecb) { ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_OVF_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ PERFVER4_FROZEN_BIT_MASK); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++ return; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void perfver4_Check_Overflow_Htoff_Mode(masks) ++ * ++ * @param masks the mask structure to populate ++ * ++ * @return None No return needed ++ * ++ * @brief Called by the data processing method to figure out which registers have overflowed. 
++ * ++ */ ++static void perfver4_Check_Overflow_Htoff_Mode(DRV_MASKS masks) ++{ ++ U32 index; ++ U64 value = 0; ++ U64 overflow_status = 0; ++ U32 this_cpu; ++ BUFFER_DESC bd; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DISPATCH dispatch; ++ DEV_CONFIG pcfg; ++ U64 overflow_status_clr = 0; ++ DRV_EVENT_MASK_NODE event_flag; ++ ++ SEP_DRV_LOG_TRACE_IN("Masks: %p.", masks); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ bd = &cpu_buf[this_cpu]; ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ // initialize masks ++ DRV_MASKS_masks_num(masks) = 0; ++ ++ overflow_status = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, ++ PMU_OPERATION_GLOBAL_STATUS))); ++ ++ if (DEV_CONFIG_pebs_mode(pcfg) && ++ (DEV_CONFIG_pebs_record_num(pcfg) == 1)) { ++ overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); ++ } ++ overflow_status_clr = overflow_status; ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, ++ overflow_status); ++ index = 0; ++ BUFFER_DESC_sample_count(bd) = 0; ++ ++ if (dispatch->check_overflow_gp_errata) { ++ overflow_status = dispatch->check_overflow_gp_errata( ++ pecb, &overflow_status_clr); ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ index = i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_FIXED) + ++ 0x20; ++ } else if (ECB_entries_is_gp_reg_get(pecb, i) && ++ ECB_entries_reg_value(pecb, i) != 0) { ++ index = i - ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ if (index >= 4 && index <= 7) { ++ value = SYS_Read_MSR( ++ 
ECB_entries_reg_id(pecb, i)); ++ if (value > 0 && value <= 0x100000000LL) { ++ overflow_status |= ((U64)1 << index); ++ } ++ } ++ } else { ++ continue; ++ } ++ if (overflow_status & ((U64)1 << index)) { ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", ++ this_cpu, index); ++ SEP_DRV_LOG_TRACE( ++ "Register 0x%x --- val 0%llx.", ++ ECB_entries_reg_id(pecb, i), ++ SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ /* Increment the interrupt count. */ ++ if (interrupt_counts) { ++ interrupt_counts ++ [this_cpu * ++ DRV_CONFIG_num_events( ++ drv_cfg) + ++ ECB_entries_event_id_index( ++ pecb, i)] += 1; ++ } ++ } ++ ++ DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; ++ if (ECB_entries_precise_get(pecb, i)) { ++ DRV_EVENT_MASK_precise(&event_flag) = 1; ++ } ++ if (ECB_entries_lbr_value_get(pecb, i)) { ++ DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; ++ } ++ ++ if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { ++ DRV_EVENT_MASK_bitFields1( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ DRV_EVENT_MASK_bitFields1(&event_flag); ++ DRV_EVENT_MASK_event_idx( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ ECB_entries_event_id_index(pecb, i); ++ DRV_MASKS_masks_num(masks)++; ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "The array for event masks is full."); ++ } ++ ++ SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", ++ overflow_status, (U64)1 << index); ++ SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, ++ ECB_entries_reg_id(pecb, i), index); ++ if (ECB_entries_event_id_index(pecb, i) == ++ CPU_STATE_trigger_event_num(pcpu)) { ++ CPU_STATE_trigger_count(pcpu)--; ++ } ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ CPU_STATE_reset_mask(pcpu) = overflow_status_clr; ++ /* Clear outstanding overflow bits */ ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, 
GLOBAL_OVF_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ overflow_status_clr & PERFVER4_OVERFLOW_BIT_MASK_HT_OFF); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++#define MAX_COUNTER 0xFFFFFFFFFFFFLLU ++#define FIXED_CTR3_BIT_INDEX 35 ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void perfver4_Check_Overflow_Nonht_Mode(masks) ++ * ++ * @param masks the mask structure to populate ++ * ++ * @return None No return needed ++ * ++ * @brief Called by the data processing method to figure out which registers have overflowed. ++ * ++ */ ++static VOID perfver4_Check_Overflow_Nonht_Mode(DRV_MASKS masks) ++{ ++ U32 index; ++ U64 overflow_status = 0; ++ U32 this_cpu = CONTROL_THIS_CPU(); ++ BUFFER_DESC bd = &cpu_buf[this_cpu]; ++ CPU_STATE pcpu = &pcb[this_cpu]; ++ U32 dev_idx = core_to_dev_map[this_cpu]; ++ U32 cur_grp = CPU_STATE_current_group(pcpu); ++ DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ECB pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ U64 overflow_status_clr = 0; ++ DRV_EVENT_MASK_NODE event_flag; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ // initialize masks ++ DRV_MASKS_masks_num(masks) = 0; ++ ++ overflow_status = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, ++ PMU_OPERATION_GLOBAL_STATUS))); ++ ++ if (DEV_CONFIG_pebs_mode(pcfg) && ++ (DEV_CONFIG_pebs_record_num(pcfg) == 1)) { ++ overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); ++ } ++ overflow_status_clr = overflow_status; ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, ++ overflow_status); ++ index = 0; ++ BUFFER_DESC_sample_count(bd) = 0; ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ index = i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_FIXED) + ++ 0x20; ++ } else 
if (ECB_entries_is_gp_reg_get(pecb, i) && ++ ECB_entries_reg_value(pecb, i) != 0) { ++ index = i - ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ } else { ++ continue; ++ } ++ if (overflow_status & ((U64)1 << index)) { ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", ++ this_cpu, index); ++ SEP_DRV_LOG_TRACE( ++ "register 0x%x --- val 0%llx.", ++ ECB_entries_reg_id(pecb, i), ++ SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); ++ ++ DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; ++ if (DEV_CONFIG_enable_perf_metrics(pcfg) && ++ index == FIXED_CTR3_BIT_INDEX) { ++ perf_metrics_counter_reload_value = ++ ECB_entries_reg_value( ++ pecb, i); // saving reload value ++ // Writing positive SAV into data register before reading metrics ++ SYS_Write_MSR( ++ ECB_entries_reg_id(pecb, i), ++ ((~(ECB_entries_reg_value(pecb, i)) + ++ 1) & ++ MAX_COUNTER)); ++ DRV_EVENT_MASK_perf_metrics_capture( ++ &event_flag) = 1; ++ } else { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ } ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ /* Increment the interrupt count. 
*/ ++ if (interrupt_counts) { ++ interrupt_counts ++ [this_cpu * ++ DRV_CONFIG_num_events( ++ drv_cfg) + ++ ECB_entries_event_id_index( ++ pecb, i)] += 1; ++ } ++ } ++ ++ if (ECB_entries_precise_get(pecb, i)) { ++ DRV_EVENT_MASK_precise(&event_flag) = 1; ++ } ++ if (ECB_entries_lbr_value_get(pecb, i)) { ++ DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; ++ } ++ if (ECB_entries_uncore_get(pecb, i)) { ++ DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; ++ } ++ if (ECB_entries_branch_evt_get(pecb, i)) { ++ DRV_EVENT_MASK_branch(&event_flag) = 1; ++ } ++ ++ if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { ++ DRV_EVENT_MASK_bitFields1( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ DRV_EVENT_MASK_bitFields1(&event_flag); ++ DRV_EVENT_MASK_event_idx( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ ECB_entries_event_id_index(pecb, i); ++ DRV_MASKS_masks_num(masks)++; ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "The array for event masks is full."); ++ } ++ ++ SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", ++ overflow_status, (U64)1 << index); ++ SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, ++ ECB_entries_reg_id(pecb, i), index); ++ if (ECB_entries_event_id_index(pecb, i) == ++ CPU_STATE_trigger_event_num(pcpu)) { ++ CPU_STATE_trigger_count(pcpu)--; ++ } ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ CPU_STATE_reset_mask(pcpu) = overflow_status_clr; ++ /* Clear outstanding overflow bits */ ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_OVF_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ overflow_status_clr & PERFVER4_OVERFLOW_BIT_MASK_NON_HT); ++ ++ SEP_DRV_LOG_TRACE("Check Overflow completed %d.", this_cpu); ++} ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn void perfver4_Read_Power(buffer) ++ * ++ * @param buffer - pointer to the buffer to write the data into ++ * ++ * @return None No return needed ++ * ++ * @brief Read all the power MSRs into the buffer provided and return. ++ * ++ */ ++static VOID perfver4_Read_Power(VOID *buffer) ++{ ++ U32 i; ++ U64 *pwr_buf = (U64 *)buffer; ++ U32 this_cpu; ++ U32 dev_idx; ++ PWR pwr; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pwr = LWPMU_DEVICE_pwr(&devices[dev_idx]); ++ ++ for (i = 0; i < PWR_num_entries(pwr); i++) { ++ *pwr_buf = SYS_Read_MSR(PWR_entries_reg_id(pwr, i)); ++ pwr_buf++; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn perfver4_Read_Counts(param, id) ++ * ++ * @param param The read thread node to process ++ * @param id The event id for the which the sample is generated ++ * ++ * @return None No return needed ++ * ++ * @brief Read CPU event based counts data and store into the buffer param; ++ * For the case of the trigger event, store the SAV value. 
++ */ ++static VOID perfver4_Read_Counts(PVOID param, U32 id) ++{ ++ U64 *data; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ U32 event_id = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (DEV_CONFIG_ebc_group_id_offset(pcfg)) { ++ // Write GroupID ++ data = (U64 *)((S8 *)param + ++ DEV_CONFIG_ebc_group_id_offset(pcfg)); ++ *data = CPU_STATE_current_group(pcpu) + 1; ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_counter_event_offset(pecb, i) == 0) { ++ continue; ++ } ++ data = (U64 *)((S8 *)param + ++ ECB_entries_counter_event_offset(pecb, i)); ++ event_id = ECB_entries_event_id_index(pecb, i); ++ if (event_id == id) { ++ *data = ~(ECB_entries_reg_value(pecb, i) - 1) & ++ ECB_entries_max_bits(pecb, i); ++ ; ++ } else { ++ *data = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn perfver4_Read_Metrics(buffer, id) ++ * ++ * @param param buffer to write metrics into ++ * ++ * @return None No return needed ++ * ++ * @brief Read hardware metrics from IA32_PERF_METRICS MSR ++ */ ++static VOID perfver4_Read_Metrics(PVOID buffer) ++{ ++ U64 *data, metrics = 0; ++ U32 j; ++ U32 this_cpu = CONTROL_THIS_CPU(); ++ U32 dev_idx = core_to_dev_map[this_cpu]; ++ DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ data = (U64 *)buffer; ++ FOR_EACH_NONEVENT_REG(pecb, i) ++ { ++ metrics = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ for (j = 0; j < DEV_CONFIG_num_perf_metrics(pcfg); j++) { ++ *data = (metrics & (0xFFULL << 8 * j)) >> 8 * j; ++ data++; ++ } ++ } ++ END_FOR_EACH_NONEVENT_REG; ++ ++ if (DRV_CONFIG_emon_mode(drv_cfg)) { ++ SYS_Write_MSR(IA32_FIXED_CTR3, 0LL); ++ } else { ++ SYS_Write_MSR(IA32_FIXED_CTR3, ++ perf_metrics_counter_reload_value); ++ perf_metrics_counter_reload_value = 0; ++ } ++ ++ SYS_Write_MSR(IA32_PERF_METRICS, 0LL); ++} ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 perfver4_Platform_Info ++ * ++ * @brief Reads the MSR_PLATFORM_INFO register if present ++ * ++ * @param void ++ * ++ * @return value read from the register ++ * ++ * Special Notes: ++ * ++ */ ++static VOID perfver4_Platform_Info(PVOID data) ++{ ++ DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; ++ U64 value = 0; ++ U64 energy_multiplier; ++ ++ SEP_DRV_LOG_TRACE_IN("Data: %p.", data); ++ ++ if (!platform_data) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!platform_data)."); ++ return; ++ } ++ ++#define IA32_MSR_PLATFORM_INFO 0xCE ++ value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); ++ ++ DRV_PLATFORM_INFO_info(platform_data) = value; ++ DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; ++ ++#define IA32_MSR_MISC_ENABLE 0x1A4 ++ DRV_PLATFORM_INFO_misc_valid(platform_data) = 1; ++ value = SYS_Read_MSR(IA32_MSR_MISC_ENABLE); ++ DRV_PLATFORM_INFO_misc_info(platform_data) = value; ++#undef IA32_MSR_MISC_ENABLE ++ ++ energy_multiplier = SYS_Read_MSR(MSR_ENERGY_MULTIPLIER); ++ SEP_DRV_LOG_TRACE("MSR_ENERGY_MULTIPLIER: %llx.", energy_multiplier); ++ DRV_PLATFORM_INFO_energy_multiplier(platform_data) = ++ (U32)(energy_multiplier & 0x00001F00) >> 8; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++DISPATCH_NODE perfver4_dispatch = { .init = perfver4_Initialize, ++ .fini = perfver4_Destroy, ++ .write = perfver4_Write_PMU, ++ .freeze = perfver4_Disable_PMU, ++ .restart = perfver4_Enable_PMU, ++ .read_data = perfver4_Read_PMU_Data, ++ .check_overflow = perfver4_Check_Overflow, ++ .swap_group = perfver4_Swap_Group, ++ .read_lbrs = perfver4_Read_LBRs, ++ .cleanup = perfver4_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = perfver4_Read_Power, ++ .check_overflow_errata = NULL, ++ .read_counts = perfver4_Read_Counts, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = perfver4_Platform_Info, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; ++ ++DISPATCH_NODE 
perfver4_dispatch_htoff_mode = { ++ .init = perfver4_Initialize, ++ .fini = perfver4_Destroy, ++ .write = perfver4_Write_PMU, ++ .freeze = perfver4_Disable_PMU, ++ .restart = perfver4_Enable_PMU, ++ .read_data = perfver4_Read_PMU_Data, ++ .check_overflow = perfver4_Check_Overflow_Htoff_Mode, ++ .swap_group = perfver4_Swap_Group, ++ .read_lbrs = perfver4_Read_LBRs, ++ .cleanup = perfver4_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = perfver4_Read_Power, ++ .check_overflow_errata = NULL, ++ .read_counts = perfver4_Read_Counts, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = perfver4_Platform_Info, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL ++}; ++ ++DISPATCH_NODE perfver4_dispatch_nonht_mode = { ++ .init = perfver4_Initialize, ++ .fini = perfver4_Destroy, ++ .write = perfver4_Write_PMU, ++ .freeze = perfver4_Disable_PMU, ++ .restart = perfver4_Enable_PMU, ++ .read_data = perfver4_Read_PMU_Data, ++ .check_overflow = perfver4_Check_Overflow_Nonht_Mode, ++ .swap_group = perfver4_Swap_Group, ++ .read_lbrs = perfver4_Read_LBRs, ++ .cleanup = perfver4_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = perfver4_Read_Power, ++ .check_overflow_errata = NULL, ++ .read_counts = perfver4_Read_Counts, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = perfver4_Platform_Info, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = perfver4_Read_Metrics ++}; +diff --git a/drivers/platform/x86/sepdk/sep/pmi.c b/drivers/platform/x86/sepdk/sep/pmi.c +new file mode 100755 +index 000000000000..44f335dbc885 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/pmi.c +@@ -0,0 +1,640 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#if defined(DRV_EM64T) ++#include ++#endif ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++#include "apic.h" ++#include "lwpmudrv.h" ++#include "output.h" ++#include "control.h" ++#include "pmi.h" ++#include "utility.h" ++#include "pebs.h" ++#include "ecb_iterators.h" ++#include "msrdefs.h" ++ ++#if defined(BUILD_CHIPSET) ++#include "lwpmudrv_chipset.h" ++#endif ++#include "sepdrv_p_state.h" ++ ++// Desc id #0 is used for module records ++#define COMPUTE_DESC_ID(index) ((index)) ++ ++extern DRV_CONFIG drv_cfg; ++extern uid_t uid; ++extern DRV_SETUP_INFO_NODE req_drv_setup_info; ++#define EFLAGS_V86_MASK 0x00020000L ++ ++/********************************************************************* ++ * Global Variables / State ++ *********************************************************************/ ++ ++/********************************************************************* ++ * Interrupt Handler ++ *********************************************************************/ ++ ++/* ++ * PMI_Interrupt_Handler ++ * Arguments ++ * IntFrame - Pointer to the Interrupt Frame ++ * ++ * Returns ++ * None ++ * ++ * Description ++ * Grab the data that is needed to populate the sample records ++ */ ++#if defined(DRV_EM64T) ++#define IS_LDT_BIT 0x4 ++#define SEGMENT_SHIFT 3 ++IDTGDT_DESC gdt_desc; ++ ++U32 pmi_Get_CSD(U32 seg, U32 *low, U32 *high) ++{ ++ PVOID gdt_max_addr; ++ struct desc_struct *gdt; ++ CodeDescriptor *csd; ++ ++ SEP_DRV_LOG_TRACE_IN("Seg: %u, low: %p, high: %p.", seg, low, high); ++ ++ gdt_max_addr = ++ (PVOID)(((U64)gdt_desc.idtgdt_base) + gdt_desc.idtgdt_limit); ++ gdt = gdt_desc.idtgdt_base; ++ ++ if (seg & IS_LDT_BIT) { ++ *low = 0; ++ *high = 0; ++ SEP_DRV_LOG_TRACE_OUT("FALSE [%u, %u] (IS_LDT_BIT).", *low, ++ *high); ++ return FALSE; ++ } ++ 
++ // segment offset is based on dropping the bottom 3 bits... ++ csd = (CodeDescriptor *)&(gdt[seg >> SEGMENT_SHIFT]); ++ ++ if (((PVOID)csd) >= gdt_max_addr) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT( ++ "FALSE (segment too big in get_CSD(0x%x)!).", seg); ++ return FALSE; ++ } ++ ++ *low = csd->u1.lowWord; ++ *high = csd->u2.highWord; ++ ++ SEP_DRV_LOG_TRACE("Seg 0x%x, low %08x, high %08x, reserved_0: %d.", seg, ++ *low, *high, csd->u2.s2.reserved_0); ++ SEP_DRV_LOG_TRACE_OUT("TRUE [%u, %u].", *low, *high); ++ ++ return TRUE; ++} ++#endif ++ ++asmlinkage VOID PMI_Interrupt_Handler(struct pt_regs *regs) ++{ ++ SampleRecordPC *psamp; ++ CPU_STATE pcpu; ++ BUFFER_DESC bd; ++#if defined(DRV_IA32) ++ U32 csdlo; // low half code seg descriptor ++ U32 csdhi; // high half code seg descriptor ++ U32 seg_cs; // code seg selector ++#endif ++ DRV_MASKS_NODE event_mask; ++ U32 this_cpu; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ DEV_CONFIG pcfg; ++ U32 i; ++ U32 is_64bit_addr = FALSE; ++ U32 pid; ++ U32 tid; ++ U64 tsc; ++ U32 desc_id; ++ EVENT_DESC evt_desc; ++ U32 accept_interrupt = 1; ++#if defined(SECURE_SEP) ++ uid_t l_uid; ++#endif ++ U64 lbr_tos_from_ip = 0; ++ DRV_BOOL multi_pebs_enabled; ++ ++ SEP_DRV_LOG_INTERRUPT_IN( ++ "PID: %d, TID: %d.", current->pid, ++ GET_CURRENT_TGID()); // needs to be before function calls for the tracing to make sense ++ // may later want to separate the INTERRUPT_IN from the PID/TID logging ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ bd = &cpu_buf[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ multi_pebs_enabled = ++ (DEV_CONFIG_pebs_mode(pcfg) && ++ (DEV_CONFIG_pebs_record_num(pcfg) > 1) && ++ (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == ++ DRV_SETUP_INFO_PTI_DISABLED)); ++ SYS_Locked_Inc(&CPU_STATE_in_interrupt( ++ pcpu)); // needs to be before dispatch->freeze to ensure printk is never called from 
an interrupt ++ ++ // Disable the counter control ++ dispatch->freeze(NULL); ++ ++ CPU_STATE_nmi_handled(&pcb[this_cpu])++; ++ ++#if defined(SECURE_SEP) ++ l_uid = DRV_GET_UID(current); ++ accept_interrupt = (l_uid == uid); ++#endif ++ dispatch->check_overflow(&event_mask); ++ if (GET_DRIVER_STATE() != DRV_STATE_RUNNING || ++ CPU_STATE_accept_interrupt(&pcb[this_cpu]) != 1) { ++ goto pmi_cleanup; ++ } ++ ++ pid = GET_CURRENT_TGID(); ++ tid = current->pid; ++ ++ if (DRV_CONFIG_target_pid(drv_cfg) > 0 && ++ pid != DRV_CONFIG_target_pid(drv_cfg)) { ++ accept_interrupt = 0; ++ } ++ ++ if (accept_interrupt == 0) { ++ goto pmi_cleanup; ++ } ++ UTILITY_Read_TSC(&tsc); ++ if (multi_pebs_enabled && PEBS_Get_Num_Records_Filled() > 0) { ++ PEBS_Flush_Buffer(NULL); ++ } ++ ++ SEP_DRV_LOG_TRACE("Nb overflowed events: %d.", event_mask.masks_num); ++ for (i = 0; i < event_mask.masks_num; i++) { ++ if (multi_pebs_enabled && ++ (DRV_EVENT_MASK_precise(&event_mask.eventmasks[i]))) { ++ continue; ++ } ++ if (DRV_CONFIG_event_based_counts(drv_cfg) == 0) { ++ desc_id = COMPUTE_DESC_ID(DRV_EVENT_MASK_event_idx( ++ &event_mask.eventmasks[i])); ++ } else { ++ desc_id = CPU_STATE_current_group(pcpu); ++ } ++ evt_desc = desc_data[desc_id]; ++ psamp = (SampleRecordPC *)OUTPUT_Reserve_Buffer_Space( ++ bd, EVENT_DESC_sample_size(evt_desc), ++ (NMI_mode) ? 
TRUE : FALSE, !SEP_IN_NOTIFICATION, ++ (S32)this_cpu); ++ ++ if (!psamp) { ++ continue; ++ } ++ lbr_tos_from_ip = 0; ++ CPU_STATE_num_samples(pcpu) += 1; ++ SAMPLE_RECORD_descriptor_id(psamp) = desc_id; ++ SAMPLE_RECORD_tsc(psamp) = tsc; ++ SAMPLE_RECORD_pid_rec_index_raw(psamp) = 1; ++ SAMPLE_RECORD_pid_rec_index(psamp) = pid; ++ SAMPLE_RECORD_tid(psamp) = tid; ++ SAMPLE_RECORD_cpu_num(psamp) = (U16)this_cpu; ++#if defined(DRV_IA32) ++ SAMPLE_RECORD_eip(psamp) = REGS_eip(regs); ++ SAMPLE_RECORD_eflags(psamp) = REGS_eflags(regs); ++ SAMPLE_RECORD_cs(psamp) = (U16)REGS_xcs(regs); ++ ++ if (SAMPLE_RECORD_eflags(psamp) & EFLAGS_V86_MASK) { ++ csdlo = 0; ++ csdhi = 0; ++ } else { ++ seg_cs = SAMPLE_RECORD_cs(psamp); ++ SYS_Get_CSD(seg_cs, &csdlo, &csdhi); ++ } ++ SAMPLE_RECORD_csd(psamp).u1.lowWord = csdlo; ++ SAMPLE_RECORD_csd(psamp).u2.highWord = csdhi; ++#elif defined(DRV_EM64T) ++ SAMPLE_RECORD_cs(psamp) = (U16)REGS_cs(regs); ++ ++ pmi_Get_CSD(SAMPLE_RECORD_cs(psamp), ++ &SAMPLE_RECORD_csd(psamp).u1.lowWord, ++ &SAMPLE_RECORD_csd(psamp).u2.highWord); ++#endif ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_pid_rec_index(psamp) %x.", ++ SAMPLE_RECORD_pid_rec_index(psamp)); ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_tid(psamp) %x.", ++ SAMPLE_RECORD_tid(psamp)); ++#if defined(DRV_IA32) ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eip(psamp) %x.", ++ SAMPLE_RECORD_eip(psamp)); ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eflags(psamp) %x.", ++ SAMPLE_RECORD_eflags(psamp)); ++#endif ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_cpu_num(psamp) %x.", ++ SAMPLE_RECORD_cpu_num(psamp)); ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_cs(psamp) %x.", ++ SAMPLE_RECORD_cs(psamp)); ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_csd(psamp).lowWord %x.", ++ SAMPLE_RECORD_csd(psamp).u1.lowWord); ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_csd(psamp).highWord %x.", ++ SAMPLE_RECORD_csd(psamp).u2.highWord); ++ ++#if defined(DRV_EM64T) ++ is_64bit_addr = ++ (SAMPLE_RECORD_csd(psamp).u2.s2.reserved_0 == 1); ++ if (is_64bit_addr) { ++ SAMPLE_RECORD_iip(psamp) = 
REGS_rip(regs); ++ SAMPLE_RECORD_ipsr(psamp) = ++ (REGS_eflags(regs) & 0xffffffff) | ++ (((U64)SAMPLE_RECORD_csd(psamp).u2.s2.dpl) ++ << 32); ++ SAMPLE_RECORD_ia64_pc(psamp) = TRUE; ++ } else { ++ SAMPLE_RECORD_eip(psamp) = REGS_rip(regs); ++ SAMPLE_RECORD_eflags(psamp) = REGS_eflags(regs); ++ SAMPLE_RECORD_ia64_pc(psamp) = FALSE; ++ ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eip(psamp) 0x%x.", ++ SAMPLE_RECORD_eip(psamp)); ++ SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eflags(psamp) %x.", ++ SAMPLE_RECORD_eflags(psamp)); ++ } ++#endif ++ ++ SAMPLE_RECORD_event_index(psamp) = ++ DRV_EVENT_MASK_event_idx(&event_mask.eventmasks[i]); ++ if (DEV_CONFIG_pebs_mode(pcfg) && ++ DRV_EVENT_MASK_precise(&event_mask.eventmasks[i])) { ++ if (EVENT_DESC_pebs_offset(evt_desc) || ++ EVENT_DESC_latency_offset_in_sample(evt_desc)) { ++ lbr_tos_from_ip = PEBS_Fill_Buffer((S8 *)psamp, ++ evt_desc, 0); ++ } ++ PEBS_Modify_IP((S8 *)psamp, is_64bit_addr, 0); ++ PEBS_Modify_TSC((S8 *)psamp, 0); ++ } ++ if (DEV_CONFIG_collect_lbrs(pcfg) && ++ DRV_EVENT_MASK_lbr_capture(&event_mask.eventmasks[i]) && ++ !DEV_CONFIG_apebs_collect_lbrs(pcfg)) { ++ lbr_tos_from_ip = dispatch->read_lbrs( ++ !DEV_CONFIG_store_lbrs(pcfg) ? 
++ NULL : ++ ((S8 *)(psamp) + ++ EVENT_DESC_lbr_offset(evt_desc)), ++ NULL); ++ } ++ if (DRV_EVENT_MASK_branch(&event_mask.eventmasks[i]) && ++ DEV_CONFIG_precise_ip_lbrs(pcfg) && lbr_tos_from_ip) { ++ if (is_64bit_addr) { ++ SAMPLE_RECORD_iip(psamp) = lbr_tos_from_ip; ++ SEP_DRV_LOG_TRACE( ++ "UPDATED SAMPLE_RECORD_iip(psamp) 0x%llx.", ++ SAMPLE_RECORD_iip(psamp)); ++ } else { ++ SAMPLE_RECORD_eip(psamp) = (U32)lbr_tos_from_ip; ++ SEP_DRV_LOG_TRACE( ++ "UPDATED SAMPLE_RECORD_eip(psamp) 0x%x.", ++ SAMPLE_RECORD_eip(psamp)); ++ } ++ } ++ if (DEV_CONFIG_power_capture(pcfg)) { ++ dispatch->read_power( ++ ((S8 *)(psamp) + ++ EVENT_DESC_power_offset_in_sample(evt_desc))); ++ } ++ ++#if defined(BUILD_CHIPSET) ++ if (DRV_CONFIG_enable_chipset(drv_cfg)) { ++ cs_dispatch->read_counters( ++ ((S8 *)(psamp) + ++ DRV_CONFIG_chipset_offset(drv_cfg))); ++ } ++#endif ++ if (DRV_CONFIG_event_based_counts(drv_cfg)) { ++ dispatch->read_counts( ++ (S8 *)psamp, ++ DRV_EVENT_MASK_event_idx( ++ &event_mask.eventmasks[i])); ++ } ++ if (DEV_CONFIG_enable_perf_metrics(pcfg) && ++ DRV_EVENT_MASK_perf_metrics_capture( ++ &event_mask.eventmasks[i])) { ++ dispatch->read_metrics( ++ (S8 *)(psamp) + ++ EVENT_DESC_perfmetrics_offset(evt_desc)); ++ } ++ if (DRV_CONFIG_enable_p_state(drv_cfg)) { ++ if (DRV_CONFIG_read_pstate_msrs(drv_cfg) && ++ (DRV_CONFIG_p_state_trigger_index(drv_cfg) == -1 || ++ SAMPLE_RECORD_event_index(psamp) == ++ DRV_CONFIG_p_state_trigger_index( ++ drv_cfg))) { ++ SEPDRV_P_STATE_Read( ++ (S8 *)(psamp) + ++ EVENT_DESC_p_state_offset( ++ evt_desc), ++ pcpu); ++ } ++ if (!DRV_CONFIG_event_based_counts(drv_cfg) && ++ CPU_STATE_p_state_counting(pcpu)) { ++ dispatch->read_counts( ++ (S8 *)psamp, ++ DRV_EVENT_MASK_event_idx( ++ &event_mask.eventmasks[i])); ++ } ++ } ++ } ++ ++pmi_cleanup: ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ if (!multi_pebs_enabled) { ++ PEBS_Reset_Index(this_cpu); ++ } else { ++ if (cpu_sideband_buf) { ++ OUTPUT outbuf = &BUFFER_DESC_outbuf( ++ 
&cpu_sideband_buf[this_cpu]); ++ if (OUTPUT_signal_full(outbuf) && ++ !OUTPUT_tasklet_queued(outbuf)) { ++ SEP_DRV_LOG_TRACE( ++ "Interrupt-driven sideband buffer flush tasklet scheduling."); ++ OUTPUT_tasklet_queued(outbuf) = TRUE; ++ tasklet_schedule(&CPU_STATE_nmi_tasklet( ++ &pcb[this_cpu])); ++ } ++ } ++ } ++ } ++ ++ // Reset the data counters ++ if (CPU_STATE_trigger_count(&pcb[this_cpu]) == 0) { ++ dispatch->swap_group(FALSE); ++ } ++ // Re-enable the counter control ++ dispatch->restart(NULL); ++ SYS_Locked_Dec(&CPU_STATE_in_interrupt( ++ &pcb[this_cpu])); // do not use SEP_DRV_LOG_X (where X != INTERRUPT) below this ++ ++ SEP_DRV_LOG_INTERRUPT_OUT(""); ++} ++ ++#if defined(DRV_SEP_ACRN_ON) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn S32 PMI_Buffer_Handler(PVOID data) ++ * ++ * @param data - Pointer to data ++ * ++ * @return S32 ++ * ++ * @brief Handle the PMI sample data in buffer ++ * ++ * Special Notes ++ */ ++S32 PMI_Buffer_Handler(PVOID data) ++{ ++ SampleRecordPC *psamp; ++ CPU_STATE pcpu; ++ BUFFER_DESC bd; ++ S32 cpu_id, j; ++ U32 desc_id; ++ EVENT_DESC evt_desc; ++ U64 lbr_tos_from_ip = 0; ++ ECB pecb; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ DEV_CONFIG pcfg; ++ ++ struct data_header header; ++ struct pmu_sample psample; ++ S32 data_size, payload_size, expected_payload_size, index; ++ U64 overflow_status = 0; ++ ++ if (!pcb || !cpu_buf || !devices) { ++ return 0; ++ } ++ cpu_id = (S32)(size_t)data; ++ ++ pcpu = &pcb[cpu_id]; ++ bd = &cpu_buf[cpu_id]; ++ dev_idx = core_to_dev_map[cpu_id]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ pecb = LWPMU_DEVICE_PMU_register_data( ++ &devices[dev_idx])[CPU_STATE_current_group(pcpu)]; ++ ++ while (1) { ++ if ((GLOBAL_STATE_current_phase(driver_state) == ++ DRV_STATE_PREPARE_STOP) || ++ (GLOBAL_STATE_current_phase(driver_state) == ++ DRV_STATE_TERMINATING) || ++ 
(GLOBAL_STATE_current_phase(driver_state) == ++ DRV_STATE_STOPPED)) { ++ goto handler_cleanup; ++ } ++ ++ data_size = ++ sbuf_get(samp_buf_per_cpu[cpu_id], (uint8_t *)&header); ++ if (data_size <= 0) { ++ continue; ++ } ++ payload_size = 0; ++ if ((header.data_type == (1 << CORE_PMU_SAMPLING)) || ++ (header.data_type == (1 << LBR_PMU_SAMPLING))) { ++ if (header.data_type == (1 << CORE_PMU_SAMPLING)) { ++ expected_payload_size = CORE_PMU_SAMPLE_SIZE; ++ } else if (header.data_type == ++ (1 << LBR_PMU_SAMPLING)) { ++ expected_payload_size = CORE_PMU_SAMPLE_SIZE + ++ LBR_PMU_SAMPLE_SIZE; ++ } else { ++ expected_payload_size = 0; ++ } ++ for (j = 0; j < (expected_payload_size - 1) / ++ TRACE_ELEMENT_SIZE + ++ 1; ++ j++) { ++ while (1) { ++ data_size = sbuf_get( ++ samp_buf_per_cpu[cpu_id], ++ (uint8_t *)&psample + ++ j * TRACE_ELEMENT_SIZE); ++ if (data_size <= 0) { ++ if ((GLOBAL_STATE_current_phase( ++ driver_state) == ++ DRV_STATE_PREPARE_STOP) || ++ (GLOBAL_STATE_current_phase( ++ driver_state) == ++ DRV_STATE_TERMINATING) || ++ (GLOBAL_STATE_current_phase( ++ driver_state) == ++ DRV_STATE_STOPPED)) { ++ goto handler_cleanup; ++ } ++ } else { ++ break; ++ } ++ } ++ ++ payload_size += data_size; ++ } ++ if (header.payload_size > payload_size) { ++ // Mismatch in payload size in header info ++ SEP_PRINT_ERROR( ++ "Mismatch in data size: header=%llu, payload_size=%d\n", ++ header.payload_size, payload_size); ++ break; ++ } ++ if (header.cpu_id != cpu_id) { ++ // Mismatch in cpu index in header info ++ SEP_PRINT_ERROR( ++ "Mismatch in cpu idx: header=%u, buffer=%d\n", ++ header.cpu_id, cpu_id); ++ break; ++ } ++ ++ // Now, handle the sample data in buffer ++ overflow_status = psample.csample.overflow_status; ++ SEP_PRINT_DEBUG("overflow_status cpu%d, value=0x%llx\n", ++ cpu_id, overflow_status); ++ ++ FOR_EACH_DATA_REG_CPU(pecb, i, cpu_id) ++ { ++ if (ECB_entries_is_gp_reg_get(pecb, i)) { ++ index = ECB_entries_reg_id(pecb, i) - ++ IA32_PMC0; ++ } else if 
(ECB_entries_fixed_reg_get(pecb, i)) { ++ index = ECB_entries_reg_id(pecb, i) - ++ IA32_FIXED_CTR0 + 0x20; ++ } else { ++ continue; ++ } ++ ++ if (overflow_status & ((U64)1 << index)) { ++ desc_id = COMPUTE_DESC_ID( ++ ECB_entries_event_id_index(pecb, ++ i)); ++ evt_desc = desc_data[desc_id]; ++ SEP_PRINT_DEBUG( ++ "In Interrupt handler: event_id_index=%u, desc_id=%u\n", ++ ECB_entries_event_id_index(pecb, ++ i), ++ desc_id); ++ ++ psamp = (SampleRecordPC *) ++ OUTPUT_Reserve_Buffer_Space( ++ bd, ++ EVENT_DESC_sample_size( ++ evt_desc), ++ TRUE, ++ !SEP_IN_NOTIFICATION, ++ cpu_id); ++ if (!psamp) { ++ SEP_PRINT_DEBUG( ++ "In Interrupt handler: psamp is NULL. No output buffer allocated\n"); ++ continue; ++ } ++ ++ CPU_STATE_num_samples(pcpu) += 1; ++ SAMPLE_RECORD_descriptor_id(psamp) = ++ desc_id; ++ SAMPLE_RECORD_event_index(psamp) = ++ ECB_entries_event_id_index(pecb, ++ i); ++ SAMPLE_RECORD_osid(psamp) = ++ psample.csample.os_id; ++ SAMPLE_RECORD_tsc(psamp) = header.tsc; ++ SAMPLE_RECORD_pid_rec_index_raw(psamp) = ++ 1; ++ SAMPLE_RECORD_pid_rec_index(psamp) = 0; ++ SAMPLE_RECORD_pid_rec_index(psamp) = 0; ++ SAMPLE_RECORD_tid(psamp) = 0; ++ SAMPLE_RECORD_cpu_num(psamp) = ++ (U16)header.cpu_id; ++ SAMPLE_RECORD_cs(psamp) = ++ (U16)psample.csample.cs; ++ ++ SAMPLE_RECORD_iip(psamp) = ++ psample.csample.rip; ++ SAMPLE_RECORD_ipsr(psamp) = ++ (psample.csample.rflags & ++ 0xffffffff) | ++ (((U64)SAMPLE_RECORD_csd(psamp) ++ .u2.s2.dpl) ++ << 32); ++ SAMPLE_RECORD_ia64_pc(psamp) = TRUE; ++ ++ if (DEV_CONFIG_collect_lbrs(pcfg) && ++ ++ !DEV_CONFIG_apebs_collect_lbrs( ++ pcfg) && ++ header.data_type == ++ (1 << LBR_PMU_SAMPLING)) { ++ lbr_tos_from_ip = dispatch->read_lbrs( ++ !DEV_CONFIG_store_lbrs( ++ pcfg) ? 
++ NULL : ++ ((S8 *)(psamp) + ++ EVENT_DESC_lbr_offset( ++ evt_desc)), ++ &psample.lsample); ++ } ++ ++ SEP_PRINT_DEBUG( ++ "SAMPLE_RECORD_cpu_num(psamp) %x\n", ++ SAMPLE_RECORD_cpu_num(psamp)); ++ SEP_PRINT_DEBUG( ++ "SAMPLE_RECORD_iip(psamp) %x\n", ++ SAMPLE_RECORD_iip(psamp)); ++ SEP_PRINT_DEBUG( ++ "SAMPLE_RECORD_cs(psamp) %x\n", ++ SAMPLE_RECORD_cs(psamp)); ++ SEP_PRINT_DEBUG( ++ "SAMPLE_RECORD_csd(psamp).lowWord %x\n", ++ SAMPLE_RECORD_csd(psamp) ++ .u1.lowWord); ++ SEP_PRINT_DEBUG( ++ "SAMPLE_RECORD_csd(psamp).highWord %x\n", ++ SAMPLE_RECORD_csd(psamp) ++ .u2.highWord); ++ } ++ } ++ END_FOR_EACH_DATA_REG_CPU; ++ } ++ } ++ ++handler_cleanup: ++ return 0; ++} ++#endif +diff --git a/drivers/platform/x86/sepdk/sep/sepdrv_p_state.c b/drivers/platform/x86/sepdk/sep/sepdrv_p_state.c +new file mode 100755 +index 000000000000..e91b9be4d582 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/sepdrv_p_state.c +@@ -0,0 +1,88 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "inc/control.h" ++#include "inc/utility.h" ++#include "inc/sepdrv_p_state.h" ++ ++/*! ++ * @fn OS_STATUS SEPDRV_P_STATE_Read ++ * ++ * @brief Reads the APERF and MPERF counters into the buffer provided for the purpose ++ * ++ * @param buffer - buffer to read the counts into ++ * ++ * @param pcpu - pcpu struct that contains the previous APERF/MPERF values ++ * ++ * @return OS_SUCCESS if read succeeded, otherwise error ++ * ++ * @note ++ */ ++OS_STATUS SEPDRV_P_STATE_Read(S8 *buffer, CPU_STATE pcpu) ++{ ++ U64 *samp = (U64 *)buffer; ++ U64 new_APERF = 0; ++ U64 new_MPERF = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p, pcpu: %p.", buffer, pcpu); ++ ++ if ((samp == NULL) || (pcpu == NULL)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (!samp || !pcpu)."); ++ return OS_INVALID; ++ } ++ ++ new_APERF = SYS_Read_MSR(DRV_APERF_MSR); ++ new_MPERF = SYS_Read_MSR(DRV_MPERF_MSR); ++ ++ if (CPU_STATE_last_p_state_valid(pcpu)) { ++ // there is a previous APERF/MPERF value ++ if ((CPU_STATE_last_aperf(pcpu)) > new_APERF) { ++ // a wrap-around has occurred. ++ samp[1] = CPU_STATE_last_aperf(pcpu) - new_APERF; ++ } else { ++ samp[1] = new_APERF - CPU_STATE_last_aperf(pcpu); ++ } ++ ++ if ((CPU_STATE_last_mperf(pcpu)) > new_MPERF) { ++ // a wrap-around has occurred. 
++ samp[0] = CPU_STATE_last_mperf(pcpu) - new_MPERF; ++ } else { ++ samp[0] = new_MPERF - CPU_STATE_last_mperf(pcpu); ++ } ++ } else { ++ // there is no previous valid APERF/MPERF values, thus no delta calculations ++ (CPU_STATE_last_p_state_valid(pcpu)) = TRUE; ++ samp[0] = 0; ++ samp[1] = 0; ++ } ++ ++ CPU_STATE_last_aperf(pcpu) = new_APERF; ++ CPU_STATE_last_mperf(pcpu) = new_MPERF; ++ ++ SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); ++ return OS_SUCCESS; ++} +diff --git a/drivers/platform/x86/sepdk/sep/silvermont.c b/drivers/platform/x86/sepdk/sep/silvermont.c +new file mode 100755 +index 000000000000..d69930395923 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/silvermont.c +@@ -0,0 +1,1113 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "lwpmudrv.h" ++#include "utility.h" ++#include "control.h" ++#include "output.h" ++#include "silvermont.h" ++#include "ecb_iterators.h" ++#include "pebs.h" ++#include "apic.h" ++ ++extern U64 *read_counter_info; ++extern DRV_CONFIG drv_cfg; ++extern U64 *interrupt_counts; ++extern DRV_SETUP_INFO_NODE req_drv_setup_info; ++extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++static U32 restore_reg_addr[3]; ++ ++typedef struct SADDR_S { ++ S64 addr : SILVERMONT_LBR_DATA_BITS; ++} SADDR; ++ ++#define SADDR_addr(x) ((x).addr) ++#define ADD_ERRATA_FIX_FOR_FIXED_CTR0 ++#define MSR_ENERGY_MULTIPLIER 0x606 // Energy Multiplier MSR ++ ++#if defined(DRV_IA32) ++#define ENABLE_IA32_PERFEVTSEL0_CTR 0x00400000 ++#define ENABLE_FIXED_CTR0 0x00000003 ++#elif defined(DRV_EM64T) ++#define ENABLE_IA32_PERFEVTSEL0_CTR 0x0000000000400000 ++#define ENABLE_FIXED_CTR0 0x0000000000000003 ++#else ++#error "Unexpected Architecture seen" ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void silvermont_Write_PMU(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Initial set up of the PMU registers ++ * ++ * Special Notes ++ * Initial write of PMU registers. ++ * Walk through the enties and write the value of the register accordingly. ++ * Assumption: For CCCR registers the enable bit is set to value 0. ++ * When current_group = 0, then this is the first time this routine is called, ++ * initialize the locks and set up EM tables. 
++ */ ++static VOID silvermont_Write_PMU(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ++ if (CPU_STATE_current_group(pcpu) == 0) { ++ if (EVENT_CONFIG_mode(ec) != EM_DISABLED) { ++ U32 index; ++ U32 st_index; ++ U32 j; ++ ++ /* Save all the initialization values away into an array for Event Multiplexing. */ ++ for (j = 0; j < EVENT_CONFIG_num_groups(ec); j++) { ++ CPU_STATE_current_group(pcpu) = j; ++ st_index = CPU_STATE_current_group(pcpu) * ++ EVENT_CONFIG_max_gp_events(ec); ++ FOR_EACH_REG_CORE_OPERATION( ++ pecb, i, PMU_OPERATION_DATA_GP) ++ { ++ index = st_index + i - ++ ECB_operations_register_start( ++ pecb, ++ PMU_OPERATION_DATA_GP); ++ CPU_STATE_em_tables(pcpu)[index] = ++ ECB_entries_reg_value(pecb, i); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ /* Reset the current group to the very first one. */ ++ CPU_STATE_current_group(pcpu) = ++ this_cpu % EVENT_CONFIG_num_groups(ec); ++ } ++ } ++ ++ if (dispatch->hw_errata) { ++ dispatch->hw_errata(); ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) ++ { ++ /* ++ * Writing the GLOBAL Control register enables the PMU to start counting. ++ * So write 0 into the register to prevent any counting from starting. 
++ */ ++ if (i == ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ continue; ++ } ++ /* ++ * PEBS is enabled for this collection session ++ */ ++ if (DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) && ++ i == ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS) && ++ ECB_entries_reg_value(pecb, i)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ continue; ++ } ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++#if defined(MYDEBUG) ++ { ++ U64 val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SEP_DRV_LOG_TRACE( ++ "Write reg 0x%x --- value 0x%llx -- read 0x%llx.", ++ ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i), val); ++ } ++#endif ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++#if defined(ADD_ERRATA_FIX_FOR_FIXED_CTR0) ++ { ++ U64 fixed_ctr0 = SYS_Read_MSR(IA32_FIXED_CTRL); ++ fixed_ctr0 = (fixed_ctr0 & (ENABLE_FIXED_CTR0)); ++ if (fixed_ctr0 != 0x0) { ++ U64 val = SYS_Read_MSR(IA32_PERFEVTSEL0); ++ val |= ENABLE_IA32_PERFEVTSEL0_CTR; ++ SYS_Write_MSR(IA32_PERFEVTSEL0, val); ++ } ++ } ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void silvermont_Disable_PMU(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Zero out the global control register. This automatically disables the PMU counters. 
++ * ++ */ ++static VOID silvermont_Disable_PMU(PVOID param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "No programming for this device in this group."); ++ return; ++ } ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void silvermont_Enable_PMU(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Set the enable bit for all the Control registers ++ * ++ */ ++static VOID silvermont_Enable_PMU(PVOID param) ++{ ++ /* ++ * Get the value from the event block ++ * 0 == location of the global control reg for this block. 
++ * Generalize this location awareness when possible ++ */ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ if (KVM_guest_mode) { ++ SYS_Write_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ 0LL); ++ } ++ if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { ++ APIC_Enable_Pmi(); ++ if (CPU_STATE_reset_mask(pcpu)) { ++ SEP_DRV_LOG_TRACE("Overflow reset mask %llx.", ++ CPU_STATE_reset_mask(pcpu)); ++ // Reinitialize the global overflow control register ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ CPU_STATE_reset_mask(pcpu) = 0LL; ++ } ++ if (CPU_STATE_group_swap(pcpu)) { ++ CPU_STATE_group_swap(pcpu) = 0; ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ 
ECB_SECTION_REG_INDEX( ++ pecb, ++ PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ PEBS_ENABLE_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ } ++ SYS_Write_MSR( ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ ECB_entries_reg_value( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++#if defined(MYDEBUG) ++ { ++ U64 val; ++ val = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS))); ++ SEP_DRV_LOG_TRACE( ++ "Write reg 0x%x--- read 0x%llx.", ++ ECB_entries_reg_id( ++ pecb, ++ ECB_SECTION_REG_INDEX( ++ pecb, ++ GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)), ++ val); ++ } ++#endif ++ } ++ } ++ SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.", ++ ECB_entries_reg_value(pecb, 0)); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn silvermont_Read_PMU_Data(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. 
++ * ++ */ ++static void silvermont_Read_PMU_Data(PVOID param) ++{ ++ U32 j; ++ U64 *buffer = read_counter_info; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ SEP_DRV_LOG_TRACE("PMU control_data 0x%p, buffer 0x%p.", ++ LWPMU_DEVICE_PMU_register_data(&devices[dev_idx]), ++ buffer); ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ j = EMON_BUFFER_CORE_EVENT_OFFSET( ++ EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map( ++ emon_buffer_driver_helper)[this_cpu], ++ ECB_entries_core_event_id(pecb, i)); ++ ++ buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j, ++ buffer[j], this_cpu, ++ ECB_entries_core_event_id(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn void silvermont_Check_Overflow(masks) ++ * ++ * @param masks the mask structure to populate ++ * ++ * @return None No return needed ++ * ++ * @brief Called by the data processing method to figure out which registers have overflowed. 
++ * ++ */ ++static void silvermont_Check_Overflow(DRV_MASKS masks) ++{ ++ U32 index; ++ U64 overflow_status = 0; ++ U32 this_cpu; ++ BUFFER_DESC bd; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ DEV_CONFIG pcfg; ++ DISPATCH dispatch; ++ U64 overflow_status_clr = 0; ++ DRV_EVENT_MASK_NODE event_flag; ++ ++ SEP_DRV_LOG_TRACE_IN("Masks: %p.", masks); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ bd = &cpu_buf[this_cpu]; ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ // initialize masks ++ DRV_MASKS_masks_num(masks) = 0; ++ ++ overflow_status = SYS_Read_MSR(ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, ++ PMU_OPERATION_GLOBAL_STATUS))); ++ ++ if (DEV_CONFIG_pebs_mode(pcfg)) { ++ overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); ++ } ++ overflow_status_clr = overflow_status; ++ ++ if (dispatch->check_overflow_gp_errata) { ++ overflow_status = dispatch->check_overflow_gp_errata( ++ pecb, &overflow_status_clr); ++ } ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, ++ overflow_status); ++ index = 0; ++ BUFFER_DESC_sample_count(bd) = 0; ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ index = i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_FIXED) + ++ 0x20; ++ if (dispatch->check_overflow_errata) { ++ overflow_status = ++ dispatch->check_overflow_errata( ++ pecb, i, overflow_status); ++ } ++ } else if (ECB_entries_is_gp_reg_get(pecb, i)) { ++ index = i - ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ } else { ++ continue; ++ } ++ if (overflow_status & ((U64)1 << 
index)) { ++ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", ++ this_cpu, index); ++ SEP_DRV_LOG_TRACE( ++ "Register 0x%x --- val 0%llx.", ++ ECB_entries_reg_id(pecb, i), ++ SYS_Read_MSR(ECB_entries_reg_id(pecb, i))); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ ++ if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { ++ /* Increment the interrupt count. */ ++ if (interrupt_counts) { ++ interrupt_counts ++ [this_cpu * ++ DRV_CONFIG_num_events( ++ drv_cfg) + ++ ECB_entries_event_id_index( ++ pecb, i)] += 1; ++ } ++ } ++ ++ DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; ++ if (ECB_entries_fixed_reg_get(pecb, i)) { ++ CPU_STATE_p_state_counting(pcpu) = 1; ++ } ++ if (ECB_entries_precise_get(pecb, i)) { ++ DRV_EVENT_MASK_precise(&event_flag) = 1; ++ } ++ if (ECB_entries_lbr_value_get(pecb, i)) { ++ DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; ++ } ++ if (ECB_entries_uncore_get(pecb, i)) { ++ DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; ++ } ++ ++ if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { ++ DRV_EVENT_MASK_bitFields1( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ DRV_EVENT_MASK_bitFields1(&event_flag); ++ DRV_EVENT_MASK_event_idx( ++ DRV_MASKS_eventmasks(masks) + ++ DRV_MASKS_masks_num(masks)) = ++ ECB_entries_event_id_index(pecb, i); ++ DRV_MASKS_masks_num(masks)++; ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "The array for event masks is full."); ++ } ++ ++ SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", ++ overflow_status, (U64)1 << index); ++ SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, ++ ECB_entries_reg_id(pecb, i), index); ++ if (ECB_entries_event_id_index(pecb, i) == ++ CPU_STATE_trigger_event_num(pcpu)) { ++ CPU_STATE_trigger_count(pcpu)--; ++ } ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ CPU_STATE_reset_mask(pcpu) = overflow_status_clr; ++ // Reinitialize the global overflow control register ++ SYS_Write_MSR(IA32_PERF_GLOBAL_OVF_CTRL, overflow_status_clr); ++ ++ 
SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn silvermont_Swap_Group(restart) ++ * ++ * @param restart dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Perform the mechanics of swapping the event groups for event mux operations ++ * ++ * Special Notes ++ * Swap function for event multiplexing. ++ * Freeze the counting. ++ * Swap the groups. ++ * Enable the counting. ++ * Reset the event trigger count ++ * ++ */ ++static VOID silvermont_Swap_Group(DRV_BOOL restart) ++{ ++ U32 index; ++ U32 next_group; ++ U32 st_index; ++ U32 this_cpu = CONTROL_THIS_CPU(); ++ CPU_STATE pcpu = &pcb[this_cpu]; ++ U32 dev_idx; ++ DISPATCH dispatch; ++ EVENT_CONFIG ec; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy restart: %u.", restart); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ ec = LWPMU_DEVICE_ec(&devices[dev_idx]); ++ st_index = ++ CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); ++ next_group = (CPU_STATE_current_group(pcpu) + 1); ++ ++ if (next_group >= EVENT_CONFIG_num_groups(ec)) { ++ next_group = 0; ++ } ++ ++ SEP_DRV_LOG_TRACE("Current group : 0x%x.", ++ CPU_STATE_current_group(pcpu)); ++ SEP_DRV_LOG_TRACE("Next group : 0x%x.", next_group); ++ ++ // Save the counters for the current group ++ if (!DRV_CONFIG_event_based_counts(drv_cfg)) { ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) ++ { ++ index = st_index + i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ CPU_STATE_em_tables(pcpu)[index] = ++ SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SEP_DRV_LOG_TRACE("Saved value for reg 0x%x : 0x%llx.", ++ ECB_entries_reg_id(pecb, i), ++ CPU_STATE_em_tables(pcpu)[index]); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ ++ CPU_STATE_current_group(pcpu) = next_group; ++ ++ if (dispatch->hw_errata) { 
++ dispatch->hw_errata(); ++ } ++ ++ // First write the GP control registers (eventsel) ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_CTRL_GP) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ if (DRV_CONFIG_event_based_counts(drv_cfg)) { ++ // In EBC mode, reset the counts for all events except for trigger event ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_event_id_index(pecb, i) != ++ CPU_STATE_trigger_event_num(pcpu)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } else { ++ // Then write the gp count registers ++ st_index = CPU_STATE_current_group(pcpu) * ++ EVENT_CONFIG_max_gp_events(ec); ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) ++ { ++ index = st_index + i - ++ ECB_operations_register_start( ++ pecb, PMU_OPERATION_DATA_GP); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ CPU_STATE_em_tables(pcpu)[index]); ++ SEP_DRV_LOG_TRACE( ++ "Restore value for reg 0x%x : 0x%llx.", ++ ECB_entries_reg_id(pecb, i), ++ CPU_STATE_em_tables(pcpu)[index]); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_OCR) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ++ ECB_entries_reg_value(pecb, i)); ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ /* ++ * reset the em factor when a group is swapped ++ */ ++ CPU_STATE_trigger_count(pcpu) = EVENT_CONFIG_em_factor(ec); ++ ++ /* ++ * The enable routine needs to rewrite the control registers ++ */ ++ CPU_STATE_reset_mask(pcpu) = 0LL; ++ CPU_STATE_group_swap(pcpu) = 1; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn silvermont_Initialize(params) ++ * ++ * @param params dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Initialize the PMU setting up for collection ++ * ++ * Special Notes ++ * Saves the relevant PMU state (minimal set of MSRs required ++ * to avoid conflicts with other Linux tools, such as Oprofile). ++ * This function should be called in parallel across all CPUs ++ * prior to the start of sampling, before PMU state is changed. ++ * ++ */ ++static VOID silvermont_Initialize(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ECB pecb; ++ U32 dev_idx; ++ U32 cur_grp; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ ++ if (pcb == NULL) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb)."); ++ return; ++ } ++ ++ pcpu = &pcb[this_cpu]; ++ cur_grp = CPU_STATE_current_group(pcpu); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ CPU_STATE_pmu_state(pcpu) = pmu_state + (this_cpu * 3); ++ if (CPU_STATE_pmu_state(pcpu) == NULL) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT( ++ "Unable to save PMU state on CPU %d!", this_cpu); ++ return; ++ } ++ ++ restore_reg_addr[0] = ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)); ++ restore_reg_addr[1] = ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)); ++ restore_reg_addr[2] = ECB_entries_reg_id( ++ pecb, ECB_SECTION_REG_INDEX(pecb, FIXED_CTRL_REG_INDEX, ++ PMU_OPERATION_GLOBAL_REGS)); ++ ++ // save the original PMU state on this CPU (NOTE: must only be called ONCE per collection) ++ CPU_STATE_pmu_state(pcpu)[0] = SYS_Read_MSR(restore_reg_addr[0]); ++ CPU_STATE_pmu_state(pcpu)[1] = SYS_Read_MSR(restore_reg_addr[1]); ++ CPU_STATE_pmu_state(pcpu)[2] = SYS_Read_MSR(restore_reg_addr[2]); ++ ++ SEP_DRV_LOG_TRACE("Saving PMU state on CPU %d:", this_cpu); ++ 
SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x%llx.", ++ CPU_STATE_pmu_state(pcpu)[0]); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x%llx.", ++ CPU_STATE_pmu_state(pcpu)[1]); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x%llx.", ++ CPU_STATE_pmu_state(pcpu)[2]); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn silvermont_Destroy(params) ++ * ++ * @param params dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Reset the PMU setting up after collection ++ * ++ * Special Notes ++ * Restores the previously saved PMU state done in core2_Initialize. ++ * This function should be called in parallel across all CPUs ++ * after sampling collection ends/terminates. ++ * ++ */ ++static VOID silvermont_Destroy(VOID *param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ if (pcb == NULL) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb)."); ++ return; ++ } ++ ++ preempt_disable(); ++ this_cpu = CONTROL_THIS_CPU(); ++ preempt_enable(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (CPU_STATE_pmu_state(pcpu) == NULL) { ++ SEP_DRV_LOG_WARNING_TRACE_OUT( ++ "Unable to restore PMU state on CPU %d!", this_cpu); ++ return; ++ } ++ ++ SEP_DRV_LOG_TRACE("Clearing PMU state on CPU %d:", this_cpu); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x0."); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x0."); ++ SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0."); ++ ++ SYS_Write_MSR(restore_reg_addr[0], 0); ++ SYS_Write_MSR(restore_reg_addr[1], 0); ++ SYS_Write_MSR(restore_reg_addr[2], 0); ++ ++ CPU_STATE_pmu_state(pcpu) = NULL; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * @fn silvermont_Read_LBRs(buffer) ++ * ++ * @param IN buffer - pointer to the buffer to write the data into ++ * @return None ++ * ++ * @brief Read all the LBR registers into the buffer provided and return ++ * ++ */ ++static U64 
silvermont_Read_LBRs(VOID *buffer, PVOID data) ++{ ++ U32 i, count = 0; ++ U64 *lbr_buf = NULL; ++ U64 value; ++ U64 tos_ip_addr = 0; ++ U64 tos_ptr = 0; ++ SADDR saddr; ++ U32 this_cpu; ++ U32 dev_idx; ++ LBR lbr; ++ DEV_CONFIG pcfg; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); ++ ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ lbr_buf = (U64 *)buffer; ++ } ++ ++ for (i = 0; i < LBR_num_entries(lbr); i++) { ++ value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ *lbr_buf = value; ++ } ++ SEP_DRV_LOG_TRACE("LBR %u, 0x%llx.", i, value); ++ if (i == 0) { ++ tos_ptr = value; ++ } else { ++ if (LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP) { ++ if (tos_ptr == count) { ++ SADDR_addr(saddr) = ++ value & SILVERMONT_LBR_BITMASK; ++ tos_ip_addr = (U64)SADDR_addr( ++ saddr); // Add signed extension ++ SEP_DRV_LOG_TRACE( ++ "Tos_ip_addr %llu, 0x%llx.", ++ tos_ptr, value); ++ } ++ count++; ++ } ++ } ++ if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { ++ lbr_buf++; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); ++ return tos_ip_addr; ++} ++ ++static VOID silvermont_Clean_Up(VOID *param) ++{ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) ++ { ++ if (ECB_entries_clean_up_get(pecb, i)) { ++ SEP_DRV_LOG_TRACE("Clean up set --- RegId --- %x.", ++ ECB_entries_reg_id(pecb, i)); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn silvermont_Read_Counts(param, id) ++ * ++ * @param param The read thread node to process ++ * @param id The event id for the which the sample is generated ++ * ++ * @return None No return needed ++ * ++ * @brief Read CPU event based counts data and store into the buffer param; ++ * For the case of the trigger event, store the SAV value. ++ */ ++static VOID silvermont_Read_Counts(PVOID param, U32 id) ++{ ++ U64 *data; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 dev_idx; ++ DEV_CONFIG pcfg; ++ U32 event_id = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_idx = core_to_dev_map[this_cpu]; ++ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ ++ if (DEV_CONFIG_ebc_group_id_offset(pcfg)) { ++ // Write GroupID ++ data = (U64 *)((S8 *)param + ++ DEV_CONFIG_ebc_group_id_offset(pcfg)); ++ *data = CPU_STATE_current_group(pcpu) + 1; ++ } ++ ++ FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) ++ { ++ if (ECB_entries_counter_event_offset(pecb, i) == 0) { ++ continue; ++ } ++ data = (U64 *)((S8 *)param + ++ ECB_entries_counter_event_offset(pecb, i)); ++ event_id = ECB_entries_event_id_index(pecb, i); ++ if (event_id == id) { ++ *data = ~(ECB_entries_reg_value(pecb, i) - 1) & ++ ECB_entries_max_bits(pecb, i); ++ } else { ++ *data = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_CORE_OPERATION; ++ ++ if (DRV_CONFIG_enable_p_state(drv_cfg)) { ++ CPU_STATE_p_state_counting(pcpu) = 0; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 silvermont_Platform_Info ++ * ++ * @brief Reads the MSR_PLATFORM_INFO register if present ++ * ++ * @param void ++ * ++ * @return value read from the register ++ * ++ * Special Notes: ++ * ++ */ ++static void silvermont_Platform_Info(PVOID data) ++{ ++ U64 index = 0; ++ DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; ++ U64 value = 0; ++ U64 clock_value = 0; ++ U64 energy_multiplier; ++ ++ SEP_DRV_LOG_TRACE_IN("Data: %p.", data); ++ ++ if (!platform_data) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!platform_data)."); ++ return; ++ } ++ ++#define IA32_MSR_PLATFORM_INFO 0xCE ++ value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); ++ ++#define IA32_MSR_PSB_CLOCK_STS 0xCD ++#define FREQ_MASK_BITS 0x03 ++ ++ clock_value = SYS_Read_MSR(IA32_MSR_PSB_CLOCK_STS); ++ index = clock_value & FREQ_MASK_BITS; ++ DRV_PLATFORM_INFO_info(platform_data) = value; ++ DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = index; ++ ++#undef IA32_MSR_PLATFORM_INFO ++#undef IA32_MSR_PSB_CLOCK_STS ++#undef FREQ_MASK_BITS ++ energy_multiplier = SYS_Read_MSR(MSR_ENERGY_MULTIPLIER); ++ SEP_DRV_LOG_TRACE("MSR_ENERGY_MULTIPLIER: %llx.", energy_multiplier); ++ DRV_PLATFORM_INFO_energy_multiplier(platform_data) = ++ (U32)(energy_multiplier & 0x00001F00) >> 8; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID knights_Platform_Info ++ * ++ * @brief Reads the MSR_PLATFORM_INFO register if present ++ * ++ * @param void ++ * ++ * @return value read from the register ++ * ++ * Special Notes: ++ * ++ */ ++static VOID knights_Platform_Info(PVOID data) ++{ ++ DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; ++ U64 value = 0; ++ U64 energy_multiplier; ++ ++ SEP_DRV_LOG_TRACE_IN("Data: %p.", data); ++ ++ if (!platform_data) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!platform_data)."); ++ return; ++ } ++ ++#define IA32_MSR_PLATFORM_INFO 0xCE ++ value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); ++ ++ DRV_PLATFORM_INFO_info(platform_data) = value; ++ DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; ++ energy_multiplier = SYS_Read_MSR(MSR_ENERGY_MULTIPLIER); ++ SEP_DRV_LOG_TRACE("MSR_ENERGY_MULTIPLIER: %llx.", energy_multiplier); ++ DRV_PLATFORM_INFO_energy_multiplier(platform_data) = ++ (U32)(energy_multiplier & 0x00001F00) >> 8; ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++DISPATCH_NODE silvermont_dispatch = { .init = silvermont_Initialize, ++ .fini = silvermont_Destroy, ++ .write = silvermont_Write_PMU, ++ .freeze = silvermont_Disable_PMU, ++ .restart = silvermont_Enable_PMU, ++ .read_data = silvermont_Read_PMU_Data, ++ .check_overflow = ++ silvermont_Check_Overflow, ++ .swap_group = silvermont_Swap_Group, ++ .read_lbrs = silvermont_Read_LBRs, ++ .cleanup = silvermont_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = silvermont_Read_Counts, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = silvermont_Platform_Info, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; ++ ++DISPATCH_NODE knights_dispatch = { .init = silvermont_Initialize, ++ .fini = silvermont_Destroy, ++ .write = silvermont_Write_PMU, ++ .freeze = silvermont_Disable_PMU, ++ .restart = silvermont_Enable_PMU, ++ .read_data = silvermont_Read_PMU_Data, ++ .check_overflow = 
silvermont_Check_Overflow, ++ .swap_group = silvermont_Swap_Group, ++ .read_lbrs = silvermont_Read_LBRs, ++ .cleanup = silvermont_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = silvermont_Read_Counts, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = knights_Platform_Info, ++ .trigger_read = NULL, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; +diff --git a/drivers/platform/x86/sepdk/sep/sys32.S b/drivers/platform/x86/sepdk/sep/sys32.S +new file mode 100755 +index 000000000000..eb4c12304cdc +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/sys32.S +@@ -0,0 +1,200 @@ ++# Copyright(C) 2002-2018 Intel Corporation. All Rights Reserved. ++# ++# This file is part of SEP Development Kit ++# ++# SEP Development Kit is free software; you can redistribute it ++# and/or modify it under the terms of the GNU General Public License ++# version 2 as published by the Free Software Foundation. ++# ++# SEP Development Kit is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# As a special exception, you may use this file as part of a free software ++# library without restriction. Specifically, if other files instantiate ++# templates or use macros or inline functions from this file, or you compile ++# this file and link it with other files to produce an executable, this ++# file does not by itself cause the resulting executable to be covered by ++# the GNU General Public License. This exception does not however ++# invalidate any other reasons why the executable file might be covered by ++# the GNU General Public License. 

/* NOTE(review): the original #include operands were lost in transport.
 * linux/version.h is certain (LINUX_VERSION_CODE / KERNEL_VERSION below);
 * asm/segment.h is presumed for __KERNEL_DS / __KERNEL_PERCPU — confirm
 * against the upstream SEP sources. */
#include <linux/version.h>
#include <asm/segment.h>

#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 20)
#define USE_KERNEL_PERCPU_SEGMENT_GS
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21) && LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 29)
#define USE_KERNEL_PERCPU_SEGMENT_FS
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
#define USE_KERNEL_PERCPU_SEGMENT_FS
#define USE_KERNEL_PERCPU_SEGMENT_GS
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#if !defined(__KERNEL_PERCPU)
#define __KERNEL_PERCPU __KERNEL_PDA
#endif
#endif

#if defined(USE_KERNEL_PERCPU_SEGMENT_GS)
#if defined(__KERNEL_STACK_CANARY)
#define SEP_GS_SEG_VALUE __KERNEL_STACK_CANARY
#else
#define SEP_GS_SEG_VALUE __KERNEL_PERCPU
#endif
#endif

#***********************************************************************
#
# SYS_Get_IDT_Base_HWR
#     Get the IDT Desc address
#
# Entry: none
#
# Exit: base address in eax
#
# void SYS_Get_IDT_Base_HWR(U64 *pIdtDesc);
#
#***********************************************************************
        .text
        .align 4
        .global SYS_IO_Delay
SYS_IO_Delay:
        ret

        .global SYS_Get_IDT_Base_HWR
SYS_Get_IDT_Base_HWR:
        subl $8, %esp
        sidt 2(%esp)
        movl 4(%esp), %eax
        addl $8, %esp
        ret

        .global SYS_Get_cs
SYS_Get_cs:
        mov %cs, %ax
        andl $0x0000ffff, %eax
        ret

        .global SYS_Get_TSC
SYS_Get_TSC:
        rdtsc
        ret

        .text
        .align 4
        .global SYS_Perfvec_Handler
SYS_Perfvec_Handler:
        # This is the same as KERNEL's
        pushl %eax              # Filler for Error Code

        cld
        pushl %es               # SAVE_ALL macro to access pt_regs
        pushl %ds               # inside our ISR.
#if defined(USE_KERNEL_PERCPU_SEGMENT_GS)
        pushl %gs
#endif
#if defined(USE_KERNEL_PERCPU_SEGMENT_FS)
        pushl %fs
#endif
        pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx

        movl $(__KERNEL_DS), %edx       # Use KERNEL DS selector
        movl %edx, %ds                  # Make sure we set Kernel
        movl %edx, %es                  # DS into local DS and ES

#if defined(USE_KERNEL_PERCPU_SEGMENT_GS)
        movl $(SEP_GS_SEG_VALUE), %edx  # Use kernel percpu segment
        movl %edx, %gs                  # ... and load it into %gs
#endif
#if defined(USE_KERNEL_PERCPU_SEGMENT_FS)
        movl $(__KERNEL_PERCPU), %edx   # Use kernel percpu segment
        movl %edx, %fs                  # ... and load it into %fs
#endif

        movl %esp, %ebx         # get ready to put *pt_regs on stack

        pushl %ebx              # put *pt_regs on the stack
        call PMI_Interrupt_Handler
        addl $0x4, %esp         # pop to nowhere...

        pop %ebx                # restore register set
        pop %ecx
        pop %edx
        pop %esi
        pop %edi
        pop %ebp
        pop %eax
#if defined(USE_KERNEL_PERCPU_SEGMENT_FS)
        pop %fs
#endif
#if defined(USE_KERNEL_PERCPU_SEGMENT_GS)
        pop %gs
#endif
        pop %ds
        pop %es
        pop %eax

        iret

# ----------------------------------------------------------------------------
# name:         get_CSD
#
# description:  get the CS descriptor
#
# input:        code segment selector
#
# output:       code segment descriptor
# ----------------------------------------------------------------------------
        .text
        .align 4
        .globl SYS_Get_CSD

SYS_Get_CSD:
        pushl %ebp
        movl %esp, %ebp
        pushal                  # save regs

        subl $8, %esp
        xorl %eax, %eax
        movw 8(%ebp), %ax       # eax.lo = cs
        sgdt (%esp)             # store gdt reg
        leal (%esp), %ebx       # ebx = gdt reg ptr
        movl 2(%ebx), %ecx      # ecx = gdt base
        xorl %edx, %edx
        movw %ax, %dx
        andl $4, %edx
        cmpl $0, %edx           # test ti. GDT?
++ jz .bsr_10 # ..yes ++ xorl %edx, %edx ++ sldt %dx # ..no dx=ldtsel ++ andb $0xf8, %dl # clear ti, rpl ++ addl 2(%ebx), %edx # add gdt base ++ movb 7(%edx), %cl # ecx = ldt base ++ shll $8, %ecx # .. ++ movb 4(%edx), %cl # .. ++ shll $16, %ecx # .. ++ movw 2(%edx), %cx # .. ++.bsr_10: ++ andb $0xf8, %al # clear ti & rpl ++ addl %eax, %ecx # add to gdt/ldt ++ movl (%ecx), %eax # copy code seg ++ movl 12(%ebp), %edx # ..descriptor (csdlo) ++ movl %eax, (%edx) # ..descriptor (csdlo) ++ movl 4(%ecx), %eax # ..from gdt or ++ movl 16(%ebp), %edx # ..ldt to sample (csdhi) ++ movl %eax, (%edx) # ..ldt to sample (csdhi) ++ addl $8, %esp ++ popal # restore regs ++ leave ++ ret +diff --git a/drivers/platform/x86/sepdk/sep/sys64.S b/drivers/platform/x86/sepdk/sep/sys64.S +new file mode 100755 +index 000000000000..1deb8db3cdb7 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/sys64.S +@@ -0,0 +1,140 @@ ++# Copyright(C) 2002-2018 Intel Corporation. All Rights Reserved. ++# ++# This file is part of SEP Development Kit ++# ++# SEP Development Kit is free software; you can redistribute it ++# and/or modify it under the terms of the GNU General Public License ++# version 2 as published by the Free Software Foundation. ++# ++# SEP Development Kit is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# As a special exception, you may use this file as part of a free software ++# library without restriction. Specifically, if other files instantiate ++# templates or use macros or inline functions from this file, or you compile ++# this file and link it with other files to produce an executable, this ++# file does not by itself cause the resulting executable to be covered by ++# the GNU General Public License. 
# This exception does not however
# invalidate any other reasons why the executable file might be covered by
# the GNU General Public License.

#include "inc/asm_helper.h"
/* NOTE(review): original #include operand lost in transport; asm/msr.h is
 * presumed for MSR_GS_BASE used below — confirm against upstream sources. */
#include <asm/msr.h>

.text

#***********************************************************************
#
# SYS_Get_IDT_Base
#     Get the IDT Desc address
#
# Entry: pointer to location to store idt Desc
#
# Exit: none
#
# void SYS_Get_IDT_Base(U64 *pIdtDesc);
#
#***********************************************************************
        .global SYS_Get_IDT_Base
SYS_Get_IDT_Base:
        SIDT (%rdi)
        ret

#***********************************************************************
#
# SYS_Get_GDT_Base
#     Get the GDT Desc address
#
# Entry: pointer to location to store gdt Desc
#
# Exit: none
#
# void SYS_Get_GDT_Base(U64 *pGdtDesc);
#
#***********************************************************************
        .global SYS_Get_GDT_Base
SYS_Get_GDT_Base:
        SGDT (%rdi)
        ret

#***********************************************************************
#
# SYS_Get_TSC  (intentionally disabled in this build)
#
# void SYS_Get_TSC(U64 *tsc);
#
#***********************************************************************
#       .global SYS_Get_TSC
#SYS_Get_TSC:
#       rdtsc
#       ret

#***********************************************************************
#
# SYS_IO_Delay
#     Add a short delay to the instruction stream
#
# Entry: none
#
# Exit: none
#
# void SYS_IO_Delay(void);
#
#***********************************************************************
        .global SYS_IO_Delay
SYS_IO_Delay:
        ret

# ----------------------------------------------------------------------------
# name:         SYS_PerfVec_Handler
#
# description:  ISR entry for local APIC PERF interrupt vector
#
# Input:        n/a
#
# Output:       n/a
# ----------------------------------------------------------------------------

        .global SYS_Perfvec_Handler
SYS_Perfvec_Handler:
        CFI_STARTPROC
        pushq %rax              # fake an error code...
        cld                     # cause the kernel likes it this way...

        SAVE_ALL                # Save the world!

        movl $MSR_GS_BASE, %ecx # for the moment, do the safe swapgs check
        rdmsr
        xorl %ebx, %ebx         # assume no swapgs (ebx == 0)
        testl %edx, %edx
        js 1f
        swapgs
        movl $1, %ebx           # ebx == 1 means we did a swapgs
1:      movq %rsp, %rdi         # pt_regs is the first argument

        #
        # ebx is zero if no swap, one if swap
        # ebx is preserved in C calling convention...
        #
        # NOTE: the C code is responsible for ACK'ing the APIC!!!
        #
        call PMI_Interrupt_Handler

        #
        # Don't want an interrupt while we are doing the swapgs stuff
        #
        cli
        testl %ebx, %ebx
        jz 2f
        swapgs
2:      RESTORE_ALL
        popq %rax
        iretq
        CFI_ENDPROC
diff --git a/drivers/platform/x86/sepdk/sep/sys_info.c b/drivers/platform/x86/sepdk/sep/sys_info.c
new file mode 100755
index 000000000000..b72ce2894c82
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/sys_info.c
@@ -0,0 +1,1111 @@
/* ****************************************************************************
 * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved.
 *
 * This file is part of SEP Development Kit
 *
 * SEP Development Kit is free software; you can redistribute it
 * and/or modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * SEP Development Kit is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction.
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "rise_errors.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++#include "lwpmudrv.h" ++#include "control.h" ++#include "utility.h" ++#include "apic.h" ++#include "sys_info.h" ++ ++#define VTSA_CPUID VTSA_CPUID_X86 ++ ++extern U64 total_ram; ++static IOCTL_SYS_INFO *ioctl_sys_info; ++static size_t ioctl_sys_info_size; ++static U32 *cpuid_entry_count; ++static U32 *cpuid_total_count; ++U32 *cpu_built_sysinfo; ++ ++static U32 cpu_threads_per_core; ++static VOID *gen_per_cpu_ptr; ++ ++#define VTSA_NA64 ((U64)-1) ++#define VTSA_NA32 ((U32)-1) ++#define VTSA_NA ((U32)-1) ++ ++#define SYS_INFO_NUM_SETS(rcx) ((rcx) + 1) ++#define SYS_INFO_LINE_SIZE(rbx) (((rbx)&0xfff) + 1) ++#define SYS_INFO_LINE_PARTITIONS(rbx) ((((rbx) >> 12) & 0x3ff) + 1) ++#define SYS_INFO_NUM_WAYS(rbx) ((((rbx) >> 22) & 0x3ff) + 1) ++ ++#define SYS_INFO_CACHE_SIZE(rcx, rbx) \ ++ (SYS_INFO_NUM_SETS((rcx)) * SYS_INFO_LINE_SIZE((rbx)) * \ ++ SYS_INFO_LINE_PARTITIONS((rbx)) * SYS_INFO_NUM_WAYS((rbx))) ++ ++#define MSR_FB_PCARD_ID_FUSE 0x17 // platform id fuses MSR ++ ++#define LOW_PART(x) (x & 0xFFFFFFFF) ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static U64 sys_info_nbits(number) ++ * ++ * @param number - the number to check ++ * @return the number of bit. 
++ * ++ * @brief This routine gets the number of useful bits with the given number. ++ * It will round the number up to power of 2, and adjust to 0 based number. ++ * sys_info_nbits(0x3) = 2 ++ * sys_info_nbits(0x4) = 2 ++ * ++ */ ++static U64 sys_info_nbits(U64 number) ++{ ++ U64 i; ++ ++ SEP_DRV_LOG_TRACE_IN("Number: %llx.", ++ number); // is %llu portable in the kernel? ++ ++ if (number < 2) { ++ SEP_DRV_LOG_TRACE_OUT("Res: %u. (early exit)", (U32)number); ++ return number; ++ } ++ ++ // adjust to 0 based number, and round up to power of 2 ++ number--; ++ for (i = 0; number > 0; i++) { ++ number >>= 1; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)i); ++ return i; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static U64 sys_info_bitmask(nbits) ++ * ++ * @param number - the number of bits ++ * @return the bit mask for the nbits number ++ * ++ * @brief This routine gets the bitmask for the nbits number. ++ */ ++static U64 sys_info_bitmask(U64 nbits) ++{ ++ U64 mask = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Nbits: %u.", (U32)nbits); ++ ++ mask = (U64)1 << nbits; ++ mask--; ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %llx.", mask); ++ ++ return mask; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static void sys_info_Get_Num_Cpuid_Funcs(basic_funcs, basic_4_funcs, extended_funcs) ++ * ++ * @param basic_functions - pointer to the number of basic functions ++ * @param basic_4_funcs - pointer to the basic 4 functions ++ * @param extended_funcs - pointer to the number of extended functions ++ * @return total number of cpuid functions ++ * ++ * @brief This routine gets the number of basic and extended cpuid functions. 
++ * ++ */ ++static U32 sys_info_Get_Num_Cpuid_Funcs(OUT U32 *basic_funcs, ++ OUT U32 *basic_4_funcs, ++ OUT U32 *extended_funcs) ++{ ++ U64 num_basic_funcs = 0x0LL; ++ U64 num_basic_4_funcs = 0x0LL; ++ U64 num_extended_funcs = 0x0LL; ++ U64 rax; ++ U64 rbx; ++ U64 rcx; ++ U64 rdx; ++ U64 i; ++ U32 res; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ UTILITY_Read_Cpuid(0, &num_basic_funcs, &rbx, &rcx, &rdx); ++ UTILITY_Read_Cpuid(0x80000000, &num_extended_funcs, &rbx, &rcx, &rdx); ++ ++ if (num_extended_funcs & 0x80000000) { ++ num_extended_funcs -= 0x80000000; ++ } ++ ++ // ++ // make sure num_extended_funcs is not bogus ++ // ++ if (num_extended_funcs > 0x1000) { ++ num_extended_funcs = 0; ++ } ++ ++ // ++ // if number of basic funcs is greater than 4, figure out how many ++ // time we should call CPUID with eax = 0x4. ++ // ++ num_basic_4_funcs = 0; ++ if (num_basic_funcs >= 4) { ++ for (i = 0, rax = (U64)-1; (rax & 0x1f) != 0; i++) { ++ rcx = i; ++ UTILITY_Read_Cpuid(4, &rax, &rbx, &rcx, &rdx); ++ } ++ num_basic_4_funcs = i - 1; ++ } ++ if (num_basic_funcs >= 0xb) { ++ i = 0; ++ do { ++ rcx = i; ++ UTILITY_Read_Cpuid(0xb, &rax, &rbx, &rcx, &rdx); ++ i++; ++ } while (!(LOW_PART(rax) == 0 && LOW_PART(rbx) == 0)); ++ num_basic_4_funcs += i; ++ } ++ SEP_DRV_LOG_TRACE("Num_basic_4_funcs = %llx.", num_basic_4_funcs); ++ ++ // ++ // adjust number to include 0 and 0x80000000 functions. 
++ // ++ num_basic_funcs++; ++ num_extended_funcs++; ++ ++ SEP_DRV_LOG_TRACE("num_basic_funcs: %llx, num_extended_funcs: %llx.", ++ num_basic_funcs, num_extended_funcs); ++ ++ // ++ // fill-in the parameter for the caller ++ // ++ if (basic_funcs != NULL) { ++ *basic_funcs = (U32)num_basic_funcs; ++ } ++ if (basic_4_funcs != NULL) { ++ *basic_4_funcs = (U32)num_basic_4_funcs; ++ } ++ if (extended_funcs != NULL) { ++ *extended_funcs = (U32)num_extended_funcs; ++ } ++ ++ res = (U32)(num_basic_funcs + num_basic_4_funcs + num_extended_funcs); ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static void sys_info_Get_Cpuid_Entry_Cpunt(buffer) ++ * ++ * @param buffer - pointer to the buffer to hold the info ++ * @return None ++ * ++ * @brief Service Routine to query the CPU for the number of entries needed ++ * ++ */ ++static VOID sys_info_Get_Cpuid_Entry_Count(PVOID param) ++{ ++ S32 current_processor; ++ U32 *current_cpu_buffer; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); ++ ++ if (param == NULL) { ++ current_processor = CONTROL_THIS_CPU(); ++ } else { ++ current_processor = *(S32 *)param; ++ } ++ SEP_DRV_LOG_TRACE("Beginning on CPU %u.", current_processor); ++ ++ current_cpu_buffer = (U32 *)((U8 *)cpuid_entry_count + ++ current_processor * sizeof(U32)); ++ ++#if defined(ALLOW_ASSERT) ++ ASSERT(((U8 *)current_cpu_buffer + sizeof(U32)) <= ++ ((U8 *)current_cpu_buffer + ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32))); ++#endif ++ *current_cpu_buffer = sys_info_Get_Num_Cpuid_Funcs(NULL, NULL, NULL); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static U32 sys_info_Get_Cpuid_Buffer_Size(cpuid_entries) ++ * ++ * @param cpuid_entries - number of cpuid entries ++ * @return size of buffer needed in bytes ++ * ++ * @brief This routine returns number of bytes needed to hold the CPU_CS_INFO ++ * @brief structure. ++ * ++ */ ++static U32 sys_info_Get_Cpuid_Buffer_Size(U32 cpuid_entries) ++{ ++ U32 cpuid_size; ++ U32 buffer_size; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ cpuid_size = sizeof(VTSA_CPUID); ++ ++ buffer_size = ++ sizeof(IOCTL_SYS_INFO) + sizeof(VTSA_GEN_ARRAY_HDR) + ++ sizeof(VTSA_NODE_INFO) + sizeof(VTSA_GEN_ARRAY_HDR) + ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(VTSA_GEN_PER_CPU) + ++ GLOBAL_STATE_num_cpus(driver_state) * ++ sizeof(VTSA_GEN_ARRAY_HDR) + ++ cpuid_entries * cpuid_size; ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", buffer_size); ++ ++ return buffer_size; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern void sys_info_Fill_CPUID(...) ++ * ++ * @param num_cpuids, ++ * @param basic_funcs, ++ * @param extended_funcs, ++ * @param cpu, ++ * @param *current_cpuid ++ * @param *gen_per_cpu, ++ * @param *local_gpc ++ * ++ * @return None ++ * ++ * @brief This routine is called to build per cpu information. 
++ * @brief Fills in the cpuid for the processor in the right location in the buffer ++ * ++ */ ++static void sys_info_Fill_CPUID(U32 num_cpuids, U32 basic_funcs, ++ U32 extended_funcs, U32 cpu, ++ VTSA_CPUID *current_cpuid, ++ VTSA_GEN_PER_CPU *gen_per_cpu, ++ VTSA_GEN_PER_CPU *local_gpc) ++{ ++ U32 i, index, j; ++ U64 cpuid_function; ++ U64 rax, rbx, rcx, rdx; ++ VTSA_CPUID *cpuid_el; ++ U32 shift_nbits_core = 0; ++ U32 shift_nbits_pkg = 0; ++ // U32 family = 0; ++ U32 model = 0; ++ DRV_BOOL ht_supported = FALSE; ++ U32 apic_id = 0; ++ U32 num_logical_per_physical = 0; ++ U32 cores_per_die = 1; ++ U32 thread_id = 0; ++ U32 core_id = 0; ++ U32 package_id = 0; ++ U32 module_id = 0; ++ U32 cores_sharing_cache = 0; ++ U32 cache_mask_width = 0; ++ U32 num_cores = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("CPU: %x.", cpu); ++ ++ apic_id = CPU_STATE_apic_id(&pcb[cpu]); ++ SEP_DRV_LOG_TRACE("Cpu %x: apic_id = %d.", cpu, apic_id); ++ ++ for (i = 0, index = 0; index < num_cpuids; i++) { ++ cpuid_function = ++ (i < basic_funcs) ? 
i : (0x80000000 + i - basic_funcs); ++ ++ if (cpuid_function == 0x4) { ++ for (j = 0, rax = (U64)-1; (rax & 0x1f) != 0; j++) { ++ rcx = j; ++ UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx, ++ &rcx, &rdx); ++ cpuid_el = ¤t_cpuid[index]; ++ index++; ++ ++#if defined(ALLOW_ASSERT) ++ ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID)) <= ++ cpuid_buffer_limit); ++#endif ++ ++ VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) = ++ (U32)cpuid_function; ++ VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax; ++ VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx; ++ VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx; ++ VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx; ++ SEP_DRV_LOG_TRACE("Function: %x.", ++ (U32)cpuid_function); ++ SEP_DRV_LOG_TRACE( ++ "rax: %x, rbx: %x, rcx: %x, rdx: %x.", ++ (U32)rax, (U32)rbx, (U32)rcx, (U32)rdx); ++ ++ if ((rax & 0x1f) != 0) { ++ local_gpc = &gen_per_cpu[cpu]; ++ if (((rax >> 5) & 0x3) == 2) { ++ VTSA_GEN_PER_CPU_cpu_cache_L2( ++ local_gpc) = ++ (U32)(SYS_INFO_CACHE_SIZE( ++ rcx, ++ rbx) >> ++ 10); ++ SEP_DRV_LOG_TRACE( ++ "L2 Cache: %x.", ++ VTSA_GEN_PER_CPU_cpu_cache_L2( ++ local_gpc)); ++ cores_sharing_cache = ++ ((U16)(rax >> 14) & ++ 0xfff) + ++ 1; ++ SEP_DRV_LOG_TRACE( ++ "CORES_SHARING_CACHE=%d j=%d cpu=%d.", ++ cores_sharing_cache, j, ++ cpu); ++ } ++ ++ if (((rax >> 5) & 0x3) == 3) { ++ VTSA_GEN_PER_CPU_cpu_cache_L3( ++ local_gpc) = ++ (U32)(SYS_INFO_CACHE_SIZE( ++ rcx, ++ rbx) >> ++ 10); ++ SEP_DRV_LOG_TRACE( ++ "L3 Cache: %x.", ++ VTSA_GEN_PER_CPU_cpu_cache_L3( ++ local_gpc)); ++ } ++ } ++ if (j == 0) { ++ cores_per_die = ++ ((U16)(rax >> 26) & 0x3f) + 1; ++ } ++ } ++ if (cores_sharing_cache != 0) { ++ cache_mask_width = (U32)sys_info_nbits( ++ cores_sharing_cache); ++ SEP_DRV_LOG_TRACE("CACHE MASK WIDTH=%x.", ++ cache_mask_width); ++ } ++ } else if (cpuid_function == 0xb) { ++ j = 0; ++ do { ++ rcx = j; ++ UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx, ++ &rcx, &rdx); ++ cpuid_el = ¤t_cpuid[index]; ++ index++; ++ ++#if defined(ALLOW_ASSERT) ++ 
ASSERT(((U8 *)cpuid_el + ++ sizeof(VTSA_CPUID_X86)) <= ++ cpuid_buffer_limit); ++#endif ++ ++ VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) = ++ (U32)cpuid_function; ++ VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax; ++ VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx; ++ VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx; ++ VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx; ++ SEP_DRV_LOG_TRACE("Function: %x.", ++ (U32)cpuid_function); ++ SEP_DRV_LOG_TRACE( ++ "rax: %x, rbx: %x, rcx: %x, rdx: %x.", ++ (U32)rax, (U32)rbx, (U32)rcx, (U32)rdx); ++ if (j == 0) { ++ shift_nbits_core = ++ rax & ++ 0x1f; //No. of bits to shift APIC ID to get Core ID ++ } ++ if (j == 1) { ++ shift_nbits_pkg = ++ rax & ++ 0x1f; //No. of bits to shift APIC ID to get Pkg ID ++ } ++ j++; ++ } while (!(LOW_PART(rax) == 0 && LOW_PART(rbx) == 0)); ++ } else { ++ UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx, &rcx, ++ &rdx); ++ cpuid_el = ¤t_cpuid[index]; ++ index++; ++ ++ SEP_DRV_LOG_TRACE( ++ "Cpu %x: num_cpuids = %x i = %x index = %x.", ++ cpu, num_cpuids, i, index); ++ ++#if defined(ALLOW_ASSERT) ++ ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID_X86)) <= ++ cpuid_buffer_limit); ++ ++ ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID_X86)) <= ++ ((U8 *)current_cpuid + ++ (num_cpuids * sizeof(VTSA_CPUID_X86)))); ++#endif ++ ++ VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) = ++ (U32)cpuid_function; ++ VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax; ++ VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx; ++ VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx; ++ VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx; ++ SEP_DRV_LOG_TRACE("Function: %x.", (U32)cpuid_function); ++ SEP_DRV_LOG_TRACE("rax: %x, rbx: %x, rcx: %x, rdx: %x.", ++ (U32)rax, (U32)rbx, (U32)rcx, ++ (U32)rdx); ++ ++ if (cpuid_function == 0) { ++ if ((U32)rbx == 0x756e6547 && ++ (U32)rcx == 0x6c65746e && ++ (U32)rdx == 0x49656e69) { ++ VTSA_GEN_PER_CPU_platform_id( ++ local_gpc) = ++ SYS_Read_MSR( ++ MSR_FB_PCARD_ID_FUSE); ++ } ++ } else if (cpuid_function == 1) { ++ // family = 
(U32)(rax >> 8 & 0x0f); ++ /* extended model bits */ ++ model = (U32)(rax >> 12 & 0xf0) | ++ (U32)(rax >> 4 & 0x0f); ++ // model |= (U32)(rax >> 4 & 0x0f); ++ ht_supported = (rdx >> 28) & 1 ? TRUE : FALSE; ++ num_logical_per_physical = ++ (U32)((rbx & 0xff0000) >> 16); ++ if (num_logical_per_physical == 0) { ++ num_logical_per_physical = 1; ++ } ++ } else if (cpuid_function == 0xa) { ++ VTSA_GEN_PER_CPU_arch_perfmon_ver(local_gpc) = ++ (U32)(rax & 0xFF); ++ VTSA_GEN_PER_CPU_num_gp_counters(local_gpc) = ++ (U32)((rax >> 8) & 0xFF); ++ VTSA_GEN_PER_CPU_num_fixed_counters(local_gpc) = ++ (U32)(rdx & 0x1F); ++ } ++ } ++ } ++ ++ // set cpu_cache_L2 if not already set using 0x80000006 function ++ if (gen_per_cpu[cpu].cpu_cache_L2 == VTSA_NA && extended_funcs >= 6) { ++ UTILITY_Read_Cpuid(0x80000006, &rax, &rbx, &rcx, &rdx); ++ VTSA_GEN_PER_CPU_cpu_cache_L2(local_gpc) = (U32)(rcx >> 16); ++ } ++ ++ if (!ht_supported || num_logical_per_physical == cores_per_die) { ++ threads_per_core[cpu] = 1; ++ thread_id = 0; ++ } else { ++ // each core has 4 threads for MIC system, otherwise, it has 2 threads when ht is enabled ++ threads_per_core[cpu] = cpu_threads_per_core; ++ thread_id = (U16)(apic_id & (cpu_threads_per_core - 1)); ++ } ++ ++ core_id = (apic_id >> shift_nbits_core) & ++ sys_info_bitmask(shift_nbits_pkg - shift_nbits_core); ++ package_id = apic_id >> shift_nbits_pkg; ++ ++ if (cache_mask_width) { ++ module_id = (U32)(core_id / 2); ++ } ++ SEP_DRV_LOG_TRACE("MODULE ID=%d CORE ID=%d for cpu=%d PACKAGE ID=%d.", ++ module_id, core_id, cpu, package_id); ++ SEP_DRV_LOG_TRACE("Num_logical_per_physical=%d cores_per_die=%d.", ++ num_logical_per_physical, cores_per_die); ++ SEP_DRV_LOG_TRACE("Package_id %d, apic_id %x.", package_id, apic_id); ++ SEP_DRV_LOG_TRACE( ++ "Sys_info_nbits[cores_per_die, threads_per_core[%u]]: [%lld, %lld].", ++ cpu, sys_info_nbits(cores_per_die), ++ sys_info_nbits(threads_per_core[cpu])); ++ ++ VTSA_GEN_PER_CPU_cpu_intel_processor_number(local_gpc) = 
VTSA_NA32; ++ VTSA_GEN_PER_CPU_cpu_package_num(local_gpc) = (U16)package_id; ++ VTSA_GEN_PER_CPU_cpu_core_num(local_gpc) = (U16)core_id; ++ VTSA_GEN_PER_CPU_cpu_hw_thread_num(local_gpc) = (U16)thread_id; ++ VTSA_GEN_PER_CPU_cpu_threads_per_core(local_gpc) = ++ (U16)threads_per_core[cpu]; ++ VTSA_GEN_PER_CPU_cpu_module_num(local_gpc) = (U16)module_id; ++ num_cores = GLOBAL_STATE_num_cpus(driver_state) / threads_per_core[cpu]; ++ VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc) = ++ (U16)(num_cores / 2); // Relavent to Atom processors, Always 2 ++ VTSA_GEN_PER_CPU_cpu_core_type(local_gpc) = 0; ++ GLOBAL_STATE_num_modules(driver_state) = ++ VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc); ++ SEP_DRV_LOG_TRACE("MODULE COUNT=%d.", ++ GLOBAL_STATE_num_modules(driver_state)); ++ ++ core_to_package_map[cpu] = package_id; ++ core_to_phys_core_map[cpu] = core_id; ++ core_to_thread_map[cpu] = thread_id; ++ occupied_core_ids[core_id] = 1; ++ ++ if (num_packages < package_id + 1) { ++ num_packages = package_id + 1; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++#if !defined(DRV_SEP_ACRN_ON) ++/* ------------------------------------------------------------------------- */ ++/*! ++* @fn static void sys_info_Update_Hyperthreading_Info(buffer) ++* ++* @param buffer - points to the base of GEN_PER_CPU structure ++* @return None ++* ++* @brief This routine is called to update per cpu information based on HT ON/OFF. ++* ++*/ ++static VOID sys_info_Update_Hyperthreading_Info(VOID *buffer) ++{ ++ U32 cpu; ++ VTSA_GEN_PER_CPU *gen_per_cpu, *local_gpc; ++ U32 i = 0; ++ U32 num_cores = 0; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ cpu = CONTROL_THIS_CPU(); ++ ++ // get the GEN_PER_CPU entry for the current processor. 
++ gen_per_cpu = (VTSA_GEN_PER_CPU *)buffer; ++ ++ // Update GEN_PER_CPU ++ local_gpc = &(gen_per_cpu[cpu]); ++ ++ while (i < (U32)GLOBAL_STATE_num_cpus(driver_state)) { ++ if (cpu_built_sysinfo[i] == 1) { ++ i++; ++ } ++ } ++ ++ for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { ++ if (occupied_core_ids[i] == 1) { ++ num_cores++; ++ } ++ } ++ threads_per_core[cpu] = (U32)(GLOBAL_STATE_num_cpus(driver_state) / ++ (num_cores * num_packages)); ++ if (VTSA_GEN_PER_CPU_cpu_threads_per_core(local_gpc) != ++ (U16)threads_per_core[cpu]) { ++ VTSA_GEN_PER_CPU_cpu_threads_per_core(local_gpc) = ++ (U16)threads_per_core[cpu]; ++ VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc) = ++ (U16)(num_cores / 2); ++ GLOBAL_STATE_num_modules(driver_state) = ++ VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc); ++ } ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static void sys_info_Build_Percpu(buffer) ++ * ++ * @param buffer - points to the base of GEN_PER_CPU structure ++ * @return None ++ * ++ * @brief This routine is called to build per cpu information. ++ * ++ */ ++static VOID sys_info_Build_Percpu(PVOID param) ++{ ++ U32 basic_funcs, basic_4_funcs, extended_funcs; ++ U32 num_cpuids; ++ S32 cpu; ++ VTSA_CPUID *current_cpuid; ++ VTSA_GEN_ARRAY_HDR *cpuid_gen_array_hdr; ++ VTSA_GEN_PER_CPU *gen_per_cpu, *local_gpc; ++ VTSA_FIXED_SIZE_PTR *fsp; ++ U8 *cpuid_gen_array_hdr_base; ++#if defined(ALLOW_ASSERT) ++ U8 *cpuid_buffer_limit; ++#endif ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); ++ ++ if (param == NULL) { ++ cpu = CONTROL_THIS_CPU(); ++ } else { ++ cpu = *(S32 *)param; ++ } ++ num_cpuids = (U32)sys_info_Get_Num_Cpuid_Funcs( ++ &basic_funcs, &basic_4_funcs, &extended_funcs); ++ ++ // get the GEN_PER_CPU entry for the current processor. 
++ gen_per_cpu = (VTSA_GEN_PER_CPU *)gen_per_cpu_ptr; ++ SEP_DRV_LOG_TRACE("cpu %x: gen_per_cpu = %p.", cpu, gen_per_cpu); ++ ++ // get GEN_ARRAY_HDR and cpuid array base ++ cpuid_gen_array_hdr_base = ++ (U8 *)gen_per_cpu + ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(VTSA_GEN_PER_CPU); ++ ++ SEP_DRV_LOG_TRACE("cpuid_gen_array_hdr_base = %p.", ++ cpuid_gen_array_hdr_base); ++ SEP_DRV_LOG_TRACE("cpu = %x.", cpu); ++ SEP_DRV_LOG_TRACE("cpuid_total_count[cpu] = %x.", ++ cpuid_total_count[cpu]); ++ SEP_DRV_LOG_TRACE("sizeof(VTSA_CPUID) = %lx.", sizeof(VTSA_CPUID)); ++ ++ cpuid_gen_array_hdr =(VTSA_GEN_ARRAY_HDR *) ++ ((U8 *)cpuid_gen_array_hdr_base + ++ sizeof(VTSA_GEN_ARRAY_HDR) * cpu + ++ cpuid_total_count[cpu] * sizeof(VTSA_CPUID)); ++ ++ // get current cpuid array base. ++ current_cpuid = (VTSA_CPUID *)((U8 *)cpuid_gen_array_hdr + ++ sizeof(VTSA_GEN_ARRAY_HDR)); ++#if defined(ALLOW_ASSERT) ++ // get the absolute buffer limit ++ cpuid_buffer_limit = ++ (U8 *)ioctl_sys_info + ++ GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)); ++#endif ++ ++ // ++ // Fill in GEN_PER_CPU ++ // ++ local_gpc = &(gen_per_cpu[cpu]); ++ ++ if (VTSA_GEN_PER_CPU_cpu_intel_processor_number(local_gpc)) { ++ SEP_DRV_LOG_TRACE_OUT( ++ "Early exit (VTSA_GEN_PER_CPU_cpu_intel_processor_number)."); ++ return; ++ } ++ VTSA_GEN_PER_CPU_cpu_number(local_gpc) = cpu; ++ VTSA_GEN_PER_CPU_cpu_speed_mhz(local_gpc) = VTSA_NA32; ++ VTSA_GEN_PER_CPU_cpu_fsb_mhz(local_gpc) = VTSA_NA32; ++ ++ fsp = &VTSA_GEN_PER_CPU_cpu_cpuid_array(local_gpc); ++ VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; ++ VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = ++ (U64)((U8 *)cpuid_gen_array_hdr - ++ (U8 *)&IOCTL_SYS_INFO_sys_info(ioctl_sys_info)); ++ ++ /* ++ * Get the time stamp difference between this cpu and cpu 0. ++ * This value will be used by user mode code to generate standardize ++ * time needed for sampling over time (SOT) functionality. 
++ */ ++ VTSA_GEN_PER_CPU_cpu_tsc_offset(local_gpc) = TSC_SKEW(cpu); ++ ++ // ++ // fill GEN_ARRAY_HDR ++ // ++ fsp = &VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(cpuid_gen_array_hdr); ++ VTSA_GEN_ARRAY_HDR_hdr_size(cpuid_gen_array_hdr) = ++ sizeof(VTSA_GEN_ARRAY_HDR); ++ VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; ++ VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = 0; ++ VTSA_GEN_ARRAY_HDR_array_num_entries(cpuid_gen_array_hdr) = num_cpuids; ++ VTSA_GEN_ARRAY_HDR_array_entry_size(cpuid_gen_array_hdr) = ++ sizeof(VTSA_CPUID); ++ VTSA_GEN_ARRAY_HDR_array_type(cpuid_gen_array_hdr) = GT_CPUID; ++#if defined(DRV_IA32) ++ VTSA_GEN_ARRAY_HDR_array_subtype(cpuid_gen_array_hdr) = GST_X86; ++#elif defined(DRV_EM64T) ++ VTSA_GEN_ARRAY_HDR_array_subtype(cpuid_gen_array_hdr) = GST_EM64T; ++#endif ++ ++ // ++ // fill out cpu id information ++ // ++ sys_info_Fill_CPUID(num_cpuids, basic_funcs, extended_funcs, cpu, ++ current_cpuid, gen_per_cpu, local_gpc); ++ /* ++ * Mark cpu info on this cpu as successfully built ++ */ ++ cpu_built_sysinfo[cpu] = 1; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static void sys_info_Get_Processor_Info(NULL) ++ * ++ * @param None ++ * @return None ++ * ++ * @brief This routine is called to get global informaton on the processor in general, ++ * it include: ++ * cpu_thread_per_core ++ * ++ */ ++static VOID sys_info_Get_Processor_Info(VOID *param) ++{ ++ U64 rax; ++ U64 rbx; ++ U64 rcx; ++ U64 rdx; ++ U32 family; ++ U32 model; ++ DRV_BOOL ht_supported = FALSE; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // read cpuid with function 1 to find family/model ++ UTILITY_Read_Cpuid(1, &rax, &rbx, &rcx, &rdx); ++ family = (U32)(rax >> 8 & 0x0f); ++ model = (U32)(rax >> 12 & 0xf0); /* extended model bits */ ++ model |= (U32)(rax >> 4 & 0x0f); ++ if (is_Knights_family(family, model)) { ++ cpu_threads_per_core = 4; ++ } else { ++ ht_supported = (rdx >> 28) & 1 ? 
TRUE : FALSE; ++ if (ht_supported) { ++ cpu_threads_per_core = 2; ++ } else { ++ cpu_threads_per_core = 1; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern void SYS_Info_Build(void) ++ * ++ * @param None ++ * @return None ++ * ++ * @brief This is the driver routine that constructs the VTSA_SYS_INFO ++ * @brief structure used to report system information into the tb5 file ++ * ++ */ ++U32 SYS_INFO_Build(void) ++{ ++ VTSA_GEN_ARRAY_HDR *gen_array_hdr; ++ VTSA_NODE_INFO *node_info; ++ VTSA_SYS_INFO *sys_info; ++ VTSA_FIXED_SIZE_PTR *fsp; ++ U32 buffer_size; ++ U32 total_cpuid_entries; ++ S32 i; ++ struct sysinfo k_sysinfo; ++ U32 res; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ SEP_DRV_LOG_TRACE("Entered."); ++ ++ if (ioctl_sys_info) { ++ /* The sys info has already been computed. Do not redo */ ++ buffer_size = ++ GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)); ++ return buffer_size - sizeof(GENERIC_IOCTL); ++ } ++ ++ si_meminfo(&k_sysinfo); ++ ++ buffer_size = GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32); ++ cpu_built_sysinfo = CONTROL_Allocate_Memory(buffer_size); ++ if (cpu_built_sysinfo == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Cpu_built_sysinfo memory alloc failed!"); ++ return 0; ++ } ++ ++ cpuid_entry_count = CONTROL_Allocate_Memory(buffer_size); ++ if (cpuid_entry_count == NULL) { ++ cpu_built_sysinfo = CONTROL_Free_Memory(cpu_built_sysinfo); ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Memory alloc failed for cpuid_entry_count!"); ++ return 0; ++ } ++ ++ cpuid_total_count = CONTROL_Allocate_Memory(buffer_size); ++ if (cpuid_total_count == NULL) { ++ cpu_built_sysinfo = CONTROL_Free_Memory(cpu_built_sysinfo); ++ cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count); ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Memory alloc failed for cpuid_total_count!"); ++ return 0; ++ } ++ ++ // checking on family-model to set threads_per_core as 4: MIC, 2: ht-on; 1: rest 
++ sys_info_Get_Processor_Info(NULL); ++ ++#if defined(DRV_SEP_ACRN_ON) ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ sys_info_Get_Cpuid_Entry_Count(&i); ++ } ++#else ++ CONTROL_Invoke_Parallel(sys_info_Get_Cpuid_Entry_Count, NULL); ++#endif ++ ++ total_cpuid_entries = 0; ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ //if cpu is offline, set its cpuid count same as cpu0 ++ if (cpuid_entry_count[i] == 0) { ++ cpuid_entry_count[i] = cpuid_entry_count[0]; ++ cpu_built_sysinfo[i] = 0; ++ } ++ cpuid_total_count[i] = total_cpuid_entries; ++ total_cpuid_entries += cpuid_entry_count[i]; ++ } ++ ++ ioctl_sys_info_size = ++ sys_info_Get_Cpuid_Buffer_Size(total_cpuid_entries); ++ ioctl_sys_info = CONTROL_Allocate_Memory(ioctl_sys_info_size); ++ if (ioctl_sys_info == NULL) { ++ cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count); ++ cpuid_total_count = CONTROL_Free_Memory(cpuid_total_count); ++ ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Memory alloc failed for ioctl_sys_info!"); ++ // return STATUS_INSUFFICIENT_RESOURCES; ++ return 0; ++ } ++ ++ // ++ // fill in ioctl and cpu_cs_info fields. 
++ // ++ GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)) = ++ ioctl_sys_info_size; ++ GENERIC_IOCTL_ret(&IOCTL_SYS_INFO_gen(ioctl_sys_info)) = VT_SUCCESS; ++ ++ sys_info = &IOCTL_SYS_INFO_sys_info(ioctl_sys_info); ++ VTSA_SYS_INFO_min_app_address(sys_info) = VTSA_NA64; ++ VTSA_SYS_INFO_max_app_address(sys_info) = VTSA_NA64; ++ VTSA_SYS_INFO_page_size(sys_info) = k_sysinfo.mem_unit; ++ VTSA_SYS_INFO_allocation_granularity(sys_info) = k_sysinfo.mem_unit; ++ ++ // ++ // offset from ioctl_sys_info ++ // ++ VTSA_FIXED_SIZE_PTR_is_ptr(&VTSA_SYS_INFO_node_array(sys_info)) = 0; ++ VTSA_FIXED_SIZE_PTR_fs_offset(&VTSA_SYS_INFO_node_array(sys_info)) = ++ sizeof(VTSA_SYS_INFO); ++ ++ // ++ // fill in node_info array header ++ // ++ gen_array_hdr = (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + ++ VTSA_FIXED_SIZE_PTR_fs_offset( ++ &VTSA_SYS_INFO_node_array(sys_info))); ++ ++ SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); ++ fsp = &VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gen_array_hdr); ++ VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; ++ VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = 0; ++ ++ VTSA_GEN_ARRAY_HDR_hdr_size(gen_array_hdr) = sizeof(VTSA_GEN_ARRAY_HDR); ++ VTSA_GEN_ARRAY_HDR_array_num_entries(gen_array_hdr) = 1; ++ VTSA_GEN_ARRAY_HDR_array_entry_size(gen_array_hdr) = ++ sizeof(VTSA_NODE_INFO); ++ VTSA_GEN_ARRAY_HDR_array_type(gen_array_hdr) = GT_NODE; ++ VTSA_GEN_ARRAY_HDR_array_subtype(gen_array_hdr) = GST_UNK; ++ ++ // ++ // fill in node_info ++ // ++ node_info = (VTSA_NODE_INFO *)((U8 *)gen_array_hdr + ++ sizeof(VTSA_GEN_ARRAY_HDR)); ++ SEP_DRV_LOG_TRACE("Node_info = %p.", node_info); ++ ++ VTSA_NODE_INFO_node_type_from_shell(node_info) = VTSA_NA32; ++ ++ VTSA_NODE_INFO_node_id(node_info) = VTSA_NA32; ++ VTSA_NODE_INFO_node_num_available(node_info) = ++ GLOBAL_STATE_num_cpus(driver_state); ++ VTSA_NODE_INFO_node_num_used(node_info) = VTSA_NA32; ++ total_ram = k_sysinfo.totalram << PAGE_SHIFT; ++ VTSA_NODE_INFO_node_physical_memory(node_info) = total_ram; ++ ++ fsp = 
&VTSA_NODE_INFO_node_percpu_array(node_info); ++ VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; ++ VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = sizeof(VTSA_SYS_INFO) + ++ sizeof(VTSA_GEN_ARRAY_HDR) + ++ sizeof(VTSA_NODE_INFO); ++ // ++ // fill in gen_per_cpu array header ++ // ++ gen_array_hdr = ++ (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + ++ VTSA_FIXED_SIZE_PTR_fs_offset(fsp)); ++ SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); ++ ++ fsp = &VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gen_array_hdr); ++ VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; ++ VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = 0; ++ ++ VTSA_GEN_ARRAY_HDR_hdr_size(gen_array_hdr) = sizeof(VTSA_GEN_ARRAY_HDR); ++ VTSA_GEN_ARRAY_HDR_array_num_entries(gen_array_hdr) = ++ GLOBAL_STATE_num_cpus(driver_state); ++ VTSA_GEN_ARRAY_HDR_array_entry_size(gen_array_hdr) = ++ sizeof(VTSA_GEN_PER_CPU); ++ VTSA_GEN_ARRAY_HDR_array_type(gen_array_hdr) = GT_PER_CPU; ++ ++#if defined(DRV_IA32) ++ VTSA_GEN_ARRAY_HDR_array_subtype(gen_array_hdr) = GST_X86; ++#elif defined(DRV_EM64T) ++ VTSA_GEN_ARRAY_HDR_array_subtype(gen_array_hdr) = GST_EM64T; ++#endif ++ ++ gen_per_cpu_ptr = (U8 *)gen_array_hdr + sizeof(VTSA_GEN_ARRAY_HDR); ++ ++#if defined(DRV_SEP_ACRN_ON) ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ APIC_Init(&i); ++ sys_info_Build_Percpu(&i); ++ } ++#else ++ CONTROL_Invoke_Parallel(APIC_Init, NULL); ++ CONTROL_Invoke_Parallel(sys_info_Build_Percpu, NULL); ++ CONTROL_Invoke_Parallel(sys_info_Update_Hyperthreading_Info, ++ (VOID *)gen_per_cpu_ptr); ++#endif ++ ++ /* ++ * Cleanup - deallocate memory that is no longer needed ++ */ ++ cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count); ++ ++ res = ioctl_sys_info_size - sizeof(GENERIC_IOCTL); ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); ++ return res; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern void SYS_Info_Transfer(buf_usr_to_drv, len_usr_to_drv) ++ * ++ * @param buf_usr_to_drv - pointer to the buffer to write the data into ++ * @param len_usr_to_drv - length of the buffer passed in ++ * ++ * @brief Transfer the data collected via the SYS_INFO_Build routine ++ * @brief back to the caller. ++ * ++ */ ++VOID SYS_INFO_Transfer(PVOID buf_usr_to_drv, unsigned long len_usr_to_drv) ++{ ++ unsigned long exp_size; ++ ssize_t unused; ++ ++ SEP_DRV_LOG_TRACE_IN("Buffer: %p, buffer_len: %u.", buf_usr_to_drv, ++ (U32)len_usr_to_drv); ++ ++ if (ioctl_sys_info == NULL || len_usr_to_drv == 0) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Ioctl_sys_info is NULL or len_usr_to_drv is 0!"); ++ return; ++ } ++ exp_size = GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)) - ++ sizeof(GENERIC_IOCTL); ++ if (len_usr_to_drv < exp_size) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Insufficient Space!"); ++ return; ++ } ++ unused = copy_to_user((void __user *)buf_usr_to_drv, ++ &(IOCTL_SYS_INFO_sys_info(ioctl_sys_info)), ++ len_usr_to_drv); ++ if (unused) { ++ // no-op ... eliminates "variable not used" compiler warning ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern void SYS_Info_Destroy(void) ++ * ++ * @param None ++ * @return None ++ * ++ * @brief Free any memory associated with the sys info before unloading the driver ++ * ++ */ ++VOID SYS_INFO_Destroy(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ cpuid_total_count = CONTROL_Free_Memory(cpuid_total_count); ++ cpu_built_sysinfo = CONTROL_Free_Memory(cpu_built_sysinfo); ++ ioctl_sys_info = CONTROL_Free_Memory(ioctl_sys_info); ++ ioctl_sys_info_size = 0; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern void SYS_INFO_Build_Cpu(PVOID param) ++ * ++ * @param PVOID param ++ * @return None ++ * ++ * @brief call routine to populate cpu info ++ * ++ */ ++VOID SYS_INFO_Build_Cpu(PVOID param) ++{ ++ VTSA_GEN_ARRAY_HDR *gen_array_hdr; ++ VTSA_NODE_INFO *node_info; ++ VTSA_SYS_INFO *sys_info; ++ VTSA_FIXED_SIZE_PTR *fsp; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (!ioctl_sys_info) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Ioctl_sys_info is null!"); ++ return; ++ } ++ sys_info = &IOCTL_SYS_INFO_sys_info(ioctl_sys_info); ++ gen_array_hdr = ++ (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + ++ VTSA_FIXED_SIZE_PTR_fs_offset( ++ &VTSA_SYS_INFO_node_array( ++ sys_info))); ++ SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); ++ ++ node_info = (VTSA_NODE_INFO *)((U8 *)gen_array_hdr + ++ sizeof(VTSA_GEN_ARRAY_HDR)); ++ SEP_DRV_LOG_TRACE("Node_info = %p.", node_info); ++ fsp = &VTSA_NODE_INFO_node_percpu_array(node_info); ++ ++ gen_array_hdr = ++ (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + ++ VTSA_FIXED_SIZE_PTR_fs_offset(fsp)); ++ SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); ++ gen_per_cpu_ptr = (U8 *)gen_array_hdr + sizeof(VTSA_GEN_ARRAY_HDR); ++ ++ sys_info_Build_Percpu(NULL); ++ ++#if !defined(DRV_SEP_ACRN_ON) ++ sys_info_Update_Hyperthreading_Info((VOID *)gen_per_cpu_ptr); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} +diff --git a/drivers/platform/x86/sepdk/sep/unc_common.c b/drivers/platform/x86/sepdk/sep/unc_common.c +new file mode 100755 +index 000000000000..5442734a91f7 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/unc_common.c +@@ -0,0 +1,388 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. 
++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "inc/ecb_iterators.h" ++#include "inc/control.h" ++#include "inc/pci.h" ++#include "inc/unc_common.h" ++#include "inc/utility.h" ++ ++extern UNCORE_TOPOLOGY_INFO_NODE uncore_topology; ++extern PLATFORM_TOPOLOGY_PROG_NODE platform_topology_prog_node; ++extern U64 *read_counter_info; ++ ++/* this is the table to keep pci_bus structure for PCI devices ++ * for both pci config access and mmio access ++ */ ++UNC_PCIDEV_NODE unc_pcidev_map[MAX_DEVICES]; ++ ++#define GET_PACKAGE_NUM(device_type, cpu) \ ++ (((device_type) == DRV_SINGLE_INSTANCE) ? 
0 : core_to_package_map[cpu]) ++ ++/************************************************************/ ++/* ++ * unc common Dispatch functions ++ * ++ ************************************************************/ ++void UNC_COMMON_Dummy_Func(PVOID param) ++{ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ SEP_DRV_LOG_TRACE_OUT("Empty function."); ++} ++ ++/************************************************************/ ++/* ++ * UNC common PCI based API ++ * ++ ************************************************************/ ++ ++/*! ++ * @fn OS_STATUS UNC_COMMON_Add_Bus_Map ++ * ++ * @brief This code discovers which package's data is read off of which bus. ++ * ++ * @param None ++ * ++ * @return OS_STATUS ++ * ++ * Special Notes: ++ * This probably will move to the UBOX once that is programmed. ++ */ ++OS_STATUS ++UNC_COMMON_Add_Bus_Map(U32 uncore_did, U32 dev_node, U32 bus_no) ++{ ++ U32 i = 0; ++ U32 entries = 0; ++ ++ if (!UNC_PCIDEV_busno_list(&(unc_pcidev_map[dev_node]))) { ++ // allocate array for holding bus mapping ++ // package based device: an entry per package, all units in the same package are in the same bus. 
++ // system based device: an entry per unit if in different bus ++ entries = GET_MAX_PCIDEV_ENTRIES(num_packages); ++ UNC_PCIDEV_busno_list(&(unc_pcidev_map[dev_node])) = ++ CONTROL_Allocate_Memory(entries * sizeof(S32)); ++ if (UNC_PCIDEV_busno_list(&(unc_pcidev_map[dev_node])) == ++ NULL) { ++ SEP_DRV_LOG_ERROR("Memory allocation failure!"); ++ return OS_NO_MEM; ++ } ++ UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = 0; ++ UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])) = entries; ++ for (i = 0; i < entries; i++) { ++ UNC_PCIDEV_busno_entry(&(unc_pcidev_map[dev_node]), i) = ++ INVALID_BUS_NUMBER; ++ } ++ } else { ++ entries = UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])); ++ } ++ ++ for (i = 0; i < UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])); ++ i++) { ++ if (UNC_PCIDEV_busno_entry(&(unc_pcidev_map[dev_node]), i) == ++ (S32)bus_no) { ++ SEP_DRV_LOG_TRACE( ++ "Already in the map, another unit, no add."); ++ return OS_SUCCESS; ++ } ++ } ++ if (i < entries) { ++ UNC_PCIDEV_busno_entry(&(unc_pcidev_map[dev_node]), i) = ++ (S32)bus_no; ++ UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]))++; ++ SEP_DRV_LOG_TRACE("Add numpackages=%d busno=%x devnode=%d.", ++ num_packages, bus_no, dev_node); ++ return OS_SUCCESS; ++ } ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Exceed max map entries, drop this bus map!"); ++ return OS_NO_MEM; ++} ++ ++OS_STATUS UNC_COMMON_Init(void) ++{ ++ U32 i = 0; ++ ++ for (i = 0; i < MAX_DEVICES; i++) { ++ memset(&(unc_pcidev_map[i]), 0, sizeof(UNC_PCIDEV_NODE)); ++ } ++ ++ memset((char *)&uncore_topology, 0, sizeof(UNCORE_TOPOLOGY_INFO_NODE)); ++ memset((char *)&platform_topology_prog_node, 0, ++ sizeof(PLATFORM_TOPOLOGY_PROG_NODE)); ++ ++ return OS_SUCCESS; ++} ++ ++/*! 
++ * @fn extern VOID UNC_COMMON_Clean_Up(PVOID) ++ * ++ * @brief clear out out programming ++ * ++ * @param None ++ * ++ * @return None ++ */ ++void UNC_COMMON_Clean_Up(void) ++{ ++ U32 i = 0; ++ for (i = 0; i < MAX_DEVICES; i++) { ++ if (UNC_PCIDEV_busno_list(&(unc_pcidev_map[i]))) { ++ UNC_PCIDEV_busno_list(&(unc_pcidev_map[i])) = ++ CONTROL_Free_Memory(UNC_PCIDEV_busno_list( ++ &(unc_pcidev_map[i]))); ++ } ++ if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[i]))) { ++ UNC_PCIDEV_mmio_map(&(unc_pcidev_map[i])) = ++ CONTROL_Free_Memory(UNC_PCIDEV_mmio_map( ++ &(unc_pcidev_map[i]))); ++ } ++ memset(&(unc_pcidev_map[i]), 0, sizeof(UNC_PCIDEV_NODE)); ++ } ++} ++ ++/*! ++ * @fn static VOID UNC_COMMON_PCI_Scan_For_Uncore(VOID*) ++ * ++ * @brief Initial write of PMU registers ++ * Walk through the enties and write the value of the register accordingly. ++ * When current_group = 0, then this is the first time this routine is called, ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++ ++VOID UNC_COMMON_PCI_Scan_For_Uncore(PVOID param, U32 dev_node, ++ DEVICE_CALLBACK callback) ++{ ++ U32 device_id; ++ U32 value; ++ U32 vendor_id; ++ U32 busno; ++ U32 j, k, l; ++ U32 device_found = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p, dev_node: %u, callback: %p.", ++ param, dev_node, callback); ++ ++ for (busno = 0; busno < 256; busno++) { ++ for (j = 0; j < MAX_PCI_DEVNO; j++) { ++ if (!(UNCORE_TOPOLOGY_INFO_pcidev_valid( ++ &uncore_topology, dev_node, j))) { ++ continue; ++ } ++ for (k = 0; k < MAX_PCI_FUNCNO; k++) { ++ if (!(UNCORE_TOPOLOGY_INFO_pcidev_is_devno_funcno_valid( ++ &uncore_topology, dev_node, j, ++ k))) { ++ continue; ++ } ++ device_found = 0; ++ value = PCI_Read_U32_Valid(busno, j, k, 0, 0); ++ CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE( ++ value, vendor_id, device_id); ++ SEP_DRV_LOG_TRACE("Uncore device ID = 0x%x.", ++ device_id); ++ ++ for (l = 0; ++ l < ++ UNCORE_TOPOLOGY_INFO_num_deviceid_entries( ++ &uncore_topology, dev_node); ++ l++) { ++ if 
(UNCORE_TOPOLOGY_INFO_deviceid( ++ &uncore_topology, dev_node, ++ l) == device_id) { ++ device_found = 1; ++ break; ++ } ++ } ++ if (device_found) { ++ if (UNC_COMMON_Add_Bus_Map( ++ device_id, dev_node, ++ busno) == OS_SUCCESS) { ++ UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found( ++ &uncore_topology, ++ dev_node, j, k)++; ++ SEP_DRV_LOG_DETECTION( ++ "Found device 0x%x at BDF(%x:%x:%x) [%u unit(s) so far].", ++ device_id, busno, j, k, ++ UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found( ++ &uncore_topology, ++ dev_node, j, ++ k)); ++ } ++ } ++ } ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn extern VOID UNC_COMMON_Get_Platform_Topology() ++ * ++ * @brief This function will walk through the platform registers to retrieve information and calculate the bus no. ++ * Reads appropriate pci_config regs and populates the PLATFORM_TOPOLOGY_PROG_NODE structure with the reg value. ++ * ++ * @param U32 dev_node - Device no. ++ * ++ * @return None ++ * ++ * Special Notes: ++ * device_num corresponds to Memory controller ++ * func_num corresponds to Channel number ++ * reg_offset corresponds to dimm slot ++ */ ++VOID UNC_COMMON_Get_Platform_Topology(U32 dev_node) ++{ ++ U32 num_registers = 0; ++ // U32 device_index = 0; ++ U32 bus_num = 0; ++ U32 i = 0; ++ U32 func_num = 0; ++ U32 num_pkgs = num_packages; ++ U32 device_num = 0; ++ U32 reg_offset = 0; ++ U32 len = 0; ++ U64 reg_value = 0; ++ U32 device_value = 0; ++ U64 reg_mask = 0; ++ U32 vendor_id; ++ U32 device_id; ++ U32 valid; ++ ++ PLATFORM_TOPOLOGY_REG topology_regs = NULL; ++ ++ SEP_DRV_LOG_TRACE_IN("Dev_node: %u.", dev_node); ++ PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid( ++ &platform_topology_prog_node, dev_node) = 1; ++ ++ if (num_packages > MAX_PACKAGES) { ++ SEP_DRV_LOG_ERROR( ++ "Num_packages %d > MAX_PACKAGE, getting for only %d packages.", ++ num_packages, MAX_PACKAGES); ++ num_pkgs = MAX_PACKAGES; ++ } ++ ++ num_registers = PLATFORM_TOPOLOGY_PROG_topology_device_num_registers( ++ 
&platform_topology_prog_node, dev_node); ++ topology_regs = PLATFORM_TOPOLOGY_PROG_topology_topology_regs( ++ &platform_topology_prog_node, dev_node); ++ // device_index = PLATFORM_TOPOLOGY_PROG_topology_device_device_index( ++ // &platform_topology_prog_node, dev_node); ++ ++ for (i = 0; i < num_pkgs; i++) { ++ for (len = 0; len < num_registers; len++) { ++ if (PLATFORM_TOPOLOGY_REG_reg_type( ++ topology_regs, len) == PMU_REG_PROG_MSR) { ++ reg_value = SYS_Read_MSR( ++ PLATFORM_TOPOLOGY_REG_reg_id( ++ topology_regs, len)); ++ reg_mask = PLATFORM_TOPOLOGY_REG_reg_mask( ++ topology_regs, len); ++ PLATFORM_TOPOLOGY_REG_reg_value(topology_regs, ++ len, i) = ++ reg_value & reg_mask; ++ SEP_DRV_LOG_TRACE( ++ "Read UNCORE_MSR_FREQUENCY 0x%x\n", ++ PLATFORM_TOPOLOGY_REG_reg_id( ++ topology_regs, len)); ++ } else { ++ if (!IS_BUS_MAP_VALID(dev_node, i)) { ++ continue; ++ } ++ bus_num = GET_BUS_MAP(dev_node, i); ++ device_num = PLATFORM_TOPOLOGY_REG_device( ++ topology_regs, len); ++ func_num = PLATFORM_TOPOLOGY_REG_function( ++ topology_regs, len); ++ reg_offset = PLATFORM_TOPOLOGY_REG_reg_id( ++ topology_regs, len); ++ device_value = PCI_Read_U32_Valid( ++ bus_num, device_num, func_num, 0, 0); ++ CHECK_IF_GENUINE_INTEL_DEVICE(device_value, ++ vendor_id, ++ device_id, valid); ++ SEP_DRV_LOG_TRACE("Uncore device ID = 0x%x.", ++ device_id); ++ if (!valid) { ++ PLATFORM_TOPOLOGY_REG_device_valid( ++ topology_regs, len) = 0; ++ } ++ PLATFORM_TOPOLOGY_REG_reg_value(topology_regs, ++ len, i) = ++ PCI_Read_U32_Valid(bus_num, device_num, ++ func_num, reg_offset, ++ PCI_INVALID_VALUE); ++ } ++ } ++ if (PLATFORM_TOPOLOGY_PROG_topology_device_scope( ++ &platform_topology_prog_node, dev_node) == ++ SYSTEM_EVENT) { ++ break; ++ } ++ } ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/************************************************************/ ++/* ++ * UNC common MSR based API ++ * ++ ************************************************************/ ++ ++/*! 
++ * @fn VOID UNC_COMMON_MSR_Clean_Up(PVOID) ++ * ++ * @brief clear out out programming ++ * ++ * @param None ++ * ++ * @return None ++ */ ++VOID UNC_COMMON_MSR_Clean_Up(VOID *param) ++{ ++ U32 dev_idx; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ dev_idx = *((U32 *)param); ++ FOR_EACH_REG_ENTRY_UNC(pecb, dev_idx, i) ++ { ++ if (ECB_entries_clean_up_get(pecb, i)) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); ++ } ++ } ++ END_FOR_EACH_REG_ENTRY_UNC; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} +diff --git a/drivers/platform/x86/sepdk/sep/unc_gt.c b/drivers/platform/x86/sepdk/sep/unc_gt.c +new file mode 100755 +index 000000000000..34e7650da94b +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/unc_gt.c +@@ -0,0 +1,470 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "inc/ecb_iterators.h" ++#include "inc/control.h" ++#include "inc/unc_common.h" ++#include "inc/utility.h" ++#include "inc/pci.h" ++#include "inc/unc_gt.h" ++ ++extern U64 *read_counter_info; ++extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++ ++static U64 unc_gt_virtual_address; ++static SEP_MMIO_NODE unc_gt_map; ++static U32 unc_gt_rc6_reg1; ++static U32 unc_gt_rc6_reg2; ++static U32 unc_gt_clk_gt_reg1; ++static U32 unc_gt_clk_gt_reg2; ++static U32 unc_gt_clk_gt_reg3; ++static U32 unc_gt_clk_gt_reg4; ++ ++/*! ++ * @fn static VOID unc_gt_Write_PMU(VOID*) ++ * ++ * @brief Initial write of PMU registers ++ * Walk through the enties and write the value of the register accordingly. ++ * ++ * @param device id ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID unc_gt_Write_PMU(VOID *param) ++{ ++ U32 dev_idx; ++ ECB pecb; ++ DRV_PCI_DEVICE_ENTRY_NODE dpden; ++ U64 device_id; ++ U32 vendor_id; ++ U64 bar_lo; ++ U32 offset_delta; ++ U32 tmp_value; ++ U32 this_cpu; ++ U32 value; ++ CPU_STATE pcpu; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[0]; ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (!CPU_STATE_system_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); ++ return; ++ } ++ ++ dpden = ECB_pcidev_entry_node(pecb); ++ value = PCI_Read_U32(DRV_PCI_DEVICE_ENTRY_bus_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_dev_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_func_no(&dpden), 0); ++ vendor_id = DRV_GET_PCI_VENDOR_ID(value); ++ device_id = DRV_GET_PCI_DEVICE_ID(value); ++ ++ if (DRV_IS_INTEL_VENDOR_ID(vendor_id) && ++ DRV_IS_GT_DEVICE_ID(device_id)) { ++ SEP_DRV_LOG_TRACE("Found Desktop GT."); ++ } ++ ++ 
bar_lo = PCI_Read_U32(DRV_PCI_DEVICE_ENTRY_bus_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_dev_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_func_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_bar_offset(&dpden)); ++ bar_lo &= UNC_GT_BAR_MASK; ++ ++ PCI_Map_Memory(&unc_gt_map, bar_lo, GT_MMIO_SIZE); ++ unc_gt_virtual_address = SEP_MMIO_NODE_virtual_address(&unc_gt_map); ++ ++ FOR_EACH_PCI_DATA_REG_RAW(pecb, i, dev_idx) ++ { ++ offset_delta = ECB_entries_reg_offset(pecb, i); ++ // this is needed for overflow detection of the accumulators. ++ if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) { ++ LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = ++ (U64)ECB_entries_max_bits(pecb, i); ++ } ++ } ++ END_FOR_EACH_PCI_CCCR_REG_RAW; ++ ++ //enable the global control to clear the counter first ++ SYS_Write_MSR(PERF_GLOBAL_CTRL, ECB_entries_reg_value(pecb, 0)); ++ FOR_EACH_PCI_CCCR_REG_RAW(pecb, i, dev_idx) ++ { ++ offset_delta = ECB_entries_reg_offset(pecb, i); ++ if (offset_delta == PERF_GLOBAL_CTRL) { ++ continue; ++ } ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, offset_delta, ++ GT_CLEAR_COUNTERS); ++ ++ SEP_DRV_LOG_TRACE("CCCR offset delta is 0x%x W is clear ctrs.", ++ offset_delta); ++ } ++ END_FOR_EACH_PCI_CCCR_REG_RAW; ++ ++ //disable the counters ++ SYS_Write_MSR(PERF_GLOBAL_CTRL, 0LL); ++ ++ FOR_EACH_PCI_CCCR_REG_RAW(pecb, i, dev_idx) ++ { ++ offset_delta = ECB_entries_reg_offset(pecb, i); ++ if (offset_delta == PERF_GLOBAL_CTRL) { ++ continue; ++ } ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, offset_delta, ++ ((U32)ECB_entries_reg_value(pecb, i))); ++ tmp_value = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); ++ ++ // remove compiler warning on unused variables ++ if (tmp_value) { ++ } ++ ++ SEP_DRV_LOG_TRACE( ++ "CCCR offset delta is 0x%x R is 0x%x W is 0x%llx.", ++ offset_delta, tmp_value, ++ ECB_entries_reg_value(pecb, i)); ++ } ++ END_FOR_EACH_PCI_CCCR_REG_RAW; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn static VOID unc_gt_Disable_RC6_Clock_Gating(void) ++ * ++ * @brief This snippet of code allows GT events to count by ++ * disabling settings related to clock gating/power ++ * @param none ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID unc_gt_Disable_RC6_Clock_Gating(void) ++{ ++ U32 tmp; ++ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ // Disable RC6 ++ unc_gt_rc6_reg1 = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_RC6_REG1); ++ tmp = unc_gt_rc6_reg1 | UNC_GT_RC6_REG1_OR_VALUE; ++ unc_gt_rc6_reg2 = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_RC6_REG2); ++ ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG2, ++ UNC_GT_RC6_REG2_VALUE); ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG1, tmp); ++ ++ SEP_DRV_LOG_TRACE("Original value of RC6 rc6_1 = 0x%x, rc6_2 = 0x%x.", ++ unc_gt_rc6_reg1, unc_gt_rc6_reg2); ++ ++ // Disable clock gating ++ // Save ++ unc_gt_clk_gt_reg1 = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG1); ++ unc_gt_clk_gt_reg2 = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG2); ++ unc_gt_clk_gt_reg3 = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG3); ++ unc_gt_clk_gt_reg4 = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG4); ++ ++ SEP_DRV_LOG_TRACE("Original value of RC6 ck_1 = 0x%x, ck_2 = 0x%x.", ++ unc_gt_clk_gt_reg1, unc_gt_clk_gt_reg2); ++ SEP_DRV_LOG_TRACE("Original value of RC6 ck_3 = 0x%x, ck_4 = 0x%x.", ++ unc_gt_clk_gt_reg3, unc_gt_clk_gt_reg4); ++ ++ // Disable ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG1, ++ UNC_GT_GCPUNIT_REG1_VALUE); ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG2, ++ UNC_GT_GCPUNIT_REG2_VALUE); ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG3, ++ UNC_GT_GCPUNIT_REG3_VALUE); ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG4, ++ UNC_GT_GCPUNIT_REG4_VALUE); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn static VOID unc_gt_Restore_RC6_Clock_Gating(void) ++ * ++ * @brief This snippet of code restores the system settings ++ * for clock gating/power ++ * @param none ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID unc_gt_Restore_RC6_Clock_Gating(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG2, ++ unc_gt_rc6_reg2); ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG1, ++ unc_gt_rc6_reg1); ++ ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG1, ++ unc_gt_clk_gt_reg1); ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG2, ++ unc_gt_clk_gt_reg2); ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG3, ++ unc_gt_clk_gt_reg3); ++ PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG4, ++ unc_gt_clk_gt_reg4); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn static VOID unc_gt_Enable_PMU(PVOID) ++ * ++ * @brief Disable the clock gating and Set the global enable ++ * ++ * @param device_id ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID unc_gt_Enable_PMU(PVOID param) ++{ ++ U32 dev_idx; ++ ECB pecb; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[0]; ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (!CPU_STATE_system_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); ++ return; ++ } ++ ++ unc_gt_Disable_RC6_Clock_Gating(); ++ ++ if (pecb && GET_DRIVER_STATE() == DRV_STATE_RUNNING) { ++ SYS_Write_MSR(PERF_GLOBAL_CTRL, ECB_entries_reg_value(pecb, 0)); ++ SEP_DRV_LOG_TRACE("Enabling GT Global control = 0x%llx.", ++ ECB_entries_reg_value(pecb, 0)); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++/*! 
++ * @fn static VOID unc_gt_Disable_PMU(PVOID) ++ * ++ * @brief Unmap the virtual address when sampling/driver stops ++ * and restore system values for clock gating settings ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID unc_gt_Disable_PMU(PVOID param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 cur_driver_state; ++ ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ cur_driver_state = GET_DRIVER_STATE(); ++ ++ if (!CPU_STATE_system_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); ++ return; ++ } ++ unc_gt_Restore_RC6_Clock_Gating(); ++ ++ if (unc_gt_virtual_address && ++ (cur_driver_state == DRV_STATE_STOPPED || ++ cur_driver_state == DRV_STATE_PREPARE_STOP || ++ cur_driver_state == DRV_STATE_TERMINATING)) { ++ SYS_Write_MSR(PERF_GLOBAL_CTRL, 0LL); ++ PCI_Unmap_Memory(&unc_gt_map); ++ unc_gt_virtual_address = 0; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn unc_gt_Read_Counts(param, id) ++ * ++ * @param param The read thread node to process ++ * @param id The id refers to the device index ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore count data and store into the buffer param; ++ * ++ */ ++static VOID unc_gt_Read_Counts(PVOID param, U32 id) ++{ ++ U64 *data = (U64 *)param; ++ U32 cur_grp; ++ ECB pecb; ++ U32 offset_delta; ++ U32 tmp_value_lo = 0; ++ U32 tmp_value_hi = 0; ++ GT_CTR_NODE gt_ctr_value; ++ U32 this_cpu; ++ U32 package_num; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ package_num = core_to_package_map[this_cpu]; ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp]; ++ ++ // Write GroupID ++ data = (U64 *)((S8 *)data + ECB_group_offset(pecb)); ++ *data = cur_grp + 1; ++ GT_CTR_NODE_value_reset(gt_ctr_value); ++ ++ //Read in the counts into temporary buffe ++ FOR_EACH_PCI_DATA_REG_RAW(pecb, i, id) ++ { ++ offset_delta = ECB_entries_reg_offset(pecb, i); ++ tmp_value_lo = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); ++ offset_delta = offset_delta + NEXT_ADDR_OFFSET; ++ tmp_value_hi = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); ++ data = (U64 *)((S8 *)param + ++ ECB_entries_counter_event_offset(pecb, i)); ++ GT_CTR_NODE_low(gt_ctr_value) = tmp_value_lo; ++ GT_CTR_NODE_high(gt_ctr_value) = tmp_value_hi; ++ *data = GT_CTR_NODE_value(gt_ctr_value); ++ SEP_DRV_LOG_TRACE("DATA offset delta is 0x%x R is 0x%llx.", ++ offset_delta, ++ GT_CTR_NODE_value(gt_ctr_value)); ++ } ++ END_FOR_EACH_PCI_DATA_REG_RAW; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++static VOID unc_gt_Read_PMU_Data(PVOID param) ++{ ++ U32 j; ++ U64 *buffer = read_counter_info; ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ // U32 cur_grp; ++ U32 offset_delta; ++ U32 tmp_value_lo = 0; ++ U32 tmp_value_hi = 0; ++ GT_CTR_NODE gt_ctr_value; ++ U32 
package_num = 0; ++ ++ SEP_DRV_LOG_DEBUG_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (!CPU_STATE_system_master(pcpu)) { ++ SEP_DRV_LOG_DEBUG_OUT("Early exit (!system_master)."); ++ return; ++ } ++ ++ package_num = core_to_package_map[this_cpu]; ++ // cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; ++ ++ FOR_EACH_PCI_DATA_REG_RAW(pecb, i, dev_idx) ++ { ++ j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( ++ package_num, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ ECB_entries_uncore_buffer_offset_in_package(pecb, i)); ++ offset_delta = ECB_entries_reg_offset(pecb, i); ++ tmp_value_lo = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); ++ offset_delta = offset_delta + NEXT_ADDR_OFFSET; ++ tmp_value_hi = ++ PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); ++ GT_CTR_NODE_low(gt_ctr_value) = tmp_value_lo; ++ GT_CTR_NODE_high(gt_ctr_value) = tmp_value_hi; ++ buffer[j] = GT_CTR_NODE_value(gt_ctr_value); ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], ++ this_cpu); ++ } ++ END_FOR_EACH_PCI_DATA_REG_RAW; ++ ++ SEP_DRV_LOG_DEBUG_OUT(""); ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++ ++DISPATCH_NODE unc_gt_dispatch = { .init = NULL, ++ .fini = NULL, ++ .write = unc_gt_Write_PMU, ++ .freeze = unc_gt_Disable_PMU, ++ .restart = unc_gt_Enable_PMU, ++ .read_data = unc_gt_Read_PMU_Data, ++ .check_overflow = NULL, ++ .swap_group = NULL, ++ .read_lbrs = NULL, ++ .cleanup = NULL, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = unc_gt_Read_Counts, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = NULL, ++ .trigger_read = unc_gt_Read_Counts, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; +diff --git a/drivers/platform/x86/sepdk/sep/unc_mmio.c b/drivers/platform/x86/sepdk/sep/unc_mmio.c +new file mode 100755 +index 
000000000000..b1d997d0f405 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/unc_mmio.c +@@ -0,0 +1,1083 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "lwpmudrv.h" ++#include "utility.h" ++#include "control.h" ++#include "unc_common.h" ++#include "ecb_iterators.h" ++#include "pebs.h" ++#include "inc/pci.h" ++ ++extern U64 *read_counter_info; ++extern U64 *prev_counter_data; ++extern DRV_CONFIG drv_cfg; ++extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++ ++#define MASK_32BIT 0xffffffff ++#define MASK_64BIT 0xffffffff00000000ULL ++ ++#define IS_MASTER(device_type, cpu) \ ++ (((device_type) == DRV_SINGLE_INSTANCE) ? \ ++ CPU_STATE_system_master(&pcb[cpu]) : \ ++ CPU_STATE_socket_master(&pcb[(cpu)])) ++#define GET_PACKAGE_NUM(device_type, cpu) \ ++ (((device_type) == DRV_SINGLE_INSTANCE) ? 0 : core_to_package_map[cpu]) ++#define IS_64BIT(mask) (((mask) >> 32) != 0) ++ ++#define EVENT_COUNTER_MAX_TRY 30 ++ ++struct FPGA_CONTROL_NODE_S { ++ union { ++ struct { ++ U64 rst_ctrs : 1; ++ U64 rsvd1 : 7; ++ U64 frz : 1; ++ U64 rsvd2 : 7; ++ U64 event_select : 4; ++ U64 port_id : 2; ++ U64 rsvd3 : 1; ++ U64 port_enable : 1; ++ U64 rsvd4 : 40; ++ } bits; ++ U64 bit_field; ++ } u; ++}; ++ ++static struct FPGA_CONTROL_NODE_S control_node; ++ ++/*! ++ * @fn static VOID unc_mmio_Write_PMU(VOID*) ++ * ++ * @brief Initial write of PMU registers ++ * Walk through the enties and write the value of the register accordingly. 
++ * When current_group = 0, then this is the first time this routine is called, ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID unc_mmio_Write_PMU(VOID *param) ++{ ++ U32 dev_idx; ++ U32 offset_delta = 0; ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 event_id = 0; ++ U64 tmp_value = 0; ++ U32 this_cpu; ++ U32 package_num = 0; ++ U32 cur_grp; ++ ECB pecb; ++ U64 virtual_addr = 0; ++ U32 idx_w = 0; ++ U32 event_code = 0; ++ U32 counter = 0; ++ U32 entry = 0; ++ U32 dev_node = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!is_master)."); ++ return; ++ } ++ ++ package_num = ++ GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ dev_node = ECB_dev_node(pecb); ++ entry = package_num; ++ if (!IS_MMIO_MAP_VALID(dev_node, entry)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); ++ return; ++ } ++ ++ virtual_addr = virtual_address_table(dev_node, entry); ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_WRITE) ++ { ++ PCI_MMIO_Write_U64(virtual_addr, ECB_entries_reg_id(pecb, idx), ++ ECB_entries_reg_value(pecb, idx)); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ if (DRV_CONFIG_emon_mode(drv_cfg)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!event_based_counts)."); ++ return; ++ } ++ ++ idx_w = ECB_operations_register_start(pecb, PMU_OPERATION_WRITE); ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) ++ { ++ if (ECB_entries_reg_offset(pecb, idx) > ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ 
&ECB_pcidev_entry_node(pecb))) { ++ offset_delta = ++ ECB_entries_reg_offset(pecb, idx) - ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ &ECB_pcidev_entry_node(pecb)); ++ } else { ++ offset_delta = ECB_entries_reg_offset(pecb, idx); ++ } ++ ++ if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == ++ DRV_SINGLE_INSTANCE) && ++ (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { ++ // multiple MMIO mapping per device, find virtual_addr per mapping. ++ entry = ECB_entries_unit_id(pecb, idx); ++ virtual_addr = virtual_address_table(dev_node, entry); ++ } ++ ++ if ((ECB_entries_counter_type(pecb, idx) == ++ PROG_FREERUN_COUNTER) && ++ (ECB_entries_unit_id(pecb, idx) == 0)) { ++ //Write event code before reading ++ PCI_MMIO_Write_U64(virtual_addr, ++ ECB_entries_reg_id(pecb, idx_w), ++ ECB_entries_reg_value(pecb, idx_w)); ++ event_code = (U32)control_node.u.bits.event_select; ++ idx_w++; ++ } ++ ++ // this is needed for overflow detection of the accumulators. ++ if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { ++ if (ECB_entries_counter_type(pecb, idx) == ++ PROG_FREERUN_COUNTER) { ++ do { ++ if (counter > EVENT_COUNTER_MAX_TRY) { ++ break; ++ } ++ tmp_value = SYS_MMIO_Read64( ++ virtual_addr, offset_delta); ++ counter++; ++ } while (event_code != (tmp_value >> 60)); ++ } ++ tmp_value = SYS_MMIO_Read64(virtual_addr, offset_delta); ++ } else { ++ tmp_value = SYS_MMIO_Read32(virtual_addr, offset_delta); ++ } ++ tmp_value &= (U64)ECB_entries_max_bits(pecb, idx); ++ ++ LWPMU_DEVICE_prev_value( ++ &devices[dev_idx])[package_num][event_id] = tmp_value; ++ SEP_DRV_LOG_TRACE( ++ "unc_mmio_Write_PMU: cpu[%d], device[%d], package[%d], entry %d, event_id %d, value %llu\n", ++ this_cpu, dev_idx, package_num, entry, event_id, ++ tmp_value); ++ event_id++; ++ ++ if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) { ++ LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = ++ (U64)ECB_entries_max_bits(pecb, idx); ++ } ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ SEP_DRV_LOG_TRACE( ++ "BAR address is 
0x%llx and virt is 0x%llx.", ++ DRV_PCI_DEVICE_ENTRY_bar_address(&ECB_pcidev_entry_node(pecb)), ++ virtual_addr); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn static VOID unc_mmio_Enable_PMU(PVOID) ++ * ++ * @brief Capture the previous values to calculate delta later. ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static void unc_mmio_Enable_PMU(PVOID param) ++{ ++ U32 j; ++ U64 *buffer = prev_counter_data; ++ U32 this_cpu; ++ U32 dev_idx; ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 package_num; ++ U32 offset_delta; ++ U32 cur_grp; ++ ECB pecb; ++ U64 virtual_addr = 0; ++ U64 reg_val = 0; ++ U32 idx_w = 0; ++ U32 event_code = 0; ++ U32 counter = 0; ++ // U32 num_events = 0; ++ U32 entry = 0; ++ // U32 num_pkgs = num_packages; ++ U32 dev_node = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); ++ return; ++ } ++ ++ package_num = ++ GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ dev_node = ECB_dev_node(pecb); ++ entry = package_num; ++ if (!IS_MMIO_MAP_VALID(dev_node, entry)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); ++ return; ++ } ++ ++ // if (DEV_UNC_CONFIG_device_type(pcfg_unc) == DRV_SINGLE_INSTANCE) { ++ // num_pkgs = 1; ++ // } ++ ++ virtual_addr = virtual_address_table(dev_node, entry); ++ ++ // NOTE THAT the enable function currently captures previous values ++ // for EMON collection to avoid unnecessary memory copy. 
++ if (DRV_CONFIG_emon_mode(drv_cfg)) { ++ // num_events = ECB_num_events(pecb); ++ idx_w = ECB_operations_register_start(pecb, ++ PMU_OPERATION_WRITE); ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, ++ PMU_OPERATION_READ) ++ { ++ if (ECB_entries_reg_offset(pecb, idx) > ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ &ECB_pcidev_entry_node(pecb))) { ++ offset_delta = ++ ECB_entries_reg_offset(pecb, idx) - ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ &ECB_pcidev_entry_node(pecb)); ++ } else { ++ offset_delta = ++ ECB_entries_reg_offset(pecb, idx); ++ } ++ ++ if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == ++ DRV_SINGLE_INSTANCE) && ++ (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { ++ // multiple MMIO mapping per device, find virtual_addr per mapping. ++ entry = ECB_entries_unit_id(pecb, idx); ++ virtual_addr = ++ virtual_address_table(dev_node, entry); ++ } ++ ++ if ((ECB_entries_counter_type(pecb, idx) == ++ PROG_FREERUN_COUNTER) && ++ (ECB_entries_unit_id(pecb, idx) == 0)) { ++ PCI_MMIO_Write_U64( ++ virtual_addr, ++ ECB_entries_reg_id(pecb, idx_w), ++ ECB_entries_reg_value(pecb, idx_w)); ++ control_node.u.bit_field = ++ ECB_entries_reg_value(pecb, idx_w); ++ event_code = ++ (U32)control_node.u.bits.event_select; ++ idx_w++; ++ } ++ ++ if ((ECB_entries_event_scope(pecb, idx) == ++ PACKAGE_EVENT) || ++ (ECB_entries_event_scope(pecb, idx) == ++ SYSTEM_EVENT)) { ++ if (ECB_entries_event_scope(pecb, idx) == ++ SYSTEM_EVENT) { ++ j = ECB_entries_uncore_buffer_offset_in_system( ++ pecb, idx); ++ } else { ++ j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( ++ package_num, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ ECB_entries_uncore_buffer_offset_in_package( ++ pecb, idx)); ++ } ++ ++ if (IS_64BIT((U64)( ++ ECB_entries_max_bits(pecb, idx)))) { ++ if (ECB_entries_counter_type(pecb, ++ idx) == ++ PROG_FREERUN_COUNTER) { ++ do { ++ if (counter > ++ EVENT_COUNTER_MAX_TRY) { ++ break; ++ } ++ buffer[j] = SYS_MMIO_Read64( ++ virtual_addr, 
++ offset_delta); ++ counter++; ++ } while (event_code != ++ (buffer[j] >> 60)); ++ } ++ buffer[j] = SYS_MMIO_Read64( ++ virtual_addr, offset_delta); ++ } else { ++ buffer[j] = SYS_MMIO_Read32( ++ virtual_addr, offset_delta); ++ } ++ buffer[j] &= ++ (U64)ECB_entries_max_bits(pecb, idx); ++ SEP_DRV_LOG_TRACE( ++ "j=%u, value=%llu, cpu=%u, MSR=0x%x", j, ++ buffer[j], this_cpu, ++ ECB_entries_reg_id(pecb, idx)); ++ } ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ } ++ virtual_addr = virtual_address_table(dev_node, entry); ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_ENABLE) ++ { ++ if (ECB_entries_reg_rw_type(pecb, idx) == ++ PMU_REG_RW_READ_WRITE) { ++ reg_val = PCI_MMIO_Read_U64( ++ virtual_addr, ECB_entries_reg_id(pecb, idx)); ++ reg_val &= ECB_entries_reg_value(pecb, idx); ++ PCI_MMIO_Write_U64(virtual_addr, ++ ECB_entries_reg_id(pecb, idx), ++ reg_val); ++ } ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn static VOID unc_mmio_Disable_PMU(PVOID) ++ * ++ * @brief Unmap the virtual address when you stop sampling. 
++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static void unc_mmio_Disable_PMU(PVOID param) ++{ ++ U32 dev_idx; ++ U32 this_cpu; ++ U64 virtual_addr = 0; ++ U64 reg_val = 0; ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 package_num; ++ U32 dev_node = 0; ++ U32 cur_grp = 0; ++ ECB pecb; ++ U32 entry = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); ++ return; ++ } ++ ++ package_num = ++ GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[dev_idx])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[(cur_grp)]; ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ dev_node = ECB_dev_node(pecb); ++ entry = package_num; ++ if (!IS_MMIO_MAP_VALID(dev_node, entry)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); ++ return; ++ } ++ ++ virtual_addr = virtual_address_table(dev_node, entry); ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_DISABLE) ++ { ++ if (ECB_entries_reg_rw_type(pecb, idx) == ++ PMU_REG_RW_READ_WRITE) { ++ reg_val = PCI_MMIO_Read_U64( ++ virtual_addr, ECB_entries_reg_id(pecb, idx)); ++ reg_val |= ECB_entries_reg_value(pecb, idx); ++ PCI_MMIO_Write_U64(virtual_addr, ++ ECB_entries_reg_id(pecb, idx), ++ reg_val); ++ } ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn void unc_mmio_Trigger_Read(id) ++ * ++ * @param id Device index ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore data from counters and store into buffer ++ */ ++static VOID unc_mmio_Trigger_Read(PVOID param, U32 id) ++{ ++ U32 this_cpu; ++ U32 cur_grp; ++ ECB pecb; ++ U32 index = 0; ++ U64 diff = 0; ++ U32 offset_delta = 0; ++ U64 value = 0ULL; ++ U64 *data; ++ U64 virtual_addr = 0; ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 package_num; ++ U32 idx_w = 0; ++ U32 event_code = 0; ++ U32 counter = 0; ++ U32 entry = 0; ++ U32 dev_node = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[id]); ++ if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); ++ return; ++ } ++ ++ package_num = ++ GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[(cur_grp)]; ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ dev_node = ECB_dev_node(pecb); ++ entry = package_num; ++ if (!IS_MMIO_MAP_VALID(dev_node, entry)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); ++ return; ++ } ++ ++ virtual_addr = virtual_address_table(dev_node, entry); ++ ++ // Write GroupID ++ data = (U64 *)((S8 *)param + ECB_group_offset(pecb)); ++ *data = cur_grp + 1; ++ //Read in the counts into temporary buffer ++ idx_w = ECB_operations_register_start(pecb, PMU_OPERATION_WRITE); ++ FOR_EACH_REG_UNC_OPERATION(pecb, id, idx, PMU_OPERATION_READ) ++ { ++ if (ECB_entries_reg_offset(pecb, idx) > ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ &ECB_pcidev_entry_node(pecb))) { ++ offset_delta = ++ ECB_entries_reg_offset(pecb, idx) - ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ &ECB_pcidev_entry_node(pecb)); ++ } else { ++ offset_delta 
= ECB_entries_reg_offset(pecb, idx); ++ } ++ ++ if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == ++ DRV_SINGLE_INSTANCE) && ++ (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { ++ // multiple MMIO mapping per device ++ entry = ECB_entries_unit_id(pecb, idx); ++ virtual_addr = virtual_address_table(dev_node, entry); ++ } ++ ++ if ((ECB_entries_counter_type(pecb, idx) == ++ PROG_FREERUN_COUNTER) && ++ (ECB_entries_unit_id(pecb, idx) == 0)) { ++ PCI_MMIO_Write_U64(virtual_addr, ++ ECB_entries_reg_id(pecb, idx_w), ++ ECB_entries_reg_value(pecb, idx_w)); ++ control_node.u.bit_field = ++ ECB_entries_reg_value(pecb, idx_w); ++ event_code = (U32)control_node.u.bits.event_select; ++ idx_w++; ++ } ++ ++ if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { ++ if (ECB_entries_counter_type(pecb, idx) == ++ PROG_FREERUN_COUNTER) { ++ do { ++ if (counter > EVENT_COUNTER_MAX_TRY) { ++ break; ++ } ++ value = SYS_MMIO_Read64(virtual_addr, ++ offset_delta); ++ counter++; ++ } while (event_code != (value >> 60)); ++ } ++ value = SYS_MMIO_Read64(virtual_addr, offset_delta); ++ } else { ++ value = SYS_MMIO_Read32((volatile unsigned int *)virtual_addr, offset_delta); ++ } ++ value &= (U64)ECB_entries_max_bits(pecb, idx); ++ ++ data = (U64 *)((S8 *)param + ++ ECB_entries_counter_event_offset(pecb, idx)); ++ //check for overflow if not a static counter ++ if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) { ++ *data = value; ++ } else { ++ if (value < LWPMU_DEVICE_prev_value( ++ &devices[id])[package_num][index]) { ++ diff = LWPMU_DEVICE_counter_mask(&devices[id]) - ++ LWPMU_DEVICE_prev_value( ++ &devices[id])[package_num][index]; ++ diff += value; ++ } else { ++ diff = value - ++ LWPMU_DEVICE_prev_value( ++ &devices[id])[package_num][index]; ++ } ++ LWPMU_DEVICE_acc_value( ++ &devices[id])[package_num][cur_grp][index] += ++ diff; ++ LWPMU_DEVICE_prev_value( ++ &devices[id])[package_num][index] = value; ++ *data = LWPMU_DEVICE_acc_value( ++ &devices[id])[package_num][cur_grp][index]; ++ } ++ 
index++; ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn unc_mmio_Read_PMU_Data(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. ++ * ++ */ ++static VOID unc_mmio_Read_PMU_Data(PVOID param) ++{ ++ U32 j; ++ U64 *buffer = read_counter_info; ++ U64 *prev_buffer = prev_counter_data; ++ U32 this_cpu; ++ U32 dev_idx; ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 offset_delta; ++ U32 cur_grp; ++ ECB pecb; ++ U64 tmp_value = 0ULL; ++ U64 virtual_addr = 0; ++ U32 idx_w = 0; ++ U32 event_code = 0; ++ U32 counter = 0; ++ // U32 num_events = 0; ++ U32 package_num; ++ U32 entry = 0; ++ U32 dev_node = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); ++ if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); ++ return; ++ } ++ ++ package_num = ++ GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ dev_node = ECB_dev_node(pecb); ++ entry = package_num; ++ if (!IS_MMIO_MAP_VALID(dev_node, entry)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); ++ return; ++ } ++ ++ virtual_addr = virtual_address_table(dev_node, entry); ++ ++ // num_events = ECB_num_events(pecb); ++ ++ idx_w = ECB_operations_register_start(pecb, PMU_OPERATION_WRITE); ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) ++ { ++ if (ECB_entries_reg_offset(pecb, idx) > ++ 
DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ &ECB_pcidev_entry_node(pecb))) { ++ offset_delta = ++ ECB_entries_reg_offset(pecb, idx) - ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ &ECB_pcidev_entry_node(pecb)); ++ } else { ++ offset_delta = ECB_entries_reg_offset(pecb, idx); ++ } ++ ++ if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == ++ DRV_SINGLE_INSTANCE) && ++ (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { ++ // multiple MMIO mapping per device, find virtual_addr per mapping. ++ entry = ECB_entries_unit_id(pecb, idx); ++ virtual_addr = virtual_address_table(dev_node, entry); ++ } ++ ++ if ((ECB_entries_counter_type(pecb, idx) == ++ PROG_FREERUN_COUNTER) && ++ (ECB_entries_unit_id(pecb, idx) == 0)) { ++ PCI_MMIO_Write_U64(virtual_addr, ++ ECB_entries_reg_id(pecb, idx_w), ++ ECB_entries_reg_value(pecb, idx_w)); ++ control_node.u.bit_field = ++ ECB_entries_reg_value(pecb, idx_w); ++ event_code = (U32)control_node.u.bits.event_select; ++ idx_w++; ++ } ++ ++ if ((ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) || ++ (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT)) { ++ if (ECB_entries_event_scope(pecb, idx) == ++ SYSTEM_EVENT) { ++ j = ECB_entries_uncore_buffer_offset_in_system( ++ pecb, idx); ++ } else { ++ j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( ++ package_num, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ ECB_entries_uncore_buffer_offset_in_package( ++ pecb, idx)); ++ } ++ ++ if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { ++ if (ECB_entries_counter_type(pecb, idx) == ++ PROG_FREERUN_COUNTER) { ++ do { ++ if (counter > ++ EVENT_COUNTER_MAX_TRY) { ++ break; ++ } ++ tmp_value = SYS_MMIO_Read64( ++ virtual_addr, ++ offset_delta); ++ counter++; ++ } while (event_code != ++ (tmp_value >> 60)); ++ } ++ tmp_value = SYS_MMIO_Read64(virtual_addr, ++ offset_delta); ++ } else { ++ tmp_value = SYS_MMIO_Read32(virtual_addr, ++ offset_delta); ++ } ++ tmp_value &= (U64)ECB_entries_max_bits(pecb, idx); ++ if 
(ECB_entries_counter_type(pecb, idx) == ++ STATIC_COUNTER) { ++ buffer[j] = tmp_value; ++ } else { ++ if (tmp_value >= prev_buffer[j]) { ++ buffer[j] = tmp_value - prev_buffer[j]; ++ } else { ++ buffer[j] = tmp_value + ++ (ECB_entries_max_bits(pecb, ++ idx) - ++ prev_buffer[j]); ++ } ++ } ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, MSR=0x%x", ++ j, buffer[j], this_cpu, ++ ECB_entries_reg_id(pecb, idx)); ++ } ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn unc_mmio_Initialize(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Do the mapping of the physical address (to do the invalidates in the TLB) ++ * NOTE: this should never be done with SMP call ++ * ++ */ ++static VOID unc_mmio_Initialize(PVOID param) ++{ ++ DRV_PCI_DEVICE_ENTRY_NODE dpden; ++ ++ U64 bar; ++ ++ U64 physical_address; ++ U32 dev_idx = 0; ++ U32 cur_grp = 0; ++ ECB pecb = NULL; ++ U32 dev_node; ++ U32 i = 0; ++ U32 page_len = 4096; // 4K ++ ++ U32 use_default_busno = 0; ++ U32 entries = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ dev_node = ECB_dev_node(pecb); ++ ++ if (IS_MMIO_MAP_VALID(dev_node, 0)) { ++ SEP_DRV_LOG_INIT_TRACE_OUT( ++ "Early exit (device[%d] node %d already mapped).", ++ dev_idx, dev_node); ++ return; ++ } ++ ++ dpden = ECB_pcidev_entry_node(pecb); ++ ++ // use busno found from topology scan if available ++ // otherwise use the default one ++ entries = GET_NUM_MAP_ENTRIES(dev_node); ++ if (entries == 0) { ++ use_default_busno = 1; ++ entries = 1; // this could the client, does not through the scan ++ 
UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = 1; ++ UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])) = 1; ++ } ++ if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) { ++ // it is better to allocate space in the beginning ++ UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) = ++ CONTROL_Allocate_Memory(entries * ++ sizeof(SEP_MMIO_NODE)); ++ if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (No Memory)."); ++ return; ++ } ++ memset(UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])), 0, ++ entries * sizeof(U64)); ++ } ++ for (i = 0; i < entries; i++) { ++ if (!use_default_busno) { ++ if (IS_BUS_MAP_VALID(dev_node, i)) { ++ DRV_PCI_DEVICE_ENTRY_bus_no(&dpden) = ++ UNC_PCIDEV_busno_entry( ++ &(unc_pcidev_map[dev_node]), i); ++ } ++ } ++ ++ bar = PCI_Read_U64(DRV_PCI_DEVICE_ENTRY_bus_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_dev_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_func_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_bar_offset(&dpden)); ++ ++ bar &= DRV_PCI_DEVICE_ENTRY_bar_mask(&dpden); ++ ++ DRV_PCI_DEVICE_ENTRY_bar_address(&ECB_pcidev_entry_node(pecb)) = ++ bar; ++ physical_address = DRV_PCI_DEVICE_ENTRY_bar_address( ++ &ECB_pcidev_entry_node(pecb)) + ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ &ECB_pcidev_entry_node(pecb)); ++ ++ PCI_Map_Memory(&UNC_PCIDEV_mmio_map_entry( ++ &(unc_pcidev_map[dev_node]), i), ++ physical_address, page_len); ++ } ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn unc_mmio_fpga_Initialize(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Do the mapping of the physical address (to do the invalidates in the TLB) ++ * NOTE: this should never be done with SMP call ++ * ++ */ ++static VOID unc_mmio_fpga_Initialize(PVOID param) ++{ ++#if defined(DRV_EM64T) ++ U64 phys_addr; ++ SEP_MMIO_NODE tmp_map = { 0 }; ++ U64 virt_addr; ++ U64 dfh; ++ U32 id; ++ U32 offset = 0; ++ S32 next_offset = -1; ++ U32 dev_idx; ++ U32 cur_grp; ++ ECB pecb; ++ U32 bus_list[2] = { 0x5e, 0xbe }; ++ U32 busno; ++ U32 page_len = 4096; ++ U32 package_num = 0; ++ U32 dev_node = 0; ++ U32 entries = 0; ++ DRV_PCI_DEVICE_ENTRY_NODE dpden; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ dev_node = ECB_dev_node(pecb); ++ ++ entries = GET_NUM_MAP_ENTRIES(dev_node); ++ if (entries == 0) { ++ entries = num_packages; ++ } ++ ++ if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) { ++ // it is better to allocate space in the beginning ++ UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) = ++ CONTROL_Allocate_Memory(entries * ++ sizeof(SEP_MMIO_NODE)); ++ if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (No Memory)."); ++ return; ++ } ++ memset(UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])), 0, ++ (entries * sizeof(SEP_MMIO_NODE))); ++ UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = 0; ++ UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])) = entries; ++ } else { ++ if (virtual_address_table(dev_node, 0) != 0) { ++ SEP_DRV_LOG_INIT_TRACE_OUT( ++ "Early exit (device[%d] node %d already mapped).", ++ dev_idx, dev_node); ++ return; ++ } ++ } ++ ++ dpden = 
ECB_pcidev_entry_node(pecb); ++ ++ for (package_num = 0; package_num < num_packages; package_num++) { ++ if (package_num < 2) { ++ busno = bus_list[package_num]; ++ } else { ++ busno = 0; ++ } ++ phys_addr = ++ PCI_Read_U64(busno, DRV_PCI_DEVICE_ENTRY_dev_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_func_no(&dpden), ++ DRV_PCI_DEVICE_ENTRY_bar_offset(&dpden)); ++ phys_addr &= DRV_PCI_DEVICE_ENTRY_bar_mask(&dpden); ++ if (package_num == 0) { ++ PCI_Map_Memory(&tmp_map, phys_addr, 8 * page_len); ++ virt_addr = SEP_MMIO_NODE_virtual_address(&tmp_map); ++ while (next_offset != 0) { ++ dfh = SYS_MMIO_Read64((U64)virt_addr, offset); ++ next_offset = (U32)((dfh >> 16) & 0xffffff); ++ id = (U32)(dfh & 0xfff); ++ if (offset && ++ (id == ++ DRV_PCI_DEVICE_ENTRY_feature_id(&dpden))) { ++ break; ++ } ++ offset += next_offset; ++ } ++ PCI_Unmap_Memory(&tmp_map); ++ } ++ phys_addr += offset; ++ PCI_Map_Memory( ++ &UNC_PCIDEV_mmio_map_entry(&(unc_pcidev_map[dev_node]), ++ package_num), ++ phys_addr, 8 * page_len); ++ UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]))++; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++#endif ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn unc_mmio_Destroy(param) ++ * ++ * @param param dummy parameter which is not used ++ * ++ * @return None No return needed ++ * ++ * @brief Invalidate the entry in TLB of the physical address ++ * NOTE: this should never be done with SMP call ++ * ++ */ ++static VOID unc_mmio_Destroy(PVOID param) ++{ ++ U32 dev_idx; ++ U32 i; ++ U64 addr = 0; ++ U32 cur_grp = 0; ++ U32 dev_node = 0; ++ U32 entries = 0; ++ ECB pecb; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ dev_node = ECB_dev_node(pecb); ++ ++ if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (no mapping)."); ++ return; ++ } ++ ++ entries = GET_NUM_MAP_ENTRIES(dev_node); ++ ++ for (i = 0; i < entries; i++) { ++ addr = virtual_address_table(dev_node, i); ++ if (addr) { ++ PCI_Unmap_Memory(&UNC_PCIDEV_mmio_map_entry( ++ &(unc_pcidev_map[dev_node]), i)); ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++DISPATCH_NODE unc_mmio_dispatch = { .init = unc_mmio_Initialize, ++ .fini = unc_mmio_Destroy, ++ .write = unc_mmio_Write_PMU, ++ .freeze = unc_mmio_Disable_PMU, ++ .restart = unc_mmio_Enable_PMU, ++ .read_data = unc_mmio_Read_PMU_Data, ++ .check_overflow = NULL, ++ .swap_group = NULL, ++ .read_lbrs = NULL, ++ .cleanup = UNC_COMMON_Dummy_Func, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = NULL, ++ .trigger_read = unc_mmio_Trigger_Read, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; ++ ++DISPATCH_NODE unc_mmio_fpga_dispatch = { .init = unc_mmio_fpga_Initialize, ++ .fini = unc_mmio_Destroy, ++ .write = unc_mmio_Write_PMU, ++ .freeze 
= unc_mmio_Disable_PMU, ++ .restart = unc_mmio_Enable_PMU, ++ .read_data = unc_mmio_Read_PMU_Data, ++ .check_overflow = NULL, ++ .swap_group = NULL, ++ .read_lbrs = NULL, ++ .cleanup = UNC_COMMON_Dummy_Func, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = NULL, ++ .trigger_read = unc_mmio_Trigger_Read, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; +diff --git a/drivers/platform/x86/sepdk/sep/unc_msr.c b/drivers/platform/x86/sepdk/sep/unc_msr.c +new file mode 100755 +index 000000000000..ce144203dc39 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/unc_msr.c +@@ -0,0 +1,347 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "inc/ecb_iterators.h" ++#include "inc/control.h" ++#include "inc/unc_common.h" ++#include "inc/utility.h" ++ ++extern U64 *read_counter_info; ++extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++extern DRV_CONFIG drv_cfg; ++ ++/*! ++ * @fn static VOID UNC_COMMON_MSR_Write_PMU(VOID*) ++ * ++ * @brief Initial write of PMU registers ++ * Walk through the enties and write the value of the register accordingly. ++ * When current_group = 0, then this is the first time this routine is called, ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID UNC_MSR_Write_PMU(PVOID param) ++{ ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); ++ return; ++ } ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_WRITE) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), ++ ECB_entries_reg_value(pecb, idx)); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) ++ { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), 0ULL); ++ if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) { ++ LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = ++ (U64)ECB_entries_max_bits(pecb, idx); ++ } ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn static VOID UNC_MSR_Enable_PMU(PVOID) ++ * ++ * @brief Set the enable bit for all the evsel registers ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID UNC_MSR_Enable_PMU(PVOID param) ++{ ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U64 reg_val = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); ++ return; ++ } ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_ENABLE) ++ { ++ reg_val = ECB_entries_reg_value(pecb, idx); ++ if (ECB_entries_reg_rw_type(pecb, idx) == ++ PMU_REG_RW_READ_WRITE) { ++ reg_val = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); ++ if (ECB_entries_reg_type(pecb, idx) == ++ PMU_REG_UNIT_CTRL) { ++ reg_val &= ECB_entries_reg_value(pecb, idx); ++ } else { ++ reg_val |= ECB_entries_reg_value(pecb, idx); ++ } ++ } ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), reg_val); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn static VOID UNC_MSR_Disable_PMU(PVOID) ++ * ++ * @brief Set the enable bit for all the evsel registers ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID UNC_MSR_Disable_PMU(PVOID param) ++{ ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U64 reg_val = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); ++ return; ++ } ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_DISABLE) ++ { ++ reg_val = ECB_entries_reg_value(pecb, idx); ++ if (ECB_entries_reg_rw_type(pecb, idx) == ++ PMU_REG_RW_READ_WRITE) { ++ reg_val = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); ++ if (ECB_entries_reg_type(pecb, idx) == ++ PMU_REG_UNIT_CTRL) { ++ reg_val |= ECB_entries_reg_value(pecb, idx); ++ } else { ++ reg_val &= ECB_entries_reg_value(pecb, idx); ++ } ++ } ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), reg_val); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn static VOID UNC_MSR_Read_PMU_Data(param) ++ * ++ * @param param The read thread node to process ++ * @param id The id refers to the device index ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore count data and store into the buffer ++ * Let us say we have 2 core events in a dual socket JKTN; ++ * The start_index will be at 32 as it will 2 events in 16 CPU per socket ++ * The position for first event of QPI will be computed based on its event ++ * ++ */ ++static VOID UNC_MSR_Read_PMU_Data(PVOID param) ++{ ++ U32 j = 0; ++ U32 dev_idx; ++ U32 this_cpu; ++ U32 package_num = 0; ++ U64 *buffer; ++ CPU_STATE pcpu; ++ U32 cur_grp; ++ ECB pecb; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ buffer = read_counter_info; ++ pcpu = &pcb[this_cpu]; ++ package_num = core_to_package_map[this_cpu]; ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp]; ++ ++ // NOTE THAT the read_pmu function on for EMON collection. 
++ if (!DRV_CONFIG_emon_mode(drv_cfg)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); ++ return; ++ } ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); ++ return; ++ } ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ //Read in the counts into temporary buffer ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) ++ { ++ if (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT) { ++ j = ECB_entries_uncore_buffer_offset_in_system(pecb, ++ idx); ++ } else { ++ j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( ++ package_num, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ ECB_entries_uncore_buffer_offset_in_package( ++ pecb, idx)); ++ } ++ ++ buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j, ++ buffer[j], this_cpu, ++ ECB_entries_core_event_id(pecb, idx)); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static VOID UNC_MSR_Trigger_Read(id) ++ * ++ * @param id Device index ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore data from counters and store into buffer ++ */ ++static VOID UNC_MSR_Trigger_Read(PVOID param, U32 id) ++{ ++ U32 this_cpu; ++ U32 package_num; ++ U32 cur_grp; ++ ECB pecb; ++ U32 index = 0; ++ U64 diff = 0; ++ U64 value; ++ U64 *data; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ package_num = core_to_package_map[this_cpu]; ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp]; ++ ++ // Write GroupID ++ data = (U64 *)((S8 *)param + ECB_group_offset(pecb)); ++ *data = cur_grp + 1; ++ //Read in the counts into uncore buffer ++ FOR_EACH_REG_UNC_OPERATION(pecb, id, idx, PMU_OPERATION_READ) ++ { ++ value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); ++ //check for overflow ++ if (value < ++ LWPMU_DEVICE_prev_value(&devices[id])[package_num][index]) { ++ diff = LWPMU_DEVICE_counter_mask(&devices[id]) - ++ LWPMU_DEVICE_prev_value( ++ &devices[id])[package_num][index]; ++ diff += value; ++ } else { ++ diff = value - ++ LWPMU_DEVICE_prev_value( ++ &devices[id])[package_num][index]; ++ } ++ LWPMU_DEVICE_acc_value( ++ &devices[id])[package_num][cur_grp][index] += diff; ++ LWPMU_DEVICE_prev_value(&devices[id])[package_num][index] = ++ value; ++ data = (U64 *)((S8 *)param + ++ ECB_entries_counter_event_offset(pecb, idx)); ++ *data = LWPMU_DEVICE_acc_value( ++ &devices[id])[package_num][cur_grp][index]; ++ index++; ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++ ++DISPATCH_NODE unc_msr_dispatch = { .init = NULL, ++ .fini = NULL, ++ .write = UNC_MSR_Write_PMU, ++ .freeze = UNC_MSR_Disable_PMU, ++ .restart = UNC_MSR_Enable_PMU, ++ .read_data = UNC_MSR_Read_PMU_Data, ++ .check_overflow = NULL, ++ .swap_group = NULL, ++ 
.read_lbrs = NULL, ++ .cleanup = UNC_COMMON_MSR_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = NULL, ++ .trigger_read = UNC_MSR_Trigger_Read, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; +diff --git a/drivers/platform/x86/sepdk/sep/unc_pci.c b/drivers/platform/x86/sepdk/sep/unc_pci.c +new file mode 100755 +index 000000000000..e338556f8b34 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/unc_pci.c +@@ -0,0 +1,491 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "inc/ecb_iterators.h" ++#include "inc/control.h" ++#include "inc/unc_common.h" ++#include "inc/utility.h" ++#include "inc/pci.h" ++ ++extern U64 *read_counter_info; ++extern UNCORE_TOPOLOGY_INFO_NODE uncore_topology; ++extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++extern DRV_CONFIG drv_cfg; ++ ++/*! ++ * @fn static VOID unc_pci_Write_PMU(VOID*) ++ * ++ * @brief Initial write of PMU registers ++ * Walk through the enties and write the value of the register accordingly. ++ * When current_group = 0, then this is the first time this routine is called, ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID unc_pci_Write_PMU(PVOID param) ++{ ++ U32 device_id; ++ U32 dev_idx; ++ U32 value; ++ U32 vendor_id; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 package_num = 0; ++ U32 dev_node = 0; ++ U32 cur_grp; ++ ECB pecb; ++ U32 busno; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ package_num = core_to_package_map[this_cpu]; ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp]; ++ ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); ++ return; ++ } ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ // first, figure out which package maps to which bus ++ dev_node = ECB_dev_node(pecb); ++ if (!IS_BUS_MAP_VALID(dev_node, package_num)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", ++ dev_node); ++ return; ++ } ++ ++ busno = GET_BUS_MAP(dev_node, package_num); ++ ++ LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]) = 
dev_node; ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_WRITE) ++ { ++ if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) { ++ //Check if we need to zero this MSR out ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), 0LL); ++ continue; ++ } ++ ++ // otherwise, we have a valid entry ++ // now we just need to find the corresponding bus # ++ ECB_entries_bus_no(pecb, idx) = busno; ++ value = PCI_Read_U32(busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), 0); ++ ++ CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id, ++ device_id); ++ SEP_DRV_LOG_TRACE("Uncore device ID = 0x%x.", ++ device_id); ++ ++ if (ECB_entries_reg_type(pecb, idx) == PMU_REG_UNIT_CTRL) { ++ // busno can not be stored in ECB because different sockets have different bus no. ++ PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx), ++ (U32)ECB_entries_reg_value(pecb, idx)); ++ continue; ++ } ++ ++ // now program at the corresponding offset ++ PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx), ++ (U32)ECB_entries_reg_value(pecb, idx)); ++ ++ if ((ECB_entries_reg_value(pecb, idx) >> NEXT_ADDR_SHIFT) != ++ 0) { ++ PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx) + ++ NEXT_ADDR_OFFSET, ++ (U32)(ECB_entries_reg_value(pecb, idx) >> ++ NEXT_ADDR_SHIFT)); ++ } ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) ++ { ++ PCI_Write_U64(busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx), 0); ++ ++ // this is needed for overflow detection of the accumulators. 
++ if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) { ++ LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = ++ (U64)ECB_entries_max_bits(pecb, idx); ++ } ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn static VOID unc_pci_Enable_PMU(PVOID) ++ * ++ * @brief Set the enable bit for all the EVSEL registers ++ * ++ * @param Device Index of this PMU unit ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID unc_pci_Enable_PMU(PVOID param) ++{ ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 package_num = 0; ++ U32 dev_node; ++ U32 reg_val = 0; ++ U32 busno; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]); ++ ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); ++ return; ++ } ++ ++ package_num = core_to_package_map[this_cpu]; ++ ++ if (!IS_BUS_MAP_VALID(dev_node, package_num)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", ++ dev_node); ++ return; ++ } ++ ++ busno = GET_BUS_MAP(dev_node, package_num); ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_ENABLE) ++ { ++ if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), ++ ECB_entries_reg_value(pecb, idx)); ++ continue; ++ } ++ reg_val = (U32)ECB_entries_reg_value(pecb, idx); ++ if (ECB_entries_reg_rw_type(pecb, idx) == ++ PMU_REG_RW_READ_WRITE) { ++ reg_val = PCI_Read_U32(busno, ++ ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx)); ++ reg_val &= ECB_entries_reg_value(pecb, idx); ++ } ++ PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx), reg_val); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn static VOID unc_pci_Disable_PMU(PVOID) ++ * ++ * @brief Disable the per unit global control to stop the PMU counters. ++ * ++ * @param Device Index of this PMU unit ++ * @control_msr Control MSR address ++ * @enable_val If counter freeze bit does not work, counter enable bit should be cleared ++ * @disable_val Disable collection ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID unc_pci_Disable_PMU(PVOID param) ++{ ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 package_num = 0; ++ U32 dev_node; ++ U32 reg_val = 0; ++ U32 busno; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]); ++ ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); ++ return; ++ } ++ ++ package_num = core_to_package_map[this_cpu]; ++ ++ if (!IS_BUS_MAP_VALID(dev_node, package_num)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", ++ dev_node); ++ return; ++ } ++ ++ busno = GET_BUS_MAP(dev_node, package_num); ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_DISABLE) ++ { ++ if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) { ++ SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), ++ ECB_entries_reg_value(pecb, idx)); ++ continue; ++ } ++ reg_val = (U32)ECB_entries_reg_value(pecb, idx); ++ if (ECB_entries_reg_rw_type(pecb, idx) == ++ PMU_REG_RW_READ_WRITE) { ++ reg_val = PCI_Read_U32(busno, ++ ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx)); ++ reg_val |= ECB_entries_reg_value(pecb, idx); ++ } ++ PCI_Write_U32(busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx), reg_val); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* 
------------------------------------------------------------------------- */ ++/*! ++ * @fn static VOID unc_pci_Trigger_Read(id) ++ * ++ * @param id Device index ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore data from counters and store into buffer ++ */ ++static VOID unc_pci_Trigger_Read(PVOID param, U32 id) ++{ ++ U32 this_cpu = 0; ++ U32 package_num = 0; ++ U32 dev_node = 0; ++ U32 cur_grp = 0; ++ ECB pecb = NULL; ++ U32 index = 0; ++ U64 value_low = 0; ++ U64 value_high = 0; ++ U64 diff = 0; ++ U64 value; ++ U64 *data; ++ U32 busno; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ package_num = core_to_package_map[this_cpu]; ++ dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[id]); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp]; ++ ++ if (!IS_BUS_MAP_VALID(dev_node, package_num)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", ++ dev_node); ++ return; ++ } ++ ++ busno = GET_BUS_MAP(dev_node, package_num); ++ ++ // Write GroupID ++ data = (U64 *)((S8 *)param + ECB_group_offset(pecb)); ++ *data = cur_grp + 1; ++ // Read the counts into uncore buffer ++ FOR_EACH_REG_UNC_OPERATION(pecb, id, idx, PMU_OPERATION_READ) ++ { ++ // read lower 4 bytes ++ value_low = PCI_Read_U32(busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx)); ++ value = LOWER_4_BYTES_MASK & value_low; ++ ++ // read upper 4 bytes ++ value_high = PCI_Read_U32( ++ busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ (ECB_entries_reg_id(pecb, idx) + NEXT_ADDR_OFFSET)); ++ value |= value_high << NEXT_ADDR_SHIFT; ++ //check for overflow ++ if (value < ++ LWPMU_DEVICE_prev_value(&devices[id])[package_num][index]) { ++ diff = LWPMU_DEVICE_counter_mask(&devices[id]) - ++ LWPMU_DEVICE_prev_value( ++ &devices[id])[package_num][index]; ++ diff += value; ++ } 
else { ++ diff = value - ++ LWPMU_DEVICE_prev_value( ++ &devices[id])[package_num][index]; ++ } ++ LWPMU_DEVICE_acc_value( ++ &devices[id])[package_num][cur_grp][index] += diff; ++ LWPMU_DEVICE_prev_value(&devices[id])[package_num][index] = ++ value; ++ data = (U64 *)((S8 *)param + ++ ECB_entries_counter_event_offset(pecb, idx)); ++ *data = LWPMU_DEVICE_acc_value( ++ &devices[id])[package_num][cur_grp][index]; ++ index++; ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn static unc_pci_Read_PMU_Data(param) ++ * ++ * @param param The device index ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore count data and store into the buffer; ++ */ ++static VOID unc_pci_Read_PMU_Data(PVOID param) ++{ ++ U32 j = 0; ++ U32 dev_idx; ++ U32 this_cpu; ++ U64 *buffer = read_counter_info; ++ CPU_STATE pcpu; ++ U32 cur_grp; ++ ECB pecb; ++ U32 dev_node; ++ U32 package_num = 0; ++ U32 busno; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ package_num = core_to_package_map[this_cpu]; ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp]; ++ dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]); ++ ++ // NOTE THAT the read_pmu function on for EMON collection. 
++ if (!DRV_CONFIG_emon_mode(drv_cfg)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); ++ return; ++ } ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); ++ return; ++ } ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ if (!IS_BUS_MAP_VALID(dev_node, package_num)) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", ++ dev_node); ++ return; ++ } ++ ++ busno = GET_BUS_MAP(dev_node, package_num); ++ ++ //Read in the counts into temporary buffer ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) ++ { ++ if (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT) { ++ j = ECB_entries_uncore_buffer_offset_in_system(pecb, ++ idx); ++ } else { ++ j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( ++ package_num, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ ECB_entries_uncore_buffer_offset_in_package( ++ pecb, idx)); ++ } ++ ++ buffer[j] = PCI_Read_U64(busno, ECB_entries_dev_no(pecb, idx), ++ ECB_entries_func_no(pecb, idx), ++ ECB_entries_reg_id(pecb, idx)); ++ ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], ++ this_cpu); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++ ++DISPATCH_NODE unc_pci_dispatch = { .init = NULL, ++ .fini = NULL, ++ .write = unc_pci_Write_PMU, ++ .freeze = unc_pci_Disable_PMU, ++ .restart = unc_pci_Enable_PMU, ++ .read_data = unc_pci_Read_PMU_Data, ++ .check_overflow = NULL, ++ .swap_group = NULL, ++ .read_lbrs = NULL, ++ .cleanup = NULL, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = NULL, ++ .trigger_read = unc_pci_Trigger_Read, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; +diff --git a/drivers/platform/x86/sepdk/sep/unc_power.c 
b/drivers/platform/x86/sepdk/sep/unc_power.c +new file mode 100755 +index 000000000000..4f7d8ff43744 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/unc_power.c +@@ -0,0 +1,444 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "inc/ecb_iterators.h" ++#include "inc/control.h" ++#include "inc/unc_common.h" ++#include "inc/utility.h" ++ ++extern U64 *read_counter_info; ++extern U64 *prev_counter_data; ++extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; ++static U64 **prev_val_per_thread; ++static U64 **acc_per_thread; ++extern DRV_CONFIG drv_cfg; ++ ++/*! 
++ * @fn unc_power_Allocate(param) ++ * ++ * @param param device index ++ * ++ * @return None No return needed ++ * ++ * @brief Allocate arrays required for reading counts ++ */ ++static VOID unc_power_Allocate(PVOID param) ++{ ++ U32 id; ++ U32 cur_grp; ++ ECB pecb; ++ U32 i; ++ U32 j; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ id = *((U32 *)param); ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[0]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp]; ++ ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ acc_per_thread = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64 *)); ++ if (acc_per_thread == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Unable to allocate memory for acc_per_thread!"); ++ return; ++ } ++ ++ prev_val_per_thread = CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64 *)); ++ if (prev_val_per_thread == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Unable to allocate memory for prev_val_per_thread!"); ++ return; ++ } ++ ++ for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { ++ acc_per_thread[i] = CONTROL_Allocate_Memory( ++ ECB_num_events(pecb) * sizeof(U64)); ++ if (acc_per_thread[i] == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Unable to allocate memory for acc_per_thread[%u]!", ++ i); ++ return; ++ } ++ ++ prev_val_per_thread[i] = CONTROL_Allocate_Memory( ++ ECB_num_events(pecb) * sizeof(U64)); ++ if (prev_val_per_thread[i] == NULL) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Unable to allocate memory for prev_val_per_thread[%u]!", ++ i); ++ return; ++ } ++ ++ // initialize all values to 0 ++ for (j = 0; j < ECB_num_events(pecb); j++) { ++ acc_per_thread[i][j] = 0LL; ++ prev_val_per_thread[i][j] = 0LL; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn unc_power_Free(param) ++ * ++ * @param param device index ++ * ++ * @return None No return needed ++ * ++ * @brief Free arrays required for reading counts ++ */ ++static VOID unc_power_Free(PVOID param) ++{ ++ U32 i; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ if (acc_per_thread) { ++ for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { ++ acc_per_thread[i] = ++ CONTROL_Free_Memory(acc_per_thread[i]); ++ } ++ acc_per_thread = CONTROL_Free_Memory(acc_per_thread); ++ } ++ ++ if (prev_val_per_thread) { ++ for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { ++ prev_val_per_thread[i] = ++ CONTROL_Free_Memory(prev_val_per_thread[i]); ++ } ++ prev_val_per_thread = CONTROL_Free_Memory(prev_val_per_thread); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn unc_power_Read_Counts(param, id, mask) ++ * ++ * @param param pointer to sample buffer ++ * @param id device index ++ * @param mask The mask bits for value ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore count data and store into the buffer param ++ */ ++static VOID unc_power_Trigger_Read(PVOID param, U32 id) ++{ ++ U64 *data = (U64 *)param; ++ U32 cur_grp; ++ ECB pecb; ++ U32 this_cpu; ++ U32 package_num; ++ U32 index = 0; ++ U64 diff = 0; ++ U64 value; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ package_num = core_to_package_map[this_cpu]; ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp]; ++ ++ // Write GroupID ++ data = (U64 *)((S8 *)data + ECB_group_offset(pecb)); ++ *data = cur_grp + 1; ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, id, idx, PMU_OPERATION_READ) ++ { ++ data = (U64 *)((S8 *)param + ++ ECB_entries_counter_event_offset(pecb, idx)); ++ value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); ++ if (ECB_entries_max_bits(pecb, idx)) { ++ value &= ECB_entries_max_bits(pecb, idx); ++ } ++ //check for overflow if not a 
static counter ++ if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) { ++ *data = value; ++ } else { ++ if (value < prev_val_per_thread[this_cpu][index]) { ++ diff = ECB_entries_max_bits(pecb, idx) - ++ prev_val_per_thread[this_cpu][index]; ++ diff += value; ++ } else { ++ diff = value - ++ prev_val_per_thread[this_cpu][index]; ++ } ++ acc_per_thread[this_cpu][index] += diff; ++ prev_val_per_thread[this_cpu][index] = value; ++ *data = acc_per_thread[this_cpu][index]; ++ } ++ index++; ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn unc_power_Enable_PMU(param) ++ * ++ * @param None ++ * ++ * @return None ++ * ++ * @brief Capture the previous values to calculate delta later. ++ */ ++static VOID unc_power_Enable_PMU(PVOID param) ++{ ++ U32 j; ++ U64 *buffer = prev_counter_data; ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 package_event_count = 0; ++ U32 thread_event_count = 0; ++ U32 module_event_count = 0; ++ U64 tmp_value = 0; ++ U32 package_id = 0; ++ U32 core_id = 0; ++ U32 thread_id = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ package_id = core_to_package_map[this_cpu]; ++ core_id = core_to_phys_core_map[this_cpu]; ++ thread_id = core_to_thread_map[this_cpu]; ++ ++ // NOTE THAT the enable function currently captures previous values ++ // for EMON collection to avoid unnecessary memory copy. 
++ if (!DRV_CONFIG_emon_mode(drv_cfg)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); ++ return; ++ } ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) ++ { ++ if (ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) { ++ j = EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( ++ package_id, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( ++ emon_buffer_driver_helper), ++ package_event_count); ++ package_event_count++; ++ } else if (ECB_entries_event_scope(pecb, idx) == MODULE_EVENT) { ++ j = EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( ++ package_id, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_num_package_events( ++ emon_buffer_driver_helper), ++ CPU_STATE_cpu_module_master(pcpu), ++ EMON_BUFFER_DRIVER_HELPER_power_num_module_events( ++ emon_buffer_driver_helper), ++ module_event_count); ++ module_event_count++; ++ } else { ++ j = EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( ++ package_id, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_num_package_events( ++ emon_buffer_driver_helper), ++ GLOBAL_STATE_num_modules(driver_state), ++ EMON_BUFFER_DRIVER_HELPER_power_num_module_events( ++ emon_buffer_driver_helper), ++ core_id, threads_per_core[cpu], thread_id, ++ EMON_BUFFER_DRIVER_HELPER_power_num_thread_events( ++ emon_buffer_driver_helper), ++ thread_event_count); ++ thread_event_count++; ++ } ++ ++ tmp_value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); ++ if (ECB_entries_max_bits(pecb, idx)) { ++ tmp_value &= ECB_entries_max_bits(pecb, idx); ++ } ++ buffer[j] = tmp_value; ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, 
cpu=%u", j, buffer[j], ++ this_cpu); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn unc_power_Read_PMU_Data(param) ++ * ++ * @param param The read thread node to process ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore count data and store into the buffer param; ++ * Uncore PMU does not support sampling, i.e. ignore the id parameter. ++ */ ++static VOID unc_power_Read_PMU_Data(PVOID param) ++{ ++ U32 j; ++ U64 *buffer = read_counter_info; ++ U64 *prev_buffer = prev_counter_data; ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 package_event_count = 0; ++ U32 thread_event_count = 0; ++ U32 module_event_count = 0; ++ U64 tmp_value; ++ U32 package_id = 0; ++ U32 core_id = 0; ++ U32 thread_id = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ package_id = core_to_package_map[this_cpu]; ++ core_id = core_to_phys_core_map[this_cpu]; ++ thread_id = core_to_thread_map[this_cpu]; ++ ++ // NOTE THAT the read_pmu function on for EMON collection. 
++ if (!DRV_CONFIG_emon_mode(drv_cfg)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); ++ return; ++ } ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) ++ { ++ if (ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) { ++ j = EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( ++ package_id, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( ++ emon_buffer_driver_helper), ++ package_event_count); ++ package_event_count++; ++ } else if (ECB_entries_event_scope(pecb, idx) == MODULE_EVENT) { ++ j = EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( ++ package_id, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_num_package_events( ++ emon_buffer_driver_helper), ++ CPU_STATE_cpu_module_master(pcpu), ++ EMON_BUFFER_DRIVER_HELPER_power_num_module_events( ++ emon_buffer_driver_helper), ++ module_event_count); ++ module_event_count++; ++ } else { ++ j = EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( ++ package_id, ++ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( ++ emon_buffer_driver_helper), ++ EMON_BUFFER_DRIVER_HELPER_power_num_package_events( ++ emon_buffer_driver_helper), ++ GLOBAL_STATE_num_modules(driver_state), ++ EMON_BUFFER_DRIVER_HELPER_power_num_module_events( ++ emon_buffer_driver_helper), ++ core_id, threads_per_core[cpu], thread_id, ++ EMON_BUFFER_DRIVER_HELPER_power_num_thread_events( ++ emon_buffer_driver_helper), ++ thread_event_count); ++ thread_event_count++; ++ } ++ ++ tmp_value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); ++ if (ECB_entries_max_bits(pecb, idx)) { ++ tmp_value &= ECB_entries_max_bits(pecb, idx); ++ } ++ if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) { 
++ buffer[j] = tmp_value; ++ } else { ++ if (tmp_value >= prev_buffer[j]) { ++ buffer[j] = tmp_value - prev_buffer[j]; ++ } else { ++ buffer[j] = tmp_value + ++ (ECB_entries_max_bits(pecb, idx) - ++ prev_buffer[j]); ++ } ++ } ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], ++ this_cpu); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++ ++DISPATCH_NODE unc_power_dispatch = { .init = unc_power_Allocate, ++ .fini = unc_power_Free, ++ .write = UNC_COMMON_Dummy_Func, ++ .freeze = NULL, ++ .restart = unc_power_Enable_PMU, ++ .read_data = unc_power_Read_PMU_Data, ++ .check_overflow = NULL, ++ .swap_group = NULL, ++ .read_lbrs = NULL, ++ .cleanup = NULL, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = NULL, ++ .trigger_read = unc_power_Trigger_Read, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; +diff --git a/drivers/platform/x86/sepdk/sep/unc_sa.c b/drivers/platform/x86/sepdk/sep/unc_sa.c +new file mode 100755 +index 000000000000..7345807f9588 +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/unc_sa.c +@@ -0,0 +1,173 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "inc/ecb_iterators.h" ++#include "inc/control.h" ++#include "inc/haswellunc_sa.h" ++#include "inc/utility.h" ++ ++#if 0 ++extern U64 *read_counter_info; ++extern DRV_CONFIG drv_cfg; ++ ++extern VOID SOCPERF_Read_Data3(PVOID data_buffer); ++#endif ++ ++/*! ++ * @fn static VOID hswunc_sa_Initialize(PVOID) ++ * ++ * @brief Initialize any registers or addresses ++ * ++ * @param param ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID hswunc_sa_Initialize(VOID *param) ++{ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ SEP_DRV_LOG_TRACE_OUT("Empty function."); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn hswunc_sa_Read_Counts(param, id) ++ * ++ * @param param The read thread node to process ++ * @param id The id refers to the device index ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore count data and store into the buffer param; ++ * ++ */ ++static VOID hswunc_sa_Trigger_Read(PVOID param, U32 id) ++{ ++#if 0 ++ U64 *data = (U64 *)param; ++ U32 cur_grp; ++ ECB pecb; ++ U32 this_cpu; ++ U32 package_num; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ package_num = core_to_package_map[this_cpu]; ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp]; ++ ++ // group id ++ data = (U64 *)((S8 *)data + ECB_group_offset(pecb)); ++ SOCPERF_Read_Data3((void*)data); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++#endif ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn hswunc_sa_Read_PMU_Data(param) ++ * ++ * @param param the device index ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore count data and store into the buffer param; ++ * ++ */ ++static VOID hswunc_sa_Read_PMU_Data(PVOID param) ++{ ++#if 0 ++ U32 j; ++ U64 *buffer = read_counter_info; ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 event_index = 0; ++ U64 counter_buffer[HSWUNC_SA_MAX_COUNTERS + 1]; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ ++ // NOTE THAT the read_pmu function on for EMON collection. 
++ if (!DRV_CONFIG_emon_mode(drv_cfg)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); ++ return; ++ } ++ if (!CPU_STATE_system_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); ++ return; ++ } ++ ++ SOCPERF_Read_Data3((void*)counter_buffer); ++ ++ FOR_EACH_PCI_DATA_REG_RAW(pecb, i, dev_idx) ++ { ++ j = ECB_entries_uncore_buffer_offset_in_system(pecb, i); ++ buffer[j] = counter_buffer[event_index + 1]; ++ event_index++; ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], ++ this_cpu); ++ } ++ END_FOR_EACH_PCI_DATA_REG_RAW; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++#endif ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++ ++DISPATCH_NODE hswunc_sa_dispatch = { .init = hswunc_sa_Initialize, ++ .fini = NULL, ++ .write = NULL, ++ .freeze = NULL, ++ .restart = NULL, ++ .read_data = hswunc_sa_Read_PMU_Data, ++ .check_overflow = NULL, ++ .swap_group = NULL, ++ .read_lbrs = NULL, ++ .cleanup = NULL, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = NULL, ++ .trigger_read = hswunc_sa_Trigger_Read, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL }; +diff --git a/drivers/platform/x86/sepdk/sep/utility.c b/drivers/platform/x86/sepdk/sep/utility.c +new file mode 100755 +index 000000000000..cc4f0cba5e9e +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/utility.c +@@ -0,0 +1,1157 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. 
++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "rise_errors.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv.h" ++#include "control.h" ++#include "core2.h" ++#include "silvermont.h" ++#include "perfver4.h" ++#include "valleyview_sochap.h" ++#include "unc_gt.h" ++#include "haswellunc_sa.h" ++#if defined(BUILD_CHIPSET) ++#include "chap.h" ++#endif ++#include "utility.h" ++#if defined(BUILD_CHIPSET) ++#include "lwpmudrv_chipset.h" ++#include "gmch.h" ++#endif ++ ++#include "control.h" ++ ++//volatile int config_done; ++ ++ ++#if defined(BUILD_CHIPSET) ++extern CHIPSET_CONFIG pma; ++#endif ++ ++VOID UTILITY_down_read_mm(struct mm_struct *mm) ++{ ++ SEP_DRV_LOG_TRACE_IN("Mm: %p.", mm); ++ ++ down_read((struct rw_semaphore *)&mm->mmap_sem); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++VOID UTILITY_up_read_mm(struct mm_struct *mm) ++{ ++ SEP_DRV_LOG_TRACE_IN("Mm: %p.", mm); ++ ++ up_read((struct rw_semaphore 
*)&mm->mmap_sem); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++// NOT to be instrumented, used inside DRV_LOG! ++VOID UTILITY_Read_TSC(U64 *pTsc) ++{ ++ *pTsc = rdtsc_ordered(); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID UTILITY_Read_Cpuid ++ * ++ * @brief executes the cpuid_function of cpuid and returns values ++ * ++ * @param IN cpuid_function ++ * OUT rax - results of the cpuid instruction in the ++ * OUT rbx - corresponding registers ++ * OUT rcx ++ * OUT rdx ++ * ++ * @return none ++ * ++ * Special Notes: ++ * ++ * ++ */ ++VOID UTILITY_Read_Cpuid(U64 cpuid_function, U64 *rax_value, ++ U64 *rbx_value, U64 *rcx_value, U64 *rdx_value) ++{ ++ U32 function; ++ U32 *eax, *ebx, *ecx, *edx; ++ ++ SEP_DRV_LOG_TRACE_IN( ++ "Fn: %llu, rax_p: %p, rbx_p: %p, rcx_p: %p, rdx_p: %p.", ++ cpuid_function, rax_value, rbx_value, rcx_value, rdx_value); ++ ++#if defined(DRV_SEP_ACRN_ON) ++ if (cpuid_function != 0x40000000) { ++ struct profiling_pcpuid pcpuid; ++ memset(&pcpuid, 0, sizeof(struct profiling_pcpuid)); ++ pcpuid.leaf = (U32)cpuid_function; ++ if (rcx_value != NULL) { ++ pcpuid.subleaf = (U32)*rcx_value; ++ } ++ ++ BUG_ON(!virt_addr_valid(&pcpuid)); ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_PCPUID, ++ virt_to_phys(&pcpuid)); ++ ++ if (rax_value != NULL) { ++ *rax_value = pcpuid.eax; ++ } ++ if (rbx_value != NULL) { ++ *rbx_value = pcpuid.ebx; ++ } ++ if (rcx_value != NULL) { ++ *rcx_value = pcpuid.ecx; ++ } ++ if (rdx_value != NULL) { ++ *rdx_value = pcpuid.edx; ++ } ++ return; ++ } ++#endif ++ function = (U32)cpuid_function; ++ eax = (U32 *)rax_value; ++ ebx = (U32 *)rbx_value; ++ ecx = (U32 *)rcx_value; ++ edx = (U32 *)rdx_value; ++ ++ *eax = function; ++ ++ __asm__("cpuid" ++ : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) ++ : "a"(function), "b"(*ebx), "c"(*ecx), "d"(*edx)); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- 
*/ ++/*! ++ * @fn VOID UTILITY_Configure_CPU ++ * ++ * @brief Reads the CPU information from the hardware ++ * ++ * @param param dispatch_id - The id of the dispatch table. ++ * ++ * @return Pointer to the correct dispatch table for the CPU architecture ++ * ++ * Special Notes: ++ * ++ */ ++DISPATCH UTILITY_Configure_CPU(U32 dispatch_id) ++{ ++ DISPATCH dispatch = NULL; ++ ++ SEP_DRV_LOG_TRACE_IN("Dispatch_id: %u.", dispatch_id); ++ ++ switch (dispatch_id) { ++ case 1: ++ SEP_DRV_LOG_INIT( ++ "Set up the Core(TM)2 processor dispatch table."); ++ dispatch = &core2_dispatch; ++ break; ++ case 6: ++ SEP_DRV_LOG_INIT("Set up the Silvermont dispatch table."); ++ dispatch = &silvermont_dispatch; ++ break; ++ case 7: ++ SEP_DRV_LOG_INIT( ++ "Set up the perfver4 HTON dispatch table such as Skylake."); ++ dispatch = &perfver4_dispatch; ++ break; ++ case 8: ++ SEP_DRV_LOG_INIT( ++ "Set up the perfver4 HTOFF dispatch table such as Skylake."); ++ dispatch = &perfver4_dispatch_htoff_mode; ++ break; ++ case 11: ++ SEP_DRV_LOG_INIT( ++ "Set up the perfver4 NONHT dispatch table such as Icelake."); ++ dispatch = &perfver4_dispatch_nonht_mode; ++ break; ++ case 700: ++ case 701: ++ case 1100: ++ SEP_DRV_LOG_INIT("Set up the Valleyview SA dispatch table."); ++ dispatch = &valleyview_visa_dispatch; ++ break; ++ case 2: ++ SEP_DRV_LOG_INIT( ++ "Set up the Core i7(TM) processor dispatch table."); ++ dispatch = &corei7_dispatch; ++ break; ++ case 3: ++ SEP_DRV_LOG_INIT("Set up the Core i7(TM) dispatch table."); ++ dispatch = &corei7_dispatch_htoff_mode; ++ break; ++ case 4: ++ SEP_DRV_LOG_INIT( ++ "Set up the Sandybridge processor dispatch table."); ++ dispatch = &corei7_dispatch_2; ++ break; ++ case 5: ++ SEP_DRV_LOG_INIT("Set up the Sandybridge dispatch table."); ++ dispatch = &corei7_dispatch_htoff_mode_2; ++ break; ++ case 9: ++ SEP_DRV_LOG_INIT( ++ "Set up the Nehalem, Westemere dispatch table."); ++ dispatch = &corei7_dispatch_nehalem; ++ break; ++ case 10: ++ SEP_DRV_LOG_INIT("Set 
up the Knights family dispatch table."); ++ dispatch = &knights_dispatch; ++ break; ++ case 100: ++ SEP_DRV_LOG_INIT("Set up the MSR based uncore dispatch table."); ++ dispatch = &unc_msr_dispatch; ++ break; ++ case 110: ++ SEP_DRV_LOG_INIT("Set up the PCI Based Uncore dispatch table."); ++ dispatch = &unc_pci_dispatch; ++ break; ++ case 120: ++ SEP_DRV_LOG_INIT( ++ "Set up the MMIO based uncore dispatch table."); ++ dispatch = &unc_mmio_dispatch; ++ break; ++ case 121: ++ SEP_DRV_LOG_INIT( ++ "Set up the MMIO based uncore dispatch table for FPGA."); ++ dispatch = &unc_mmio_fpga_dispatch; ++ break; ++ case 130: ++ SEP_DRV_LOG_INIT("Set up the Uncore Power dispatch table."); ++ dispatch = &unc_power_dispatch; ++ break; ++ case 230: ++ SEP_DRV_LOG_INIT("Set up the Haswell SA dispatch table."); ++ dispatch = &hswunc_sa_dispatch; ++ break; ++ case 400: ++ SEP_DRV_LOG_INIT("Set up the GT dispatch table."); ++ dispatch = &unc_gt_dispatch; ++ break; ++ default: ++ dispatch = NULL; ++ SEP_DRV_LOG_ERROR( ++ "Architecture not supported (dispatch_id: %d).", ++ dispatch_id); ++ break; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %p.", dispatch); ++ return dispatch; ++} ++ ++U64 SYS_MMIO_Read64(U64 baseAddress, U64 offset) ++{ ++ U64 res = 0; ++#if defined(DRV_EM64T) ++ SEP_DRV_LOG_REGISTER_IN("Will read MMIO *(0x%llx + 0x%llx).", ++ baseAddress, offset); ++ ++ if (baseAddress) { ++ volatile U64 *p = ++ (U64 *)(baseAddress + offset); // offset is in bytes ++ res = *p; ++ } else { ++ SEP_DRV_LOG_ERROR("BaseAddress is NULL!"); ++ res = (U64)-1; // typical value for undefined CSR ++ } ++ ++ SEP_DRV_LOG_REGISTER_OUT("Has read MMIO *(0x%llx + 0x%llx): 0x%llx.", ++ baseAddress, offset, res); ++#endif ++ return res; ++} ++ ++U64 SYS_Read_MSR(U32 msr) ++{ ++ U64 val = 0; ++ ++#if defined(DRV_DEBUG_MSR) ++ int error; ++ SEP_DRV_LOG_REGISTER_IN("Will safely read MSR 0x%x.", msr); ++ error = rdmsrl_safe(msr, &val); ++ if (error) { ++ SEP_DRV_LOG_ERROR("Failed to read MSR 0x%x.", msr); ++ } ++ 
SEP_DRV_LOG_REGISTER_OUT("Has read MSR 0x%x: 0x%llx (error: %d).", msr, ++ val, error); ++#else ++ SEP_DRV_LOG_REGISTER_IN("Will read MSR 0x%x.", msr); ++ rdmsrl(msr, val); ++ SEP_DRV_LOG_REGISTER_OUT("Has read MSR 0x%x: 0x%llx.", msr, val); ++#endif ++ ++ return val; ++} ++ ++void SYS_Write_MSR(U32 msr, U64 val) ++{ ++#if defined(DRV_DEBUG_MSR) ++ int error; ++ SEP_DRV_LOG_REGISTER_IN("Will safely write MSR 0x%x: 0x%llx.", msr, ++ val); ++ error = wrmsr_safe(msr, (U32)val, (U32)(val >> 32)); ++ if (error) { ++ SEP_DRV_LOG_ERROR("Failed to write MSR 0x%x: 0x%llx.", msr, ++ val); ++ } ++ SEP_DRV_LOG_REGISTER_OUT("Wrote MSR 0x%x: 0x%llx (error: %d).", msr, ++ val, error); ++ ++#else // !DRV_DEBUG_MSR ++ SEP_DRV_LOG_REGISTER_IN("Will write MSR 0x%x: 0x%llx.", msr, val); ++#if defined(DRV_IA32) ++ wrmsr(msr, (U32)val, (U32)(val >> 32)); ++#endif ++#if defined(DRV_EM64T) ++ wrmsrl(msr, val); ++#endif ++ SEP_DRV_LOG_REGISTER_OUT("Wrote MSR 0x%x: 0x%llx.", msr, val); ++ ++#endif // !DRV_DEBUG_MSR ++} ++ ++#if defined(BUILD_CHIPSET) ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID UTILITY_Configure_Chipset ++ * ++ * @brief Configures the chipset information ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * Special Notes: ++ * ++ */ ++CS_DISPATCH UTILITY_Configure_Chipset(void) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (CHIPSET_CONFIG_gmch_chipset(pma)) { ++ cs_dispatch = &gmch_dispatch; ++ SEP_DRV_LOG_INIT("Using GMCH dispatch table."); ++ } else if (CHIPSET_CONFIG_mch_chipset(pma) || ++ CHIPSET_CONFIG_ich_chipset(pma)) { ++ cs_dispatch = &chap_dispatch; ++ SEP_DRV_LOG_INIT("Using CHAP dispatch table."); ++ } else { ++ SEP_DRV_LOG_ERROR("Unable to map chipset dispatch table!"); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %p.", cs_dispatch); ++ return cs_dispatch; ++} ++ ++#endif ++ ++#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32) ++static unsigned long utility_Compare_Symbol_Names_Return_Value; ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static int utility_Compare_Symbol_Names (void* ref_name, ++ * const char* symbol_name, struct module* dummy, unsigned long symbol_address) ++ * ++ * @brief Comparator for kallsyms_on_each_symbol. ++ * ++ * @param ++ * void * ref_name : Symbol we are looking for ++ * const char * symbol_name : Name of the current symbol being evaluated ++ * struct module* dummy : Pointer to the module structure. Not needed. ++ * unsigned long symbol_address : Address of the current symbol being evaluated ++ * ++ * @return 1 if ref_name matches symbol_name, 0 otherwise. ++ * Fills utility_Compare_Symbol_Names_Return_Value with the symbol's address ++ * on success. ++ * ++ * Special Notes: ++ * Only used as a callback comparator for kallsyms_on_each_symbol. 
++ */ ++static int utility_Compare_Symbol_Names(void *ref_name, const char *symbol_name, ++ struct module *dummy, ++ unsigned long symbol_address) ++{ ++ int res = 0; ++ ++ SEP_DRV_LOG_TRACE_IN( ++ "Ref_name: %p, symbol_name: %p, dummy: %p, symbol_address: %u.", ++ ref_name, symbol_name, dummy, symbol_address); ++ ++ if (strcmp((char *)ref_name, symbol_name) == 0) { ++ utility_Compare_Symbol_Names_Return_Value = symbol_address; ++ res = 1; ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); ++ return res; ++} ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern unsigned long UTILITY_Find_Symbol (const char* name) ++ * ++ * @brief Finds the address of the specified kernel symbol. ++ * ++ * @param const char* name - name of the symbol to look for ++ * ++ * @return Symbol address (0 if could not find) ++ * ++ * Special Notes: ++ * This wrapper is needed due to kallsyms_lookup_name not being exported ++ * in kernel version 2.6.32.*. ++ * Careful! This code is *NOT* multithread-safe or reentrant! Should only ++ * be called from 1 context at a time! ++ */ ++unsigned long UTILITY_Find_Symbol(const char *name) ++{ ++ unsigned long res = 0; ++ ++ SEP_DRV_LOG_TRACE_IN("Name: %p.", name); ++ // Not printing the name to follow the log convention: *must not* ++ // dereference any pointer in an 'IN' message ++ ++#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32) ++ if (kallsyms_on_each_symbol(utility_Compare_Symbol_Names, ++ (void *)name)) { ++ res = utility_Compare_Symbol_Names_Return_Value; ++ } ++#else ++ res = kallsyms_lookup_name(name); ++#endif ++ ++ SEP_DRV_LOG_INIT("Name: '%s': 0x%llx.", name ? name : "NULL", ++ (unsigned long long)res); ++ // Printing here instead. (Paranoia in case of corrupt pointer.) 
++ ++ SEP_DRV_LOG_TRACE_OUT("Res: 0x%llx.", (unsigned long long)res); ++ return res; ++} ++ ++/* ++ ************************************ ++ * DRIVER LOG BUFFER DECLARATIONS * ++ ************************************ ++ */ ++ ++volatile U8 active_ioctl; ++ ++DRV_LOG_BUFFER driver_log_buffer; ++ ++static const char *drv_log_categories[DRV_NB_LOG_CATEGORIES] = { ++ "load", "init", "detection", "error", "state change", ++ "mark", "debug", "flow", "alloc", "interrupt", ++ "trace", "register", "notification", "warning" ++}; ++ ++#define DRV_LOG_NB_DRIVER_STATES 9 ++static const char *drv_log_states[DRV_LOG_NB_DRIVER_STATES] = { ++ "Uninitialized", "Reserved", "Idle", "Paused", "Stopped", ++ "Running", "Pausing", "Prepare_Stop", "Terminating" ++}; ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static VOID utility_Driver_Log_Kprint_Helper ++ * (U8 category, char** category_string, ++ * U8 secondary, char** secondary_string_1, ++ * char** secondary_string_2, char** secondary_string_3, ++ * char** secondary_string_4) ++ * ++ * @brief Helper function for printing log messages to the system log. 
++ * ++ * @param IN category - message category ++ * IN/OUT category_string - location where to place a pointer ++ * to the category's name ++ * IN secondary - secondary field value for the message ++ * IN/OUT secondary_string_1 - location where to place a pointer to ++ * the 1st part of the secondary info's decoded information ++ * IN/OUT secondary_string_2 - location where to place a pointer to ++ * the 2nd part of the secondary info's decoded information ++ * IN/OUT secondary_string_3 - location where to place a pointer to ++ * the 3rd part of the secondary info's decoded information ++ * IN/OUT secondary_string_4 - location where to place a pointer to ++ * the 4th part of the secondary info's decoded information ++ * ++ * @return none ++ * ++ * Special Notes: ++ * Allows a single format string to be used for all categories (instead of ++ * category-specific format strings) when calling printk, simplifying the ++ * print routine and reducing potential errors. There is a performance cost to ++ * this approach (forcing printk to process empty strings), but it ++ * should be dwarved by the cost of calling printk in the first place. ++ * NB: none of the input string pointers may be NULL! ++ */ ++static VOID utility_Driver_Log_Kprint_Helper( ++ U8 category, char **category_string, U8 secondary, ++ char **secondary_string_1, char **secondary_string_2, ++ char **secondary_string_3, char **secondary_string_4) ++{ ++ if (category >= DRV_NB_LOG_CATEGORIES) { ++ *category_string = "Unknown category"; ++ } else { ++ *category_string = (char *)drv_log_categories[category]; ++ } ++ ++ *secondary_string_1 = ""; ++ *secondary_string_2 = ""; ++ *secondary_string_3 = ""; ++ *secondary_string_4 = ""; ++ ++ switch (category) { ++ case DRV_LOG_CATEGORY_FLOW: ++ case DRV_LOG_CATEGORY_TRACE: ++ case DRV_LOG_CATEGORY_INTERRUPT: ++ // we should *never* be kprinting from an interrupt context... 
++ if (secondary != DRV_LOG_NOTHING) { ++ *secondary_string_1 = ", "; ++ if (secondary == DRV_LOG_FLOW_IN) { ++ *secondary_string_2 = "Entering"; ++ } else if (secondary == DRV_LOG_FLOW_OUT) { ++ *secondary_string_2 = "Leaving"; ++ } ++ } ++ break; ++ case DRV_LOG_CATEGORY_STATE_CHANGE: { ++ U8 orig_state, dest_state; ++ ++ orig_state = (secondary & 0xF0) >> 4; ++ dest_state = secondary & 0x0F; ++ ++ *secondary_string_1 = ", "; ++ ++ if (orig_state < DRV_LOG_NB_DRIVER_STATES) { ++ *secondary_string_2 = ++ (char *)drv_log_states[orig_state]; ++ } else { ++ *secondary_string_2 = "Unknown_state"; ++ } ++ ++ *secondary_string_3 = " -> "; ++ ++ if (dest_state < DRV_LOG_NB_DRIVER_STATES) { ++ *secondary_string_4 = ++ (char *)drv_log_states[dest_state]; ++ } else { ++ *secondary_string_4 = "Unknown_state"; ++ } ++ } break; ++ ++ default: ++ break; ++ } ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static inline VOID utility_Log_Write ( ++ * U8 destination, U8 category, U8 secondary, ++ * const char* function_name, U32 func_name_len, ++ * U32 line_number, U64 tsc, U8 ioctl, U16 processor_id, ++ * U8 driver_state, U16 nb_active_interrupts, ++ * U16 nb_active_notifications, ++ * const char* format_string, ...) ++ * ++ * @brief Checks whether and where the message should be logged, and logs ++ * it as appropriate. 
++ * ++ * @param ++ * U8 destination - whether to write to the primary (0) ++ * or the auxiliary log buffer (1) ++ * U8 category - message category ++ * U8 secondary - secondary information field for the message ++ * const char* function_name - name of the calling function ++ * U32 func_name_len - length of the name of the calling function ++ * (more efficient to pass it as parameter than finding it back at runtime) ++ * U32 line_number - line number of the call site ++ * U64 tsc - time stamp value to use ++ * U8 ioctl - current active ioctl ++ * U16 processor_id - id of the active core/thread ++ * U8 driver_state - current driver state ++ * U16 nb_active_interrupts - number of interrupts currently being ++ * processed ++ * U16 nb_active_notifications - number of notifications currently being ++ * processed ++ * const char* format_string - classical format string for printf-like funcs ++ * ... - elements to print ++ * ++ * @return none ++ * ++ * Special Notes: ++ * Writes the specified message to the specified log buffer. ++ * The order of writes (integrity tag at the beginning, overflow tag at ++ * the very end) matters to ensure the logged information can be detected ++ * to be only partially written if applicable). Much of the needed information ++ * (active core, driver state, tsc..) is passed through the stack (instead of ++ * obtained inside utility_Log_Write) to guarantee entries representing the ++ * same message (or log call) in different channels use consistent information, ++ * letting the decoder reliably identify duplicates. 
++ */ ++static inline VOID utility_Log_Write(U8 destination, U8 category, U8 secondary, ++ const char *function_name, ++ U32 function_name_length, U32 line_number, ++ U64 tsc, U8 ioctl, U16 processor_id, ++ U8 driver_state, U16 nb_active_interrupts, ++ U16 nb_active_notifications, ++ const char *format_string, va_list args) ++{ ++ U32 entry_id; ++ U16 overflow_tag; ++ DRV_LOG_ENTRY entry; ++ char *target_func_buffer; ++ U32 local_func_name_length; ++ U32 i; ++ ++ if (destination == 0) { // primary buffer ++ entry_id = __sync_add_and_fetch( ++ &DRV_LOG_BUFFER_pri_entry_index(DRV_LOG()), 1); ++ overflow_tag = (U16)(entry_id / DRV_LOG_MAX_NB_PRI_ENTRIES); ++ entry = DRV_LOG_BUFFER_entries(DRV_LOG()) + ++ entry_id % DRV_LOG_MAX_NB_PRI_ENTRIES; ++ } else { ++ entry_id = __sync_add_and_fetch( ++ &DRV_LOG_BUFFER_aux_entry_index(DRV_LOG()), 1); ++ overflow_tag = (U16)(entry_id / DRV_LOG_MAX_NB_AUX_ENTRIES); ++ entry = DRV_LOG_BUFFER_entries(DRV_LOG()) + ++ DRV_LOG_MAX_NB_PRI_ENTRIES + ++ entry_id % DRV_LOG_MAX_NB_AUX_ENTRIES; ++ } ++ ++ DRV_LOG_COMPILER_MEM_BARRIER(); ++ DRV_LOG_ENTRY_integrity_tag(entry) = overflow_tag; ++ DRV_LOG_COMPILER_MEM_BARRIER(); ++ ++ if (format_string && ++ *format_string) { // setting this one first to try to increase MLP ++ DRV_VSNPRINTF(DRV_LOG_ENTRY_message(entry), ++ DRV_LOG_MESSAGE_LENGTH, DRV_LOG_MESSAGE_LENGTH, ++ format_string, args); ++ } else { ++ DRV_LOG_ENTRY_message(entry)[0] = 0; ++ } ++ ++ target_func_buffer = DRV_LOG_ENTRY_function_name(entry); ++ local_func_name_length = ++ function_name_length < DRV_LOG_FUNCTION_NAME_LENGTH ? 
++ function_name_length : ++ DRV_LOG_FUNCTION_NAME_LENGTH; ++ for (i = 0; i < local_func_name_length - 1; i++) { ++ target_func_buffer[i] = function_name[i]; ++ } ++ target_func_buffer[i] = 0; ++ ++ DRV_LOG_ENTRY_category(entry) = category; ++ DRV_LOG_ENTRY_secondary_info(entry) = secondary; ++ DRV_LOG_ENTRY_line_number(entry) = line_number; ++ DRV_LOG_ENTRY_active_drv_operation(entry) = ioctl; ++ DRV_LOG_ENTRY_processor_id(entry) = processor_id; ++ DRV_LOG_ENTRY_driver_state(entry) = driver_state; ++ DRV_LOG_ENTRY_nb_active_interrupts(entry) = nb_active_interrupts; ++ DRV_LOG_ENTRY_nb_active_notifications(entry) = nb_active_notifications; ++ DRV_LOG_ENTRY_tsc(entry) = tsc; ++ ++ DRV_LOG_COMPILER_MEM_BARRIER(); ++ DRV_LOG_ENTRY_temporal_tag(entry) = overflow_tag; ++ DRV_LOG_COMPILER_MEM_BARRIER(); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern void UTILITY_Log (U8 category, U8 in_notification, U8 secondary, ++ * const char* function_name, U32 func_name_len, ++ * U32 line_number, const char* format_string, ...) ++ * ++ * @brief Checks whether and where the message should be logged, ++ * and logs it as appropriate. ++ * ++ * @param ++ * U8 category - message category ++ * U8 in_notification - whether or not we are in a notification/OS ++ * callback context (this information cannot be reliably obtained without ++ * passing it through the stack) ++ * U8 secondary - secondary information field for the message ++ * const char* function_name - name of the calling function ++ * U32 func_name_len - length of the name of the calling function ++ * (more efficient to pass it as parameter than finding it back at runtime) ++ * U32 line_number - line number of the call site ++ * const char* format_string - classical format string for printf-like ++ * ... functions elements to print ++ * ++ * @return none ++ * ++ * Special Notes: ++ * Takes a snapshot of various elements (TSC, driver state, etc.) 
to ensure ++ * a single log call writes consistent information to all applicable channels ++ * (i.e. favoring consistency over instantaneous accuracy). ++ * See utility_Log_Write for details. ++ */ ++VOID UTILITY_Log(U8 category, U8 in_notification, U8 secondary, ++ const char *function_name, U32 func_name_len, ++ U32 line_number, const char *format_string, ...) ++{ ++ U64 tsc_snapshot; ++ U8 ioctl_snapshot; ++ U8 driver_state_snapshot; ++ U16 processor_id_snapshot; ++ U16 nb_active_interrupts_snapshot; ++ U16 nb_active_notifications_snapshot; ++ U8 category_verbosity; ++ U8 in_interrupt; ++ U8 is_enabled; ++ va_list args; ++ U32 i; ++ ++ category_verbosity = DRV_LOG_VERBOSITY(category); ++ processor_id_snapshot = raw_smp_processor_id(); ++ in_interrupt = ((pcb && atomic_read(&CPU_STATE_in_interrupt( ++ &pcb[processor_id_snapshot]))) + ++ (category == DRV_LOG_CATEGORY_INTERRUPT)); ++ is_enabled = ++ in_interrupt * !!(category_verbosity & LOG_CONTEXT_INTERRUPT) + ++ in_notification * ++ !!(category_verbosity & LOG_CONTEXT_NOTIFICATION) + ++ (!in_interrupt * !in_notification) * ++ !!(category_verbosity & LOG_CONTEXT_REGULAR); ++ ++ if (!is_enabled) { ++ return; ++ } ++ ++ ioctl_snapshot = active_ioctl; ++ driver_state_snapshot = GET_DRIVER_STATE(); ++ nb_active_interrupts_snapshot = ++ DRV_LOG_BUFFER_nb_active_interrupts(DRV_LOG()); ++ nb_active_notifications_snapshot = ++ DRV_LOG_BUFFER_nb_active_notifications(DRV_LOG()); ++ UTILITY_Read_TSC(&tsc_snapshot); ++ ++ va_start(args, format_string); ++ ++ for (i = 0; i < 2; i++) { ++ if (category_verbosity & (1 << i)) { ++ va_list args_copy; ++ ++ va_copy(args_copy, args); ++ utility_Log_Write( ++ i, category, secondary, function_name, ++ func_name_len, line_number, ++ tsc_snapshot, ioctl_snapshot, ++ processor_id_snapshot, ++ driver_state_snapshot, ++ nb_active_interrupts_snapshot, ++ nb_active_notifications_snapshot, ++ format_string, args_copy); ++ va_end(args_copy); ++ } ++ } ++ if (category_verbosity & 
LOG_CHANNEL_PRINTK || ++ category_verbosity & LOG_CHANNEL_TRACEK) { ++#define DRV_LOG_DEBUG_ARRAY_SIZE 512 ++ char tmp_array[DRV_LOG_DEBUG_ARRAY_SIZE]; ++ U32 nb_written_characters; ++ char *category_s, *sec1_s, *sec2_s, *sec3_s, *sec4_s; ++ va_list args_copy; ++ ++ utility_Driver_Log_Kprint_Helper(category, &category_s, ++ secondary, &sec1_s, ++ &sec2_s, &sec3_s, ++ &sec4_s); ++ ++ nb_written_characters = DRV_SNPRINTF( ++ tmp_array, DRV_LOG_DEBUG_ARRAY_SIZE - 1, ++ DRV_LOG_DEBUG_ARRAY_SIZE - 1, ++ SEP_MSG_PREFIX " [%s%s%s%s%s] [%s@%d]: ", ++ category_s, sec1_s, sec2_s, sec3_s, sec4_s, ++ function_name, line_number); ++ ++ if (nb_written_characters > 0) { ++ va_copy(args_copy, args); ++ nb_written_characters += DRV_VSNPRINTF( ++ tmp_array + nb_written_characters, ++ DRV_LOG_DEBUG_ARRAY_SIZE - ++ nb_written_characters - 1, ++ DRV_LOG_DEBUG_ARRAY_SIZE - ++ nb_written_characters - 1, ++ format_string, args_copy); ++ va_end(args_copy); ++#undef DRV_LOG_DEBUG_ARRAY_SIZE ++ ++ tmp_array[nb_written_characters++] = '\n'; ++ tmp_array[nb_written_characters++] = '\0'; ++ ++ if ((category_verbosity & LOG_CHANNEL_PRINTK) * ++ !in_interrupt * !in_notification) { ++ if (!in_atomic()) { ++ switch (category) { ++ case DRV_LOG_CATEGORY_ERROR: ++ pr_err("%s", tmp_array); ++ break; ++ case DRV_LOG_CATEGORY_WARNING: ++ pr_debug("%s", tmp_array); ++ break; ++ default: ++ pr_info("%s", tmp_array); ++ break; ++ } ++ } ++ } ++ ++ if (category_verbosity & LOG_CHANNEL_TRACEK) { ++ trace_printk("%s", tmp_array); ++ } ++ } ++ } ++ ++ va_end(args); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern DRV_STATUS UTILITY_Driver_Log_Init (void) ++ * ++ * @brief Allocates and initializes the driver log buffer. ++ * ++ * @param none ++ * ++ * @return OS_SUCCESS on success, OS_NO_MEM on error. ++ * ++ * Special Notes: ++ * Should be (successfully) run before any non-LOAD log calls. 
++ * Allocates memory without going through CONTROL_Allocate (to avoid ++ * complicating the instrumentation of CONTROL_* functions): calling ++ * UTILITY_Driver_Log_Free is necessary to free the log structure. ++ * Falls back to vmalloc when contiguous physical memory cannot be ++ * allocated. This does not impact runtime behavior, but may impact ++ * the easiness of retrieving the log from a core dump if the system ++ * crashes. ++ */ ++DRV_STATUS UTILITY_Driver_Log_Init(void) ++{ ++ struct timespec cur_time; ++ U32 size = sizeof(*driver_log_buffer); ++ U8 using_contiguous_physical_memory; ++ U32 bitness; ++ ++ if (size < MAX_KMALLOC_SIZE) { ++ // allocating outside regular func to restrict area of driver ++ driver_log_buffer = (PVOID)kmalloc( ++ size, ++ GFP_KERNEL); // where the log might not be initialized ++ } else { ++ driver_log_buffer = ++ (PVOID)__get_free_pages(GFP_KERNEL, get_order(size)); ++ } ++ ++ if (driver_log_buffer) { ++ using_contiguous_physical_memory = 1; ++ } else { ++ driver_log_buffer = vmalloc(size); ++ ++ if (!driver_log_buffer) { ++ return OS_NO_MEM; ++ } ++ ++ using_contiguous_physical_memory = 0; ++ } ++ ++ memset(driver_log_buffer, DRV_LOG_FILLER_BYTE, ++ sizeof(*driver_log_buffer)); ++ // we don't want zero-filled pages ++ // (so that the buffer's pages don't get omitted in some crash dumps) ++ ++ DRV_LOG_COMPILER_MEM_BARRIER(); ++ DRV_LOG_BUFFER_header_signature(driver_log_buffer)[0] = ++ DRV_LOG_SIGNATURE_0; ++ DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[0] = ++ DRV_LOG_SIGNATURE_6; ++ DRV_LOG_BUFFER_header_signature(driver_log_buffer)[3] = ++ DRV_LOG_SIGNATURE_3; ++ DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[3] = ++ DRV_LOG_SIGNATURE_3; ++ ++ DRV_LOG_COMPILER_MEM_BARRIER(); ++ DRV_LOG_BUFFER_header_signature(driver_log_buffer)[2] = ++ DRV_LOG_SIGNATURE_2; ++ DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[2] = ++ DRV_LOG_SIGNATURE_4; ++ DRV_LOG_BUFFER_header_signature(driver_log_buffer)[1] = ++ 
DRV_LOG_SIGNATURE_1; ++ DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[1] = ++ DRV_LOG_SIGNATURE_5; ++ ++ DRV_LOG_COMPILER_MEM_BARRIER(); ++ DRV_LOG_BUFFER_header_signature(driver_log_buffer)[7] = ++ DRV_LOG_SIGNATURE_7; ++ DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[7] = ++ DRV_LOG_SIGNATURE_7; ++ DRV_LOG_BUFFER_header_signature(driver_log_buffer)[5] = ++ DRV_LOG_SIGNATURE_5; ++ DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[5] = ++ DRV_LOG_SIGNATURE_1; ++ ++ DRV_LOG_COMPILER_MEM_BARRIER(); ++ DRV_LOG_BUFFER_header_signature(driver_log_buffer)[6] = ++ DRV_LOG_SIGNATURE_6; ++ DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[6] = ++ DRV_LOG_SIGNATURE_0; ++ DRV_LOG_BUFFER_header_signature(driver_log_buffer)[4] = ++ DRV_LOG_SIGNATURE_4; ++ DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[4] = ++ DRV_LOG_SIGNATURE_2; ++ ++ DRV_LOG_BUFFER_log_size(driver_log_buffer) = sizeof(*driver_log_buffer); ++ DRV_LOG_BUFFER_max_nb_pri_entries(driver_log_buffer) = ++ DRV_LOG_MAX_NB_PRI_ENTRIES; ++ DRV_LOG_BUFFER_max_nb_aux_entries(driver_log_buffer) = ++ DRV_LOG_MAX_NB_AUX_ENTRIES; ++ getnstimeofday(&cur_time); ++ DRV_LOG_BUFFER_init_time(driver_log_buffer) = cur_time.tv_sec; ++ DRV_LOG_BUFFER_disambiguator(driver_log_buffer) = 0; ++ DRV_LOG_BUFFER_log_version(driver_log_buffer) = DRV_LOG_VERSION; ++ DRV_LOG_BUFFER_pri_entry_index(driver_log_buffer) = (U32)((S32)-1); ++ DRV_LOG_BUFFER_aux_entry_index(driver_log_buffer) = (U32)((S32)-1); ++ ++#if defined(DRV_EM64T) ++ bitness = 64; ++#else ++ bitness = 32; ++#endif ++ ++ DRV_SNPRINTF(DRV_LOG_BUFFER_driver_version(driver_log_buffer), ++ DRV_LOG_DRIVER_VERSION_SIZE, DRV_LOG_DRIVER_VERSION_SIZE, ++ "[%u-bit Linux] SEP v%d.%d (update %d). 
API %d.", bitness, ++ SEP_MAJOR_VERSION, SEP_MINOR_VERSION, SEP_UPDATE_VERSION, ++ SEP_API_VERSION); ++ ++ DRV_LOG_BUFFER_driver_state(driver_log_buffer) = GET_DRIVER_STATE(); ++ DRV_LOG_BUFFER_active_drv_operation(driver_log_buffer) = active_ioctl; ++ DRV_LOG_BUFFER_nb_drv_operations(driver_log_buffer) = 0; ++ DRV_LOG_BUFFER_nb_interrupts(driver_log_buffer) = 0; ++ DRV_LOG_BUFFER_nb_active_interrupts(driver_log_buffer) = 0; ++ DRV_LOG_BUFFER_nb_notifications(driver_log_buffer) = 0; ++ DRV_LOG_BUFFER_nb_active_notifications(driver_log_buffer) = 0; ++ DRV_LOG_BUFFER_nb_driver_state_transitions(driver_log_buffer) = 0; ++ ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_LOAD) = ++ DRV_LOG_DEFAULT_LOAD_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_INIT) = ++ DRV_LOG_DEFAULT_INIT_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_DETECTION) = ++ DRV_LOG_DEFAULT_DETECTION_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_ERROR) = ++ DRV_LOG_DEFAULT_ERROR_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_STATE_CHANGE) = ++ DRV_LOG_DEFAULT_STATE_CHANGE_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_MARK) = ++ DRV_LOG_DEFAULT_MARK_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_DEBUG) = ++ DRV_LOG_DEFAULT_DEBUG_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_FLOW) = ++ DRV_LOG_DEFAULT_FLOW_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_ALLOC) = ++ DRV_LOG_DEFAULT_ALLOC_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_INTERRUPT) = ++ DRV_LOG_DEFAULT_INTERRUPT_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_TRACE) = ++ DRV_LOG_DEFAULT_TRACE_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_REGISTER) = ++ DRV_LOG_DEFAULT_REGISTER_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_NOTIFICATION) = ++ DRV_LOG_DEFAULT_NOTIFICATION_VERBOSITY; ++ DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_WARNING) = ++ DRV_LOG_DEFAULT_WARNING_VERBOSITY; ++ ++ DRV_LOG_BUFFER_contiguous_physical_memory(driver_log_buffer) = ++ using_contiguous_physical_memory; ++ ++ SEP_DRV_LOG_LOAD( ++ "Initialized driver log using 
%scontiguous physical memory.", ++ DRV_LOG_BUFFER_contiguous_physical_memory(driver_log_buffer) ? ++ "" : ++ "non-"); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern DRV_STATUS UTILITY_Driver_Log_Free (void) ++ * ++ * @brief Frees the driver log buffer. ++ * ++ * @param none ++ * ++ * @return OS_SUCCESS on success, OS_NO_MEM on error. ++ * ++ * Special Notes: ++ * Should be done before unloading the driver. ++ * See UTILITY_Driver_Log_Init for details. ++ */ ++void UTILITY_Driver_Log_Free(void) ++{ ++ U32 size = sizeof(*driver_log_buffer); ++ ++ if (driver_log_buffer) { ++ if (DRV_LOG_BUFFER_contiguous_physical_memory( ++ driver_log_buffer)) { ++ if (size < MAX_KMALLOC_SIZE) { ++ kfree(driver_log_buffer); ++ } else { ++ free_pages((unsigned long)driver_log_buffer, ++ get_order(size)); ++ } ++ } else { ++ vfree(driver_log_buffer); ++ } ++ ++ driver_log_buffer = NULL; ++ } ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern void UTILITY_Driver_Set_Active_Ioctl (U32 ioctl) ++ * ++ * @brief Sets the 'active_ioctl' global to the specified value. ++ * ++ * @param U32 ioctl - ioctl/drvop code to use ++ * ++ * @return none ++ * ++ * Special Notes: ++ * Used to keep track of the IOCTL operation currently being processed. ++ * This information is saved in the log buffer (globally), as well as ++ * in every log entry. ++ * NB: only IOCTLs for which grabbing the ioctl mutex is necessary ++ * should be kept track of this way. ++ */ ++void UTILITY_Driver_Set_Active_Ioctl(U32 ioctl) ++{ ++ active_ioctl = ioctl; ++ if (ioctl) { ++ DRV_LOG_BUFFER_nb_drv_operations(driver_log_buffer)++; ++ } ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern const char** UTILITY_Log_Category_Strings (void) ++ * ++ * @brief Accessor function for the log category string array ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * Special Notes: ++ * Only needed for cosmetic purposes when adjusting category verbosities. ++ */ ++const char **UTILITY_Log_Category_Strings(void) ++{ ++ return drv_log_categories; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern U32 UTILITY_Change_Driver_State (U32 allowed_prior_states, ++ * U32 state, const char* func, U32 line_number) ++ * ++ * @brief Updates the driver state (if the transition is legal). ++ * ++ * @param U32 allowed_prior_states - the bitmask representing the states ++ * from which the transition is allowed to occur ++ * U32 state - the destination state ++ * const char* func - the callsite's function's name ++ * U32 line_number - the callsite's line number ++ * ++ * @return 1 in case of success, 0 otherwise ++ * ++ * Special Notes: ++ * ++ */ ++U32 UTILITY_Change_Driver_State(U32 allowed_prior_states, U32 state, ++ const char *func, U32 line_number) ++{ ++ U32 res = 1; ++ U32 previous_state; ++ U32 current_state = GET_DRIVER_STATE(); ++ U32 nb_attempts = 0; ++ ++ SEP_DRV_LOG_TRACE_IN( ++ "Prior states: 0x%x, state: %u, func: %p, line: %u.", ++ allowed_prior_states, state, func, line_number); ++ ++ if (state >= DRV_LOG_NB_DRIVER_STATES) { ++ SEP_DRV_LOG_ERROR("Illegal destination state %d (%s@%u)!", ++ state, func, line_number); ++ res = 0; ++ goto clean_return; ++ } ++ ++ do { ++ previous_state = current_state; ++ nb_attempts++; ++ SEP_DRV_LOG_TRACE("Attempt #%d to transition to state %s.", ++ nb_attempts, drv_log_states[state]); ++ ++ if (DRIVER_STATE_IN(current_state, allowed_prior_states)) { ++ current_state = cmpxchg(&GET_DRIVER_STATE(), ++ previous_state, state); ++ } else { ++ SEP_DRV_LOG_ERROR( ++ "Invalid transition [%s -> %s] (%s@%u)!", ++ drv_log_states[previous_state], ++ 
drv_log_states[state], func, line_number); ++ res = 0; ++ goto clean_return; ++ } ++ ++ } while (previous_state != current_state); ++ ++ SEP_DRV_LOG_STATE_TRANSITION(previous_state, state, "From %s@%u.", func, ++ line_number); ++ ++clean_return: ++ SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); ++ return res; ++} +diff --git a/drivers/platform/x86/sepdk/sep/valleyview_sochap.c b/drivers/platform/x86/sepdk/sep/valleyview_sochap.c +new file mode 100755 +index 000000000000..7e1e5eb9c65f +--- /dev/null ++++ b/drivers/platform/x86/sepdk/sep/valleyview_sochap.c +@@ -0,0 +1,301 @@ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "inc/ecb_iterators.h" ++#include "inc/control.h" ++#include "inc/utility.h" ++#include "inc/valleyview_sochap.h" ++ ++static U64 *uncore_current_data; ++static U64 *uncore_to_read_data; ++extern DRV_CONFIG drv_cfg; ++ ++#if 0 ++extern U64 *read_counter_info; ++extern VOID SOCPERF_Read_Data3(PVOID data_buffer); ++#endif ++ ++/*! ++ * @fn static VOID valleyview_VISA_Initialize(PVOID) ++ * ++ * @brief Initialize any registers or addresses ++ * ++ * @param param ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID valleyview_VISA_Initialize(VOID *param) ++{ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ // Allocate memory for reading GMCH counter values + the group id ++ if (!uncore_current_data) { ++ uncore_current_data = CONTROL_Allocate_Memory( ++ (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); ++ if (!uncore_current_data) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Early exit (uncore_current_data is NULL!)."); ++ return; ++ } ++ } ++ if (!uncore_to_read_data) { ++ uncore_to_read_data = CONTROL_Allocate_Memory( ++ (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); ++ if (!uncore_to_read_data) { ++ SEP_DRV_LOG_ERROR_TRACE_OUT( ++ "Early exit (uncore_to_read_data is NULL!)."); ++ return; ++ } ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn static VOID valleyview_VISA_Enable_PMU(PVOID) ++ * ++ * @brief Start counting ++ * ++ * @param param - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID valleyview_VISA_Enable_PMU(PVOID param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ ++ if (!CPU_STATE_system_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); ++ return; ++ } ++ ++ SEP_DRV_LOG_TRACE("Starting the counters..."); ++ if (uncore_current_data) { ++ memset(uncore_current_data, 0, ++ (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); ++ } ++ if (uncore_to_read_data) { ++ memset(uncore_to_read_data, 0, ++ (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! ++ * @fn static VOID valleyview_VISA_Disable_PMU(PVOID) ++ * ++ * @brief Unmap the virtual address when sampling/driver stops ++ * ++ * @param param - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID valleyview_VISA_Disable_PMU(PVOID param) ++{ ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 cur_driver_state; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ cur_driver_state = GET_DRIVER_STATE(); ++ ++ if (!CPU_STATE_system_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); ++ return; ++ } ++ SEP_DRV_LOG_TRACE("Stopping the counters..."); ++ if (cur_driver_state == DRV_STATE_PREPARE_STOP || ++ cur_driver_state == DRV_STATE_STOPPED || ++ cur_driver_state == DRV_STATE_TERMINATING) { ++ uncore_current_data = CONTROL_Free_Memory(uncore_current_data); ++ uncore_to_read_data = CONTROL_Free_Memory(uncore_to_read_data); ++ } ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++} ++ ++/*! 
++ * @fn static VOID valleyview_VISA_Clean_Up(PVOID) ++ * ++ * @brief Reset any registers or addresses ++ * ++ * @param param ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID valleyview_VISA_Clean_Up(VOID *param) ++{ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ SEP_DRV_LOG_TRACE_OUT("Empty function."); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn valleyview_VISA_Read_PMU_Data(param) ++ * ++ * @param param The device index ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore count data and store into the buffer param; ++ * ++ */ ++static VOID valleyview_VISA_Read_PMU_Data(PVOID param) ++{ ++#if 0 ++ U32 j; ++ U64 *buffer = read_counter_info; ++ U32 dev_idx; ++ U32 this_cpu; ++ CPU_STATE pcpu; ++ U32 package_num; ++ U32 event_index = 0; ++ U32 cur_grp; ++ ECB pecb; ++ U64 counter_buffer[VLV_CHAP_MAX_COUNTERS + 1]; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p.", param); ++ ++ dev_idx = *((U32 *)param); ++ this_cpu = CONTROL_THIS_CPU(); ++ pcpu = &pcb[this_cpu]; ++ package_num = core_to_package_map[this_cpu]; ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp]; ++ ++ // NOTE THAT the read_pmu function on for EMON collection. 
++ if (!DRV_CONFIG_emon_mode(drv_cfg)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); ++ return; ++ } ++ if (!CPU_STATE_socket_master(pcpu)) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!socket_master)."); ++ return; ++ } ++ if (!pecb) { ++ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); ++ return; ++ } ++ ++ SOCPERF_Read_Data3((void*)counter_buffer); ++ ++ FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) ++ { ++ //the buffer index for this PMU needs to account for each event ++ j = ECB_entries_uncore_buffer_offset_in_system(pecb, idx); ++ buffer[j] = counter_buffer[event_index + 1]; ++ event_index++; ++ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], ++ this_cpu); ++ } ++ END_FOR_EACH_REG_UNC_OPERATION; ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++#endif ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn valleyview_Trigger_Read() ++ * ++ * @param None ++ * ++ * @return None No return needed ++ * ++ * @brief Read the SoCHAP counters when timer is triggered ++ * ++ */ ++static VOID valleyview_Trigger_Read(PVOID param, U32 id) ++{ ++#if 0 ++ U64 *data = (U64 *)param; ++ U32 cur_grp; ++ ECB pecb; ++ U32 this_cpu; ++ U32 package_num; ++ ++ SEP_DRV_LOG_TRACE_IN("Param: %p, , id: %u.", param, id); ++ ++ this_cpu = CONTROL_THIS_CPU(); ++ package_num = core_to_package_map[this_cpu]; ++ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; ++ pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp]; ++ ++ // group id ++ data = (U64 *)((S8 *)data + ECB_group_offset(pecb)); ++ SOCPERF_Read_Data3((void*)data); ++ ++ SEP_DRV_LOG_TRACE_OUT(""); ++#endif ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++DISPATCH_NODE valleyview_visa_dispatch = { ++ .init = valleyview_VISA_Initialize, ++ .fini = NULL, ++ .write = NULL, ++ .freeze = valleyview_VISA_Disable_PMU, ++ .restart = valleyview_VISA_Enable_PMU, ++ .read_data = valleyview_VISA_Read_PMU_Data, ++ .check_overflow = NULL, ++ .swap_group = 
NULL, ++ .read_lbrs = NULL, ++ .cleanup = valleyview_VISA_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, ++ .check_overflow_gp_errata = NULL, ++ .read_ro = NULL, ++ .platform_info = NULL, ++ .trigger_read = valleyview_Trigger_Read, ++ .scan_for_uncore = NULL, ++ .read_metrics = NULL ++}; +diff --git a/drivers/platform/x86/socwatch/Kconfig b/drivers/platform/x86/socwatch/Kconfig +new file mode 100644 +index 000000000000..87a7ae205f2d +--- /dev/null ++++ b/drivers/platform/x86/socwatch/Kconfig +@@ -0,0 +1,6 @@ ++menuconfig INTEL_SOCWATCH ++ depends on X86 ++ tristate "SocWatch Driver Support" ++ default m ++ help ++ Say Y here to enable SocWatch driver +diff --git a/drivers/platform/x86/socwatch/Makefile b/drivers/platform/x86/socwatch/Makefile +new file mode 100644 +index 000000000000..15ac18fcfdc0 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/Makefile +@@ -0,0 +1,22 @@ ++# ++# Makefile for the socwatch driver. ++# ++ ++DRIVER_BASE=socwatch ++DRIVER_MAJOR=2 ++DRIVER_MINOR=6 ++# basic name of driver ++DRIVER_NAME=${DRIVER_BASE}${DRIVER_MAJOR}_${DRIVER_MINOR} ++ ++DO_DRIVER_PROFILING=0 ++ ++ccflags-y += -Idrivers/platform/x86/socwatch/inc/ \ ++ -DDO_DRIVER_PROFILING=$(DO_DRIVER_PROFILING) ++ ++obj-$(CONFIG_INTEL_SOCWATCH) += $(DRIVER_NAME).o ++ ++$(DRIVER_NAME)-objs := sw_driver.o sw_hardware_io.o \ ++ sw_output_buffer.o sw_tracepoint_handlers.o \ ++ sw_mem.o sw_collector.o sw_telem.o \ ++ sw_file_ops.o sw_internal.o sw_ops_provider.o \ ++ sw_reader.o sw_trace_notifier_provider.o +diff --git a/drivers/platform/x86/socwatch/inc/sw_collector.h b/drivers/platform/x86/socwatch/inc/sw_collector.h +new file mode 100644 +index 000000000000..b771ab936b26 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_collector.h +@@ -0,0 +1,136 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. 
++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_COLLECTOR_H__ ++ ++#include "sw_internal.h" ++ ++/* ++ * Forward declaration ++ */ ++struct sw_hw_ops; ++ ++// TODO: convert from 'list_head' to 'hlist_head' ++/** ++ * struct - sw_collector_data ++ * Information about the collector to be invoked at collection time. ++ * ++ * The collector_lists array holds linked lists of collectors to ++ * be exercised at specific points in time during the collection ++ * (e.g. begin, poll, end, etc.). At a trigger time, the driver walks ++ * that time's list of nodes, and exercises the collectors on that list. ++ * ++ * @list: List/link implementation ++ * @cpumask: Collect if cpu matches mask ++ * @info: Ptr to metric info ++ * @ops: Ptr to collector's operations ++ * @last_update_jiffies: Indicates when this node was last exercised. 
++ * @per_msg_payload_size: Data size ++ * @msg: Ptr to collected data ++ */ ++typedef struct sw_collector_data { ++ SW_LIST_ENTRY(list, sw_collector_data); ++ struct cpumask cpumask; ++ struct sw_driver_interface_info *info; ++ const struct sw_hw_ops **ops; ++ size_t per_msg_payload_size; ++ u64 last_update_jiffies; ++ struct sw_driver_msg *msg; ++} sw_collector_data_t; ++#define GET_MSG_SLOT_FOR_CPU(msgs, cpu, size) \ ++ ((struct sw_driver_msg *)&( \ ++ ((char *)(msgs))[(cpu) * \ ++ (sizeof(struct sw_driver_msg) + (size))])) ++ ++struct sw_collector_data *sw_alloc_collector_node(void); ++void sw_free_collector_node(struct sw_collector_data *node); ++int sw_handle_collector_node(struct sw_collector_data *data); ++int sw_handle_collector_node_on_cpu(struct sw_collector_data *data, int cpu); ++int sw_write_collector_node(struct sw_collector_data *data); ++ ++void sw_init_collector_list(void *list_head); ++void sw_destroy_collector_list(void *list_head); ++int sw_handle_collector_list(void *list_head, ++ int (*func)(struct sw_collector_data *data)); ++int sw_handle_collector_list_on_cpu(void *list_head, ++ int (*func)(struct sw_collector_data *data, ++ int cpu), ++ int cpu); ++ ++int sw_handle_driver_io_descriptor( ++ char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ const struct sw_hw_ops *hw_ops); ++int sw_init_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor); ++int sw_reset_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor); ++ ++int sw_add_driver_info(void *list_head, ++ const struct sw_driver_interface_info *info); ++ ++void sw_handle_per_cpu_msg(void *info); ++void sw_handle_per_cpu_msg_no_sched(void *info); ++void sw_handle_per_cpu_msg_on_cpu(int cpu, void *info); ++ ++void sw_set_collector_ops(const struct sw_hw_ops *hw_ops); ++ ++/** ++ * Process all messages for the given time. ++ * @param[in] when The time period e.g. 
'BEGIN' or 'END' ++ * ++ * @returns 0 on success, non-zero on error ++ */ ++extern int sw_process_snapshot(enum sw_when_type when); ++extern int sw_process_snapshot_on_cpu(enum sw_when_type when, int cpu); ++#endif // __SW_COLLECTOR_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_defines.h b/drivers/platform/x86/socwatch/inc/sw_defines.h +new file mode 100644 +index 000000000000..15ccca1efed6 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_defines.h +@@ -0,0 +1,156 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. 
++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef _PW_DEFINES_H_ ++#define _PW_DEFINES_H_ 1 ++ ++#include "sw_version.h" ++ ++/* *************************************************** ++ * Common to kernel and userspace. ++ * *************************************************** ++ */ ++#define PW_SUCCESS 0 ++#define PW_ERROR 1 ++#define PW_SUCCESS_NO_COLLECT 2 ++ ++/* ++ * Helper macro to convert 'u64' to 'unsigned long long' to avoid gcc warnings. ++ */ ++#define TO_ULL(x) (unsigned long long)(x) ++/* ++* Convert an arg to 'long long' ++*/ ++#define TO_LL(x) (long long)(x) ++/* ++ * Convert an arg to 'unsigned long' ++ */ ++#define TO_UL(x) (unsigned long)(x) ++/* ++ * Helper macro for string representation of a boolean value. ++ */ ++#define GET_BOOL_STRING(b) ((b) ? "TRUE" : "FALSE") ++ ++/* ++ * Circularly increment 'i' MODULO 'l'. ++ * ONLY WORKS IF 'l' is (power of 2 - 1) ie. 
++ * l == (2 ^ x) - 1 ++ */ ++#define CIRCULAR_INC(index, mask) (((index) + 1) & (mask)) ++#define CIRCULAR_ADD(index, val, mask) (((index) + (val)) & (mask)) ++/* ++ * Circularly decrement 'i'. ++ */ ++#define CIRCULAR_DEC(i, m) \ ++ ({ \ ++ int __tmp1 = (i); \ ++ if (--__tmp1 < 0) \ ++ __tmp1 = (m); \ ++ __tmp1; \ ++ }) ++/* ++ * Retrieve size of an array. ++ */ ++#define SW_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) ++/* ++ * Should the driver count number of dropped samples? ++ */ ++#define DO_COUNT_DROPPED_SAMPLES 1 ++/* ++ * Extract F/W major, minor versions. ++ * Assumes version numbers are 8b unsigned ints. ++ */ ++#define SW_GET_SCU_FW_VERSION_MAJOR(ver) (((ver) >> 8) & 0xff) ++#define SW_GET_SCU_FW_VERSION_MINOR(ver) ((ver)&0xff) ++/* ++ * Max size of process name retrieved from kernel. ++ */ ++#define SW_MAX_PROC_NAME_SIZE 16 ++ ++/* ++ * Number of SOCPERF counters. ++ * Needed by both Ring-0 and Ring-3 ++ */ ++#define SW_NUM_SOCPERF_COUNTERS 9 ++ ++/* ++ * Max size of process name retrieved from kernel space. ++ */ ++#define SW_MAX_PROC_NAME_SIZE 16 ++/* ++ * Max size of kernel wakelock name. ++ */ ++#define SW_MAX_KERNEL_WAKELOCK_NAME_SIZE 100 ++ ++/* Data value read when a telemetry data read fails. 
*/ ++#define SW_TELEM_READ_FAIL_VALUE 0xF00DF00DF00DF00DUL ++ ++#ifdef SWW_MERGE ++typedef enum { ++ SW_STOP_EVENT = 0, ++ SW_CS_EXIT_EVENT, ++ SW_COUNTER_RESET_EVENT, ++ SW_COUNTER_HOTKEY_EVENT, ++ SW_MAX_COLLECTION_EVENT ++} collector_stop_event_t; ++#endif // SWW_MERGE ++ ++#define MAX_UNSIGNED_16_BIT_VALUE 0xFFFF ++#define MAX_UNSIGNED_24_BIT_VALUE 0xFFFFFF ++#define MAX_UNSIGNED_32_BIT_VALUE 0xFFFFFFFF ++#define MAX_UNSIGNED_64_BIT_VALUE 0xFFFFFFFFFFFFFFFF ++ ++#endif // _PW_DEFINES_H_ +diff --git a/drivers/platform/x86/socwatch/inc/sw_file_ops.h b/drivers/platform/x86/socwatch/inc/sw_file_ops.h +new file mode 100644 +index 000000000000..7c5705cf942c +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_file_ops.h +@@ -0,0 +1,70 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. 
++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_FILE_OPS_H__ ++#define __SW_FILE_OPS_H__ ++ ++enum sw_driver_collection_cmd; ++struct sw_file_ops { ++ long (*ioctl_handler)(unsigned int ioctl_num, void *local_args); ++ int (*stop_handler)(void); ++ enum sw_driver_collection_cmd (*get_current_cmd)(void); ++ bool (*should_flush)(void); ++}; ++ ++int sw_register_dev(struct sw_file_ops *ops); ++void sw_unregister_dev(void); ++ ++#endif // __SW_FILE_OPS_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_hardware_io.h b/drivers/platform/x86/socwatch/inc/sw_hardware_io.h +new file mode 100644 +index 000000000000..f93fa6b10d7a +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_hardware_io.h +@@ -0,0 +1,118 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. 
When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_HARDWARE_IO_H__ ++#define __SW_HARDWARE_IO_H__ ++ ++#include "sw_structs.h" ++ ++typedef int (*sw_io_desc_init_func_t)( ++ struct sw_driver_io_descriptor *descriptor); ++typedef void (*sw_hardware_op_func_t)( ++ char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++typedef int (*sw_io_desc_print_func_t)( ++ const struct sw_driver_io_descriptor *descriptor); ++typedef int (*sw_io_desc_reset_func_t)( ++ const struct sw_driver_io_descriptor *descriptor); ++typedef bool (*sw_io_desc_available_func_t)(void); ++typedef bool (*sw_hw_op_post_config_func_t)(void); ++ ++/** ++ * struct sw_hw_ops - Operations for each of the HW collection mechanisms ++ * in swkernelcollector. ++ * @name: A descriptive name used to identify this particular operation. ++ * @init: Initialize a metric's collection. ++ * @read: Read a metric's data. ++ * @write: Write to the HW for the metric(?). ++ * @print: Print out the data. ++ * @reset: Opposite of init--called after we're done collecting. ++ * @available: Decide whether this H/W op is available on the current platform. ++ * @post_config: Perform any post-configuration steps. 
++ */ ++struct sw_hw_ops { ++ const char *name; ++ sw_io_desc_init_func_t init; ++ sw_hardware_op_func_t read; ++ sw_hardware_op_func_t write; ++ sw_io_desc_print_func_t print; ++ sw_io_desc_reset_func_t reset; ++ sw_io_desc_available_func_t available; ++ sw_hw_op_post_config_func_t post_config; ++}; ++ ++bool sw_is_valid_hw_op_id(int id); ++int sw_get_hw_op_id(const struct sw_hw_ops *op); ++const struct sw_hw_ops *sw_get_hw_ops_for(int id); ++const char *sw_get_hw_op_abstract_name(const struct sw_hw_ops *op); ++ ++int sw_for_each_hw_op(int (*func)(const struct sw_hw_ops *op, void *priv), ++ void *priv, bool return_on_error); ++ ++/** ++ * Add an operation to the list of providers. ++ */ ++int sw_register_hw_op(const struct sw_hw_ops *ops); ++/** ++ * Register all H/W operations. ++ */ ++int sw_register_hw_ops(void); ++/** ++ * Unregister previously registered H/W operations. ++ */ ++void sw_free_hw_ops(void); ++ ++#endif // __SW_HARDWARE_IO_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_internal.h b/drivers/platform/x86/socwatch/inc/sw_internal.h +new file mode 100644 +index 000000000000..8e88d5d5ea54 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_internal.h +@@ -0,0 +1,138 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. 
++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_DATA_STRUCTS_H__ ++#define __SW_DATA_STRUCTS_H__ ++ ++/* ++ * Taken from 'sw_driver' ++ * TODO: move to separate file? 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include // inode ++#include // class_create ++#include // cdev_alloc ++#include // vmalloc ++#include // TASK_INTERRUPTIBLE ++#include // wait_event_interruptible ++#include // pci_get_bus_and_slot ++#include // LINUX_VERSION_CODE ++#include // For SFI F/W version ++#include ++#include ++#include // local_t ++#include // "in_atomic" ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) ++#include // copy_to_user ++#else ++#include // copy_to_user ++#endif // LINUX_VERSION_CODE ++ ++#ifdef CONFIG_X86_WANT_INTEL_MID ++#include ++#endif // CONFIG_X86_WANT_INTEL_MID ++/* ++ * End taken from sw_driver ++ */ ++ ++#include "sw_structs.h" ++#include "sw_ioctl.h" ++#include "sw_list.h" ++ ++/* ****************************************** ++ * Compile time constants ++ * ****************************************** ++ */ ++#define GET_POLLED_CPU() (sw_max_num_cpus) ++ ++/* ****************************************** ++ * Function declarations. ++ * ****************************************** ++ */ ++/* ++ * Output to user. ++ */ ++unsigned long sw_copy_to_user(char __user *dst, ++ char *src, size_t bytes_to_copy); ++bool sw_check_output_buffer_params(void __user *buffer, size_t bytes_to_read, ++ size_t buff_size); ++/* ++ * smp call function. ++ */ ++void sw_schedule_work(const struct cpumask *mask, void (*work)(void *), ++ void *data); ++/* ++ * Save IRQ flags and retrieve cpu number. ++ */ ++int sw_get_cpu(unsigned long *flags); ++/* ++ * Restore IRQ flags. ++ */ ++void sw_put_cpu(unsigned long flags); ++/* ++ * Set module scope for cpu frequencies. ++ */ ++int sw_set_module_scope_for_cpus(void); ++/* ++ * reset module scope for cpu frequencies. 
++ */ ++int sw_reset_module_scope_for_cpus(void); ++ ++#endif // __SW_DATA_STRUCTS_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_ioctl.h b/drivers/platform/x86/socwatch/inc/sw_ioctl.h +new file mode 100644 +index 000000000000..baf93058c5c5 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_ioctl.h +@@ -0,0 +1,303 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. 
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_IOCTL_H__ ++#define __SW_IOCTL_H__ 1 ++ ++#if defined(__linux__) || defined(__QNX__) ++#if __KERNEL__ ++#include <linux/ioctl.h> ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++#include <linux/compat.h> ++#include <asm/compat.h> ++#endif // COMPAT && x64 ++#else // !__KERNEL__ ++#include <sys/ioctl.h> ++#endif // __KERNEL__ ++#endif // __linux__ ++/* ++ * Ensure we pull in definition of 'DO_COUNT_DROPPED_SAMPLES'! ++ */ ++#include "sw_defines.h" ++ ++#ifdef ONECORE ++#ifndef __KERNEL__ ++#include <winioctl.h> ++#endif //__KERNEL__ ++#endif // ONECORE ++ ++/* ++ * The APWR-specific IOCTL magic ++ * number -- used to ensure IOCTLs ++ * are delivered to the correct ++ * driver. 
++ */ ++// #define APWR_IOCTL_MAGIC_NUM 0xdead ++#define APWR_IOCTL_MAGIC_NUM 100 ++ ++/* ++ * The name of the device file ++ */ ++// #define DEVICE_FILE_NAME "/dev/pw_driver_char_dev" ++#define PW_DEVICE_FILE_NAME "/dev/apwr_driver_char_dev" ++#define PW_DEVICE_NAME "apwr_driver_char_dev" ++ ++enum sw_ioctl_cmd { ++ sw_ioctl_cmd_none = 0, ++ sw_ioctl_cmd_config, ++ sw_ioctl_cmd_cmd, ++ sw_ioctl_cmd_poll, ++ sw_ioctl_cmd_immediate_io, ++ sw_ioctl_cmd_scu_version, ++ sw_ioctl_cmd_read_immediate, ++ sw_ioctl_cmd_driver_version, ++ sw_ioctl_cmd_avail_trace, ++ sw_ioctl_cmd_avail_notify, ++ sw_ioctl_cmd_avail_collect, ++ sw_ioctl_cmd_topology_changes, ++}; ++/* ++ * The actual IOCTL commands. ++ * ++ * From the kernel documentation: ++ * "_IOR" ==> Read IOCTL ++ * "_IOW" ==> Write IOCTL ++ * "_IOWR" ==> Read/Write IOCTL ++ * ++ * Where "Read" and "Write" are from the user's perspective ++ * (similar to the file "read" and "write" calls). ++ */ ++#ifdef SWW_MERGE // Windows ++// ++// Device type -- in the "User Defined" range." ++// ++#define POWER_I_CONF_TYPE 40000 ++ ++// List assigned tracepoint id ++#define CSIR_TRACEPOINT_ID_MASK 1 ++#define DEVICE_STATE_TRACEPOINT_ID_MASK 2 ++#define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 ++#define RESET_TRACEPOINT_ID_MASK 4 ++#define DISPLAY_ON_TRACEPOINT_ID_MASK 5 ++ ++#ifdef SWW_MERGE ++// ++// TELEM BAR CONFIG ++// ++#define MAX_TELEM_BAR_CFG 3 ++#define TELEM_MCHBAR_CFG 0 ++#define TELEM_IPC1BAR_CFG 1 ++#define TELEM_SSRAMBAR_CFG 2 ++#endif ++ ++// ++// The IOCTL function codes from 0x800 to 0xFFF are for customer use. 
++// ++#define PW_IOCTL_CONFIG \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x900, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_START_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x901, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_STOP_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x902, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ ++// TODO: pause, resume, cancel not supported yet ++#define PW_IOCTL_PAUSE_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_RESUME_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x904, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_CANCEL_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x905, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ ++#define PW_IOCTL_GET_PROCESSOR_GROUP_TOPOLOGY \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x906, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_TOPOLOGY \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x907, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x908, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_IMMEDIATE_IO \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x909, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_DRV_CLEANUP \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90A, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_SET_COLLECTION_EVENT \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90B, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_TRY_STOP_EVENT \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90C, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_SET_PCH_ACTIVE_INTERVAL \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90D, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_SET_TELEM_BAR \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90E, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_METADATA \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90F, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_SET_GBE_INTERVAL \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x910, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_ENABLE_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 
0x911, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_DISABLE_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x912, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_DRIVER_BUILD_DATE \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x913, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ ++#elif !defined(__APPLE__) ++#define PW_IOCTL_CONFIG \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ ++ struct sw_driver_ioctl_arg *) ++#if DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_CMD \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ ++ struct sw_driver_ioctl_arg *) ++#else ++#define PW_IOCTL_CMD \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ ++ struct sw_driver_ioctl_arg *) ++#endif // DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++#define PW_IOCTL_IMMEDIATE_IO \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_SCU_FW_VERSION \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_READ_IMMEDIATE \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_DRIVER_VERSION \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ ++ struct sw_driver_ioctl_arg *) ++#else // __APPLE__ ++#define PW_IOCTL_CONFIG \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ ++ struct sw_driver_ioctl_arg) ++#if 
DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_CMD \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ ++ struct sw_driver_ioctl_arg) ++#else ++#define PW_IOCTL_CMD \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) ++#endif // DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++#define PW_IOCTL_IMMEDIATE_IO \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_SCU_FW_VERSION \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_READ_IMMEDIATE \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_DRIVER_VERSION \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ ++ struct sw_driver_ioctl_arg) ++#endif // __APPLE__ ++ ++/* ++ * 32b-compatible version of the above ++ * IOCTL numbers. Required ONLY for ++ * 32b compatibility on 64b systems, ++ * and ONLY by the driver. 
++ */ ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++#define PW_IOCTL_CONFIG32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, compat_uptr_t) ++#if DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_CMD32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) ++#else ++#define PW_IOCTL_CMD32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) ++#endif // DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_POLL32 _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++#define PW_IOCTL_IMMEDIATE_IO32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) ++#define PW_IOCTL_GET_SCU_FW_VERSION32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, compat_uptr_t) ++#define PW_IOCTL_READ_IMMEDIATE32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, compat_uptr_t) ++#define PW_IOCTL_GET_DRIVER_VERSION32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, compat_uptr_t) ++#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, compat_uptr_t) ++#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, compat_uptr_t) ++#define PW_IOCTL_GET_AVAILABLE_COLLECTORS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) ++#define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) ++#endif // defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++#endif // __SW_IOCTL_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h b/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h +new file mode 100644 +index 000000000000..275b1bdfc25e +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h +@@ -0,0 +1,164 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef _SW_KERNEL_DEFINES_H_ ++#define _SW_KERNEL_DEFINES_H_ 1 ++ ++#include "sw_defines.h" ++ ++#if defined(__APPLE__) ++#define likely(x) (x) ++#define unlikely(x) (x) ++#endif // __APPLE__ ++ ++#if !defined(__APPLE__) ++#define CPU() (raw_smp_processor_id()) ++#define RAW_CPU() (raw_smp_processor_id()) ++#else ++#define CPU() (cpu_number()) ++#define RAW_CPU() (cpu_number()) ++#endif // __APPLE__ ++ ++#define TID() (current->pid) ++#define PID() (current->tgid) ++#define NAME() (current->comm) ++#define PKG(c) (cpu_data(c).phys_proc_id) ++#define IT_REAL_INCR() (current->signal->it_real_incr.tv64) ++ ++#define ATOMIC_CAS(ptr, old_val, new_val) \ ++ (cmpxchg((ptr), (old_val), (new_val)) == (old_val)) ++ ++/* ++ * Should we measure overheads? ++ * '1' ==> YES ++ * '0' ==> NO ++ */ ++#define DO_OVERHEAD_MEASUREMENTS 0 ++/* ++ * Should we track memory usage? ++ * '1' ==> YES ++ * '0' ==> NO ++ */ ++#define DO_TRACK_MEMORY_USAGE 0 ++/* ++ * Are we compiling with driver profiling support ++ * turned ON? If YES then force 'DO_OVERHEAD_MEASUREMENTS' ++ * and 'DO_TRACK_MEMORY_USAGE' to be TRUE. 
++ */ ++#if DO_DRIVER_PROFILING ++#if !DO_OVERHEAD_MEASUREMENTS ++#undef DO_OVERHEAD_MEASUREMENTS ++#define DO_OVERHEAD_MEASUREMENTS 1 ++#endif // DO_OVERHEAD_MEASUREMENTS ++#if !DO_TRACK_MEMORY_USAGE ++#undef DO_TRACK_MEMORY_USAGE ++#define DO_TRACK_MEMORY_USAGE 1 ++#endif // DO_TRACK_MEMORY_USAGE ++#endif // DO_DRIVER_PROFILING ++/* ++ * Should we allow debug output. ++ * Set to: "1" ==> 'OUTPUT' is enabled. ++ * "0" ==> 'OUTPUT' is disabled. ++ */ ++#define DO_DEBUG_OUTPUT 0 ++/* ++ * Control whether to output driver ERROR messages. ++ * These are independent of the 'OUTPUT' macro ++ * (which controls debug messages). ++ * Set to '1' ==> Print driver error messages (to '/var/log/messages') ++ * '0' ==> Do NOT print driver error messages ++ */ ++#define DO_PRINT_DRIVER_ERROR_MESSAGES 1 ++/* ++ * Macros to control output printing. ++ */ ++#if !defined(__APPLE__) ++#if DO_DEBUG_OUTPUT ++#define pw_pr_debug(...) printk(KERN_INFO __VA_ARGS__) ++#define pw_pr_warn(...) printk(KERN_WARNING __VA_ARGS__) ++#else ++#define pw_pr_debug(...) ++#define pw_pr_warn(...) ++#endif ++#define pw_pr_force(...) printk(KERN_INFO __VA_ARGS__) ++#else ++#if DO_DEBUG_OUTPUT ++#define pw_pr_debug(...) IOLog(__VA_ARGS__) ++#define pw_pr_warn(...) IOLog(__VA_ARGS__) ++#else ++#define pw_pr_debug(...) ++#define pw_pr_warn(...) ++#endif ++#define pw_pr_force(...) IOLog(__VA_ARGS__) ++#endif // __APPLE__ ++ ++/* ++ * Macro for driver error messages. ++ */ ++#if !defined(__APPLE__) ++#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) ++#define pw_pr_error(...) printk(KERN_ERR __VA_ARGS__) ++#else ++#define pw_pr_error(...) ++#endif ++#else ++#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) ++#define pw_pr_error(...) IOLog(__VA_ARGS__) ++#else ++#define pw_pr_error(...) 
++#endif ++#endif // __APPLE__ ++ ++#endif // _SW_KERNEL_DEFINES_H_ +diff --git a/drivers/platform/x86/socwatch/inc/sw_list.h b/drivers/platform/x86/socwatch/inc/sw_list.h +new file mode 100644 +index 000000000000..ecc646a99caa +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_list.h +@@ -0,0 +1,76 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. 
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_LIST_H__ ++#define __SW_LIST_H__ ++ ++#include <linux/list.h> ++ ++#define SW_DEFINE_LIST_HEAD(name, dummy) struct list_head name ++#define SW_DECLARE_LIST_HEAD(name, dummy) extern struct list_head name ++#define SW_LIST_ENTRY(name, dummy) struct list_head name ++#define SW_LIST_HEAD_VAR(dummy) struct list_head ++#define SW_LIST_HEAD_INIT(head) INIT_LIST_HEAD(head) ++#define SW_LIST_ENTRY_INIT(node, field) INIT_LIST_HEAD(&node->field) ++#define SW_LIST_ADD(head, node, field) list_add_tail(&node->field, head) ++#define SW_LIST_GET_HEAD_ENTRY(head, type, field) \ ++ list_first_entry(head, struct type, field) ++#define SW_LIST_UNLINK(node, field) list_del(&node->field) ++#define SW_LIST_FOR_EACH_ENTRY(node, head, field) \ ++ list_for_each_entry(node, head, field) ++#define SW_LIST_EMPTY(head) list_empty(head) ++#define SW_LIST_HEAD_INITIALIZER(head) LIST_HEAD_INIT(head) ++ ++#endif // __SW_LIST_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_lock_defs.h b/drivers/platform/x86/socwatch/inc/sw_lock_defs.h +new file mode 100644 +index 000000000000..7c9d68c02f58 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_lock_defs.h +@@ -0,0 +1,98 @@ ++/* ++ ++ This file is provided 
under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++/* ++ * Description: file containing locking routines ++ * used by the power driver. ++ */ ++ ++#ifndef __SW_LOCK_DEFS_H__ ++#define __SW_LOCK_DEFS_H__ ++ ++#define SW_DEFINE_SPINLOCK(s) DEFINE_SPINLOCK(s) ++#define SW_DECLARE_SPINLOCK(s) static spinlock_t s ++ ++#define SW_INIT_SPINLOCK(s) spin_lock_init(&s) ++#define SW_DESTROY_SPINLOCK(s) /* NOP */ ++ ++#define LOCK(l) \ ++ { \ ++ unsigned long _tmp_l_flags; \ ++ spin_lock_irqsave(&(l), _tmp_l_flags); ++ ++#define UNLOCK(l) \ ++ spin_unlock_irqrestore(&(l), _tmp_l_flags); \ ++ } ++ ++#define READ_LOCK(l) \ ++ { \ ++ unsigned long _tmp_l_flags; \ ++ read_lock_irqsave(&(l), _tmp_l_flags); ++ ++#define READ_UNLOCK(l) \ ++ read_unlock_irqrestore(&(l), _tmp_l_flags); \ ++ } ++ ++#define WRITE_LOCK(l) \ ++ { \ ++ unsigned long _tmp_l_flags; \ ++ write_lock_irqsave(&(l), _tmp_l_flags); ++ ++#define WRITE_UNLOCK(l) \ ++ write_unlock_irqrestore(&(l), _tmp_l_flags); \ ++ } ++ ++#endif // __SW_LOCK_DEFS_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_mem.h b/drivers/platform/x86/socwatch/inc/sw_mem.h +new file mode 100644 +index 000000000000..600b8881262c +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_mem.h +@@ -0,0 +1,82 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++/* ++ * Description: file containing memory management routines ++ * used by the power driver. ++ */ ++ ++#ifndef _SW_MEM_H_ ++#define _SW_MEM_H_ 1 ++ ++#include "sw_types.h" ++ ++void *sw_kmalloc(size_t size, gfp_t flags); ++void sw_kfree(const void *obj); ++/* ++ * Allocate free pages. ++ */ ++unsigned long sw_allocate_pages(gfp_t flags, ++ unsigned int alloc_size_in_bytes); ++/* ++ * Free up previously allocated pages. ++ */ ++void sw_release_pages(unsigned long addr, unsigned int alloc_size_in_bytes); ++ ++u64 sw_get_total_bytes_alloced(void); ++u64 sw_get_max_bytes_alloced(void); ++u64 sw_get_curr_bytes_alloced(void); ++#endif // _SW_MEM_H_ +diff --git a/drivers/platform/x86/socwatch/inc/sw_ops_provider.h b/drivers/platform/x86/socwatch/inc/sw_ops_provider.h +new file mode 100644 +index 000000000000..43bd73fd3445 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_ops_provider.h +@@ -0,0 +1,62 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. 
++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++*/ ++#ifndef __SW_OPS_PROVIDER_H__ ++#define __SW_OPS_PROVIDER_H__ ++ ++int sw_register_ops_providers(void); ++void sw_free_ops_providers(void); ++ ++#endif // __SW_OPS_PROVIDER_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_output_buffer.h b/drivers/platform/x86/socwatch/inc/sw_output_buffer.h +new file mode 100644 +index 000000000000..17e59445ce85 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_output_buffer.h +@@ -0,0 +1,136 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. 
++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef _SW_OUTPUT_BUFFER_H_ ++#define _SW_OUTPUT_BUFFER_H_ 1 ++/* ++ * Special mask for the case where all buffers have been flushed. ++ */ ++// #define sw_ALL_WRITES_DONE_MASK 0xffffffff ++#define SW_ALL_WRITES_DONE_MASK ((u32)-1) ++/* ++ * Special mask for the case where no data is available to be read. ++ */ ++#define SW_NO_DATA_AVAIL_MASK ((u32)-2) ++ ++/* ++ * Forward declarations. ++ */ ++struct sw_driver_msg; ++ ++/* ++ * Data structures. ++ */ ++enum sw_wakeup_action { ++ SW_WAKEUP_ACTION_DIRECT, ++ SW_WAKEUP_ACTION_TIMER, ++ SW_WAKEUP_ACTION_NONE, ++}; ++ ++/* ++ * Variable declarations. ++ */ ++extern u64 sw_num_samples_produced, sw_num_samples_dropped; ++extern unsigned long sw_buffer_alloc_size; ++extern int sw_max_num_cpus; ++extern wait_queue_head_t sw_reader_queue; ++ ++/* ++ * Public API. 
++ */ ++int sw_init_per_cpu_buffers(void); ++void sw_destroy_per_cpu_buffers(void); ++void sw_reset_per_cpu_buffers(void); ++ ++void sw_count_samples_produced_dropped(void); ++ ++int sw_produce_polled_msg(struct sw_driver_msg *, enum sw_wakeup_action); ++int sw_produce_generic_msg(struct sw_driver_msg *, enum sw_wakeup_action); ++ ++bool sw_any_seg_full(u32 *val, bool is_flush_mode); ++size_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read); ++ ++unsigned int sw_get_output_buffer_size(void); ++ ++void sw_wait_once(void); ++void sw_wakeup(void); ++ ++void sw_print_output_buffer_overheads(void); ++ ++/* ++ * Init reader queue. ++ */ ++int sw_init_reader_queue(void); ++/* ++ * Destroy reader queue. ++ */ ++void sw_destroy_reader_queue(void); ++/* ++ * Wakeup client waiting for a full buffer. ++ */ ++void sw_wakeup_reader(enum sw_wakeup_action); ++/* ++ * Wakeup client waiting for a full buffer, and ++ * cancel any timers initialized by the reader ++ * subsys. ++ */ ++void sw_cancel_reader(void); ++/* ++ * Print some stats about the reader subsys. ++ */ ++void sw_print_reader_stats(void); ++ ++#endif // _SW_OUTPUT_BUFFER_H_ +diff --git a/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h b/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h +new file mode 100644 +index 000000000000..7d9dc683119b +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h +@@ -0,0 +1,189 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. 
++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++*/ ++ ++/* ++ * Description: file containing overhead measurement ++ * routines used by the power driver. ++ */ ++ ++#ifndef _PW_OVERHEAD_MEASUREMENTS_H_ ++#define _PW_OVERHEAD_MEASUREMENTS_H_ ++ ++/* ++ * Helper macro to declare variables required ++ * for conducting overhead measurements. ++ */ ++/* ++ * For each function that you want to profile, ++ * do the following (e.g. function 'foo'): ++ * ************************************************** ++ * DECLARE_OVERHEAD_VARS(foo); ++ * ************************************************** ++ * This will declare the two variables required ++ * to keep track of overheads incurred in ++ * calling/servicing 'foo'. Note that the name ++ * that you declare here *MUST* match the function name! ++ */ ++ ++#if DO_OVERHEAD_MEASUREMENTS ++ ++#ifndef __get_cpu_var ++/* ++ * Kernels >= 3.19 don't include a definition ++ * of '__get_cpu_var'. Create one now. ++ */ ++#define __get_cpu_var(var) (*this_cpu_ptr(&var)) ++#endif // __get_cpu_var ++#ifndef __raw_get_cpu_var ++/* ++ * Kernels >= 3.19 don't include a definition ++ * of '__raw_get_cpu_var'. Create one now. 
++ */ ++#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&var)) ++#endif // __get_cpu_var ++ ++extern u64 sw_timestamp(void); ++ ++#define DECLARE_OVERHEAD_VARS(name) \ ++ static DEFINE_PER_CPU(u64, name##_elapsed_time); \ ++ static DEFINE_PER_CPU(local_t, name##_num_iters) = LOCAL_INIT(0); \ ++ \ ++ static inline u64 get_my_cumulative_elapsed_time_##name(void) \ ++ { \ ++ return *(&__get_cpu_var(name##_elapsed_time)); \ ++ } \ ++ static inline int get_my_cumulative_num_iters_##name(void) \ ++ { \ ++ return local_read(&__get_cpu_var(name##_num_iters)); \ ++ } \ ++ \ ++ static inline u64 name##_get_cumulative_elapsed_time_for(int cpu) \ ++ { \ ++ return *(&per_cpu(name##_elapsed_time, cpu)); \ ++ } \ ++ \ ++ static inline int name##_get_cumulative_num_iters_for(int cpu) \ ++ { \ ++ return local_read(&per_cpu(name##_num_iters, cpu)); \ ++ } \ ++ \ ++ static inline void name##_get_cumulative_overhead_params(u64 *time, \ ++ int *iters) \ ++ { \ ++ int cpu = 0; \ ++ *time = 0; \ ++ *iters = 0; \ ++ for_each_online_cpu(cpu) { \ ++ *iters += name##_get_cumulative_num_iters_for(cpu); \ ++ *time += name##_get_cumulative_elapsed_time_for(cpu); \ ++ } \ ++ return; \ ++ } \ ++ \ ++ static inline void name##_print_cumulative_overhead_params( \ ++ const char *str) \ ++ { \ ++ int num = 0; \ ++ u64 time = 0; \ ++ name##_get_cumulative_overhead_params(&time, &num); \ ++ printk(KERN_INFO "%s: %d iters took %llu nano seconds!\n", \ ++ str, num, time); \ ++ } ++ ++#define DO_PER_CPU_OVERHEAD_FUNC(func, ...) \ ++ do { \ ++ u64 *__v = &__raw_get_cpu_var(func##_elapsed_time); \ ++ u64 tmp_1 = 0, tmp_2 = 0; \ ++ local_inc(&__raw_get_cpu_var(func##_num_iters)); \ ++ tmp_1 = sw_timestamp(); \ ++ { \ ++ func(__VA_ARGS__); \ ++ } \ ++ tmp_2 = sw_timestamp(); \ ++ *(__v) += (tmp_2 - tmp_1); \ ++ } while (0) ++ ++#define DO_PER_CPU_OVERHEAD_FUNC_RET(type, func, ...) 
\ ++ ({ \ ++ type __ret; \ ++ u64 *__v = &__raw_get_cpu_var(func##_elapsed_time); \ ++ u64 tmp_1 = 0, tmp_2 = 0; \ ++ local_inc(&__raw_get_cpu_var(func##_num_iters)); \ ++ tmp_1 = sw_timestamp(); \ ++ { \ ++ __ret = func(__VA_ARGS__); \ ++ } \ ++ tmp_2 = sw_timestamp(); \ ++ *(__v) += (tmp_2 - tmp_1); \ ++ __ret; \ ++ }) ++ ++#else // !DO_OVERHEAD_MEASUREMENTS ++#define DECLARE_OVERHEAD_VARS(name) \ ++ static inline void name##_print_cumulative_overhead_params( \ ++ const char *str) \ ++ { /* NOP */ \ ++ } ++ ++#define DO_PER_CPU_OVERHEAD_FUNC(func, ...) func(__VA_ARGS__) ++#define DO_PER_CPU_OVERHEAD_FUNC_RET(type, func, ...) func(__VA_ARGS__) ++ ++#endif // DO_OVERHEAD_MEASUREMENTS ++ ++#define PRINT_CUMULATIVE_OVERHEAD_PARAMS(name, str) \ ++ name##_print_cumulative_overhead_params(str) ++ ++#endif // _PW_OVERHEAD_MEASUREMENTS_H_ +diff --git a/drivers/platform/x86/socwatch/inc/sw_structs.h b/drivers/platform/x86/socwatch/inc/sw_structs.h +new file mode 100644 +index 000000000000..7f53a9e2984c +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_structs.h +@@ -0,0 +1,500 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_STRUCTS_H__ ++#define __SW_STRUCTS_H__ 1 ++ ++#include "sw_types.h" ++ ++/* ++ * An enumeration of MSR types. ++ * Required if we want to differentiate ++ * between different types of MSRs. ++ */ ++enum sw_msr_type { ++ SW_MSR_TYPE_THREAD, ++ SW_MSR_TYPE_CORE, ++ SW_MSR_TYPE_MODULE, ++ SW_MSR_TYPE_PACKAGE, ++ SW_MSR_TYPE_SOC, ++ SW_MSR_TYPE_MAX, ++}; ++ ++/* ++ * Convenience for a 'string' data type. ++ * Not strictly required. 
++ */ ++#pragma pack(push, 1) ++typedef struct sw_string_type { ++ pw_u16_t len; ++ char data[1]; ++} sw_string_type_t; ++#pragma pack(pop) ++#define SW_STRING_TYPE_HEADER_SIZE() \ ++ (sizeof(struct sw_string_type) - sizeof(char[1])) ++ ++#pragma pack(push, 1) ++struct sw_key_value_payload { ++ pw_u16_t m_numKeyValuePairs; ++ char data[1]; ++}; ++#pragma pack(pop) ++#define SW_KEY_VALUE_PAYLOAD_HEADER_SIZE() \ ++ (sizeof(struct sw_key_value_payload) - sizeof(char[1])) ++ ++typedef enum sw_kernel_wakelock_type { ++ SW_WAKE_LOCK = 0, // A kernel wakelock was acquired ++ SW_WAKE_UNLOCK = 1, // A kernel wakelock was released ++ SW_WAKE_LOCK_TIMEOUT = ++ 2, // A kernel wakelock was acquired with a timeout ++ SW_WAKE_LOCK_INITIAL = 3, // A kernel wakelock was acquired before the ++ // collection started ++ SW_WAKE_UNLOCK_ALL = 4, // All previously held kernel wakelocks were ++ // released -- used in ACPI S3 notifications ++} sw_kernel_wakelock_type_t; ++ ++typedef enum sw_when_type { ++ SW_WHEN_TYPE_BEGIN = 0, /* Start snapshot */ ++ SW_WHEN_TYPE_POLL, ++ SW_WHEN_TYPE_NOTIFIER, ++ SW_WHEN_TYPE_TRACEPOINT, ++ SW_WHEN_TYPE_END, /* Stop snapshot */ ++ SW_WHEN_TYPE_NONE ++} sw_when_type_t; ++ ++/** ++ * trigger_bits is defined to use type pw_u8_t that makes only upto 8 types possible ++ */ ++#define SW_TRIGGER_BEGIN_MASK() (1U << SW_WHEN_TYPE_BEGIN) ++#define SW_TRIGGER_END_MASK() (1U << SW_WHEN_TYPE_END) ++#define SW_TRIGGER_POLL_MASK() (1U << SW_WHEN_TYPE_POLL) ++#define SW_TRIGGER_TRACEPOINT_MASK() (1U << SW_WHEN_TYPE_TRACEPOINT) ++#define SW_TRIGGER_NOTIFIER_MASK() (1U << SW_WHEN_TYPE_NOTIFIER) ++#define SW_GET_TRIGGER_MASK_VALUE(m) (1U << (m)) ++#define SW_TRIGGER_MASK_ALL() (0xFF) ++ ++enum sw_io_cmd { SW_IO_CMD_READ = 0, SW_IO_CMD_WRITE, SW_IO_CMD_MAX }; ++ ++#pragma pack(push, 1) ++struct sw_driver_msr_io_descriptor { ++ pw_u64_t address; ++ enum sw_msr_type type; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_ipc_mmio_io_descriptor { ++ union 
{ ++#ifdef SWW_MERGE ++#pragma warning(push) ++#pragma warning( \ ++ disable : 4201) // disable C4201: nonstandard extension used: nameless struct/union ++#endif ++ struct { ++ pw_u16_t command; ++ pw_u16_t sub_command; ++ }; ++#ifdef SWW_MERGE ++#pragma warning(pop) // enable C4201 ++#endif ++ union { ++ pw_u32_t ipc_command; // (sub_command << 12) | (command) ++ pw_u8_t is_gbe; // Used only for GBE MMIO ++ }; ++ }; ++ // TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' ++ union { ++ pw_u64_t data_address; // Will be "io_remapped" ++ pw_u64_t data_remapped_address; ++ }; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_pci_io_descriptor { ++ pw_u32_t bus; ++ pw_u32_t device; ++ pw_u32_t function; ++#ifdef __QNX__ ++ union { ++ pw_u32_t offset; ++ pw_u32_t index; ++ }; ++#else /* __QNX__ */ ++ pw_u32_t offset; ++#endif /* __QNX__ */ ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_configdb_io_descriptor { ++ // pw_u32_t port; ++ // pw_u32_t offset; ++ pw_u32_t address; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_trace_args_io_descriptor { ++ pw_u8_t num_args; // Number of valid entries in the 'args' array, below; 1 <= num_args <= 7 ++ pw_u8_t args[7]; // Max of 7 args can be recorded ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++/** ++ * struct - sw_driver_telem_io_descriptor - Telemetry Metric descriptor ++ * ++ * @id: (Client & Driver) Telemetry ID of the counter to read. ++ * @idx: (Driver only) index into telem array to read, or the row ++ * of the telem_indirect table to lookup the telem array index. ++ * @unit: Unit from which to collect: 0 = PMC, 1 = PUNIT ++ * Values come from the telemetry_unit enum. ++ * @scale_op: When there are multiple instances of a telem value (e.g. ++ * module C-states) the operation to use when scaling the CPU ID ++ * and adding it to the telemetry data ID. ++ * @scale_val: Amount to scale an ID (when scaling one.) 
++ * ++ * Like all hardware mechanism descriptors, the client uses this to pass ++ * metric hardware properties (unit and ID) to the driver. The driver ++ * uses it to program the telemetry unit. ++ * ++ * Users can specify that IDs should be scaled based on the CPU id, using ++ * the equation: ID = ID_value + (cpuid ) ++ * where is one of +, *, /, or %, and scaling_val is an integer ++ * value. This gives you: ++ * Operation scale_op scale_val ++ * Single instance of an ID * 0 ++ * Sequentially increasing ++ * CPU-specific values * 1 ++ * Per module cpu-specific ++ * values (2 cores/module) / 2 ++ * Round Robin assignment % cpu_count ++ * ++ * Note that scaling_value of 0 implies that no scaling should be ++ * applied. While (*, 1) is equivalent to (+, 0), the scaling value of 0 ++ * is reserved/defined to mean "no scaling", and is disallowed. ++ * ++ * If you're really tight on space, you could always fold unit and ++ * scale_op into a single byte without a lot of pain or even effort. ++ */ ++struct sw_driver_telem_io_descriptor { ++ union { ++ pw_u16_t id; ++ pw_u8_t idx; ++ }; ++ pw_u8_t unit; ++ pw_u8_t scale_op; ++ pw_u16_t scale_val; ++}; ++#pragma pack(pop) ++enum telemetry_unit { TELEM_PUNIT = 0, TELEM_PMC, TELEM_UNIT_NONE }; ++#define TELEM_MAX_ID 0xFFFF /* Maximum value of a Telemtry event ID. */ ++#define TELEM_MAX_SCALE 0xFFFF /* Maximum ID scaling value. 
*/ ++#define TELEM_OP_ADD '+' /* Addition operator */ ++#define TELEM_OP_MULT '*' /* Multiplication operator */ ++#define TELEM_OP_DIV '/' /* Division operator */ ++#define TELEM_OP_MOD '%' /* Modulus operator */ ++#define TELEM_OP_NONE 'X' /* No operator--Not a scaled ID */ ++ ++#pragma pack(push, 1) ++struct sw_driver_mailbox_io_descriptor { ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t interface_address; ++ pw_u64_t interface_remapped_address; ++ }; ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t data_address; ++ pw_u64_t data_remapped_address; ++ }; ++ pw_u64_t command; ++ pw_u64_t command_mask; ++ pw_u16_t run_busy_bit; ++ pw_u16_t is_msr_type; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_pch_mailbox_io_descriptor { ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t mtpmc_address; ++ pw_u64_t mtpmc_remapped_address; ++ }; ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t msg_full_sts_address; ++ pw_u64_t msg_full_sts_remapped_address; ++ }; ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t mfpmc_address; ++ pw_u64_t mfpmc_remapped_address; ++ }; ++ pw_u32_t data_address; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++typedef struct sw_driver_io_descriptor { ++ pw_u16_t collection_type; ++ // TODO: specify READ/WRITE ++ pw_s16_t collection_command; // One of 'enum sw_io_cmd' ++ pw_u16_t counter_size_in_bytes; // The number of bytes to READ or WRITE ++ union { ++ struct sw_driver_msr_io_descriptor msr_descriptor; ++ struct sw_driver_ipc_mmio_io_descriptor ipc_descriptor; ++ struct sw_driver_ipc_mmio_io_descriptor mmio_descriptor; ++ struct sw_driver_pci_io_descriptor pci_descriptor; ++ struct sw_driver_configdb_io_descriptor configdb_descriptor; ++ struct sw_driver_trace_args_io_descriptor trace_args_descriptor; ++ struct sw_driver_telem_io_descriptor telem_descriptor; ++ struct sw_driver_pch_mailbox_io_descriptor ++ pch_mailbox_descriptor; ++ struct 
sw_driver_mailbox_io_descriptor mailbox_descriptor; ++ }; ++ pw_u64_t write_value; // The value to WRITE ++} sw_driver_io_descriptor_t; ++#pragma pack(pop) ++ ++/** ++ * sw_driver_interface_info is used to map data collected by kernel-level ++ * collectors to metrics. The client passes one of these structs to the ++ * driver for each metric the driver should collect. The driver tags the ++ * collected data (messages) using info from this struct. When processing ++ * data from the driver, the client uses its copy of this data to ++ * identify the plugin, metric, and message IDs of each message. ++ */ ++#pragma pack(push, 1) ++struct sw_driver_interface_info { ++ pw_u64_t tracepoint_id_mask; ++ pw_u64_t notifier_id_mask; ++ pw_s16_t cpu_mask; // On which CPU(s) should the driver read the data? ++ // Currently: -2 ==> read on ALL CPUs, ++ // -1 ==> read on ANY CPU, ++ // >= 0 ==> the specific CPU to read on ++ pw_s16_t plugin_id; // Metric Plugin SID ++ pw_s16_t metric_id; // Domain-specific ID assigned by each Metric Plugin ++ pw_s16_t msg_id; // Msg ID retrieved from the SoC Watch config file ++ pw_u16_t num_io_descriptors; // Number of descriptors in the array, below. ++ pw_u8_t trigger_bits; // Mask of 'when bits' to fire this collector. ++ pw_u16_t sampling_interval_msec; // Sampling interval, in msecs ++ pw_u8_t descriptors[1]; // Array of sw_driver_io_descriptor structs. 
++}; ++#pragma pack(pop) ++ ++#define SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ ++ (sizeof(struct sw_driver_interface_info) - sizeof(pw_u8_t[1])) ++ ++#pragma pack(push, 1) ++struct sw_driver_interface_msg { ++ pw_u16_t num_infos; // Number of 'sw_driver_interface_info' structs contained within the 'infos' variable, below ++ pw_u16_t min_polling_interval_msecs; // Min time to wait before polling; used exclusively ++ // with the low overhead, context-switch based ++ // polling mode ++ // pw_u16_t infos_size_bytes; // Size of data inlined within the 'infos' variable, below ++ pw_u8_t infos[1]; ++}; ++#pragma pack(pop) ++#define SW_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ ++ (sizeof(struct sw_driver_interface_msg) - sizeof(pw_u8_t[1])) ++ ++typedef enum sw_name_id_type { ++ SW_NAME_TYPE_TRACEPOINT, ++ SW_NAME_TYPE_NOTIFIER, ++ SW_NAME_TYPE_COLLECTOR, ++ SW_NAME_TYPE_MAX, ++} sw_name_id_type_t; ++ ++#pragma pack(push, 1) ++struct sw_name_id_pair { ++ pw_u16_t id; ++ pw_u16_t type; // One of 'sw_name_id_type' ++ struct sw_string_type name; ++}; ++#pragma pack(pop) ++#define SW_NAME_ID_HEADER_SIZE() \ ++ (sizeof(struct sw_name_id_pair) - sizeof(struct sw_string_type)) ++ ++#pragma pack(push, 1) ++struct sw_name_info_msg { ++ pw_u16_t num_name_id_pairs; ++ pw_u16_t payload_len; ++ pw_u8_t pairs[1]; ++}; ++#pragma pack(pop) ++ ++/** ++ * This is the basic data structure for passing data collected by the ++ * kernel-level collectors up to the client. In addition to the data ++ * (payload), it contains the minimum metadata required for the client ++ * to identify the source of that data. 
++ */ ++#pragma pack(push, 1) ++typedef struct sw_driver_msg { ++ pw_u64_t tsc; ++ pw_u16_t cpuidx; ++ pw_u8_t plugin_id; // Cannot have more than 256 plugins ++ pw_u8_t metric_id; // Each plugin cannot handle more than 256 metrics ++ pw_u8_t msg_id; // Each metric cannot have more than 256 components ++ pw_u16_t payload_len; ++ // pw_u64_t p_payload; // Ptr to payload ++ union { ++ pw_u64_t __dummy; // Ensure size of struct is consistent on x86, x64 ++ char *p_payload; // Ptr to payload (collected data values). ++ }; ++} sw_driver_msg_t; ++#pragma pack(pop) ++#define SW_DRIVER_MSG_HEADER_SIZE() \ ++ (sizeof(struct sw_driver_msg) - sizeof(pw_u64_t)) ++ ++typedef enum sw_driver_collection_cmd { ++ SW_DRIVER_START_COLLECTION = 1, ++ SW_DRIVER_STOP_COLLECTION = 2, ++ SW_DRIVER_PAUSE_COLLECTION = 3, ++ SW_DRIVER_RESUME_COLLECTION = 4, ++ SW_DRIVER_CANCEL_COLLECTION = 5, ++} sw_driver_collection_cmd_t; ++ ++#pragma pack(push, 1) ++struct sw_driver_version_info { ++ pw_u16_t major; ++ pw_u16_t minor; ++ pw_u16_t other; ++}; ++#pragma pack(pop) ++ ++enum cpu_action { ++ SW_CPU_ACTION_NONE, ++ SW_CPU_ACTION_OFFLINE, ++ SW_CPU_ACTION_ONLINE_PREPARE, ++ SW_CPU_ACTION_ONLINE, ++ SW_CPU_ACTION_MAX, ++}; ++#pragma pack(push, 1) ++struct sw_driver_topology_change { ++ pw_u64_t timestamp; // timestamp ++ enum cpu_action type; // One of 'enum cpu_action' ++ pw_u16_t cpu; // logical cpu ++ pw_u16_t core; // core id ++ pw_u16_t pkg; // pkg/physical id ++}; ++struct sw_driver_topology_msg { ++ pw_u16_t num_entries; ++ pw_u8_t topology_entries[1]; ++}; ++#pragma pack(pop) ++ ++/** ++ * An enumeration of possible pm states that ++ * SoC Watch is interested in ++ */ ++enum sw_pm_action { ++ SW_PM_ACTION_NONE, ++ SW_PM_ACTION_SUSPEND_ENTER, ++ SW_PM_ACTION_SUSPEND_EXIT, ++ SW_PM_ACTION_HIBERNATE_ENTER, ++ SW_PM_ACTION_HIBERNATE_EXIT, ++ SW_PM_ACTION_MAX, ++}; ++ ++/** ++ * An enumeration of possible actions that trigger ++ * the power notifier ++ */ ++enum sw_pm_mode { ++ 
SW_PM_MODE_FIRMWARE, ++ SW_PM_MODE_NONE, ++}; ++ ++#define SW_PM_VALUE(mode, action) ((mode) << 16 | (action)) ++ ++/* ++ * Wrapper for ioctl arguments. ++ * EVERY ioctl MUST use this struct! ++ */ ++#pragma pack(push, 1) ++struct sw_driver_ioctl_arg { ++ pw_s32_t in_len; ++ pw_s32_t out_len; ++ // pw_u64_t p_in_arg; // Pointer to input arg ++ // pw_u64_t p_out_arg; // Pointer to output arg ++ char *in_arg; ++ char *out_arg; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++typedef struct sw_driver_msg_interval { ++ pw_u8_t plugin_id; // Cannot have more than 256 plugins ++ pw_u8_t metric_id; // Each plugin cannot handle more than 256 metrics ++ pw_u8_t msg_id; // Each metric cannot have more than 256 components ++ pw_u16_t interval; // collection interval ++} sw_driver_msg_interval_t; ++#pragma pack(pop) ++ ++#endif // __SW_STRUCTS_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_telem.h b/drivers/platform/x86/socwatch/inc/sw_telem.h +new file mode 100644 +index 000000000000..52e5119b557e +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_telem.h +@@ -0,0 +1,74 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++*/ ++ ++#ifndef _SW_TELEM_H_ ++#define _SW_TELEM_H_ 1 ++ ++#include "sw_structs.h" // sw_driver_io_descriptor ++#include "sw_types.h" // u8 and other types ++ ++int sw_telem_init_func(struct sw_driver_io_descriptor *descriptor); ++void sw_read_telem_info(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_write_telem_info(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++int sw_reset_telem(const struct sw_driver_io_descriptor *descriptor); ++bool sw_telem_available(void); ++bool sw_telem_post_config(void); ++ ++#endif /* SW_TELEM_H */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h b/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h +new file mode 100644 +index 000000000000..3834a16d7ae8 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h +@@ -0,0 +1,82 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_TRACE_NOTIFIER_PROVIDER_H__ ++#define __SW_TRACE_NOTIFIER_PROVIDER_H__ ++ ++u64 sw_timestamp(void); ++/* ++ * Some architectures and OS versions require a "discovery" ++ * phase for tracepoints and/or notifiers. Allow for that here. ++ */ ++int sw_extract_trace_notifier_providers(void); ++/* ++ * Reset trace/notifier providers at the end ++ * of a collection. ++ */ ++void sw_reset_trace_notifier_providers(void); ++/* ++ * Print statistics on trace/notifier provider overheads. 
++ */ ++void sw_print_trace_notifier_provider_overheads(void); ++/* ++ * Add all trace/notifier providers. ++ */ ++int sw_add_trace_notifier_providers(void); ++/* ++ * Remove previously added providers. ++ */ ++void sw_remove_trace_notifier_providers(void); ++#endif // __SW_TRACE_NOTIFIER_PROVIDER_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h b/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h +new file mode 100644 +index 000000000000..db8294a9a137 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h +@@ -0,0 +1,142 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. 
++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_TRACEPOINT_HANDLERS_H__ ++#define __SW_TRACEPOINT_HANDLERS_H__ ++ ++#include "sw_internal.h" ++ ++extern pw_u16_t sw_min_polling_interval_msecs; ++ ++enum sw_trace_data_type { ++ SW_TRACE_COLLECTOR_TRACEPOINT, ++ SW_TRACE_COLLECTOR_NOTIFIER ++}; ++ ++struct sw_trace_notifier_name { ++ const char * ++ kernel_name; // The tracepoint name; used by the kernel to identify tracepoints ++ const char * ++ abstract_name; // An abstract name used by plugins to specify tracepoints-of-interest; shared with Ring-3 ++}; ++ ++typedef struct sw_trace_notifier_data sw_trace_notifier_data_t; ++typedef int (*sw_trace_notifier_register_func)( ++ struct sw_trace_notifier_data *node); ++typedef int (*sw_trace_notifier_unregister_func)( ++ struct sw_trace_notifier_data *node); ++ ++struct sw_trace_notifier_data { ++ enum sw_trace_data_type type; // Tracepoint or Notifier ++ const struct sw_trace_notifier_name *name; // Tracepoint name(s) ++ sw_trace_notifier_register_func probe_register; // probe 
register function ++ sw_trace_notifier_unregister_func probe_unregister; // probe unregister function ++ struct tracepoint *tp; ++ bool always_register; // Set to TRUE if this tracepoint/notifier must ALWAYS be registered, regardless ++ // of whether the user has specified anything to collect ++ bool was_registered; ++ SW_DEFINE_LIST_HEAD( ++ list, ++ sw_collector_data); // List of 'sw_collector_data' instances for this tracepoint or notifier ++}; ++ ++struct sw_topology_node { ++ struct sw_driver_topology_change change; ++ ++ SW_LIST_ENTRY(list, sw_topology_node); ++}; ++SW_DECLARE_LIST_HEAD( ++ sw_topology_list, ++ sw_topology_node); // List of entries tracking changes in CPU topology ++extern size_t sw_num_topology_entries; // Size of the 'sw_topology_list' ++ ++int sw_extract_tracepoints(void); ++int sw_register_trace_notifiers(void); ++int sw_unregister_trace_notifiers(void); ++ ++/* ++ * Register a single TRACE/NOTIFY provider. ++ */ ++int sw_register_trace_notify_provider(struct sw_trace_notifier_data *tnode); ++/* ++ * Add all TRACE/NOTIFY providers. ++ */ ++int sw_add_trace_notify(void); ++void sw_remove_trace_notify(void); ++ ++void sw_reset_trace_notifier_lists(void); ++ ++void sw_print_trace_notifier_overheads(void); ++ ++int sw_for_each_tracepoint_node(int (*func)(struct sw_trace_notifier_data *node, ++ void *priv), ++ void *priv, bool return_on_error); ++int sw_for_each_notifier_node(int (*func)(struct sw_trace_notifier_data *node, ++ void *priv), ++ void *priv, bool return_on_error); ++ ++int sw_get_trace_notifier_id(struct sw_trace_notifier_data *node); ++ ++const char * ++sw_get_trace_notifier_kernel_name(struct sw_trace_notifier_data *node); ++const char * ++sw_get_trace_notifier_abstract_name(struct sw_trace_notifier_data *node); ++ ++/* ++ * Clear out the topology list. 
++ */ ++void sw_clear_topology_list(void); ++ ++#endif // __SW_TRACEPOINT_HANDLERS_H__ +diff --git a/drivers/platform/x86/socwatch/inc/sw_types.h b/drivers/platform/x86/socwatch/inc/sw_types.h +new file mode 100644 +index 000000000000..914ce9806965 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_types.h +@@ -0,0 +1,152 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. 
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef _PW_TYPES_H_ ++#define _PW_TYPES_H_ ++ ++#if defined(__linux__) || defined(__APPLE__) || defined(__QNX__) ++ ++#ifndef __KERNEL__ ++/* ++ * Called from Ring-3. ++ */ ++#include // Grab 'uint64_t' etc. ++#include // Grab 'pid_t' ++/* ++ * UNSIGNED types... ++ */ ++typedef uint8_t u8; ++typedef uint16_t u16; ++typedef uint32_t u32; ++typedef uint64_t u64; ++/* ++ * SIGNED types... ++ */ ++typedef int8_t s8; ++typedef int16_t s16; ++typedef int32_t s32; ++typedef int64_t s64; ++ ++#else // __KERNEL__ ++#if !defined(__APPLE__) ++#include ++#else // __APPLE__ ++#include ++#include // Grab 'uint64_t' etc. ++ ++typedef uint8_t u8; ++typedef uint16_t u16; ++typedef uint32_t u32; ++typedef uint64_t u64; ++/* ++* SIGNED types... ++*/ ++typedef int8_t s8; ++typedef int16_t s16; ++typedef int32_t s32; ++typedef int64_t s64; ++#endif // __APPLE__ ++#endif // __KERNEL__ ++ ++#elif defined(_WIN32) ++typedef __int32 int32_t; ++typedef unsigned __int32 uint32_t; ++typedef __int64 int64_t; ++typedef unsigned __int64 uint64_t; ++ ++/* ++ * UNSIGNED types... 
++ */ ++typedef unsigned char u8; ++typedef unsigned short u16; ++typedef unsigned int u32; ++typedef unsigned long long u64; ++ ++/* ++ * SIGNED types... ++ */ ++typedef signed char s8; ++typedef signed short s16; ++typedef signed int s32; ++typedef signed long long s64; ++typedef s32 pid_t; ++typedef s32 ssize_t; ++ ++#endif // _WIN32 ++ ++/* ************************************ ++ * Common to both operating systems. ++ * ************************************ ++ */ ++/* ++ * UNSIGNED types... ++ */ ++typedef u8 pw_u8_t; ++typedef u16 pw_u16_t; ++typedef u32 pw_u32_t; ++typedef u64 pw_u64_t; ++ ++/* ++ * SIGNED types... ++ */ ++typedef s8 pw_s8_t; ++typedef s16 pw_s16_t; ++typedef s32 pw_s32_t; ++typedef s64 pw_s64_t; ++ ++typedef pid_t pw_pid_t; ++ ++#endif // _PW_TYPES_H_ +diff --git a/drivers/platform/x86/socwatch/inc/sw_version.h b/drivers/platform/x86/socwatch/inc/sw_version.h +new file mode 100644 +index 000000000000..5797edffa64d +--- /dev/null ++++ b/drivers/platform/x86/socwatch/inc/sw_version.h +@@ -0,0 +1,74 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef __SW_VERSION_H__ ++#define __SW_VERSION_H__ 1 ++ ++/* ++ * SOCWatch driver version ++ */ ++#define SW_DRIVER_VERSION_MAJOR 2 ++#define SW_DRIVER_VERSION_MINOR 6 ++#define SW_DRIVER_VERSION_OTHER 2 ++ ++/* ++ * Every SOC Watch userspace component shares the same version number. 
++ */
++#define SOCWATCH_VERSION_MAJOR 2
++#define SOCWATCH_VERSION_MINOR 8
++#define SOCWATCH_VERSION_OTHER 0
++
++#endif // __SW_VERSION_H__
+diff --git a/drivers/platform/x86/socwatch/sw_collector.c b/drivers/platform/x86/socwatch/sw_collector.c
+new file mode 100644
+index 000000000000..a6c8a9cec48b
+--- /dev/null
++++ b/drivers/platform/x86/socwatch/sw_collector.c
+@@ -0,0 +1,706 @@
++/*
++
++  This file is provided under a dual BSD/GPLv2 license.  When using or
++  redistributing this file, you may do so under either license.
++
++  GPL LICENSE SUMMARY
++
++  Copyright(c) 2014 - 2018 Intel Corporation.
++
++  This program is free software; you can redistribute it and/or modify
++  it under the terms of version 2 of the GNU General Public License as
++  published by the Free Software Foundation.
++
++  This program is distributed in the hope that it will be useful, but
++  WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++  General Public License for more details.
++
++  Contact Information:
++  SoC Watch Developer Team
++  Intel Corporation,
++  1300 S Mopac Expwy,
++  Austin, TX 78746
++
++  BSD LICENSE
++
++  Copyright(c) 2014 - 2018 Intel Corporation.
++
++  Redistribution and use in source and binary forms, with or without
++  modification, are permitted provided that the following conditions
++  are met:
++
++    * Redistributions of source code must retain the above copyright
++      notice, this list of conditions and the following disclaimer.
++    * Redistributions in binary form must reproduce the above copyright
++      notice, this list of conditions and the following disclaimer in
++      the documentation and/or other materials provided with the
++      distribution.
++    * Neither the name of Intel Corporation nor the names of its
++      contributors may be used to endorse or promote products derived
++      from this software without specific prior written permission.
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#include "sw_internal.h" ++#include "sw_structs.h" ++#include "sw_collector.h" ++#include "sw_kernel_defines.h" ++#include "sw_mem.h" ++#include "sw_types.h" ++#include "sw_hardware_io.h" ++#include "sw_output_buffer.h" ++ ++/* ------------------------------------------------- ++ * Local function declarations. 
++ * ------------------------------------------------- ++ */ ++void sw_free_driver_interface_info_i(struct sw_driver_interface_info *info); ++const struct sw_hw_ops **sw_alloc_ops_i(pw_u16_t num_io_descriptors); ++void sw_free_ops_i(const struct sw_hw_ops **ops); ++struct sw_driver_interface_info * ++sw_copy_driver_interface_info_i(const struct sw_driver_interface_info *info); ++int sw_init_driver_interface_info_i(struct sw_driver_interface_info *info); ++int sw_reset_driver_interface_info_i(struct sw_driver_interface_info *info); ++int sw_init_ops_i(const struct sw_hw_ops **ops, ++ const struct sw_driver_interface_info *info); ++sw_driver_msg_t * ++sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, ++ size_t per_msg_payload_size); ++void sw_free_collector_msg_i(sw_driver_msg_t *msg); ++size_t sw_get_payload_size_i(const struct sw_driver_interface_info *info); ++void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action); ++/* ------------------------------------------------- ++ * Variables. ++ * ------------------------------------------------- ++ */ ++const static struct sw_hw_ops *s_hw_ops; ++/* ------------------------------------------------- ++ * Function definitions. ++ * ------------------------------------------------- ++ */ ++/* ++ * Driver interface info functions. ++ */ ++ ++/** ++ * sw_add_driver_info() - Add a collector node to the list called at this ++ * "when type". ++ * @head: The collector node list to add the new node to. ++ * @info: Driver information to add to the list. ++ * ++ * This function allocates and links in a "collector node" for each ++ * collector based on the collector info in the info parameter. ++ * The function allocates the new node, and links it to a local copy ++ * of the passed-in driver interface info. If the collector has an ++ * init function among its operations, it iterates through the ++ * descriptors in info, passing each one to the init function. 
++ * ++ * Finally, it allocates and initializes the "collector message" which ++ * buffers a data sample that this collector gathers during the run. ++ * ++ * Returns: -PW_ERROR on failure, PW_SUCCESS on success. ++ */ ++int sw_add_driver_info(void *list_head, ++ const struct sw_driver_interface_info *info) ++{ ++ SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; ++ struct sw_collector_data *node = sw_alloc_collector_node(); ++ ++ if (!node) { ++ pw_pr_error("ERROR allocating collector node!\n"); ++ return -PW_ERROR; ++ } ++ ++ node->info = sw_copy_driver_interface_info_i(info); ++ if (!node->info) { ++ pw_pr_error( ++ "ERROR allocating or copying driver_interface_info!\n"); ++ sw_free_collector_node(node); ++ return -PW_ERROR; ++ } ++ /* ++ * Initialize the collectors in the node's descriptors. ++ */ ++ if (sw_init_driver_interface_info_i(node->info)) { ++ pw_pr_error( ++ "ERROR initializing a driver_interface_info node!\n"); ++ sw_free_collector_node(node); ++ return -PW_ERROR; ++ } ++ /* ++ * Allocate the ops array. We do this one time as an optimization ++ * (we could always just repeatedly call 'sw_get_hw_ops_for()' ++ * during the collection but we want to avoid that overhead) ++ */ ++ node->ops = sw_alloc_ops_i(info->num_io_descriptors); ++ if (!node->ops || sw_init_ops_i(node->ops, info)) { ++ pw_pr_error("ERROR initializing the ops array!\n"); ++ sw_free_collector_node(node); ++ return -PW_ERROR; ++ } ++ /* ++ * Allocate and initialize the "collector message". 
++ */ ++ node->per_msg_payload_size = sw_get_payload_size_i(info); ++ pw_pr_debug("Debug: Per msg payload size = %u\n", ++ (unsigned int)node->per_msg_payload_size); ++ node->msg = sw_alloc_collector_msg_i(info, node->per_msg_payload_size); ++ if (!node->msg) { ++ pw_pr_error("ERROR allocating space for a collector msg!\n"); ++ sw_free_collector_node(node); ++ return -PW_ERROR; ++ } ++ pw_pr_debug("NODE = %p, NODE->MSG = %p\n", node, node->msg); ++ cpumask_clear(&node->cpumask); ++ { ++ /* ++ * For now, use following protocol: ++ * cpu_mask == -2 ==> Collect on ALL CPUs ++ * cpu_mask == -1 ==> Collect on ANY CPU ++ * cpu_mask >= 0 ==> Collect on a specific CPU ++ */ ++ if (node->info->cpu_mask >= 0) { ++ /* ++ * Collect data on 'node->info->cpu_mask' ++ */ ++ cpumask_set_cpu(node->info->cpu_mask, &node->cpumask); ++ pw_pr_debug("OK: set CPU = %d\n", node->info->cpu_mask); ++ } else if (node->info->cpu_mask == -1) { ++ /* ++ * Collect data on ANY CPU. Leave empty as a flag ++ * to signify user wishes to collect data on 'ANY' cpu. ++ */ ++ pw_pr_debug("OK: set ANY CPU\n"); ++ } else { ++ /* ++ * Collect data on ALL cpus. ++ */ ++ cpumask_copy(&node->cpumask, cpu_present_mask); ++ pw_pr_debug("OK: set ALL CPUs\n"); ++ } ++ } ++ SW_LIST_ADD(head, node, list); ++ return PW_SUCCESS; ++} ++ ++const struct sw_hw_ops **sw_alloc_ops_i(pw_u16_t num_io_descriptors) ++{ ++ size_t size = num_io_descriptors * sizeof(struct sw_hw_ops *); ++ const struct sw_hw_ops **ops = sw_kmalloc(size, GFP_KERNEL); ++ ++ if (ops) { ++ memset(ops, 0, size); ++ } ++ return ops; ++} ++ ++void sw_free_driver_interface_info_i(struct sw_driver_interface_info *info) ++{ ++ if (info) { ++ sw_kfree(info); ++ } ++} ++ ++void sw_free_ops_i(const struct sw_hw_ops **ops) ++{ ++ if (ops) { ++ sw_kfree(ops); ++ } ++} ++ ++/** ++ * sw_copy_driver_interface_info_i - Allocate and copy the passed-in "info". 
++ * ++ * @info: Information about the metric and collection properties ++ * ++ * Returns: a pointer to the newly allocated sw_driver_interface_info, ++ * which is a copy of the version passed in via the info pointer. ++ */ ++struct sw_driver_interface_info * ++sw_copy_driver_interface_info_i(const struct sw_driver_interface_info *info) ++{ ++ size_t size; ++ struct sw_driver_interface_info *node = NULL; ++ ++ if (!info) { ++ pw_pr_error("ERROR: NULL sw_driver_interface_info in alloc!\n"); ++ return node; ++ } ++ ++ size = SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() + ++ (info->num_io_descriptors * ++ sizeof(struct sw_driver_io_descriptor)); ++ node = (struct sw_driver_interface_info *)sw_kmalloc(size, GFP_KERNEL); ++ if (!node) { ++ pw_pr_error("ERROR allocating driver interface info!\n"); ++ return node; ++ } ++ memcpy((char *)node, (const char *)info, size); ++ ++ /* ++ * Do debug dump. ++ */ ++ pw_pr_debug("DRIVER info has plugin_ID = %d, metric_ID = %d, " ++ "msg_ID = %d\n", ++ node->plugin_id, node->metric_id, node->msg_id); ++ ++ return node; ++} ++int sw_init_driver_interface_info_i(struct sw_driver_interface_info *info) ++{ ++ /* ++ * Do any initialization here. ++ * For now, only IPC/MMIO descriptors need to be initialized. ++ */ ++ int i = 0; ++ struct sw_driver_io_descriptor *descriptor = NULL; ++ ++ if (!info) { ++ pw_pr_error("ERROR: no info!\n"); ++ return -PW_ERROR; ++ } ++ for (i = 0, ++ descriptor = (struct sw_driver_io_descriptor *)info->descriptors; ++ i < info->num_io_descriptors; ++i, ++descriptor) { ++ if (sw_init_driver_io_descriptor(descriptor)) { ++ return -PW_ERROR; ++ } ++ } ++ return PW_SUCCESS; ++} ++ ++int sw_reset_driver_interface_info_i(struct sw_driver_interface_info *info) ++{ ++ /* ++ * Do any finalization here. ++ * For now, only IPC/MMIO descriptors need to be finalized. 
++ */ ++ int i = 0; ++ struct sw_driver_io_descriptor *descriptor = NULL; ++ ++ if (!info) { ++ pw_pr_error("ERROR: no info!\n"); ++ return -PW_ERROR; ++ } ++ for (i = 0, ++ descriptor = (struct sw_driver_io_descriptor *)info->descriptors; ++ i < info->num_io_descriptors; ++i, ++descriptor) { ++ if (sw_reset_driver_io_descriptor(descriptor)) { ++ return -PW_ERROR; ++ } ++ } ++ return PW_SUCCESS; ++} ++int sw_init_ops_i(const struct sw_hw_ops **ops, ++ const struct sw_driver_interface_info *info) ++{ ++ int i = 0; ++ struct sw_driver_io_descriptor *descriptor = NULL; ++ ++ if (!ops || !info) { ++ return -PW_ERROR; ++ } ++ for (i = 0, ++ descriptor = (struct sw_driver_io_descriptor *)info->descriptors; ++ i < info->num_io_descriptors; ++i, ++descriptor) { ++ ops[i] = sw_get_hw_ops_for(descriptor->collection_type); ++ if (ops[i] == NULL) { ++ return -PW_ERROR; ++ } ++ } ++ return PW_SUCCESS; ++} ++ ++/* ++ * If this descriptor's collector has an init function, call it passing in ++ * this descriptor. That allows the collector to perform any initialization ++ * or registration specific to this metric. ++ */ ++int sw_init_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor) ++{ ++ sw_io_desc_init_func_t init_func = NULL; ++ const struct sw_hw_ops *ops = ++ sw_get_hw_ops_for(descriptor->collection_type); ++ if (ops == NULL) { ++ pw_pr_error("NULL ops found in init_driver_io_desc: type %d\n", ++ descriptor->collection_type); ++ return -PW_ERROR; ++ } ++ init_func = ops->init; ++ ++ if (init_func) { ++ int retval = (*init_func)(descriptor); ++ ++ if (retval) { ++ pw_pr_error("(*init) return value for type %d: %d\n", ++ descriptor->collection_type, retval); ++ } ++ return retval; ++ } ++ return PW_SUCCESS; ++} ++ ++/* ++ * If this descriptor's collector has a finalize function, call it passing in ++ * this descriptor. This allows the collector to perform any finalization ++ * specific to this metric. 
++ */ ++int sw_reset_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor) ++{ ++ sw_io_desc_reset_func_t reset_func = NULL; ++ const struct sw_hw_ops *ops = ++ sw_get_hw_ops_for(descriptor->collection_type); ++ if (ops == NULL) { ++ pw_pr_error("NULL ops found in reset_driver_io_desc: type %d\n", ++ descriptor->collection_type); ++ return -PW_ERROR; ++ } ++ pw_pr_debug("calling reset on descriptor of type %d\n", ++ descriptor->collection_type); ++ reset_func = ops->reset; ++ ++ if (reset_func) { ++ int retval = (*reset_func)(descriptor); ++ ++ if (retval) { ++ pw_pr_error("(*reset) return value for type %d: %d\n", ++ descriptor->collection_type, retval); ++ } ++ return retval; ++ } ++ return PW_SUCCESS; ++} ++ ++int sw_handle_driver_io_descriptor( ++ char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ const struct sw_hw_ops *hw_ops) ++{ ++ typedef void (*sw_hardware_io_func_t)( ++ char *, int, const struct sw_driver_io_descriptor *, u16); ++ sw_hardware_io_func_t hardware_io_func = NULL; ++ ++ if (descriptor->collection_command < SW_IO_CMD_READ || ++ descriptor->collection_command > SW_IO_CMD_WRITE) { ++ return -PW_ERROR; ++ } ++ switch (descriptor->collection_command) { ++ case SW_IO_CMD_READ: ++ hardware_io_func = hw_ops->read; ++ break; ++ case SW_IO_CMD_WRITE: ++ hardware_io_func = hw_ops->write; ++ break; ++ default: ++ break; ++ } ++ if (hardware_io_func) { ++ (*hardware_io_func)(dst_vals, cpu, descriptor, ++ descriptor->counter_size_in_bytes); ++ } else { ++ pw_pr_debug( ++ "NO ops to satisfy %u operation for collection type %u!\n", ++ descriptor->collection_command, ++ descriptor->collection_type); ++ } ++ return PW_SUCCESS; ++} ++ ++sw_driver_msg_t * ++sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, ++ size_t per_msg_payload_size) ++{ ++ size_t per_msg_size = 0, total_size = 0; ++ sw_driver_msg_t *msg = NULL; ++ ++ if (!info) { ++ return NULL; ++ } ++ per_msg_size = sizeof(struct sw_driver_msg) 
+ per_msg_payload_size; ++ total_size = per_msg_size * num_possible_cpus(); ++ msg = (sw_driver_msg_t *)sw_kmalloc(total_size, GFP_KERNEL); ++ if (msg) { ++ int cpu = -1; ++ ++ memset(msg, 0, total_size); ++ for_each_possible_cpu(cpu) { ++ sw_driver_msg_t *__msg = GET_MSG_SLOT_FOR_CPU( ++ msg, cpu, per_msg_payload_size); ++ char *__payload = ++ (char *)__msg + sizeof(struct sw_driver_msg); ++ __msg->cpuidx = (pw_u16_t)cpu; ++ __msg->plugin_id = (pw_u8_t)info->plugin_id; ++ __msg->metric_id = (pw_u8_t)info->metric_id; ++ __msg->msg_id = (pw_u8_t)info->msg_id; ++ __msg->payload_len = per_msg_payload_size; ++ __msg->p_payload = __payload; ++ pw_pr_debug( ++ "[%d]: per_msg_payload_size = %zx, msg = %p, payload = %p\n", ++ cpu, per_msg_payload_size, __msg, __payload); ++ } ++ } ++ return msg; ++} ++ ++void sw_free_collector_msg_i(sw_driver_msg_t *msg) ++{ ++ if (msg) { ++ sw_kfree(msg); ++ } ++} ++ ++size_t sw_get_payload_size_i(const struct sw_driver_interface_info *info) ++{ ++ size_t size = 0; ++ int i = 0; ++ ++ if (info) { ++ for (i = 0; i < info->num_io_descriptors; ++ size += ++ ((struct sw_driver_io_descriptor *)info->descriptors)[i] ++ .counter_size_in_bytes, ++ ++i) ++ ; ++ } ++ return size; ++} ++ ++void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action) ++{ ++ /* ++ * Basic algo: ++ * For each descriptor in 'node->info->descriptors'; do: ++ * 1. Perform H/W read; use 'descriptor->collection_type' ++ * to determine type of read; use 'descriptor->counter_size_in_bytes' ++ * for read size. Use msg->p_payload[dst_idx] as dst address ++ * 2. 
Increment dst idx by 'descriptor->counter_size_in_bytes' ++ */ ++ struct sw_collector_data *node = (struct sw_collector_data *)info; ++ int cpu = RAW_CPU(); ++ u16 num_descriptors = node->info->num_io_descriptors, i = 0; ++ struct sw_driver_io_descriptor *descriptors = ++ (struct sw_driver_io_descriptor *)node->info->descriptors; ++ sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, ++ node->per_msg_payload_size); ++ char *dst_vals = msg->p_payload; ++ const struct sw_hw_ops **ops = node->ops; ++ bool wasAnyWrite = false; ++ ++ // msg TSC assigned when msg is written to buffer ++ msg->cpuidx = cpu; ++ ++ for (i = 0; i < num_descriptors; ++i, ++ dst_vals += descriptors->counter_size_in_bytes, ++descriptors) { ++ if (unlikely(ops[i] == NULL)) { ++ pw_pr_debug("NULL OPS!\n"); ++ continue; ++ } ++ if (descriptors->collection_command == SW_IO_CMD_WRITE) { ++ wasAnyWrite = true; ++ } ++ if (sw_handle_driver_io_descriptor(dst_vals, cpu, descriptors, ++ ops[i])) { ++ pw_pr_error("ERROR reading descriptor with type %d\n", ++ descriptors->collection_type); ++ } ++ } ++ ++ /* ++ * We produce messages only on READs. Note that SWA prohibits ++ * messages that contain both READ and WRITE descriptors, so it ++ * is enough to check if there was ANY WRITE descriptor in this ++ * message. ++ */ ++ if (likely(wasAnyWrite == false)) { ++ if (sw_produce_generic_msg(msg, action)) { ++ pw_pr_warn("WARNING: could NOT produce message!\n"); ++ } ++ } ++ ++ return; ++} ++ ++/* ++ * Collector list and node functions. 
++ */ ++struct sw_collector_data *sw_alloc_collector_node(void) ++{ ++ struct sw_collector_data *node = (struct sw_collector_data *)sw_kmalloc( ++ sizeof(struct sw_collector_data), GFP_KERNEL); ++ if (node) { ++ node->per_msg_payload_size = 0x0; ++ node->last_update_jiffies = 0x0; ++ node->info = NULL; ++ node->ops = NULL; ++ node->msg = NULL; ++ SW_LIST_ENTRY_INIT(node, list); ++ } ++ return node; ++} ++ ++void sw_free_collector_node(struct sw_collector_data *node) ++{ ++ if (!node) { ++ return; ++ } ++ if (node->info) { ++ sw_reset_driver_interface_info_i(node->info); ++ sw_free_driver_interface_info_i(node->info); ++ node->info = NULL; ++ } ++ if (node->ops) { ++ sw_free_ops_i(node->ops); ++ node->ops = NULL; ++ } ++ if (node->msg) { ++ sw_free_collector_msg_i(node->msg); ++ node->msg = NULL; ++ } ++ sw_kfree(node); ++ return; ++} ++ ++int sw_handle_collector_node(struct sw_collector_data *node) ++{ ++ if (!node || !node->info || !node->ops || !node->msg) { ++ return -PW_ERROR; ++ } ++ pw_pr_debug("Calling SMP_CALL_FUNCTION_MANY!\n"); ++ sw_schedule_work(&node->cpumask, &sw_handle_per_cpu_msg, node); ++ return PW_SUCCESS; ++} ++ ++int sw_handle_collector_node_on_cpu(struct sw_collector_data *node, int cpu) ++{ ++ if (!node || !node->info || !node->ops || !node->msg) { ++ return -PW_ERROR; ++ } ++ /* ++ * Check if this node indicates it should be scheduled ++ * on the given cpu. If so, clear all other CPUs from the ++ * mask and schedule the node. 
++ */ ++ if (cpumask_test_cpu(cpu, &node->cpumask)) { ++ struct cpumask tmp_mask; ++ ++ cpumask_clear(&tmp_mask); ++ cpumask_set_cpu(cpu, &tmp_mask); ++ pw_pr_debug("Calling SMP_CALL_FUNCTION_MANY!\n"); ++ sw_schedule_work(&tmp_mask, &sw_handle_per_cpu_msg, node); ++ } ++ return PW_SUCCESS; ++} ++ ++void sw_init_collector_list(void *list_head) ++{ ++ SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; ++ SW_LIST_HEAD_INIT(head); ++} ++ ++void sw_destroy_collector_list(void *list_head) ++{ ++ SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; ++ while (!SW_LIST_EMPTY(head)) { ++ struct sw_collector_data *curr = ++ SW_LIST_GET_HEAD_ENTRY(head, sw_collector_data, list); ++ BUG_ON(!curr->info); ++ SW_LIST_UNLINK(curr, list); ++ sw_free_collector_node(curr); ++ } ++} ++ ++/** ++ * sw_handle_collector_list - Iterate through the collector list, calling ++ * func() upon each element. ++ * @list_head: The collector list head. ++ * @func: The function to call for each collector. ++ * ++ * This function is called when one of the "when types" fires, since the ++ * passed-in collector node list is the list of collections to do at that time. ++ * ++ * Returns: PW_SUCCESS on success, -PW_ERROR on error. 
++ */ ++int sw_handle_collector_list(void *list_head, ++ int (*func)(struct sw_collector_data *data)) ++{ ++ SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; ++ int retVal = PW_SUCCESS; ++ struct sw_collector_data *curr = NULL; ++ ++ if (!head || !func) { ++ return -PW_ERROR; ++ } ++ SW_LIST_FOR_EACH_ENTRY(curr, head, list) ++ { ++ pw_pr_debug("HANDLING\n"); ++ if ((*func)(curr)) { ++ retVal = -PW_ERROR; ++ } ++ } ++ return retVal; ++} ++ ++int sw_handle_collector_list_on_cpu(void *list_head, ++ int (*func)(struct sw_collector_data *data, ++ int cpu), ++ int cpu) ++{ ++ SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; ++ int retVal = PW_SUCCESS; ++ struct sw_collector_data *curr = NULL; ++ if (!head || !func) { ++ return -PW_ERROR; ++ } ++ SW_LIST_FOR_EACH_ENTRY(curr, head, list) ++ { ++ pw_pr_debug("HANDLING\n"); ++ if ((*func)(curr, cpu)) { ++ retVal = -PW_ERROR; ++ } ++ } ++ return retVal; ++} ++ ++void sw_handle_per_cpu_msg(void *info) ++{ ++ sw_handle_per_cpu_msg_i(info, SW_WAKEUP_ACTION_DIRECT); ++} ++ ++void sw_handle_per_cpu_msg_no_sched(void *info) ++{ ++ sw_handle_per_cpu_msg_i(info, SW_WAKEUP_ACTION_TIMER); ++} ++ ++void sw_handle_per_cpu_msg_on_cpu(int cpu, void *info) ++{ ++ if (unlikely(cpu == RAW_CPU())) { ++ sw_handle_per_cpu_msg_no_sched(info); ++ } else { ++ pw_pr_debug("[%d] is handling for %d\n", RAW_CPU(), cpu); ++ /* ++ * No need to disable preemption -- 'smp_call_function_single' ++ * does that for us. 
++ */ ++ smp_call_function_single( ++ cpu, &sw_handle_per_cpu_msg_no_sched, info, ++ false /* false ==> do NOT wait for function ++ completion */); ++ } ++} ++ ++void sw_set_collector_ops(const struct sw_hw_ops *hw_ops) ++{ ++ s_hw_ops = hw_ops; ++} +diff --git a/drivers/platform/x86/socwatch/sw_driver.c b/drivers/platform/x86/socwatch/sw_driver.c +new file mode 100644 +index 000000000000..35b516cfb26a +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_driver.c +@@ -0,0 +1,1472 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. 
++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#define MOD_AUTHOR "Gautam Upadhyaya " ++#define MOD_DESC "SoC Watch kernel module" ++ ++#include "sw_internal.h" ++#include "sw_structs.h" ++#include "sw_kernel_defines.h" ++#include "sw_types.h" ++#include "sw_mem.h" ++#include "sw_ioctl.h" ++#include "sw_output_buffer.h" ++#include "sw_hardware_io.h" ++#include "sw_overhead_measurements.h" ++#include "sw_tracepoint_handlers.h" ++#include "sw_collector.h" ++#include "sw_file_ops.h" ++ ++/* ------------------------------------------------- ++ * Compile time constants. 
++ * ------------------------------------------------- ++ */ ++/* ++ * Number of entries in the 'sw_collector_lists' array ++ */ ++#define NUM_COLLECTOR_MODES (SW_WHEN_TYPE_END - SW_WHEN_TYPE_BEGIN + 1) ++#define PW_OUTPUT_BUFFER_SIZE \ ++ 256 /* Number of output messages in each per-cpu buffer */ ++/* ++ * Check if tracepoint/notifier ID is in (user-supplied) mask ++ */ ++#define IS_TRACE_NOTIFIER_ID_IN_MASK(id, mask) \ ++ ((id) >= 0 && (((mask) >> (id)) & 0x1)) ++ ++/* ------------------------------------------------- ++ * Local function declarations. ++ * ------------------------------------------------- ++ */ ++int sw_load_driver_i(void); ++void sw_unload_driver_i(void); ++int sw_init_collector_lists_i(void); ++void sw_destroy_collector_lists_i(void); ++int sw_init_data_structures_i(void); ++void sw_destroy_data_structures_i(void); ++int sw_get_arch_details_i(void); ++void sw_iterate_driver_info_lists_i(void); ++void sw_handle_immediate_request_i(void *request); ++int sw_print_collector_node_i(struct sw_collector_data *data); ++int sw_collection_start_i(void); ++int sw_collection_stop_i(void); ++int sw_collection_poll_i(void); ++size_t sw_get_payload_size_i(const struct sw_driver_interface_info *info); ++sw_driver_msg_t * ++sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, ++ size_t per_msg_payload_size); ++static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, ++ void *p_local_args); ++static long ++sw_set_driver_infos_i(struct sw_driver_interface_msg __user *remote_msg, ++ int local_len); ++static long sw_handle_cmd_i(sw_driver_collection_cmd_t cmd, ++ u64 __user *remote_out_args); ++static void sw_do_extract_scu_fw_version(void); ++static long ++sw_get_available_name_id_mappings_i(enum sw_name_id_type type, ++ struct sw_name_info_msg __user *remote_info, ++ size_t local_len); ++static enum sw_driver_collection_cmd sw_get_collection_cmd_i(void); ++static bool sw_should_flush_buffer_i(void); ++ ++/* 
------------------------------------------------- ++ * Data structures. ++ * ------------------------------------------------- ++ */ ++/* ++ * Structure to hold current CMD state ++ * of the device driver. Constantly evolving, but ++ * that's OK -- this is internal to the driver ++ * and is NOT exported. ++ */ ++struct swa_internal_state { ++ sw_driver_collection_cmd_t ++ cmd; // indicates which command was specified ++ // last e.g. START, STOP etc. ++ /* ++ * Should we write to our per-cpu output buffers? ++ * YES if we're actively collecting. ++ * NO if we're not. ++ */ ++ bool write_to_buffers; ++ /* ++ * Should we "drain/flush" the per-cpu output buffers? ++ * (See "device_read" for an explanation) ++ */ ++ bool drain_buffers; ++ // Others... ++}; ++ ++/* ------------------------------------------------- ++ * Variables. ++ * ------------------------------------------------- ++ */ ++static bool do_force_module_scope_for_cpu_frequencies; ++module_param(do_force_module_scope_for_cpu_frequencies, bool, S_IRUSR); ++MODULE_PARM_DESC( ++ do_force_module_scope_for_cpu_frequencies, ++ "Toggle module scope for cpu frequencies. Sets \"affected_cpus\" and \"related_cpus\" of cpufreq_policy."); ++ ++static unsigned short sw_buffer_num_pages = 16; ++module_param(sw_buffer_num_pages, ushort, S_IRUSR); ++MODULE_PARM_DESC( ++ sw_buffer_num_pages, ++ "Specify number of 4kB pages to use for each per-cpu buffer. MUST be a power of 2! Default value = 16 (64 kB)"); ++ ++/* TODO: convert from 'list_head' to 'hlist_head' */ ++/* ++ * sw_collector_lists is an array of linked lists of "collector nodes" ++ * (sw_collector_data structs). It is indexed by the sw_when_type_t's. ++ * Each list holds the collectors to "execute" at a specific time, ++ * e.g. the beginning of the run, at a poll interval, tracepoint, etc. 
++ */ ++static SW_DEFINE_LIST_HEAD(sw_collector_lists, ++ sw_collector_data)[NUM_COLLECTOR_MODES]; ++static __read_mostly u16 sw_scu_fw_major_minor; ++ ++static struct swa_internal_state s_internal_state; ++static struct sw_file_ops s_ops = { ++ .ioctl_handler = &sw_unlocked_handle_ioctl_i, ++ .stop_handler = &sw_collection_stop_i, ++ .get_current_cmd = &sw_get_collection_cmd_i, ++ .should_flush = &sw_should_flush_buffer_i, ++}; ++ ++/* ++ * For each function that you want to profile, ++ * do the following (e.g. function 'foo'): ++ * ************************************************** ++ * DECLARE_OVERHEAD_VARS(foo); ++ * ************************************************** ++ * This will declare the two variables required ++ * to keep track of overheads incurred in ++ * calling/servicing 'foo'. Note that the name ++ * that you declare here *MUST* match the function name! ++ */ ++ ++DECLARE_OVERHEAD_VARS(sw_collection_poll_i); // for POLL ++DECLARE_OVERHEAD_VARS(sw_any_seg_full); ++ ++/* ++ * String representation of the various 'SW_WHEN_TYPE_XYZ' enum values. ++ * Debugging ONLY! ++ */ ++#if DO_DEBUG_OUTPUT ++static const char *s_when_type_names[] = { "BEGIN", "POLL", "NOTIFIER", ++ "TRACEPOINT", "END" }; ++#endif // DO_DEBUG_OUTPUT ++ ++/* ------------------------------------------------- ++ * Function definitions. ++ * ------------------------------------------------- ++ */ ++/* ++ * External functions. 
++ */ ++int sw_process_snapshot(enum sw_when_type when) ++{ ++ if (when > SW_WHEN_TYPE_END) { ++ pw_pr_error("invalid snapshot time %d specified!\n", when); ++ return -EINVAL; ++ } ++ if (sw_handle_collector_list(&sw_collector_lists[when], ++ &sw_handle_collector_node)) { ++ pw_pr_error("ERROR: could NOT handle snapshot for time %d!\n", ++ when); ++ return -EIO; ++ } ++ return 0; ++} ++ ++int sw_process_snapshot_on_cpu(enum sw_when_type when, int cpu) ++{ ++ if (when > SW_WHEN_TYPE_END) { ++ pw_pr_error("invalid snapshot time %d specified!\n", when); ++ return -EINVAL; ++ } ++ if (sw_handle_collector_list_on_cpu(&sw_collector_lists[when], ++ &sw_handle_collector_node_on_cpu, ++ cpu)) { ++ pw_pr_error("ERROR: could NOT handle snapshot for time %d!\n", ++ when); ++ return -EIO; ++ } ++ return 0; ++} ++ ++/* ++ * Driver interface info and collector list functions. ++ */ ++int sw_print_collector_node_i(struct sw_collector_data *curr) ++{ ++ pw_u16_t num_descriptors = 0; ++ sw_io_desc_print_func_t print_func = NULL; ++ struct sw_driver_io_descriptor *descriptor = NULL; ++ struct sw_driver_interface_info *info = NULL; ++ ++ if (!curr) { ++ return -PW_ERROR; ++ } ++ info = curr->info; ++ descriptor = (struct sw_driver_io_descriptor *)info->descriptors; ++ pw_pr_debug( ++ "cpu-mask = %d, Plugin-ID = %d, Metric-ID = %d, MSG-ID = %d\n", ++ info->cpu_mask, info->plugin_id, info->metric_id, info->msg_id); ++ for (num_descriptors = info->num_io_descriptors; num_descriptors > 0; ++ --num_descriptors, ++descriptor) { ++ const struct sw_hw_ops *ops = ++ sw_get_hw_ops_for(descriptor->collection_type); ++ if (ops == NULL) { ++ return -PW_ERROR; ++ } ++ print_func = ops->print; ++ if (print_func && (*print_func)(descriptor)) { ++ return -PW_ERROR; ++ } ++ } ++ return PW_SUCCESS; ++} ++ ++/* ++ * Driver interface info and collector list functions. ++ */ ++ ++/** ++ * sw_reset_collector_node_i - Call the reset op on all of the descriptors ++ * in coll that have one. 
++ * @coll: The data structure containing an array of collector descriptors. ++ * ++ * Return: PW_SUCCESS if all of the resets succeeded, -PW_ERROR if any failed. ++ */ ++static int sw_reset_collector_node_i(struct sw_collector_data *coll) ++{ ++ struct sw_driver_io_descriptor *descriptor = NULL; ++ struct sw_driver_interface_info *info = NULL; ++ int num_descriptors; ++ int retcode = PW_SUCCESS; ++ ++ if (!coll) { ++ return -PW_ERROR; ++ } ++ info = coll->info; ++ ++ descriptor = (struct sw_driver_io_descriptor *)info->descriptors; ++ pw_pr_debug( ++ "cpu-mask = %d, Plugin-ID = %d, Metric-ID = %d, MSG-ID = %d\n", ++ info->cpu_mask, info->plugin_id, info->metric_id, info->msg_id); ++ for (num_descriptors = info->num_io_descriptors; num_descriptors > 0; ++ --num_descriptors, ++descriptor) { ++ const struct sw_hw_ops *ops = ++ sw_get_hw_ops_for(descriptor->collection_type); ++ if (ops && ops->reset && (*ops->reset)(descriptor)) { ++ retcode = -PW_ERROR; ++ } ++ } ++ return retcode; ++} ++ ++static int sw_iterate_trace_notifier_list_i(struct sw_trace_notifier_data *node, ++ void *dummy) ++{ ++ return sw_handle_collector_list(&node->list, ++ &sw_print_collector_node_i); ++} ++ ++void sw_iterate_driver_info_lists_i(void) ++{ ++ sw_when_type_t which; ++ ++ for (which = SW_WHEN_TYPE_BEGIN; which <= SW_WHEN_TYPE_END; ++which) { ++ pw_pr_debug("ITERATING list %s\n", s_when_type_names[which]); ++ if (sw_handle_collector_list( ++ &sw_collector_lists[which], ++ &sw_print_collector_node_i)) { // Should NEVER happen! 
++ pw_pr_error( ++ "WARNING: error occurred while printing values!\n"); ++ } ++ } ++ ++ if (sw_for_each_tracepoint_node(&sw_iterate_trace_notifier_list_i, NULL, ++ false /*return-on-error*/)) { ++ pw_pr_error( ++ "WARNING: error occurred while printing tracepoint values!\n"); ++ } ++ if (sw_for_each_notifier_node(&sw_iterate_trace_notifier_list_i, NULL, ++ false /*return-on-error*/)) { ++ pw_pr_error( ++ "WARNING: error occurred while printing notifier values!\n"); ++ } ++} ++ ++static void sw_reset_collectors_i(void) ++{ ++ sw_when_type_t which; ++ ++ for (which = SW_WHEN_TYPE_BEGIN; which <= SW_WHEN_TYPE_END; ++which) { ++ pw_pr_debug("ITERATING list %s\n", s_when_type_names[which]); ++ if (sw_handle_collector_list(&sw_collector_lists[which], ++ &sw_reset_collector_node_i)) { ++ pw_pr_error( ++ "WARNING: error occurred while resetting a collector!\n"); ++ } ++ } ++} ++ ++int sw_init_data_structures_i(void) ++{ ++ /* ++ * Find the # CPUs in this system. ++ * Update: use 'num_possible' instead of 'num_present' in case ++ * the cpus aren't numbered contiguously ++ */ ++ sw_max_num_cpus = num_possible_cpus(); ++ ++ /* ++ * Initialize our trace subsys: MUST be called ++ * BEFORE 'sw_init_collector_lists_i()! 
++ */ ++ if (sw_add_trace_notify()) { ++ sw_destroy_data_structures_i(); ++ return -PW_ERROR; ++ } ++ if (sw_init_collector_lists_i()) { ++ sw_destroy_data_structures_i(); ++ return -PW_ERROR; ++ } ++ if (sw_init_per_cpu_buffers()) { ++ sw_destroy_data_structures_i(); ++ return -PW_ERROR; ++ } ++ if (sw_register_hw_ops()) { ++ sw_destroy_data_structures_i(); ++ return -PW_ERROR; ++ } ++ return PW_SUCCESS; ++} ++ ++void sw_destroy_data_structures_i(void) ++{ ++ sw_free_hw_ops(); ++ sw_destroy_per_cpu_buffers(); ++ sw_destroy_collector_lists_i(); ++ sw_remove_trace_notify(); ++} ++ ++int sw_get_arch_details_i(void) ++{ ++ /* ++ * SCU F/W version (if applicable) ++ */ ++ sw_do_extract_scu_fw_version(); ++ return PW_SUCCESS; ++} ++ ++#define INIT_FLAG ((void *)0) ++#define DESTROY_FLAG ((void *)1) ++ ++static int ++sw_init_destroy_trace_notifier_lists_i(struct sw_trace_notifier_data *node, ++ void *is_init) ++{ ++ if (is_init == INIT_FLAG) { ++ sw_init_collector_list(&node->list); ++ } else { ++ sw_destroy_collector_list(&node->list); ++ } ++ node->was_registered = false; ++ ++ return PW_SUCCESS; ++} ++ ++int sw_init_collector_lists_i(void) ++{ ++ int i = 0; ++ ++ for (i = 0; i < NUM_COLLECTOR_MODES; ++i) { ++ sw_init_collector_list(&sw_collector_lists[i]); ++ } ++ sw_for_each_tracepoint_node(&sw_init_destroy_trace_notifier_lists_i, ++ INIT_FLAG, false /*return-on-error*/); ++ sw_for_each_notifier_node(&sw_init_destroy_trace_notifier_lists_i, ++ INIT_FLAG, false /*return-on-error*/); ++ ++ return PW_SUCCESS; ++} ++ ++void sw_destroy_collector_lists_i(void) ++{ ++ int i = 0; ++ ++ for (i = 0; i < NUM_COLLECTOR_MODES; ++i) { ++ sw_destroy_collector_list(&sw_collector_lists[i]); ++ } ++ sw_for_each_tracepoint_node(&sw_init_destroy_trace_notifier_lists_i, ++ DESTROY_FLAG, false /*return-on-error*/); ++ sw_for_each_notifier_node(&sw_init_destroy_trace_notifier_lists_i, ++ DESTROY_FLAG, false /*return-on-error*/); ++} ++ ++/* ++ * Used for {READ,WRITE}_IMMEDIATE requests. 
++ */ ++typedef struct sw_immediate_request_info sw_immediate_request_info_t; ++struct sw_immediate_request_info { ++ struct sw_driver_io_descriptor *local_descriptor; ++ char *dst_vals; ++ int *retVal; ++}; ++void sw_handle_immediate_request_i(void *request) ++{ ++ struct sw_immediate_request_info *info = ++ (struct sw_immediate_request_info *)request; ++ struct sw_driver_io_descriptor *descriptor = info->local_descriptor; ++ char *dst_vals = info->dst_vals; ++ const struct sw_hw_ops *ops = ++ sw_get_hw_ops_for(descriptor->collection_type); ++ if (likely(ops != NULL)) { ++ *(info->retVal) = sw_handle_driver_io_descriptor( ++ dst_vals, RAW_CPU(), descriptor, ops); ++ } else { ++ pw_pr_error( ++ "No operations found to satisfy collection type %u!\n", ++ descriptor->collection_type); ++ } ++ return; ++} ++ ++static int num_times_polled; ++ ++int sw_collection_start_i(void) ++{ ++ /* ++ * Reset the poll tick counter. ++ */ ++ num_times_polled = 0; ++ /* ++ * Update the output buffers. ++ */ ++ sw_reset_per_cpu_buffers(); ++ /* ++ * Ensure clients don't think we're in 'flush' mode. ++ */ ++ s_internal_state.drain_buffers = false; ++ /* ++ * Set the 'command' ++ */ ++ s_internal_state.cmd = SW_DRIVER_START_COLLECTION; ++ /* ++ * Clear out the topology list ++ */ ++ sw_clear_topology_list(); ++ /* ++ * Handle 'START' snapshots, if any. ++ */ ++ { ++ if (sw_handle_collector_list( ++ &sw_collector_lists[SW_WHEN_TYPE_BEGIN], ++ &sw_handle_collector_node)) { ++ pw_pr_error( ++ "ERROR: could NOT handle START collector list!\n"); ++ return -PW_ERROR; ++ } ++ } ++ /* ++ * Register any required tracepoints and notifiers. ++ */ ++ { ++ if (sw_register_trace_notifiers()) { ++ pw_pr_error("ERROR registering trace_notifiers!\n"); ++ sw_unregister_trace_notifiers(); ++ return -PW_ERROR; ++ } ++ } ++ pw_pr_debug("OK, STARTED collection!\n"); ++ return PW_SUCCESS; ++} ++ ++int sw_collection_stop_i(void) ++{ ++ /* ++ * Unregister any registered tracepoints and notifiers. 
++ */ ++ if (sw_unregister_trace_notifiers()) { ++ pw_pr_warn( ++ "Warning: some trace_notifier probe functions could NOT be unregistered!\n"); ++ } ++ /* ++ * Handle 'STOP' snapshots, if any. ++ */ ++ if (sw_handle_collector_list(&sw_collector_lists[SW_WHEN_TYPE_END], ++ &sw_handle_collector_node)) { ++ pw_pr_error("ERROR: could NOT handle STOP collector list!\n"); ++ return -PW_ERROR; ++ } ++ /* ++ * Set the 'command' ++ */ ++ s_internal_state.cmd = SW_DRIVER_STOP_COLLECTION; ++ /* ++ * Tell consumers to 'flush' all buffers. We need to ++ * defer this as long as possible because it needs to be ++ * close to the 'wake_up_interruptible', below. ++ */ ++ s_internal_state.drain_buffers = true; ++ smp_mb(); ++ /* ++ * Wakeup any sleeping readers, and cleanup any ++ * timers in the reader subsys. ++ */ ++ sw_cancel_reader(); ++ /* ++ * Collect stats on samples produced and dropped. ++ * TODO: call from 'device_read()' instead? ++ */ ++ sw_count_samples_produced_dropped(); ++#if DO_OVERHEAD_MEASUREMENTS ++ pw_pr_force( ++ "DEBUG: there were %llu samples produced and %llu samples dropped in buffer v5!\n", ++ sw_num_samples_produced, sw_num_samples_dropped); ++#endif // DO_OVERHEAD_MEASUREMENTS ++ /* ++ * DEBUG: iterate over collection lists. ++ */ ++ sw_iterate_driver_info_lists_i(); ++ /* ++ * Shut down any collectors that need shutting down. ++ */ ++ sw_reset_collectors_i(); ++ /* ++ * Clear out the collector lists. ++ */ ++ sw_destroy_collector_lists_i(); ++ pw_pr_debug("OK, STOPPED collection!\n"); ++#if DO_OVERHEAD_MEASUREMENTS ++ pw_pr_force("There were %d poll ticks!\n", num_times_polled); ++#endif // DO_OVERHEAD_MEASUREMENTS ++ return PW_SUCCESS; ++} ++ ++int sw_collection_poll_i(void) ++{ ++ /* ++ * Handle 'POLL' timer expirations. 
++ */ ++ if (SW_LIST_EMPTY(&sw_collector_lists[SW_WHEN_TYPE_POLL])) { ++ pw_pr_debug("DEBUG: EMPTY POLL LIST\n"); ++ } ++ ++num_times_polled; ++ return sw_handle_collector_list(&sw_collector_lists[SW_WHEN_TYPE_POLL], ++ &sw_handle_collector_node); ++} ++ ++/* ++ * Private data for the 'sw_add_trace_notifier_driver_info_i' function. ++ */ ++struct tn_data { ++ struct sw_driver_interface_info *info; ++ u64 mask; ++}; ++ ++static int ++sw_add_trace_notifier_driver_info_i(struct sw_trace_notifier_data *node, ++ void *priv) ++{ ++ struct tn_data *data = (struct tn_data *)priv; ++ struct sw_driver_interface_info *local_info = data->info; ++ u64 mask = data->mask; ++ int id = sw_get_trace_notifier_id(node); ++ ++ if (IS_TRACE_NOTIFIER_ID_IN_MASK(id, mask)) { ++ pw_pr_debug("TRACEPOINT ID = %d is IN mask 0x%llx\n", id, mask); ++ if (sw_add_driver_info(&node->list, local_info)) { ++ pw_pr_error( ++ "WARNING: could NOT add driver info to list!\n"); ++ return -PW_ERROR; ++ } ++ } ++ return PW_SUCCESS; ++} ++ ++static int sw_post_config_i(const struct sw_hw_ops *op, void *priv) ++{ ++ if (!op->available || !(*op->available)()) { ++ /* op not available */ ++ return 0; ++ } ++ if (!op->post_config || (*op->post_config)()) { ++ return 0; ++ } ++ return -EIO; ++} ++ ++/** ++ * sw_set_driver_infos_i - Process the collection config data passed down ++ * from the client. ++ * @remote_msg: The user space address of our ioctl data. ++ * @local_len: The number of bytes of remote_msg we should copy. ++ * ++ * This function copies the ioctl data from user space to kernel ++ * space. That data is an array of sw_driver_interface_info structs, ++ * which hold information about tracepoints, notifiers, and collector ++ * configuration info for this collection run.. 
For each driver_info ++ * struct, it calls the appropriate "add info" (registration/ ++ * configuration) function for each of the "when types" (begin, poll, ++ * notifier, tracepoint, end) which should trigger a collection ++ * operation for that collector. ++ * ++ * When this function is done, the data structures corresponding to ++ * collection should be configured and initialized. ++ * ++ * ++ * Returns: PW_SUCCESS on success, or a non-zero on an error. ++ */ ++static long ++sw_set_driver_infos_i(struct sw_driver_interface_msg __user *remote_msg, ++ int local_len) ++{ ++ struct sw_driver_interface_info *local_info = NULL; ++ struct sw_driver_interface_msg *local_msg = vmalloc(local_len); ++ pw_u8_t read_triggers = 0x0; ++ pw_u16_t num_infos = 0; ++ sw_when_type_t i = SW_WHEN_TYPE_BEGIN; ++ char *__data = (char *)local_msg->infos; ++ size_t dst_idx = 0; ++ ++ if (!local_msg) { ++ pw_pr_error("ERROR allocating space for local message!\n"); ++ return -EFAULT; ++ } ++ if (copy_from_user(local_msg, (struct sw_driver_interface_msg __user *) ++ remote_msg, local_len)) { ++ pw_pr_error("ERROR copying message from user space!\n"); ++ vfree(local_msg); ++ return -EFAULT; ++ } ++ /* ++ * We aren't allowed to config the driver multiple times between ++ * collections. Clear out any previous config values. ++ */ ++ sw_destroy_collector_lists_i(); ++ ++ /* ++ * Did the user specify a min polling interval? 
++ */ ++ sw_min_polling_interval_msecs = local_msg->min_polling_interval_msecs; ++ pw_pr_debug("min_polling_interval_msecs = %u\n", ++ sw_min_polling_interval_msecs); ++ ++ num_infos = local_msg->num_infos; ++ pw_pr_debug("LOCAL NUM INFOS = %u\n", num_infos); ++ for (; num_infos > 0; --num_infos) { ++ local_info = ++ (struct sw_driver_interface_info *)&__data[dst_idx]; ++ dst_idx += (SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() + ++ local_info->num_io_descriptors * ++ sizeof(struct sw_driver_io_descriptor)); ++ read_triggers = local_info->trigger_bits; ++ pw_pr_debug( ++ "read_triggers = %u, # msrs = %u, new dst_idx = %u\n", ++ (unsigned int)read_triggers, ++ (unsigned int)local_info->num_io_descriptors, ++ (unsigned int)dst_idx); ++ for (i = SW_WHEN_TYPE_BEGIN; i <= SW_WHEN_TYPE_END; ++ ++i, read_triggers >>= 1) { ++ if (read_triggers & 0x1) { // Bit 'i' is set ++ pw_pr_debug("BIT %d is SET!\n", i); ++ if (i == SW_WHEN_TYPE_TRACEPOINT) { ++ struct tn_data tn_data = { ++ local_info, ++ local_info->tracepoint_id_mask ++ }; ++ pw_pr_debug( ++ "TRACEPOINT, MASK = 0x%llx\n", ++ local_info->tracepoint_id_mask); ++ sw_for_each_tracepoint_node( ++ &sw_add_trace_notifier_driver_info_i, ++ &tn_data, ++ false /*return-on-error*/); ++ } else if (i == SW_WHEN_TYPE_NOTIFIER) { ++ struct tn_data tn_data = { ++ local_info, ++ local_info->notifier_id_mask ++ }; ++ pw_pr_debug( ++ "NOTIFIER, MASK = 0x%llx\n", ++ local_info->notifier_id_mask); ++ sw_for_each_notifier_node( ++ &sw_add_trace_notifier_driver_info_i, ++ &tn_data, ++ false /*return-on-error*/); ++ } else { ++ if (sw_add_driver_info( ++ &sw_collector_lists[i], ++ local_info)) { ++ pw_pr_error( ++ "WARNING: could NOT add driver info to list for 'when type' %d!\n", ++ i); ++ } ++ } ++ } ++ } ++ } ++ if (sw_for_each_hw_op(&sw_post_config_i, NULL, ++ false /*return-on-error*/)) { ++ pw_pr_error("POST-CONFIG error!\n"); ++ } ++ vfree(local_msg); ++ memset(&s_internal_state, 0, sizeof(s_internal_state)); ++ /* ++ * DEBUG: iterate 
over collection lists. ++ */ ++ sw_iterate_driver_info_lists_i(); ++ return PW_SUCCESS; ++} ++ ++static long sw_handle_cmd_i(sw_driver_collection_cmd_t cmd, ++ u64 __user *remote_out_args) ++{ ++ /* ++ * First, handle the command. ++ */ ++ if (cmd < SW_DRIVER_START_COLLECTION || ++ cmd > SW_DRIVER_CANCEL_COLLECTION) { ++ pw_pr_error("ERROR: invalid cmd = %d\n", cmd); ++ return -PW_ERROR; ++ } ++ switch (cmd) { ++ case SW_DRIVER_START_COLLECTION: ++ if (sw_collection_start_i()) { ++ return -PW_ERROR; ++ } ++ break; ++ case SW_DRIVER_STOP_COLLECTION: ++ if (sw_collection_stop_i()) { ++ return -PW_ERROR; ++ } ++ break; ++ default: ++ pw_pr_error("WARNING: unsupported command %d\n", cmd); ++ break; ++ } ++ /* ++ * Then retrieve sample stats. ++ */ ++#if DO_COUNT_DROPPED_SAMPLES ++ if (cmd == SW_DRIVER_STOP_COLLECTION) { ++ u64 local_args[2] = { sw_num_samples_produced, ++ sw_num_samples_dropped }; ++ if (copy_to_user(remote_out_args, local_args, ++ sizeof(local_args))) { ++ pw_pr_error( ++ "couldn't copy collection stats to user space!\n"); ++ return -PW_ERROR; ++ } ++ } ++#endif // DO_COUNT_DROPPED_SAMPLES ++ return PW_SUCCESS; ++} ++ ++#ifdef SFI_SIG_OEMB ++static int sw_do_parse_sfi_oemb_table(struct sfi_table_header *header) ++{ ++#ifdef CONFIG_X86_WANT_INTEL_MID ++ struct sfi_table_oemb *oemb = (struct sfi_table_oemb *) ++ header; // 'struct sfi_table_oemb' defined in 'intel-mid.h' ++ if (!oemb) { ++ pw_pr_error("ERROR: NULL sfi table header!\n"); ++ return -PW_ERROR; ++ } ++ sw_scu_fw_major_minor = (oemb->scu_runtime_major_version << 8) | ++ (oemb->scu_runtime_minor_version); ++ pw_pr_debug("DEBUG: major = %u, minor = %u\n", ++ oemb->scu_runtime_major_version, ++ oemb->scu_runtime_minor_version); ++#endif // CONFIG_X86_WANT_INTEL_MID ++ return PW_SUCCESS; ++} ++#endif // SFI_SIG_OEMB ++ ++static void sw_do_extract_scu_fw_version(void) ++{ ++ sw_scu_fw_major_minor = 0x0; ++#ifdef SFI_SIG_OEMB ++ if (sfi_table_parse(SFI_SIG_OEMB, NULL, NULL, ++ 
&sw_do_parse_sfi_oemb_table)) { ++ pw_pr_force("WARNING: NO SFI information!\n"); ++ } ++#endif // SFI_SIG_OEMB ++} ++ ++static int sw_gather_trace_notifier_i(struct sw_trace_notifier_data *node, ++ struct sw_name_info_msg *msg, ++ enum sw_name_id_type type) ++{ ++ pw_u16_t *idx = &msg->payload_len; ++ char *buffer = (char *)&msg->pairs[*idx]; ++ struct sw_name_id_pair *pair = (struct sw_name_id_pair *)buffer; ++ int id = sw_get_trace_notifier_id(node); ++ struct sw_string_type *str = &pair->name; ++ const char *abstract_name = sw_get_trace_notifier_abstract_name(node); ++ ++ if (likely(abstract_name && id >= 0)) { ++ ++msg->num_name_id_pairs; ++ pair->type = type; ++ pair->id = (u16)id; ++ str->len = strlen(abstract_name) + 1; // "+1" for trailing '\0' ++ memcpy(&str->data[0], abstract_name, str->len); ++ ++ pw_pr_debug("TP[%d] = %s (%u)\n", ++ sw_get_trace_notifier_id(node), abstract_name, ++ (unsigned int)strlen(abstract_name)); ++ ++ *idx += SW_NAME_ID_HEADER_SIZE() + ++ SW_STRING_TYPE_HEADER_SIZE() + str->len; ++ } ++ ++ return PW_SUCCESS; ++} ++ ++static int sw_gather_tracepoint_i(struct sw_trace_notifier_data *node, ++ void *priv) ++{ ++ return sw_gather_trace_notifier_i(node, (struct sw_name_info_msg *)priv, ++ SW_NAME_TYPE_TRACEPOINT); ++} ++ ++static int sw_gather_notifier_i(struct sw_trace_notifier_data *node, void *priv) ++{ ++ return sw_gather_trace_notifier_i(node, (struct sw_name_info_msg *)priv, ++ SW_NAME_TYPE_NOTIFIER); ++} ++ ++static long ++sw_get_available_trace_notifiers_i(enum sw_name_id_type type, ++ struct sw_name_info_msg *local_info) ++{ ++ long retVal = PW_SUCCESS; ++ ++ if (type == SW_NAME_TYPE_TRACEPOINT) { ++ retVal = sw_for_each_tracepoint_node(&sw_gather_tracepoint_i, ++ local_info, ++ false /*return-on-error*/); ++ } else { ++ retVal = sw_for_each_notifier_node(&sw_gather_notifier_i, ++ local_info, ++ false /*return-on-error*/); ++ } ++ pw_pr_debug( ++ "There are %u extracted traces/notifiers for a total of %u bytes!\n", ++ 
local_info->num_name_id_pairs, local_info->payload_len); ++ return retVal; ++} ++ ++static int sw_gather_hw_op_i(const struct sw_hw_ops *op, void *priv) ++{ ++ struct sw_name_info_msg *msg = (struct sw_name_info_msg *)priv; ++ pw_u16_t *idx = &msg->payload_len; ++ char *buffer = (char *)&msg->pairs[*idx]; ++ struct sw_name_id_pair *pair = (struct sw_name_id_pair *)buffer; ++ struct sw_string_type *str = &pair->name; ++ const char *abstract_name = sw_get_hw_op_abstract_name(op); ++ int id = sw_get_hw_op_id(op); ++ ++ pw_pr_debug("Gather Collector[%d] = %s\n", id, abstract_name); ++ if (likely(abstract_name && id >= 0)) { ++ /* ++ * Final check: is this operation available on the ++ * target platform? If 'available' function doesn't ++ * exist then YES. Else call 'available' ++ * function to decide. ++ */ ++ pw_pr_debug("%s has available = %p\n", abstract_name, ++ op->available); ++ if (!op->available || (*op->available)()) { ++ ++msg->num_name_id_pairs; ++ pair->type = SW_NAME_TYPE_COLLECTOR; ++ pair->id = (u16)id; ++ str->len = strlen(abstract_name) + ++ 1; // "+1" for trailing '\0' ++ memcpy(&str->data[0], abstract_name, str->len); ++ ++ *idx += SW_NAME_ID_HEADER_SIZE() + ++ SW_STRING_TYPE_HEADER_SIZE() + str->len; ++ } ++ } ++ ++ return PW_SUCCESS; ++} ++ ++static long sw_get_available_collectors_i(struct sw_name_info_msg *local_info) ++{ ++ return sw_for_each_hw_op(&sw_gather_hw_op_i, local_info, ++ false /*return-on-error*/); ++} ++ ++static long ++sw_get_available_name_id_mappings_i(enum sw_name_id_type type, ++ struct sw_name_info_msg __user *remote_info, ++ size_t local_len) ++{ ++ char *buffer = vmalloc(local_len); ++ struct sw_name_info_msg *local_info = NULL; ++ long retVal = PW_SUCCESS; ++ ++ if (!buffer) { ++ pw_pr_error("ERROR: couldn't alloc temp buffer!\n"); ++ return -PW_ERROR; ++ } ++ memset(buffer, 0, local_len); ++ local_info = (struct sw_name_info_msg *)buffer; ++ ++ if (type == SW_NAME_TYPE_COLLECTOR) { ++ retVal = 
sw_get_available_collectors_i(local_info); ++ } else { ++ retVal = sw_get_available_trace_notifiers_i(type, local_info); ++ } ++ if (retVal == PW_SUCCESS) { ++ retVal = copy_to_user(remote_info, local_info, local_len); ++ if (retVal) { ++ pw_pr_error( ++ "ERROR: couldn't copy tracepoint info to user space!\n"); ++ } ++ } ++ vfree(buffer); ++ return retVal; ++} ++ ++static long ++sw_get_topology_changes_i(struct sw_driver_topology_msg __user *remote_msg, ++ size_t local_len) ++{ ++ char *buffer = NULL; ++ struct sw_driver_topology_msg *local_msg = NULL; ++ size_t buffer_len = sizeof(struct sw_driver_topology_msg) + ++ sw_num_topology_entries * ++ sizeof(struct sw_driver_topology_change); ++ long retVal = PW_SUCCESS; ++ struct sw_driver_topology_change *dst = NULL; ++ size_t dst_idx = 0; ++ ++ SW_LIST_HEAD_VAR(sw_topology_node) *head = (void *)&sw_topology_list; ++ struct sw_topology_node *tnode = NULL; ++ ++ if (local_len < buffer_len) { ++ pw_pr_error( ++ "ERROR: insufficient buffer space to encode topology changes! 
Requires %zu, output space = %zu\n", ++ buffer_len, local_len); ++ return -EIO; ++ } ++ ++ buffer = vmalloc(buffer_len); ++ if (!buffer) { ++ pw_pr_error( ++ "ERROR: couldn't allocate buffer for topology transfer!\n"); ++ return -EIO; ++ } ++ memset(buffer, 0, buffer_len); ++ ++ local_msg = (struct sw_driver_topology_msg *)buffer; ++ local_msg->num_entries = sw_num_topology_entries; ++ dst = (struct sw_driver_topology_change *)&local_msg ++ ->topology_entries[0]; ++ SW_LIST_FOR_EACH_ENTRY(tnode, head, list) ++ { ++ struct sw_driver_topology_change *change = &tnode->change; ++ ++ memcpy(&dst[dst_idx++], change, sizeof(*change)); ++ } ++ retVal = copy_to_user(remote_msg, local_msg, buffer_len); ++ if (retVal) { ++ pw_pr_error( ++ "ERROR: couldn't copy topology changes to user space!\n"); ++ } ++ vfree(buffer); ++ return retVal; ++} ++ ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++#define MATCH_IOCTL(num, pred) ((num) == (pred) || (num) == (pred##32)) ++#else ++#define MATCH_IOCTL(num, pred) ((num) == (pred)) ++#endif ++ ++static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, ++ void *p_local_args) ++{ ++ struct sw_driver_ioctl_arg local_args; ++ int local_in_len, local_out_len; ++ ++ if (!p_local_args) { ++ pw_pr_error("ERROR: NULL p_local_args value?!\n"); ++ return -PW_ERROR; ++ } ++ ++ /* ++ * (1) Sanity check: ++ * Before doing anything, double check to ++ * make sure this IOCTL was really intended ++ * for us! ++ */ ++ if (_IOC_TYPE(ioctl_num) != APWR_IOCTL_MAGIC_NUM) { ++ pw_pr_error( ++ "ERROR: requested IOCTL TYPE (%d) != APWR_IOCTL_MAGIC_NUM (%d)\n", ++ _IOC_TYPE(ioctl_num), APWR_IOCTL_MAGIC_NUM); ++ return -PW_ERROR; ++ } ++ /* ++ * (2) Extract arg lengths. 
++ */ ++ local_args = *((struct sw_driver_ioctl_arg *)p_local_args); ++ ++ local_in_len = local_args.in_len; ++ local_out_len = local_args.out_len; ++ pw_pr_debug("GU: local_in_len = %d, local_out_len = %d\n", local_in_len, ++ local_out_len); ++ /* ++ * (3) Service individual IOCTL requests. ++ */ ++ if (MATCH_IOCTL(ioctl_num, PW_IOCTL_CONFIG)) { ++ pw_pr_debug("PW_IOCTL_CONFIG\n"); ++ return sw_set_driver_infos_i( ++ (struct sw_driver_interface_msg __user *) ++ local_args.in_arg, ++ local_in_len); ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_CMD)) { ++ sw_driver_collection_cmd_t local_cmd; ++ ++ pw_pr_debug("PW_IOCTL_CMD\n"); ++ if (get_user(local_cmd, (sw_driver_collection_cmd_t __user *) ++ local_args.in_arg)) { ++ pw_pr_error("ERROR: could NOT extract cmd value!\n"); ++ return -PW_ERROR; ++ } ++ return sw_handle_cmd_i(local_cmd, ++ (u64 __user *)local_args.out_arg); ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_POLL)) { ++ pw_pr_debug("PW_IOCTL_POLL\n"); ++ return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_collection_poll_i); ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_IMMEDIATE_IO)) { ++ struct sw_driver_interface_info *local_info; ++ struct sw_driver_io_descriptor *local_descriptor = NULL; ++ int retVal = PW_SUCCESS; ++ char *src_vals = NULL; ++ char *dst_vals = NULL; ++ ++ pw_pr_debug("PW_IOCTL_IMMEDIATE_IO\n"); ++ pw_pr_debug("local_in_len = %u\n", local_in_len); ++ ++ src_vals = vmalloc(local_in_len); ++ if (!src_vals) { ++ pw_pr_error( ++ "ERROR allocating space for immediate IO\n"); ++ return -PW_ERROR; ++ } ++ if (local_out_len) { ++ dst_vals = vmalloc(local_out_len); ++ if (!dst_vals) { ++ vfree(src_vals); ++ pw_pr_error( ++ "ERROR allocating space for immediate IO\n"); ++ return -PW_ERROR; ++ } ++ } ++ if (copy_from_user(src_vals, (char __user *)local_args.in_arg, ++ local_in_len)) { ++ pw_pr_error( ++ "ERROR copying in immediate IO descriptor\n"); ++ retVal = -PW_ERROR; ++ goto ret_immediate_io; ++ } ++ local_info = (struct sw_driver_interface_info 
*)src_vals; ++ pw_pr_debug( ++ "OK, asked to perform immediate IO on cpu(s) %d, # descriptors = %d\n", ++ local_info->cpu_mask, local_info->num_io_descriptors); ++ /* ++ * For now, require only a single descriptor. ++ */ ++ if (local_info->num_io_descriptors != 1) { ++ pw_pr_error( ++ "ERROR: told to perform immediate IO with %d descriptors -- MAX of 1 descriptor allowed!\n", ++ local_info->num_io_descriptors); ++ retVal = -PW_ERROR; ++ goto ret_immediate_io; ++ } ++ local_descriptor = ((struct sw_driver_io_descriptor *) ++ local_info->descriptors); ++ pw_pr_debug("Collection type after %d\n", ++ local_descriptor->collection_type); ++ /* ++ * Check cpu mask for correctness here. For now, we do NOT allow ++ * reading on ALL cpus. ++ */ ++ if ((int)local_info->cpu_mask < -1 || ++ (int)local_info->cpu_mask >= (int)sw_max_num_cpus) { ++ pw_pr_error( ++ "ERROR: invalid cpu mask %d specified in immediate IO; valid values are: -1, [0 -- %d]!\n", ++ local_info->cpu_mask, sw_max_num_cpus - 1); ++ retVal = -PW_ERROR; ++ goto ret_immediate_io; ++ } ++ /* ++ * Check collection type for correctness here ++ */ ++ pw_pr_debug( ++ "Asked to perform immediate IO with descriptor with type = %d, on cpu = %d\n", ++ local_descriptor->collection_type, ++ local_info->cpu_mask); ++ if (sw_is_valid_hw_op_id(local_descriptor->collection_type) == ++ false) { ++ pw_pr_error( ++ "ERROR: invalid collection type %d specified for immediate IO\n", ++ (int)local_descriptor->collection_type); ++ retVal = -PW_ERROR; ++ goto ret_immediate_io; ++ } ++ /* ++ * Check collection cmd for correctness here ++ */ ++ if (local_descriptor->collection_command < SW_IO_CMD_READ || ++ local_descriptor->collection_command > SW_IO_CMD_WRITE) { ++ pw_pr_error( ++ "ERROR: invalid collection command %d specified for immediate IO\n", ++ local_descriptor->collection_command); ++ retVal = -PW_ERROR; ++ goto ret_immediate_io; ++ } ++ /* ++ * Initialize the descriptor -- 'MMIO' and 'IPC' reads may need ++ * an 
"ioremap_nocache" ++ */ ++ if (sw_init_driver_io_descriptor(local_descriptor)) { ++ pw_pr_error( ++ "ERROR initializing immediate IO descriptor\n"); ++ retVal = -PW_ERROR; ++ goto ret_immediate_io; ++ } ++ /* ++ * OK, perform the actual IO. ++ */ ++ { ++ struct sw_immediate_request_info request_info = { ++ local_descriptor, dst_vals, &retVal ++ }; ++ struct cpumask cpumask; ++ ++ cpumask_clear(&cpumask); ++ switch (local_info->cpu_mask) { ++ case -1: // IO on ANY CPU (assume current CPU) ++ cpumask_set_cpu(RAW_CPU(), &cpumask); ++ pw_pr_debug("ANY CPU\n"); ++ break; ++ default: // IO on a particular CPU ++ cpumask_set_cpu(local_info->cpu_mask, &cpumask); ++ pw_pr_debug("[%d] setting for %d\n", RAW_CPU(), ++ local_info->cpu_mask); ++ break; ++ } ++ sw_schedule_work(&cpumask, ++ &sw_handle_immediate_request_i, ++ &request_info); ++ } ++ if (retVal != PW_SUCCESS) { ++ pw_pr_error( ++ "ERROR performing immediate IO on one (or more) CPUs!\n"); ++ goto ret_immediate_io_reset; ++ } ++ /* ++ * OK, all done. ++ */ ++ if (local_descriptor->collection_command == SW_IO_CMD_READ) { ++ if (copy_to_user(local_args.out_arg, dst_vals, ++ local_out_len)) { ++ pw_pr_error( ++ "ERROR copying %u bytes of value to userspace!\n", ++ local_out_len); ++ retVal = -PW_ERROR; ++ goto ret_immediate_io_reset; ++ } ++ pw_pr_debug( ++ "OK, copied %u bytes of value to userspace addr %p!\n", ++ local_out_len, local_args.out_arg); ++ } ++ret_immediate_io_reset: ++ /* ++ * Reset the descriptor -- 'MMIO' and 'IPC' reads may have ++ * performed an "ioremap_nocache" which now needs to be ++ * unmapped. 
++ */ ++ if (sw_reset_driver_io_descriptor(local_descriptor)) { ++ pw_pr_error( ++ "ERROR resetting immediate IO descriptor\n"); ++ retVal = -PW_ERROR; ++ goto ret_immediate_io; ++ } ++ret_immediate_io: ++ vfree(src_vals); ++ if (dst_vals) { ++ vfree(dst_vals); ++ } ++ return retVal; ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_SCU_FW_VERSION)) { ++ u32 local_data = (u32)sw_scu_fw_major_minor; ++ ++ if (put_user(local_data, (u32 __user *)local_args.out_arg)) { ++ pw_pr_error( ++ "ERROR copying scu fw version to userspace!\n"); ++ return -PW_ERROR; ++ } ++ return PW_SUCCESS; ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_DRIVER_VERSION)) { ++ pw_u64_t local_version = ++ (pw_u64_t)SW_DRIVER_VERSION_MAJOR << 32 | ++ (pw_u64_t)SW_DRIVER_VERSION_MINOR << 16 | ++ (pw_u64_t)SW_DRIVER_VERSION_OTHER; ++ if (put_user(local_version, (u64 __user *)local_args.out_arg)) { ++ pw_pr_error( ++ "ERROR copying driver version to userspace!\n"); ++ return -PW_ERROR; ++ } ++ return PW_SUCCESS; ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_AVAILABLE_TRACEPOINTS)) { ++ pw_pr_debug("DEBUG: AVAIL tracepoints! local_out_len = %u\n", ++ local_out_len); ++ return sw_get_available_name_id_mappings_i( ++ SW_NAME_TYPE_TRACEPOINT, ++ (struct sw_name_info_msg __user *)local_args.out_arg, ++ local_out_len); ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_AVAILABLE_NOTIFIERS)) { ++ pw_pr_debug("DEBUG: AVAIL tracepoints! local_out_len = %u\n", ++ local_out_len); ++ return sw_get_available_name_id_mappings_i( ++ SW_NAME_TYPE_NOTIFIER, ++ (struct sw_name_info_msg __user *)local_args.out_arg, ++ local_out_len); ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_AVAILABLE_COLLECTORS)) { ++ pw_pr_debug("DEBUG: AVAIL tracepoints! 
local_out_len = %u\n", ++ local_out_len); ++ return sw_get_available_name_id_mappings_i( ++ SW_NAME_TYPE_COLLECTOR, ++ (struct sw_name_info_msg __user *)local_args.out_arg, ++ local_out_len); ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_TOPOLOGY_CHANGES)) { ++ pw_pr_debug("DEBUG: TOPOLOGY changes! local_out_len = %u\n", ++ local_out_len); ++ return sw_get_topology_changes_i( ++ (struct sw_driver_topology_msg __user *) ++ local_args.out_arg, ++ local_out_len); ++ } else { ++ pw_pr_error("ERROR: invalid ioctl num: %u\n", ++ _IOC_NR(ioctl_num)); ++ } ++ return -PW_ERROR; ++} ++ ++static enum sw_driver_collection_cmd sw_get_collection_cmd_i(void) ++{ ++ return s_internal_state.cmd; ++}; ++ ++static bool sw_should_flush_buffer_i(void) ++{ ++ return s_internal_state.drain_buffers; ++}; ++ ++int sw_load_driver_i(void) ++{ ++ /* ++ * Set per-cpu buffer size. ++ * First, Perform sanity checking of per-cpu buffer size. ++ */ ++ /* ++ * 1. Num pages MUST be pow-of-2. ++ */ ++ { ++ if (sw_buffer_num_pages & (sw_buffer_num_pages - 1)) { ++ pw_pr_error( ++ "Invalid value (%u) for number of pages in each per-cpu buffer; MUST be a power of 2!\n", ++ sw_buffer_num_pages); ++ return -PW_ERROR; ++ } ++ } ++ /* ++ * 2. Num pages MUST be <= 16 (i.e. per-cpu buffer size ++ * MUST be <= 64 kB) ++ */ ++ { ++ if (sw_buffer_num_pages > 16) { ++ pw_pr_error( ++ "Invalid value (%u) for number of pages in each per-cpu buffer; MUST be <= 16!\n", ++ sw_buffer_num_pages); ++ return -PW_ERROR; ++ } ++ } ++ sw_buffer_alloc_size = sw_buffer_num_pages * PAGE_SIZE; ++ /* ++ * Retrieve any arch details here. ++ */ ++ if (sw_get_arch_details_i()) { ++ pw_pr_error("ERROR retrieving arch details!\n"); ++ return -PW_ERROR; ++ } ++ /* ++ * Check to see if the user wants us to force ++ * software coordination of CPU frequencies. 
++ */ ++ if (do_force_module_scope_for_cpu_frequencies) { ++ pw_pr_force( ++ "DEBUG: FORCING MODULE SCOPE FOR CPU FREQUENCIES!\n"); ++ if (sw_set_module_scope_for_cpus()) { ++ pw_pr_force("ERROR setting affected cpus\n"); ++ return -PW_ERROR; ++ } else { ++ pw_pr_debug("OK, setting worked\n"); ++ } ++ } ++ if (sw_init_data_structures_i()) { ++ pw_pr_error("ERROR initializing data structures!\n"); ++ goto err_ret_init_data; ++ } ++ if (sw_register_dev(&s_ops)) { ++ goto err_ret_register_dev; ++ } ++ /* ++ * Retrieve a list of tracepoint structs to use when ++ * registering probe functions. ++ */ ++ { ++ if (sw_extract_tracepoints()) { ++ pw_pr_error( ++ "ERROR: could NOT retrieve a complete list of valid tracepoint structs!\n"); ++ goto err_ret_tracepoint; ++ } ++ } ++ pw_pr_force("-----------------------------------------\n"); ++ pw_pr_force("OK: LOADED SoC Watch Driver\n"); ++#ifdef CONFIG_X86_WANT_INTEL_MID ++ pw_pr_force("SOC Identifier = %u, Stepping = %u\n", ++ intel_mid_identify_cpu(), intel_mid_soc_stepping()); ++#endif // CONFIG_X86_WANT_INTEL_MID ++ pw_pr_force("-----------------------------------------\n"); ++ return PW_SUCCESS; ++ ++err_ret_tracepoint: ++ sw_unregister_dev(); ++err_ret_register_dev: ++ sw_destroy_data_structures_i(); ++err_ret_init_data: ++ if (do_force_module_scope_for_cpu_frequencies) { ++ if (sw_reset_module_scope_for_cpus()) { ++ pw_pr_force("ERROR resetting affected cpus\n"); ++ } else { ++ pw_pr_debug("OK, resetting worked\n"); ++ } ++ } ++ return -PW_ERROR; ++} ++ ++void sw_unload_driver_i(void) ++{ ++ sw_iterate_driver_info_lists_i(); ++ ++ sw_unregister_dev(); ++ ++ sw_destroy_data_structures_i(); ++ ++ if (do_force_module_scope_for_cpu_frequencies) { ++ if (sw_reset_module_scope_for_cpus()) { ++ pw_pr_force("ERROR resetting affected cpus\n"); ++ } else { ++ pw_pr_debug("OK, resetting worked\n"); ++ } ++ } ++ ++ pw_pr_force("-----------------------------------------\n"); ++ pw_pr_force("OK: UNLOADED SoC Watch Driver\n"); ++ ++ 
sw_print_trace_notifier_overheads(); ++ sw_print_output_buffer_overheads(); ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_collection_poll_i, "POLL"); ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_any_seg_full, "ANY_SEG_FULL"); ++#if DO_TRACK_MEMORY_USAGE ++ { ++ /* ++ * Dump memory stats. ++ */ ++ pw_pr_force( ++ "TOTAL # BYTES ALLOCED = %llu, CURR # BYTES ALLOCED = %llu, MAX # BYTES ALLOCED = %llu\n", ++ sw_get_total_bytes_alloced(), ++ sw_get_curr_bytes_alloced(), ++ sw_get_max_bytes_alloced()); ++ if (unlikely(sw_get_curr_bytes_alloced())) { ++ pw_pr_force( ++ "***********************************************************************\n"); ++ pw_pr_force( ++ "WARNING: possible memory leak: there are %llu bytes still allocated!\n", ++ sw_get_curr_bytes_alloced()); ++ pw_pr_force( ++ "***********************************************************************\n"); ++ } ++ } ++#endif // DO_TRACK_MEMORY_USAGE ++ pw_pr_force("-----------------------------------------\n"); ++} ++ ++module_init(sw_load_driver_i); ++module_exit(sw_unload_driver_i); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR(MOD_AUTHOR); ++MODULE_DESCRIPTION(MOD_DESC); +diff --git a/drivers/platform/x86/socwatch/sw_file_ops.c b/drivers/platform/x86/socwatch/sw_file_ops.c +new file mode 100644 +index 000000000000..06c88801a9ec +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_file_ops.c +@@ -0,0 +1,364 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++*/ ++#include // try_module_get ++#include // inode ++#include // class_create ++#include // cdev_alloc ++#include // LINUX_VERSION_CODE ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) ++#include // copy_to_user ++#else ++#include // copy_to_user ++#endif // LINUX_VERSION_CODE ++#include // wait_event_interruptible ++#include // TASK_INTERRUPTIBLE ++ ++#include "sw_kernel_defines.h" ++#include "sw_types.h" ++#include "sw_structs.h" ++#include "sw_file_ops.h" ++#include "sw_ioctl.h" ++#include "sw_output_buffer.h" ++ ++/* ------------------------------------------------- ++ * Compile time constants. ++ * ------------------------------------------------- ++ */ ++/* ++ * Get current command. ++ */ ++#define GET_CMD() ((*s_file_ops->get_current_cmd)()) ++/* ++ * Check if we're currently collecting data. ++ */ ++#define IS_COLLECTING() \ ++ ({ \ ++ sw_driver_collection_cmd_t __cmd = GET_CMD(); \ ++ bool __val = (__cmd == SW_DRIVER_START_COLLECTION || \ ++ __cmd == SW_DRIVER_RESUME_COLLECTION); \ ++ __val; \ ++ }) ++/* ++ * Check if we're currently paused. ++ */ ++#define IS_SLEEPING() \ ++ ({ \ ++ sw_driver_collection_cmd_t __cmd = GET_CMD(); \ ++ bool __val = __cmd == SW_DRIVER_PAUSE_COLLECTION; \ ++ __val; \ ++ }) ++/* ------------------------------------------------- ++ * Typedefs ++ * ------------------------------------------------- ++ */ ++typedef unsigned long sw_bits_t; ++ ++/* ------------------------------------------------- ++ * Local function declarations. 
++ * ------------------------------------------------- ++ */ ++static int sw_device_open_i(struct inode *inode, struct file *file); ++static int sw_device_release_i(struct inode *inode, struct file *file); ++static ssize_t sw_device_read_i(struct file *file, char __user *buffer, ++ size_t length, loff_t *offset); ++static long sw_device_unlocked_ioctl_i(struct file *filp, ++ unsigned int ioctl_num, ++ unsigned long ioctl_param); ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++static long sw_device_compat_ioctl_i(struct file *file, unsigned int ioctl_num, ++ unsigned long ioctl_param); ++#endif ++ ++/* ++ * File operations exported by the driver. ++ */ ++static struct file_operations s_fops = { ++ .open = &sw_device_open_i, ++ .read = &sw_device_read_i, ++ .unlocked_ioctl = &sw_device_unlocked_ioctl_i, ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++ .compat_ioctl = &sw_device_compat_ioctl_i, ++#endif // COMPAT && x64 ++ .release = &sw_device_release_i, ++}; ++/* ++ * Character device file MAJOR ++ * number -- we're now obtaining ++ * this dynamically. ++ */ ++static int apwr_dev_major_num = -1; ++/* ++ * Variables to create the character device file ++ */ ++static dev_t apwr_dev; ++static struct cdev *apwr_cdev; ++static struct class *apwr_class; ++/* ++ * Operations exported by the main driver. ++ */ ++static struct sw_file_ops *s_file_ops; ++/* ++ * Is the device open right now? Used to prevent ++ * concurent access into the same device. ++ */ ++#define DEV_IS_OPEN 0 // see if device is in use ++static volatile sw_bits_t dev_status; ++ ++/* ++ * File operations. ++ */ ++/* ++ * Service an "open(...)" call from user-space. 
++ */ ++static int sw_device_open_i(struct inode *inode, struct file *file) ++{ ++ /* ++ * We don't want to talk to two processes at the same time ++ */ ++ if (test_and_set_bit(DEV_IS_OPEN, &dev_status)) { ++ // Device is busy ++ return -EBUSY; ++ } ++ ++ if (!try_module_get(THIS_MODULE)) { ++ // No such device ++ return -ENODEV; ++ } ++ pw_pr_debug("OK, allowed client open!\n"); ++ return PW_SUCCESS; ++} ++ ++/* ++ * Service a "close(...)" call from user-space. ++ */ ++static int sw_device_release_i(struct inode *inode, struct file *file) ++{ ++ /* ++ * Did the client just try to zombie us? ++ */ ++ int retVal = PW_SUCCESS; ++ ++ if (IS_COLLECTING()) { ++ pw_pr_error( ++ "ERROR: Detected ongoing collection on a device release!\n"); ++ retVal = (*s_file_ops->stop_handler)(); ++ } ++ module_put(THIS_MODULE); ++ /* ++ * We're now ready for our next caller ++ */ ++ clear_bit(DEV_IS_OPEN, &dev_status); ++ return retVal; ++} ++ ++static ssize_t sw_device_read_i(struct file *file, char __user *user_buffer, ++ size_t length, loff_t *offset) ++{ ++ size_t bytes_read = 0; ++ u32 val = 0; ++ ++ if (!user_buffer) { ++ pw_pr_error( ++ "ERROR: \"read\" called with an empty user_buffer?!\n"); ++ return -PW_ERROR; ++ } ++ do { ++ val = SW_ALL_WRITES_DONE_MASK; ++ if (wait_event_interruptible( ++ sw_reader_queue, ++ (sw_any_seg_full(&val, ++ (*s_file_ops->should_flush)()) || ++ (!IS_COLLECTING() && !IS_SLEEPING())))) { ++ pw_pr_error("wait_event_interruptible error\n"); ++ return -ERESTARTSYS; ++ } ++ pw_pr_debug(KERN_INFO "After wait: val = %u\n", val); ++ } while (val == SW_NO_DATA_AVAIL_MASK); ++ /* ++ * Are we done producing/consuming? ++ */ ++ if (val == SW_ALL_WRITES_DONE_MASK) { ++ return 0; // "0" ==> EOF ++ } ++ /* ++ * Copy the buffer contents into userspace. 
++ */ ++ bytes_read = sw_consume_data( ++ val, user_buffer, ++ length); // 'read' returns # of bytes actually read ++ if (unlikely(bytes_read == 0)) { ++ /* Cannot be EOF since that has already been checked above */ ++ return -EIO; ++ } ++ return bytes_read; ++} ++ ++/* ++ * (1) Handle 32b IOCTLs in 32b kernel-space. ++ * (2) Handle 64b IOCTLs in 64b kernel-space. ++ */ ++static long sw_device_unlocked_ioctl_i(struct file *filp, ++ unsigned int ioctl_num, ++ unsigned long ioctl_param) ++{ ++ struct sw_driver_ioctl_arg __user *remote_args = ++ (struct sw_driver_ioctl_arg __user *)ioctl_param; ++ struct sw_driver_ioctl_arg local_args; ++ ++ if (copy_from_user(&local_args, remote_args, sizeof(local_args))) { ++ pw_pr_error("ERROR copying ioctl args from userspace\n"); ++ return -PW_ERROR; ++ } ++ return (*s_file_ops->ioctl_handler)(ioctl_num, &local_args); ++}; ++ ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++#include ++/* ++ * Helper struct for use in translating ++ * IOCTLs from 32b user programs in 64b ++ * kernels. ++ */ ++#pragma pack(push, 1) ++struct sw_driver_ioctl_arg32 { ++ pw_s32_t in_len; ++ pw_s32_t out_len; ++ compat_caddr_t in_arg; ++ compat_caddr_t out_arg; ++}; ++#pragma pack(pop) ++ ++/* ++ * Handle 32b IOCTLs in 64b kernel-space. 
++ */ ++static long sw_device_compat_ioctl_i(struct file *file, unsigned int ioctl_num, ++ unsigned long ioctl_param) ++{ ++ struct sw_driver_ioctl_arg32 __user *remote_args32 = ++ compat_ptr(ioctl_param); ++ struct sw_driver_ioctl_arg local_args; ++ u32 data; ++ ++ if (get_user(local_args.in_len, &remote_args32->in_len)) { ++ return -PW_ERROR; ++ } ++ if (get_user(local_args.out_len, &remote_args32->out_len)) { ++ return -PW_ERROR; ++ } ++ if (get_user(data, &remote_args32->in_arg)) { ++ return -PW_ERROR; ++ } ++ local_args.in_arg = (char *)(unsigned long)data; ++ if (get_user(data, &remote_args32->out_arg)) { ++ return -PW_ERROR; ++ } ++ local_args.out_arg = (char *)(unsigned long)data; ++ return (*s_file_ops->ioctl_handler)(ioctl_num, &local_args); ++} ++#endif ++ ++/* ++ * Device creation, deletion operations. ++ */ ++int sw_register_dev(struct sw_file_ops *ops) ++{ ++ int ret; ++ /* ++ * Ensure we have valid handlers! ++ */ ++ if (!ops) { ++ pw_pr_error("NULL file ops?!\n"); ++ return -PW_ERROR; ++ } ++ ++ /* ++ * Create the character device ++ */ ++ ret = alloc_chrdev_region(&apwr_dev, 0, 1, PW_DEVICE_NAME); ++ apwr_dev_major_num = MAJOR(apwr_dev); ++ apwr_class = class_create(THIS_MODULE, "apwr"); ++ if (IS_ERR(apwr_class)) { ++ printk(KERN_ERR "Error registering apwr class\n"); ++ } ++ ++ device_create(apwr_class, NULL, apwr_dev, NULL, PW_DEVICE_NAME); ++ apwr_cdev = cdev_alloc(); ++ if (apwr_cdev == NULL) { ++ printk("Error allocating character device\n"); ++ return ret; ++ } ++ apwr_cdev->owner = THIS_MODULE; ++ apwr_cdev->ops = &s_fops; ++ if (cdev_add(apwr_cdev, apwr_dev, 1) < 0) { ++ printk("Error registering device driver\n"); ++ return ret; ++ } ++ s_file_ops = ops; ++ ++ return ret; ++} ++ ++void sw_unregister_dev(void) ++{ ++ /* ++ * Remove the device ++ */ ++ unregister_chrdev(apwr_dev_major_num, PW_DEVICE_NAME); ++ device_destroy(apwr_class, apwr_dev); ++ class_destroy(apwr_class); ++ unregister_chrdev_region(apwr_dev, 1); ++ cdev_del(apwr_cdev); 
++} +diff --git a/drivers/platform/x86/socwatch/sw_hardware_io.c b/drivers/platform/x86/socwatch/sw_hardware_io.c +new file mode 100644 +index 000000000000..759288ac546e +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_hardware_io.c +@@ -0,0 +1,188 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. 
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#include "sw_types.h" ++#include "sw_kernel_defines.h" ++#include "sw_ops_provider.h" ++#include "sw_mem.h" ++#include "sw_internal.h" ++#include "sw_hardware_io.h" ++ ++struct sw_ops_node { ++ const struct sw_hw_ops *op; ++ int id; ++ ++ SW_LIST_ENTRY(list, sw_ops_node); ++}; ++ ++static SW_DEFINE_LIST_HEAD(s_ops, ++sw_in sw_ops_node) = SW_LIST_HEAD_INITIALIZER(s_ops); ++ ++static int s_op_idx = -1; ++ ++/* ++ * Function definitions. 
++ */ ++int sw_get_hw_op_id(const struct sw_hw_ops *ops) ++{ ++ if (ops && ops->name) { ++ struct sw_ops_node *node = NULL; ++ ++ SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) ++ { ++ if (node->op->name && ++ !strcmp(node->op->name, ops->name)) { ++ return node->id; ++ } ++ } ++ } ++ return -1; ++} ++ ++const struct sw_hw_ops *sw_get_hw_ops_for(int id) ++{ ++ struct sw_ops_node *node = NULL; ++ ++ SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) ++ { ++ if (node->id == id) { ++ return node->op; ++ } ++ } ++ return NULL; ++} ++ ++bool sw_is_valid_hw_op_id(int id) ++{ ++ struct sw_ops_node *node = NULL; ++ ++ SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) ++ { ++ if (node->id == id) { ++ return true; ++ } ++ } ++ return false; ++} ++ ++const char *sw_get_hw_op_abstract_name(const struct sw_hw_ops *op) ++{ ++ if (op) { ++ return op->name; ++ } ++ return NULL; ++} ++ ++int sw_for_each_hw_op(int (*func)(const struct sw_hw_ops *op, void *priv), ++ void *priv, bool return_on_error) { ++ int retval = PW_SUCCESS; ++ struct sw_ops_node *node = NULL; ++ ++ if (func) { ++ SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) ++ { ++ if ((*func)(node->op, priv)) { ++ retval = -EIO; ++ if (return_on_error) { ++ break; ++ } ++ } ++ } ++ } ++ return retval; ++} ++ ++int sw_register_hw_op(const struct sw_hw_ops *op) ++{ ++ struct sw_ops_node *node = NULL; ++ ++ if (!op) { ++ pw_pr_error("NULL input node in \"sw_register_hw_op\""); ++ return -EIO; ++ } ++ node = sw_kmalloc(sizeof(struct sw_ops_node), GFP_KERNEL); ++ if (!node) { ++ pw_pr_error("sw_kmalloc error in \"sw_register_hw_op\""); ++ return -ENOMEM; ++ } ++ node->op = op; ++ node->id = ++s_op_idx; ++ SW_LIST_ENTRY_INIT(node, list); ++ SW_LIST_ADD(&s_ops, node, list); ++ return PW_SUCCESS; ++} ++ ++int sw_register_hw_ops(void) ++{ ++ return sw_register_ops_providers(); ++} ++ ++void sw_free_hw_ops(void) ++{ ++ /* ++ * Free all nodes. 
++ */ ++ while (!SW_LIST_EMPTY(&s_ops)) { ++ struct sw_ops_node *node = ++ SW_LIST_GET_HEAD_ENTRY(&s_ops, sw_ops_node, list); ++ SW_LIST_UNLINK(node, list); ++ sw_kfree(node); ++ } ++ /* ++ * Call our providers to deallocate resources. ++ */ ++ sw_free_ops_providers(); ++} +diff --git a/drivers/platform/x86/socwatch/sw_internal.c b/drivers/platform/x86/socwatch/sw_internal.c +new file mode 100644 +index 000000000000..a4a4dca9dc53 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_internal.c +@@ -0,0 +1,238 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. 
++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#include "sw_hardware_io.h" ++#include "sw_mem.h" ++#include "sw_kernel_defines.h" ++#include "sw_internal.h" ++ ++bool sw_check_output_buffer_params(void __user *buffer, size_t bytes_to_read, ++ size_t buff_size) ++{ ++ if (!buffer) { ++ pw_pr_error("ERROR: NULL ptr in sw_consume_data!\n"); ++ return false; ++ } ++ if (bytes_to_read != buff_size) { ++ pw_pr_error("Error: bytes_to_read = %zu, required to be %zu\n", ++ bytes_to_read, buff_size); ++ return false; ++ } ++ return true; ++} ++ ++unsigned long sw_copy_to_user(char __user *dst, char *src, size_t bytes_to_copy) ++{ ++ return copy_to_user(dst, src, bytes_to_copy); ++} ++ ++void sw_schedule_work(const struct cpumask *mask, void (*work)(void *), ++ void *data) ++{ ++ /* ++ * Did the user ask us to run on 'ANY' CPU? ++ */ ++ if (cpumask_empty(mask)) { ++ (*work)(data); // Call on current CPU ++ } else { ++ preempt_disable(); ++ { ++ /* ++ * Did the user ask to run on this CPU? 
++ */ ++ if (cpumask_test_cpu(RAW_CPU(), mask)) { ++ (*work)(data); // Call on current CPU ++ } ++ /* ++ * OK, now check other CPUs. ++ */ ++ smp_call_function_many( ++ mask, work, data, ++ true /* Wait for all funcs to complete */); ++ } ++ preempt_enable(); ++ } ++} ++ ++int sw_get_cpu(unsigned long *flags) ++{ ++ local_irq_save(*flags); ++ return get_cpu(); ++} ++ ++void sw_put_cpu(unsigned long flags) ++{ ++ put_cpu(); ++ local_irq_restore(flags); ++} ++ ++#ifndef CONFIG_NR_CPUS_PER_MODULE ++#define CONFIG_NR_CPUS_PER_MODULE 2 ++#endif // CONFIG_NR_CPUS_PER_MODULE ++ ++static void sw_get_cpu_sibling_mask(int cpu, struct cpumask *sibling_mask) ++{ ++ unsigned int base = ++ (cpu / CONFIG_NR_CPUS_PER_MODULE) * CONFIG_NR_CPUS_PER_MODULE; ++ unsigned int i; ++ ++ cpumask_clear(sibling_mask); ++ for (i = base; i < (base + CONFIG_NR_CPUS_PER_MODULE); ++i) { ++ cpumask_set_cpu(i, sibling_mask); ++ } ++} ++ ++struct pw_cpufreq_node { ++ int cpu; ++ struct cpumask cpus, related_cpus; ++ unsigned int shared_type; ++ struct list_head list; ++}; ++static struct list_head pw_cpufreq_policy_lists; ++ ++int sw_set_module_scope_for_cpus(void) ++{ ++ /* ++ * Warning: no support for cpu hotplugging! ++ */ ++ int cpu = 0; ++ ++ INIT_LIST_HEAD(&pw_cpufreq_policy_lists); ++ ++ for_each_online_cpu(cpu) { ++ struct cpumask sibling_mask; ++ struct pw_cpufreq_node *node = NULL; ++ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); ++ ++ if (!policy) { ++ continue; ++ } ++ /* ++ * Get siblings for this cpu. ++ */ ++ sw_get_cpu_sibling_mask(cpu, &sibling_mask); ++ /* ++ * Check if affected_cpus already contains sibling_mask ++ */ ++ if (cpumask_subset(&sibling_mask, policy->cpus)) { ++ /* ++ * 'sibling_mask' is already a subset of affected_cpus -- nothing ++ * to do on this CPU. 
++ */ ++ cpufreq_cpu_put(policy); ++ continue; ++ } ++ ++ node = sw_kmalloc(sizeof(*node), GFP_ATOMIC); ++ if (node) { ++ cpumask_clear(&node->cpus); ++ cpumask_clear(&node->related_cpus); ++ ++ node->cpu = cpu; ++ cpumask_copy(&node->cpus, policy->cpus); ++ cpumask_copy(&node->related_cpus, policy->related_cpus); ++ node->shared_type = policy->shared_type; ++ } ++ ++ policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; ++ /* ++ * Set siblings. Don't worry about online/offline, that's ++ * handled below. ++ */ ++ cpumask_copy(policy->cpus, &sibling_mask); ++ /* ++ * Ensure 'related_cpus' is a superset of 'cpus' ++ */ ++ cpumask_or(policy->related_cpus, policy->related_cpus, ++ policy->cpus); ++ /* ++ * Ensure 'cpus' only contains online cpus. ++ */ ++ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); ++ ++ cpufreq_cpu_put(policy); ++ ++ if (node) { ++ INIT_LIST_HEAD(&node->list); ++ list_add_tail(&node->list, &pw_cpufreq_policy_lists); ++ } ++ } ++ return PW_SUCCESS; ++} ++ ++int sw_reset_module_scope_for_cpus(void) ++{ ++ struct list_head *head = &pw_cpufreq_policy_lists; ++ ++ while (!list_empty(head)) { ++ struct pw_cpufreq_node *node = ++ list_first_entry(head, struct pw_cpufreq_node, list); ++ int cpu = node->cpu; ++ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); ++ if (!policy) { ++ continue; ++ } ++ policy->shared_type = node->shared_type; ++ cpumask_copy(policy->related_cpus, &node->related_cpus); ++ cpumask_copy(policy->cpus, &node->cpus); ++ ++ cpufreq_cpu_put(policy); ++ ++ pw_pr_debug("OK, reset cpufreq_policy for cpu %d\n", cpu); ++ list_del(&node->list); ++ sw_kfree(node); ++ } ++ return PW_SUCCESS; ++} +diff --git a/drivers/platform/x86/socwatch/sw_mem.c b/drivers/platform/x86/socwatch/sw_mem.c +new file mode 100644 +index 000000000000..0d1231c2e3a8 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_mem.c +@@ -0,0 +1,331 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. 
When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#include ++ ++#include "sw_kernel_defines.h" ++#include "sw_lock_defs.h" ++#include "sw_mem.h" ++ ++/* ++ * How do we behave if we ever ++ * get an allocation error? ++ * (a) Setting to '1' REFUSES ANY FURTHER ++ * allocation requests. ++ * (b) Setting to '0' treats each ++ * allocation request as separate, and ++ * handles them on an on-demand basis ++ */ ++#define DO_MEM_PANIC_ON_ALLOC_ERROR 0 ++ ++#if DO_MEM_PANIC_ON_ALLOC_ERROR ++/* ++ * If we ever run into memory allocation errors then ++ * stop (and drop) everything. ++ */ ++static atomic_t pw_mem_should_panic = ATOMIC_INIT(0); ++/* ++ * Macro to check if PANIC is on. ++ */ ++#define MEM_PANIC() \ ++ do { \ ++ atomic_set(&pw_mem_should_panic, 1); \ ++ smp_mb(); \ ++ } while (0) ++#define SHOULD_TRACE() \ ++ ({ \ ++ bool __tmp = false; \ ++ smp_mb(); \ ++ __tmp = (atomic_read(&pw_mem_should_panic) == 0); \ ++ __tmp; \ ++ }) ++ ++#else // if !DO_MEM_PANIC_ON_ALLOC_ERROR ++ ++#define MEM_PANIC() ++#define SHOULD_TRACE() (true) ++ ++#endif ++ ++/* ++ * Variables to track memory usage. ++ */ ++/* ++ * TOTAL num bytes allocated. ++ */ ++static u64 total_num_bytes_alloced; ++/* ++ * Num of allocated bytes that have ++ * not yet been freed. ++ */ ++static u64 curr_num_bytes_alloced; ++/* ++ * Max # of allocated bytes that ++ * have not been freed at any point ++ * in time. 
++ */ ++static u64 max_num_bytes_alloced; ++ ++u64 sw_get_total_bytes_alloced(void) ++{ ++ return total_num_bytes_alloced; ++}; ++ ++u64 sw_get_max_bytes_alloced(void) ++{ ++ return max_num_bytes_alloced; ++}; ++ ++u64 sw_get_curr_bytes_alloced(void) ++{ ++ return curr_num_bytes_alloced; ++}; ++ ++/* ++ * Allocate free pages. ++ * TODO: add memory tracker? ++ */ ++unsigned long sw_allocate_pages(gfp_t flags, ++ unsigned int alloc_size_in_bytes) ++{ ++ return __get_free_pages(flags, get_order(alloc_size_in_bytes)); ++} ++/* ++ * Free up previously allocated pages. ++ * TODO: add memory tracker? ++ */ ++void sw_release_pages(unsigned long addr, unsigned int alloc_size_in_bytes) ++{ ++ free_pages(addr, get_order(alloc_size_in_bytes)); ++} ++ ++#if DO_TRACK_MEMORY_USAGE ++ ++/* ++ * Lock to guard access to memory ++ * debugging stats. ++ */ ++static SW_DEFINE_SPINLOCK(sw_kmalloc_lock); ++ ++/* ++ * Helper macros to print out ++ * mem debugging stats. ++ */ ++#define TOTAL_NUM_BYTES_ALLOCED() total_num_bytes_alloced ++#define CURR_NUM_BYTES_ALLOCED() curr_num_bytes_alloced ++#define MAX_NUM_BYTES_ALLOCED() max_num_bytes_alloced ++ ++/* ++ * MAGIC number based memory tracker. Relies on ++ * storing (a) a MAGIC marker and (b) the requested ++ * size WITHIN the allocated block of memory. Standard ++ * malloc-tracking stuff, really. ++ * ++ * Overview: ++ * (1) ALLOCATION: ++ * When asked to allocate a block of 'X' bytes, allocate ++ * 'X' + 8 bytes. Then, in the FIRST 4 bytes, write the ++ * requested size. In the NEXT 4 bytes, write a special ++ * (i.e. MAGIC) number to let our deallocator know that ++ * this block of memory was allocated using this technique. ++ * Also, keep track of the number of bytes allocated. ++ * ++ * (2) DEALLOCATION: ++ * When given an object to deallocate, we first check ++ * the MAGIC number by decrementing the pointer by ++ * 4 bytes and reading the (integer) stored there. 
++ * After ensuring the pointer was, in fact, allocated ++ * by us, we then read the size of the allocated ++ * block (again, by decrementing the pointer by 4 ++ * bytes and reading the integer size). We ++ * use this size argument to decrement # of bytes ++ * allocated. ++ */ ++#define PW_MEM_MAGIC 0xdeadbeef ++ ++#define PW_ADD_MAGIC(x) \ ++ ({ \ ++ char *__tmp1 = (char *)(x); \ ++ *((int *)__tmp1) = PW_MEM_MAGIC; \ ++ __tmp1 += sizeof(int); \ ++ __tmp1; \ ++ }) ++#define PW_ADD_SIZE(x, s) \ ++ ({ \ ++ char *__tmp1 = (char *)(x); \ ++ *((int *)__tmp1) = (s); \ ++ __tmp1 += sizeof(int); \ ++ __tmp1; \ ++ }) ++#define PW_ADD_STAMP(x, s) PW_ADD_MAGIC(PW_ADD_SIZE((x), (s))) ++ ++#define PW_IS_MAGIC(x) \ ++ ({ \ ++ int *__tmp1 = (int *)((char *)(x) - sizeof(int)); \ ++ *__tmp1 == PW_MEM_MAGIC; \ ++ }) ++#define PW_REMOVE_STAMP(x) \ ++ ({ \ ++ char *__tmp1 = (char *)(x); \ ++ __tmp1 -= sizeof(int) * 2; \ ++ __tmp1; \ ++ }) ++#define PW_GET_SIZE(x) (*((int *)(x))) ++ ++void *sw_kmalloc(size_t size, gfp_t flags) ++{ ++ size_t act_size = 0; ++ void *retVal = NULL; ++ /* ++ * No point in allocating if ++ * we were unable to allocate ++ * previously! ++ */ ++ { ++ if (!SHOULD_TRACE()) { ++ return NULL; ++ } ++ } ++ /* ++ * (1) Allocate requested block. ++ */ ++ act_size = size + sizeof(int) * 2; ++ retVal = kmalloc(act_size, flags); ++ if (!retVal) { ++ /* ++ * Panic if we couldn't allocate ++ * requested memory. ++ */ ++ printk(KERN_INFO "ERROR: could NOT allocate memory!\n"); ++ MEM_PANIC(); ++ return NULL; ++ } ++ /* ++ * (2) Update memory usage stats. ++ */ ++ LOCK(sw_kmalloc_lock); ++ { ++ total_num_bytes_alloced += size; ++ curr_num_bytes_alloced += size; ++ if (curr_num_bytes_alloced > max_num_bytes_alloced) ++ max_num_bytes_alloced = curr_num_bytes_alloced; ++ } ++ UNLOCK(sw_kmalloc_lock); ++ /* ++ * (3) And finally, add the 'size' ++ * and 'magic' stamps. 
++ */ ++ return PW_ADD_STAMP(retVal, size); ++}; ++ ++void sw_kfree(const void *obj) ++{ ++ void *tmp = NULL; ++ size_t size = 0; ++ ++ /* ++ * (1) Check if this block was allocated ++ * by us. ++ */ ++ if (!PW_IS_MAGIC(obj)) { ++ printk(KERN_INFO "ERROR: %p is NOT a PW_MAGIC ptr!\n", obj); ++ return; ++ } ++ /* ++ * (2) Strip the magic num... ++ */ ++ tmp = PW_REMOVE_STAMP(obj); ++ /* ++ * ...and retrieve size of block. ++ */ ++ size = PW_GET_SIZE(tmp); ++ /* ++ * (3) Update memory usage stats. ++ */ ++ LOCK(sw_kmalloc_lock); ++ { ++ curr_num_bytes_alloced -= size; ++ } ++ UNLOCK(sw_kmalloc_lock); ++ /* ++ * And finally, free the block. ++ */ ++ kfree(tmp); ++}; ++ ++#else // !DO_TRACK_MEMORY_USAGE ++ ++void *sw_kmalloc(size_t size, gfp_t flags) ++{ ++ void *ret = NULL; ++ ++ if (SHOULD_TRACE()) { ++ if (!(ret = kmalloc(size, flags))) { ++ /* ++ * Panic if we couldn't allocate ++ * requested memory. ++ */ ++ MEM_PANIC(); ++ } ++ } ++ return ret; ++}; ++ ++void sw_kfree(const void *mem) ++{ ++ kfree(mem); ++}; ++ ++#endif // DO_TRACK_MEMORY_USAGE +diff --git a/drivers/platform/x86/socwatch/sw_ops_provider.c b/drivers/platform/x86/socwatch/sw_ops_provider.c +new file mode 100644 +index 000000000000..1eb60d12b701 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_ops_provider.c +@@ -0,0 +1,1225 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. 
++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++*/ ++#include ++#include ++#include // "pci_get_domain_bus_and_slot" ++#include // "udelay" ++#include ++#ifdef CONFIG_RPMSG_IPC ++#include ++#endif // CONFIG_RPMSG_IPC ++ ++#include "sw_types.h" ++#include "sw_kernel_defines.h" ++#include "sw_hardware_io.h" ++#include "sw_telem.h" ++#include "sw_ops_provider.h" ++ ++/* ++ * Compile time constants. ++ */ ++/* ++ * Should we be doing 'direct' PCI reads and writes? ++ * '1' ==> YES, call "pci_{read,write}_config_dword()" directly ++ * '0' ==> NO, Use the "intel_mid_msgbus_{read32,write32}_raw()" API (defined in 'intel_mid_pcihelpers.c') ++ */ ++#define DO_DIRECT_PCI_READ_WRITE 0 ++#if !IS_ENABLED(CONFIG_ANDROID) || !defined(CONFIG_X86_WANT_INTEL_MID) ++/* ++ * 'intel_mid_pcihelpers.h' is probably not present -- force ++ * direct PCI calls in this case. ++ */ ++#undef DO_DIRECT_PCI_READ_WRITE ++#define DO_DIRECT_PCI_READ_WRITE 1 ++#endif ++#if !DO_DIRECT_PCI_READ_WRITE ++#include ++#endif ++ ++#define SW_PCI_MSG_CTRL_REG 0x000000D0 ++#define SW_PCI_MSG_DATA_REG 0x000000D4 ++ ++/* ++ * NUM_RETRY & USEC_DELAY are used in PCH Mailbox (sw_read_pch_mailbox_info_i). ++ * Tested on KBL + SPT-LP. May need to revisit. ++ */ ++#define NUM_RETRY 100 ++#define USEC_DELAY 100 ++ ++#define EXTCNF_CTRL 0xF00 // offset for hw semaphore. ++#define FWSM_CTRL 0x5B54 // offset for fw semaphore ++#define GBE_CTRL_OFFSET 0x34 // GBE LPM offset ++ ++#define IS_HW_SEMAPHORE_SET(data) (data & (pw_u64_t)(0x1 << 6)) ++#define IS_FW_SEMAPHORE_SET(data) (data & (pw_u64_t)0x1) ++/* ++ * Number of retries for mailbox configuration ++ */ ++#define MAX_MAILBOX_ITERS 100 ++ ++/* ++ * Local data structures. ++ */ ++/* ++ * TODO: separate into H/W and S/W IO? 
++ */ ++typedef enum sw_io_type { ++ SW_IO_MSR = 0, ++ SW_IO_IPC = 1, ++ SW_IO_MMIO = 2, ++ SW_IO_PCI = 3, ++ SW_IO_CONFIGDB = 4, ++ SW_IO_TRACE_ARGS = 5, ++ SW_IO_WAKEUP = 6, ++ SW_IO_SOCPERF = 7, ++ SW_IO_PROC_NAME = 8, ++ SW_IO_IRQ_NAME = 9, ++ SW_IO_WAKELOCK = 10, ++ SW_IO_TELEM = 11, ++ SW_IO_PCH_MAILBOX = 12, ++ SW_IO_MAILBOX = 13, ++ SW_IO_MAX = 14, ++} sw_io_type_t; ++ ++/* ++ * "io_remapped" values for HW and FW semaphores ++ */ ++static struct { ++ volatile void __iomem *hw_semaphore; ++ volatile void __iomem *fw_semaphore; ++} s_gbe_semaphore = { NULL, NULL }; ++ ++/* ++ * Function declarations. ++ */ ++/* ++ * Exported by the SOCPERF driver. ++ */ ++extern void SOCPERF_Read_Data2(void *data_buffer); ++ ++/* ++ * Init functions. ++ */ ++int sw_ipc_mmio_descriptor_init_func_i( ++ struct sw_driver_io_descriptor *descriptor); ++int sw_pch_mailbox_descriptor_init_func_i( ++ struct sw_driver_io_descriptor *descriptor); ++int sw_mailbox_descriptor_init_func_i( ++ struct sw_driver_io_descriptor *descriptor); ++ ++/* ++ * Read functions. 
++ */ ++void sw_read_msr_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_read_ipc_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_read_mmio_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_read_mailbox_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_read_pci_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_read_configdb_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_read_socperf_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++ ++/* ++ * Write functions. 
++ */ ++void sw_write_msr_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_write_ipc_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_write_mmio_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_write_mailbox_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_write_pci_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_write_configdb_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_write_trace_args_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_write_wakeup_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++void sw_write_socperf_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); ++ ++/* ++ * Print functions. ++ */ ++int sw_print_msr_io_descriptor(const struct sw_driver_io_descriptor ++ *descriptor); ++ ++/* ++ * Reset functions -- equal but opposite of init. ++ */ ++int sw_ipc_mmio_descriptor_reset_func_i( ++ const struct sw_driver_io_descriptor *descriptor); ++int sw_pch_mailbox_descriptor_reset_func_i( ++ const struct sw_driver_io_descriptor *descriptor); ++int sw_mailbox_descriptor_reset_func_i( ++ const struct sw_driver_io_descriptor *descriptor); ++ ++/* ++ * Available functions. ++ */ ++bool sw_socperf_available_i(void); ++ ++/* ++ * Helper functions. 
++ */ ++u32 sw_platform_configdb_read32(u32 address); ++u32 sw_platform_pci_read32(u32 bus, u32 device, u32 function, u32 ctrl_offset, ++ u32 ctrl_value, u32 data_offset); ++u64 sw_platform_pci_read64(u32 bus, u32 device, u32 function, u32 ctrl_offset, ++ u32 ctrl_value, u32 data_offset); ++bool sw_platform_pci_write32(u32 bus, u32 device, u32 function, ++ u32 write_offset, u32 data_value); ++ ++/* ++ * Table of collector operations. ++ */ ++static const struct sw_hw_ops s_hw_ops[] = { ++ [SW_IO_MSR] = { .name = "MSR", ++ .init = NULL, ++ .read = &sw_read_msr_info_i, ++ .write = &sw_write_msr_info_i, ++ .print = &sw_print_msr_io_descriptor, ++ .reset = NULL, ++ .available = NULL }, ++ [SW_IO_IPC] = { ++ .name = "IPC", ++ .init = &sw_ipc_mmio_descriptor_init_func_i, ++ .read = &sw_read_ipc_info_i, ++ .reset = &sw_ipc_mmio_descriptor_reset_func_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_MMIO] = { ++ .name = "MMIO", ++ .init = &sw_ipc_mmio_descriptor_init_func_i, ++ .read = &sw_read_mmio_info_i, ++ .write = &sw_write_mmio_info_i, ++ .reset = &sw_ipc_mmio_descriptor_reset_func_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_PCI] = { ++ .name = "PCI", ++ .read = &sw_read_pci_info_i, ++ .write = &sw_write_pci_info_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_CONFIGDB] = { ++ .name = "CONFIGDB", ++ .read = &sw_read_configdb_info_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_WAKEUP] = { ++ .name = "WAKEUP", ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_SOCPERF] = { ++ .name = "SOCPERF", ++ .read = &sw_read_socperf_info_i, ++ .available = &sw_socperf_available_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_PROC_NAME] = { ++ .name = "PROC-NAME", ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_IRQ_NAME] = { ++ .name = "IRQ-NAME", ++ /* Other fields are don't care 
(will be set to NULL) */ ++ }, ++ [SW_IO_WAKELOCK] = { ++ .name = "WAKELOCK", ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_TELEM] = { ++ .name = "TELEM", ++ .init = &sw_telem_init_func, ++ .read = &sw_read_telem_info, ++ .reset = &sw_reset_telem, ++ .available = &sw_telem_available, ++ .post_config = &sw_telem_post_config, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_PCH_MAILBOX] = { ++ .name = "PCH-MAILBOX", ++ .init = &sw_pch_mailbox_descriptor_init_func_i, ++ .read = &sw_read_pch_mailbox_info_i, ++ .reset = &sw_pch_mailbox_descriptor_reset_func_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_MAILBOX] = { ++ .name = "MAILBOX", ++ .init = &sw_mailbox_descriptor_init_func_i, ++ .read = &sw_read_mailbox_info_i, ++ .write = &sw_write_mailbox_info_i, ++ .reset = &sw_mailbox_descriptor_reset_func_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, ++ [SW_IO_MAX] = { ++ .name = NULL, ++ /* Other fields are don't care (will be set to NULL) */ ++ } ++}; ++ ++/* ++ * Function definitions. 
++ */ ++int sw_ipc_mmio_descriptor_init_func_i( ++ struct sw_driver_io_descriptor *descriptor) ++{ ++ // Perform any required 'io_remap' calls here ++ struct sw_driver_ipc_mmio_io_descriptor *__ipc_mmio = NULL; ++ u64 data_address = 0; ++ ++ if (!descriptor) { // Should NEVER happen ++ return -PW_ERROR; ++ } ++ if (descriptor->collection_type == SW_IO_IPC) { ++ __ipc_mmio = &descriptor->ipc_descriptor; ++ } else { ++ __ipc_mmio = &descriptor->mmio_descriptor; ++ } ++ pw_pr_debug("cmd = %u, sub-cmd = %u, data_addr = 0x%llx\n", ++ __ipc_mmio->command, __ipc_mmio->sub_command, ++ __ipc_mmio->data_address); ++ data_address = __ipc_mmio->data_address; ++ /* ++ if (__ipc_mmio->command || __ipc_mmio->sub_command) { ++ __ipc_mmio->ipc_command = ++ ((pw_u32_t)__ipc_mmio->sub_command << 12) ++ | (pw_u32_t)__ipc_mmio->command; ++ } ++ */ ++ if (data_address) { ++ __ipc_mmio->data_remapped_address = ++ (pw_u64_t)(unsigned long)ioremap_nocache( ++ (unsigned long)data_address, ++ descriptor->counter_size_in_bytes); ++ if ((void *)(unsigned long)__ipc_mmio->data_remapped_address == ++ NULL) { ++ return -EIO; ++ } ++ pw_pr_debug("mapped addr 0x%llx\n", ++ __ipc_mmio->data_remapped_address); ++ if (__ipc_mmio->is_gbe) { ++ if (!s_gbe_semaphore.hw_semaphore || ++ !s_gbe_semaphore.fw_semaphore) { ++ pw_pr_debug("Initializing GBE semaphore\n"); ++ if (data_address >= GBE_CTRL_OFFSET) { ++ u64 hw_addr = (data_address - ++ GBE_CTRL_OFFSET) + ++ EXTCNF_CTRL; ++ u64 fw_addr = (data_address - ++ GBE_CTRL_OFFSET) + ++ FWSM_CTRL; ++ s_gbe_semaphore.hw_semaphore = ++ ioremap_nocache( ++ (unsigned long)hw_addr, ++ descriptor ++ ->counter_size_in_bytes); ++ s_gbe_semaphore.fw_semaphore = ++ ioremap_nocache( ++ (unsigned long)fw_addr, ++ descriptor ++ ->counter_size_in_bytes); ++ if (s_gbe_semaphore.hw_semaphore == ++ NULL || ++ s_gbe_semaphore.fw_semaphore == ++ NULL) { ++ pw_pr_error( ++ "couldn't mmap hw/fw semaphores for GBE MMIO op!\n"); ++ return -EIO; ++ } ++ pw_pr_debug( ++ "GBE has 
hw_sem = 0x%llx, fw_sem = 0x%llx, size = %u\n", ++ (unsigned long long) ++ s_gbe_semaphore ++ .hw_semaphore, ++ (unsigned long long) ++ s_gbe_semaphore ++ .fw_semaphore, ++ descriptor ++ ->counter_size_in_bytes); ++ } ++ } ++ } ++ } ++ return PW_SUCCESS; ++} ++ ++int sw_pch_mailbox_descriptor_init_func_i( ++ struct sw_driver_io_descriptor *descriptor) ++{ ++ // Perform any required 'io_remap' calls here ++ struct sw_driver_pch_mailbox_io_descriptor *__pch_mailbox = NULL; ++ ++ if (!descriptor) { // Should NEVER happen ++ return -PW_ERROR; ++ } ++ __pch_mailbox = &descriptor->pch_mailbox_descriptor; ++ pw_pr_debug("pch_mailbox data_addr = 0x%llx\n", ++ (unsigned long long)__pch_mailbox->data_address); ++ if (__pch_mailbox->mtpmc_address) { ++ __pch_mailbox->mtpmc_remapped_address = ++ (pw_u64_t)(unsigned long)ioremap_nocache( ++ (unsigned long)__pch_mailbox->mtpmc_address, ++ descriptor->counter_size_in_bytes); ++ if ((void *)(unsigned long) ++ __pch_mailbox->mtpmc_remapped_address == NULL) { ++ return -PW_ERROR; ++ } ++ pw_pr_debug("mtpmc_mapped addr 0x%llx\n", ++ __pch_mailbox->mtpmc_remapped_address); ++ } ++ if (__pch_mailbox->msg_full_sts_address) { ++ __pch_mailbox->msg_full_sts_remapped_address = ++ (pw_u64_t)(unsigned long)ioremap_nocache( ++ (unsigned long) ++ __pch_mailbox->msg_full_sts_address, ++ descriptor->counter_size_in_bytes); ++ if ((void *)(unsigned long)__pch_mailbox ++ ->msg_full_sts_remapped_address == NULL) { ++ return -PW_ERROR; ++ } ++ pw_pr_debug("msg_full_sts_mapped addr 0x%llx\n", ++ __pch_mailbox->msg_full_sts_address); ++ } ++ if (__pch_mailbox->mfpmc_address) { ++ __pch_mailbox->mfpmc_remapped_address = ++ (pw_u64_t)(unsigned long)ioremap_nocache( ++ (unsigned long)__pch_mailbox->mfpmc_address, ++ descriptor->counter_size_in_bytes); ++ if ((void *)(unsigned long) ++ __pch_mailbox->mfpmc_remapped_address == NULL) { ++ return -PW_ERROR; ++ } ++ pw_pr_debug("mfpmc_mapped addr 0x%llx\n", ++ __pch_mailbox->mfpmc_remapped_address); ++ } ++ 
return PW_SUCCESS; ++} ++ ++int sw_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor *descriptor) ++{ ++ // Perform any required 'io_remap' calls here ++ struct sw_driver_mailbox_io_descriptor *__mailbox = NULL; ++ ++ if (!descriptor) { // Should NEVER happen ++ return -PW_ERROR; ++ } ++ __mailbox = &descriptor->mailbox_descriptor; ++ ++ pw_pr_debug( ++ "type = %u, interface_address = 0x%llx, data_address = 0x%llx\n", ++ __mailbox->is_msr_type, __mailbox->interface_address, ++ __mailbox->data_address); ++ ++ if (!__mailbox->is_msr_type) { ++ if (__mailbox->interface_address) { ++ __mailbox->interface_remapped_address = ++ (pw_u64_t)(unsigned long)ioremap_nocache( ++ (unsigned long) ++ __mailbox->interface_address, ++ descriptor->counter_size_in_bytes); ++ if ((void *)(unsigned long)__mailbox ++ ->interface_remapped_address == NULL) { ++ pw_pr_error( ++ "Couldn't iomap interface_address = 0x%llx\n", ++ __mailbox->interface_address); ++ return -PW_ERROR; ++ } ++ } ++ if (__mailbox->data_address) { ++ __mailbox->data_remapped_address = ++ (pw_u64_t)(unsigned long)ioremap_nocache( ++ (unsigned long)__mailbox->data_address, ++ descriptor->counter_size_in_bytes); ++ if ((void *)(unsigned long) ++ __mailbox->data_remapped_address == NULL) { ++ pw_pr_error( ++ "Couldn't iomap data_address = 0x%llx\n", ++ __mailbox->data_address); ++ return -PW_ERROR; ++ } ++ } ++ pw_pr_debug("OK, mapped addr 0x%llx, 0x%llx\n", ++ __mailbox->interface_remapped_address, ++ __mailbox->data_remapped_address); ++ } ++ return PW_SUCCESS; ++} ++ ++void sw_read_msr_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) ++{ ++ u64 address = descriptors->msr_descriptor.address; ++ u32 l = 0, h = 0; ++ ++ if (likely(cpu == RAW_CPU())) { ++ if (rdmsr_safe((unsigned long)address, &l, &h)) { ++ pw_pr_warn("Failed to read MSR address = 0x%llx\n", address); ++ } ++ } else { ++ if (rdmsr_safe_on_cpu(cpu, (unsigned long)address, &l, 
&h)) { ++ pw_pr_warn("Failed to read MSR address = 0x%llx\n", address); ++ } ++ } ++ switch (counter_size_in_bytes) { ++ case 4: ++ *((u32 *)dst_vals) = l; ++ break; ++ case 8: ++ *((u64 *)dst_vals) = ((u64)h << 32) | l; ++ break; ++ default: ++ break; ++ } ++ return; ++} ++ ++#ifdef CONFIG_RPMSG_IPC ++#define SW_DO_IPC(cmd, sub_cmd) rpmsg_send_generic_simple_command(cmd, sub_cmd) ++#else ++#define SW_DO_IPC(cmd, sub_cmd) (-ENODEV) ++#endif // CONFIG_RPMSG_IPC ++ ++void sw_read_ipc_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) ++{ ++ u16 cmd = descriptors->ipc_descriptor.command, ++ sub_cmd = descriptors->ipc_descriptor.sub_command; ++ unsigned long remapped_address = ++ (unsigned long)descriptors->ipc_descriptor.data_remapped_address; ++ ++ if (cmd || sub_cmd) { ++ pw_pr_debug("EXECUTING IPC Cmd = %u, %u\n", cmd, sub_cmd); ++ if (SW_DO_IPC(cmd, sub_cmd)) { ++ pw_pr_error("ERROR running IPC command(s)\n"); ++ return; ++ } ++ } ++ ++ if (remapped_address) { ++ // memcpy(&value, (void *)remapped_address, counter_size_in_bytes); ++ pw_pr_debug("COPYING MMIO size %u\n", counter_size_in_bytes); ++ memcpy(dst_vals, (void *)remapped_address, ++ counter_size_in_bytes); ++ } ++ pw_pr_debug("Value = %llu\n", *((u64 *)dst_vals)); ++} ++ ++static void ++sw_read_gbe_mmio_info_i(char *dst_vals, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) ++{ ++ u32 hw_val = 0, fw_val = 0; ++ unsigned long remapped_address = ++ (unsigned long) ++ descriptors->mmio_descriptor.data_remapped_address; ++ u64 write_value = descriptors->write_value; ++ ++ memset(dst_vals, 0, counter_size_in_bytes); ++ ++ pw_pr_debug( ++ "hw_sem = 0x%llx, fw_sem = 0x%llx, addr = 0x%lx, dst_vals = 0x%lx, size = %u\n", ++ (unsigned long long)s_gbe_semaphore.hw_semaphore, ++ (unsigned long long)s_gbe_semaphore.fw_semaphore, ++ remapped_address, (unsigned long)dst_vals, ++ counter_size_in_bytes); ++ if 
(!s_gbe_semaphore.hw_semaphore || !s_gbe_semaphore.fw_semaphore || ++ !remapped_address) { ++ return; ++ } ++ ++ memcpy_fromio(&hw_val, s_gbe_semaphore.hw_semaphore, sizeof(hw_val)); ++ memcpy_fromio(&fw_val, s_gbe_semaphore.fw_semaphore, sizeof(fw_val)); ++ pw_pr_debug("HW_VAL = 0x%lx, FW_VAL = 0x%lx\n", (unsigned long)hw_val, ++ (unsigned long)fw_val); ++ if (!IS_HW_SEMAPHORE_SET(hw_val) && !IS_FW_SEMAPHORE_SET(fw_val)) { ++ memcpy_toio((volatile void __iomem *)remapped_address, ++ &write_value, ++ 4 /* counter_size_in_bytes*/); ++ memcpy_fromio(dst_vals, ++ (volatile void __iomem *)remapped_address, ++ counter_size_in_bytes); ++ } ++} ++void sw_read_mmio_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) ++{ ++ unsigned long remapped_address = ++ (unsigned long) ++ descriptors->mmio_descriptor.data_remapped_address; ++ if (descriptors->mmio_descriptor.is_gbe) { ++ /* MMIO for GBE requires a mailbox-like operation */ ++ sw_read_gbe_mmio_info_i(dst_vals, descriptors, ++ counter_size_in_bytes); ++ } else { ++ if (remapped_address) { ++ memcpy_fromio(dst_vals, ++ (volatile void __iomem *)remapped_address, ++ counter_size_in_bytes); ++ } ++ } ++ pw_pr_debug("Value = %llu\n", *((u64 *)dst_vals)); ++} ++ ++void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) ++{ ++ /* ++ * TODO: spinlock? 
++ */ ++ const struct sw_driver_pch_mailbox_io_descriptor *pch_mailbox = ++ &descriptor->pch_mailbox_descriptor; ++ u32 address = pch_mailbox->data_address; ++ u64 mtpmc_remapped_address = pch_mailbox->mtpmc_remapped_address; ++ u64 msg_full_sts_remapped_address = ++ pch_mailbox->msg_full_sts_remapped_address; ++ u64 mfpmc_remapped_address = pch_mailbox->mfpmc_remapped_address; ++ ++ /* ++ * write address of desired device counter to request ++ * from PMC (shift and add 2 to format device offset) ++ */ ++ if (mtpmc_remapped_address) { ++ int iter = 0; ++ u32 written_val = 0; ++ u32 write_value = ++ (address << 16) + ++ 2; /* shift and add 2 to format device offset */ ++ memcpy_toio( ++ (volatile void __iomem *) ++ (unsigned long)mtpmc_remapped_address, ++ &write_value, 4 /*counter_size_in_bytes*/); ++ /* ++ * Check if address has been written using a while loop in ++ * order to wait for the PMC to consume that address ++ * and to introduce sufficient delay so that the message full ++ * status bit has time to flip. This should ensure ++ * all is ready when begin the wait loop for it to turn 0, ++ * which indicates the value is available to be read. ++ * (This fixes problem where values being read were huge.) ++ */ ++ do { ++ memcpy_fromio(&written_val, ++ (volatile void __iomem *)(unsigned long) ++ mtpmc_remapped_address, ++ 4 /*counter_size_in_bytes*/); ++ pw_pr_debug( ++ "DEBUG: written_val = 0x%x, address = 0x%x\n", ++ written_val, address); ++ udelay(USEC_DELAY); ++ } while ((written_val >> 16) != address && ++iter < NUM_RETRY); ++ } ++ ++ /* ++ * wait for PMC to set status indicating that device counter ++ * is available for read. 
++ */ ++ if (msg_full_sts_remapped_address) { ++ u32 status_wait = 0; ++ int iter = 0; ++ ++ do { ++ memcpy_fromio(&status_wait, ++ (volatile void __iomem *)(unsigned long) ++ msg_full_sts_remapped_address, ++ 4 /*counter_size_in_bytes*/); ++ pw_pr_debug("DEBUG: status_wait = 0x%x\n", status_wait); ++ udelay(USEC_DELAY); ++ } while ((status_wait & 0x01000000) >> 24 && ++ ++iter < NUM_RETRY); ++ } ++ ++ /* ++ * read device counter ++ */ ++ if (mfpmc_remapped_address) { ++ memcpy_fromio( ++ dst_vals, ++ (volatile void __iomem *) ++ (unsigned long)mfpmc_remapped_address, ++ 4 /*counter_size_in_bytes*/); ++ pw_pr_debug("DEBUG: read value = 0x%x\n", ++ *((pw_u32_t *)dst_vals)); ++ } ++} ++ ++void sw_read_mailbox_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) ++{ ++ /* ++ * TODO: spinlock? ++ */ ++ const struct sw_driver_mailbox_io_descriptor *mailbox = ++ &descriptor->mailbox_descriptor; ++ unsigned long interface_address = mailbox->interface_address; ++ unsigned long interface_remapped_address = ++ mailbox->interface_remapped_address; ++ unsigned long data_address = mailbox->data_address; ++ size_t iter = 0; ++ ++ if (mailbox->is_msr_type) { ++ u64 command = 0; ++ ++ if (rdmsrl_safe(interface_address, &command)) { ++ pw_pr_warn("Failed to read MSR address = 0x%llx\n", ++ interface_address); ++ } ++ command &= mailbox->command_mask; ++ command |= mailbox->command | (u64)0x1 << mailbox->run_busy_bit; ++ wrmsrl_safe(interface_address, command); ++ do { ++ udelay(1); ++ if (rdmsrl_safe(interface_address, &command)) { ++ pw_pr_warn("Failed to read MSR address = 0x%llx\n", ++ interface_address); ++ } ++ } while ((command & ((u64)0x1 << mailbox->run_busy_bit)) && ++ ++iter < MAX_MAILBOX_ITERS); ++ if (iter >= MAX_MAILBOX_ITERS) { ++ pw_pr_error("Couldn't write to BIOS mailbox\n"); ++ command = 0; ++ } else { ++ if (rdmsrl_safe(data_address, &command)) { ++ pw_pr_warn("Failed to read MSR address = 0x%llx\n", 
++ data_address); ++ } ++ } ++ *((u64 *)dst_vals) = command; ++ } else { ++ u32 command = 0; ++ const size_t counter_size = ++ 4; /* Always use 4 bytes, regardless of ++ *'counter_size_in_bytes' ++ */ ++ memcpy_fromio(&command, ++ (volatile void __iomem *)(unsigned long) ++ interface_remapped_address, ++ sizeof(command)); ++ command &= mailbox->command_mask; ++ command |= (u32)mailbox->command | ++ (u32)0x1 << mailbox->run_busy_bit; ++ memcpy_toio((volatile void __iomem *)(unsigned long) ++ interface_remapped_address, ++ &command, sizeof(command)); ++ do { ++ udelay(1); ++ memcpy_fromio(&command, ++ (volatile void __iomem *)(unsigned long) ++ interface_remapped_address, ++ sizeof(command)); ++ } while ((command & ((u32)0x1 << mailbox->run_busy_bit)) && ++ ++iter < MAX_MAILBOX_ITERS); ++ if (iter >= MAX_MAILBOX_ITERS) { ++ pw_pr_error("Couldn't write to BIOS mailbox\n"); ++ command = 0; ++ } else { ++ memcpy_fromio(&command, ++ (volatile void __iomem *)(unsigned long) ++ mailbox->data_remapped_address, ++ counter_size); ++ } ++ *((u32 *)dst_vals) = command; ++ } ++} ++ ++void sw_read_pci_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) ++{ ++ u32 bus = descriptors->pci_descriptor.bus, ++ device = descriptors->pci_descriptor.device; ++ u32 function = descriptors->pci_descriptor.function, ++ offset = descriptors->pci_descriptor.offset; ++ u32 data32 = 0; ++ u64 data64 = 0; ++ ++ switch (counter_size_in_bytes) { ++ case 4: ++ data32 = sw_platform_pci_read32(bus, device, function, ++ 0 /* CTRL-OFFSET */, ++ 0 /* CTRL-DATA, don't care */, ++ offset /* DATA-OFFSET */); ++ *((u32 *)dst_vals) = data32; ++ break; ++ case 8: ++ data64 = sw_platform_pci_read64(bus, device, function, ++ 0 /* CTRL-OFFSET */, ++ 0 /* CTRL-DATA, don't care */, ++ offset /* DATA-OFFSET */); ++ *((u64 *)dst_vals) = data64; ++ break; ++ default: ++ pw_pr_error("ERROR: invalid read size = %u\n", ++ counter_size_in_bytes); ++ return; ++ 
} ++ return; ++} ++void sw_read_configdb_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) ++{ ++ { ++ pw_u32_t address = descriptors->configdb_descriptor.address; ++ u32 data = sw_platform_configdb_read32(address); ++ ++ pw_pr_debug( ++ "ADDRESS = 0x%x, CPU = %d, dst_vals = %p, counter size = %u, data = %u\n", ++ address, cpu, dst_vals, counter_size_in_bytes, data); ++ /* ++ * 'counter_size_in_bytes' is ignored, for now. ++ */ ++ *((u32 *)dst_vals) = data; ++ } ++ return; ++} ++void sw_read_socperf_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) ++{ ++#if IS_ENABLED(CONFIG_INTEL_SOCPERF) ++ u64 *socperf_buffer = (u64 *)dst_vals; ++ ++ memset(socperf_buffer, 0, counter_size_in_bytes); ++ SOCPERF_Read_Data2(socperf_buffer); ++#endif // IS_ENABLED(CONFIG_INTEL_SOCPERF) ++ return; ++} ++ ++/** ++ * Decide if the socperf interface is available for use ++ * @returns true if available ++ */ ++bool sw_socperf_available_i(void) ++{ ++ bool retVal = false; ++#if IS_ENABLED(CONFIG_INTEL_SOCPERF) ++ retVal = true; ++#endif // IS_ENABLED(CONFIG_INTEL_SOCPERF) ++ return retVal; ++} ++ ++/** ++ * sw_platform_configdb_read32 - for reading PCI space through config registers ++ * of the platform. ++ * @address: An address in the PCI space ++ * ++ * Returns: the value read from address. 
++ */ ++u32 sw_platform_configdb_read32(u32 address) ++{ ++ u32 read_value = 0; ++#if DO_DIRECT_PCI_READ_WRITE ++ read_value = ++ sw_platform_pci_read32(0 /*bus*/, ++ 0 /*device*/, ++ 0 /*function*/, ++ SW_PCI_MSG_CTRL_REG /*ctrl-offset*/, ++ address /*ctrl-value*/, ++ SW_PCI_MSG_DATA_REG /*data-offset*/); ++#else // !DO_DIRECT_PCI_READ_WRITE ++ read_value = intel_mid_msgbus_read32_raw(address); ++#endif // if DO_DIRECT_PCI_READ_WRITE ++ pw_pr_debug("address = %u, value = %u\n", address, read_value); ++ return read_value; ++} ++ ++u32 sw_platform_pci_read32(u32 bus, u32 device, u32 function, u32 write_offset, ++ u32 write_value, u32 read_offset) ++{ ++ u32 read_value = 0; ++ struct pci_dev *pci_root = pci_get_domain_bus_and_slot( ++ 0, bus, PCI_DEVFN(device, function)); // 0, PCI_DEVFN(0, 0)); ++ if (!pci_root) { ++ return 0; /* Application will verify the data */ ++ } ++ if (write_offset) { ++ pci_write_config_dword( ++ pci_root, write_offset, ++ write_value); // SW_PCI_MSG_CTRL_REG, address); ++ } ++ pci_read_config_dword( ++ pci_root, read_offset, ++ &read_value); // SW_PCI_MSG_DATA_REG, &read_value); ++ return read_value; ++} ++ ++u64 sw_platform_pci_read64(u32 bus, u32 device, u32 function, u32 write_offset, ++ u32 write_value, u32 read_offset) ++{ ++ u32 lo = sw_platform_pci_read32(bus, device, function, ++ 0 /* CTRL-OFFSET */, ++ 0 /* CTRL-DATA, don't care */, ++ read_offset /* DATA-OFFSET */); ++ u32 hi = sw_platform_pci_read32(bus, device, function, ++ 0 /* CTRL-OFFSET */, ++ 0 /* CTRL-DATA, don't care */, ++ read_offset + 4 /* DATA-OFFSET */); ++ return ((u64)hi << 32) | lo; ++} ++ ++void sw_write_msr_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) ++{ ++ u64 write_value = descriptor->write_value; ++ u64 address = descriptor->msr_descriptor.address; ++ ++ pw_pr_debug( ++ "ADDRESS = 0x%llx, CPU = %d, counter size = %u, value = %llu\n", ++ address, cpu, counter_size_in_bytes, write_value); 
++ if (likely(cpu == RAW_CPU())) { ++ wrmsrl_safe((unsigned long)address, write_value); ++ } else { ++ u32 l = write_value & 0xffffffff, ++ h = (write_value >> 32) & 0xffffffff; ++ wrmsr_safe_on_cpu(cpu, (u32)address, l, h); ++ } ++ return; ++}; ++ ++void sw_write_mmio_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) ++{ ++ unsigned long remapped_address = ++ (unsigned long) ++ descriptor->mmio_descriptor.data_remapped_address; ++ u64 write_value = descriptor->write_value; ++ ++ if (remapped_address) { ++ memcpy_toio((volatile void __iomem *)remapped_address, ++ &write_value, ++ counter_size_in_bytes); ++ } ++ pw_pr_debug("Value = %llu\n", *((u64 *)dst_vals)); ++}; ++ ++void sw_write_mailbox_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) ++{ ++ /* ++ * TODO: spinlock? ++ */ ++ const struct sw_driver_mailbox_io_descriptor *mailbox = ++ &descriptor->mailbox_descriptor; ++ unsigned long interface_address = mailbox->interface_address; ++ unsigned long interface_remapped_address = ++ mailbox->interface_remapped_address; ++ unsigned long data_address = mailbox->data_address; ++ u64 data = descriptor->write_value; ++ size_t iter = 0; ++ ++ if (mailbox->is_msr_type) { ++ u64 command = 0; ++ ++ if (rdmsrl_safe(interface_address, &command)) { ++ pw_pr_warn("Failed to read MSR address = 0x%llx\n", ++ interface_address); ++ } ++ command &= mailbox->command_mask; ++ command |= mailbox->command | (u64)0x1 << mailbox->run_busy_bit; ++ wrmsrl_safe(data_address, data); ++ wrmsrl_safe(interface_address, command); ++ do { ++ if (rdmsrl_safe(interface_address, &command)) { ++ pw_pr_warn("Failed to read MSR address = 0x%llx\n", ++ interface_address); ++ } ++ } while ((command & ((u64)0x1 << mailbox->run_busy_bit)) && ++ ++iter < MAX_MAILBOX_ITERS); ++ } else { ++ u32 command = 0; ++ ++ memcpy_fromio(&command, ++ (volatile void __iomem *)(unsigned long) 
++ interface_remapped_address, ++ sizeof(command)); ++ command &= mailbox->command_mask; ++ command |= (u32)mailbox->command | ++ (u32)0x1 << mailbox->run_busy_bit; ++ memcpy_toio((volatile void __iomem *)(unsigned long) ++ mailbox->data_remapped_address, ++ &data, sizeof(data)); ++ memcpy_toio((volatile void __iomem *)(unsigned long) ++ interface_remapped_address, ++ &command, sizeof(command)); ++ do { ++ memcpy_fromio(&command, ++ (volatile void __iomem *)(unsigned long) ++ interface_remapped_address, ++ sizeof(command)); ++ } while ((command & ((u32)0x1 << mailbox->run_busy_bit)) && ++ ++iter < MAX_MAILBOX_ITERS); ++ } ++} ++ ++void sw_write_pci_info_i(char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) ++{ ++ u32 bus = descriptor->pci_descriptor.bus, ++ device = descriptor->pci_descriptor.device; ++ u32 function = descriptor->pci_descriptor.function, ++ offset = descriptor->pci_descriptor.offset; ++ u32 write_value = (u32)descriptor->write_value; ++ /* ++ * 'counter_size_in_bytes' is ignored for now. ++ */ ++ if (!sw_platform_pci_write32(bus, device, function, offset, ++ write_value)) { ++ pw_pr_error("ERROR writing to PCI B/D/F/O %u/%u/%u/%u\n", bus, ++ device, function, offset); ++ } else { ++ pw_pr_debug( ++ "OK, successfully wrote to PCI B/D/F/O %u/%u/%u/%u\n", ++ bus, device, function, offset); ++ } ++ return; ++}; ++ ++/* ++ * Write to PCI space via config registers. 
++ */ ++bool sw_platform_pci_write32(u32 bus, u32 device, u32 function, ++ u32 write_offset, u32 data_value) ++{ ++ struct pci_dev *pci_root = pci_get_domain_bus_and_slot( ++ 0, bus, PCI_DEVFN(device, function)); // 0, PCI_DEVFN(0, 0)); ++ if (!pci_root) { ++ return false; ++ } ++ ++ pci_write_config_dword(pci_root, write_offset, data_value); ++ ++ return true; ++}; ++ ++int sw_print_msr_io_descriptor(const struct sw_driver_io_descriptor *descriptor) ++{ ++ if (!descriptor) { ++ return -PW_ERROR; ++ } ++ pw_pr_debug("MSR address = 0x%llx\n", ++ descriptor->msr_descriptor.address); ++ return PW_SUCCESS; ++} ++ ++int sw_ipc_mmio_descriptor_reset_func_i( ++ const struct sw_driver_io_descriptor *descriptor) ++{ ++ /* Unmap previously mapped memory here */ ++ struct sw_driver_ipc_mmio_io_descriptor *__ipc_mmio = NULL; ++ ++ if (!descriptor) { // Should NEVER happen ++ return -PW_ERROR; ++ } ++ if (descriptor->collection_type == SW_IO_IPC) { ++ __ipc_mmio = ++ (struct sw_driver_ipc_mmio_io_descriptor *)&descriptor ++ ->ipc_descriptor; ++ } else { ++ __ipc_mmio = ++ (struct sw_driver_ipc_mmio_io_descriptor *)&descriptor ++ ->mmio_descriptor; ++ } ++ if (__ipc_mmio->data_remapped_address) { ++ pw_pr_debug("unmapping addr 0x%llx\n", ++ __ipc_mmio->data_remapped_address); ++ iounmap((volatile void __iomem *)(unsigned long) ++ __ipc_mmio->data_remapped_address); ++ __ipc_mmio->data_remapped_address = 0; ++ } ++ /* Uninitialize the GBE, if it wasn't already done */ ++ if (s_gbe_semaphore.hw_semaphore || s_gbe_semaphore.fw_semaphore) { ++ pw_pr_debug("Uninitializing gbe!\n"); ++ if (s_gbe_semaphore.hw_semaphore) { ++ iounmap(s_gbe_semaphore.hw_semaphore); ++ } ++ if (s_gbe_semaphore.fw_semaphore) { ++ iounmap(s_gbe_semaphore.fw_semaphore); ++ } ++ memset(&s_gbe_semaphore, 0, sizeof(s_gbe_semaphore)); ++ } ++ return PW_SUCCESS; ++} ++ ++int sw_pch_mailbox_descriptor_reset_func_i( ++ const struct sw_driver_io_descriptor *descriptor) ++{ ++ /* Unmap previously mapped memory here 
*/ ++ struct sw_driver_pch_mailbox_io_descriptor *__pch_mailbox = NULL; ++ ++ if (!descriptor) { // Should NEVER happen ++ return -PW_ERROR; ++ } ++ __pch_mailbox = ++ (struct sw_driver_pch_mailbox_io_descriptor *)&descriptor ++ ->pch_mailbox_descriptor; ++ if (__pch_mailbox->mtpmc_remapped_address) { ++ pw_pr_debug("unmapping addr 0x%llx\n", ++ __pch_mailbox->mtpmc_remapped_address); ++ iounmap((volatile void __iomem *)(unsigned long) ++ __pch_mailbox->mtpmc_remapped_address); ++ __pch_mailbox->mtpmc_remapped_address = 0; ++ } ++ if (__pch_mailbox->msg_full_sts_remapped_address) { ++ pw_pr_debug("unmapping addr 0x%llx\n", ++ __pch_mailbox->msg_full_sts_remapped_address); ++ iounmap((volatile void __iomem *)(unsigned long) ++ __pch_mailbox->msg_full_sts_remapped_address); ++ __pch_mailbox->msg_full_sts_remapped_address = 0; ++ } ++ if (__pch_mailbox->mfpmc_remapped_address) { ++ pw_pr_debug("unmapping addr 0x%llx\n", ++ __pch_mailbox->mfpmc_remapped_address); ++ iounmap((volatile void __iomem *)(unsigned long) ++ __pch_mailbox->mfpmc_remapped_address); ++ __pch_mailbox->mfpmc_remapped_address = 0; ++ } ++ return PW_SUCCESS; ++} ++ ++int sw_mailbox_descriptor_reset_func_i( ++ const struct sw_driver_io_descriptor *descriptor) ++{ ++ /* Unmap previously mapped memory here */ ++ struct sw_driver_mailbox_io_descriptor *__mailbox = NULL; ++ ++ if (!descriptor) { // Should NEVER happen ++ return -PW_ERROR; ++ } ++ __mailbox = (struct sw_driver_mailbox_io_descriptor *)&descriptor ++ ->mailbox_descriptor; ++ if (!__mailbox->is_msr_type) { ++ if (__mailbox->interface_remapped_address) { ++ pw_pr_debug("unmapping addr 0x%llx\n", ++ __mailbox->interface_remapped_address); ++ iounmap((volatile void __iomem *)(unsigned long) ++ __mailbox->interface_remapped_address); ++ __mailbox->interface_remapped_address = 0; ++ } ++ if (__mailbox->data_remapped_address) { ++ pw_pr_debug("unmapping addr 0x%llx\n", ++ __mailbox->data_remapped_address); ++ iounmap((volatile void __iomem 
*)(unsigned long) ++ __mailbox->data_remapped_address); ++ __mailbox->data_remapped_address = 0; ++ } ++ } ++ return PW_SUCCESS; ++} ++ ++#define NUM_HW_OPS SW_ARRAY_SIZE(s_hw_ops) ++#define FOR_EACH_HW_OP(idx, op) \ ++ for (idx = 0; idx < NUM_HW_OPS && (op = &s_hw_ops[idx]); ++idx) ++ ++int sw_register_ops_providers(void) ++{ ++ size_t idx = 0; ++ const struct sw_hw_ops *op = NULL; ++ ++ FOR_EACH_HW_OP(idx, op) ++ { ++ if (op->name && sw_register_hw_op(op)) { ++ pw_pr_error("ERROR registering provider %s\n", ++ op->name); ++ return -EIO; ++ } ++ } ++ return PW_SUCCESS; ++} ++ ++void sw_free_ops_providers(void) ++{ ++ // NOP ++} +diff --git a/drivers/platform/x86/socwatch/sw_output_buffer.c b/drivers/platform/x86/socwatch/sw_output_buffer.c +new file mode 100644 +index 000000000000..d3b8e585595d +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_output_buffer.c +@@ -0,0 +1,598 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#include "sw_internal.h" ++#include "sw_output_buffer.h" ++#include "sw_kernel_defines.h" ++#include "sw_mem.h" ++#include "sw_lock_defs.h" ++#include "sw_overhead_measurements.h" ++ ++/* ------------------------------------------------- ++ * Compile time constants and macros. ++ * ------------------------------------------------- ++ */ ++#define NUM_SEGS_PER_BUFFER 2 /* MUST be pow 2! 
*/ ++#define NUM_SEGS_PER_BUFFER_MASK (NUM_SEGS_PER_BUFFER - 1) ++/* ++ * The size of the 'buffer' data array in each segment. ++ */ ++#define SW_SEG_DATA_SIZE (sw_buffer_alloc_size) ++/* ++ * Min size of per-cpu output buffers. ++ */ ++#define SW_MIN_SEG_SIZE_BYTES (1 << 10) /* 1kB */ ++#define SW_MIN_OUTPUT_BUFFER_SIZE (SW_MIN_SEG_SIZE_BYTES * NUM_SEGS_PER_BUFFER) ++/* ++ * A symbolic constant for an empty buffer index. ++ */ ++#define EMPTY_SEG (-1) ++/* ++ * How much space is available in a given segment? ++ */ ++#define EMPTY_TSC ((u64)-1) ++#define SEG_IS_FULL(seg) \ ++ ({ \ ++ bool __full = false; \ ++ smp_mb(); \ ++ __full = ((seg)->is_full != EMPTY_TSC); \ ++ __full; \ ++ }) ++#define SEG_SET_FULL(seg, tsc) \ ++ do { \ ++ (seg)->is_full = (tsc); \ ++ smp_mb(); \ ++ } while (0) ++#define SEG_SET_EMPTY(seg) \ ++ do { \ ++ barrier(); \ ++ (seg)->bytes_written = 0; \ ++ SEG_SET_FULL(seg, EMPTY_TSC); \ ++ /*smp_mb(); */ \ ++ } while (0) ++#define SPACE_AVAIL(seg) (SW_SEG_DATA_SIZE - (seg)->bytes_written) ++#define SEG_IS_EMPTY(seg) (SPACE_AVAIL(seg) == SW_SEG_DATA_SIZE) ++ ++#define GET_OUTPUT_BUFFER(cpu) (&per_cpu_output_buffers[(cpu)]) ++/* ++ * Convenience macro: iterate over each segment in a per-cpu output buffer. ++ */ ++#define for_each_segment(i) for (i = 0; i < NUM_SEGS_PER_BUFFER; ++i) ++#define for_each_seg(buffer, seg) \ ++ for (int i = 0; \ ++ i < NUM_SEGS_PER_BUFFER && (seg = (buffer)->segments[i]); ++i) ++/* ++ * How many buffers are we using? ++ */ ++#define GET_NUM_OUTPUT_BUFFERS() (sw_max_num_cpus + 1) ++/* ++ * Convenience macro: iterate over each per-cpu output buffer. ++ */ ++#define for_each_output_buffer(i) for (i = 0; i < GET_NUM_OUTPUT_BUFFERS(); ++i) ++ ++/* ------------------------------------------------- ++ * Local data structures. 
++ * ------------------------------------------------- ++ */ ++typedef struct sw_data_buffer sw_data_buffer_t; ++typedef struct sw_output_buffer sw_output_buffer_t; ++struct sw_data_buffer { ++ u64 is_full; ++ u32 bytes_written; ++ char *buffer; ++} __attribute__((packed)); ++#define SW_SEG_HEADER_SIZE() (sizeof(struct sw_data_buffer) - sizeof(char *)) ++ ++struct sw_output_buffer { ++ sw_data_buffer_t buffers[NUM_SEGS_PER_BUFFER]; ++ int buff_index; ++ u32 produced_samples; ++ u32 dropped_samples; ++ int last_seg_read; ++ unsigned int mem_alloc_size; ++ unsigned long free_pages; ++} ____cacheline_aligned_in_smp; ++ ++/* ------------------------------------------------- ++ * Function declarations. ++ * ------------------------------------------------- ++ */ ++extern u64 sw_timestamp(void); ++ ++/* ------------------------------------------------- ++ * Variable definitions. ++ * ------------------------------------------------- ++ */ ++u64 sw_num_samples_produced = 0, sw_num_samples_dropped = 0; ++int sw_max_num_cpus = -1; ++ ++DECLARE_OVERHEAD_VARS(sw_produce_generic_msg_i); ++/* ++ * Per-cpu output buffers. ++ */ ++static sw_output_buffer_t *per_cpu_output_buffers = NULL; ++/* ++ * Variables for book keeping. ++ */ ++static volatile int sw_last_cpu_read = -1; ++static volatile s32 sw_last_mask = -1; ++/* ++ * Lock for the polled buffer. ++ */ ++SW_DECLARE_SPINLOCK(sw_polled_lock); ++/* ++ * Buffer allocation size. ++ */ ++unsigned long sw_buffer_alloc_size = (1 << 16); // 64 KB ++ ++/* ------------------------------------------------- ++ * Function definitions. 
++ * ------------------------------------------------- ++ */ ++ ++static char *reserve_seg_space_i(size_t size, int cpu, bool *should_wakeup, ++ u64 *reservation_tsc) ++{ ++ sw_output_buffer_t *buffer = GET_OUTPUT_BUFFER(cpu); ++ int i = 0; ++ int buff_index = buffer->buff_index; ++ char *dst = NULL; ++ ++ if (buff_index < 0 || buff_index >= NUM_SEGS_PER_BUFFER) { ++ goto prod_seg_done; ++ } ++ for_each_segment(i) { ++ sw_data_buffer_t *seg = &buffer->buffers[buff_index]; ++ ++ if (SEG_IS_FULL(seg) == false) { ++ if (SPACE_AVAIL(seg) >= size) { ++ *reservation_tsc = sw_timestamp(); ++ dst = &seg->buffer[seg->bytes_written]; ++ seg->bytes_written += size; ++ smp_mb(); ++ buffer->buff_index = buff_index; ++ buffer->produced_samples++; ++ goto prod_seg_done; ++ } ++ SEG_SET_FULL(seg, sw_timestamp()); ++ } ++ buff_index = CIRCULAR_INC(buff_index, NUM_SEGS_PER_BUFFER_MASK); ++ *should_wakeup = true; ++ } ++prod_seg_done: ++ if (!dst) { ++ buffer->dropped_samples++; ++ } ++ return dst; ++}; ++ ++static int sw_produce_polled_msg_i(struct sw_driver_msg *msg, ++ enum sw_wakeup_action action) ++{ ++ int cpu = GET_POLLED_CPU(); ++ bool should_wakeup = false; ++ int retVal = PW_SUCCESS; ++ ++ if (!msg) { ++ return -PW_ERROR; ++ } ++ pw_pr_debug("POLLED! cpu = %d\n", cpu); ++ LOCK(sw_polled_lock); ++ { ++ size_t size = SW_DRIVER_MSG_HEADER_SIZE() + msg->payload_len; ++ char *dst = reserve_seg_space_i(size, cpu, &should_wakeup, ++ &msg->tsc); ++ if (dst) { ++ /* ++ * Assign a special CPU number to this CPU. ++ * This is OK, because messages enqueued in this buffer ++ * are always CPU agnostic (otherwise they would ++ * be invoked from within a preempt_disable()d context ++ * in 'sw_handle_collector_node_i()', which ensures they ++ * will be enqueued within the 'sw_produce_generic_msg_on_cpu()' ++ * function). 
++ */ ++ msg->cpuidx = cpu; ++ memcpy(dst, msg, SW_DRIVER_MSG_HEADER_SIZE()); ++ dst += SW_DRIVER_MSG_HEADER_SIZE(); ++ memcpy(dst, msg->p_payload, msg->payload_len); ++ } else { ++ pw_pr_debug("NO space in polled msg!\n"); ++ retVal = -PW_ERROR; ++ } ++ } ++ UNLOCK(sw_polled_lock); ++ if (unlikely(should_wakeup)) { ++ sw_wakeup_reader(action); ++ } ++ return retVal; ++}; ++ ++static int sw_produce_generic_msg_i(struct sw_driver_msg *msg, ++ enum sw_wakeup_action action) ++{ ++ int retval = PW_SUCCESS; ++ bool should_wakeup = false; ++ int cpu = -1; ++ unsigned long flags = 0; ++ ++ if (!msg) { ++ pw_pr_error("ERROR: CANNOT produce a NULL msg!\n"); ++ return -PW_ERROR; ++ } ++ ++#ifdef CONFIG_PREEMPT_COUNT ++ if (!in_atomic()) { ++ return sw_produce_polled_msg(msg, action); ++ } ++#endif ++ ++ cpu = sw_get_cpu(&flags); ++ { ++ size_t size = msg->payload_len + SW_DRIVER_MSG_HEADER_SIZE(); ++ char *dst = reserve_seg_space_i(size, cpu, &should_wakeup, ++ &msg->tsc); ++ if (likely(dst)) { ++ memcpy(dst, msg, SW_DRIVER_MSG_HEADER_SIZE()); ++ dst += SW_DRIVER_MSG_HEADER_SIZE(); ++ memcpy(dst, msg->p_payload, msg->payload_len); ++ } else { ++ retval = -PW_ERROR; ++ } ++ } ++ sw_put_cpu(flags); ++ ++ if (unlikely(should_wakeup)) { ++ sw_wakeup_reader(action); ++ } ++ ++ return retval; ++}; ++ ++int sw_produce_polled_msg(struct sw_driver_msg *msg, ++ enum sw_wakeup_action action) ++{ ++ return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_produce_polled_msg_i, msg, ++ action); ++}; ++ ++int sw_produce_generic_msg(struct sw_driver_msg *msg, ++ enum sw_wakeup_action action) ++{ ++ return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_produce_generic_msg_i, msg, ++ action); ++}; ++ ++static int sw_init_per_cpu_buffers_i(unsigned long per_cpu_mem_size) ++{ ++ int cpu = -1; ++ ++ per_cpu_output_buffers = (sw_output_buffer_t *)sw_kmalloc( ++ sizeof(sw_output_buffer_t) * GET_NUM_OUTPUT_BUFFERS(), ++ GFP_KERNEL | __GFP_ZERO); ++ if (per_cpu_output_buffers == NULL) { ++ pw_pr_error( ++ "ERROR 
allocating space for per-cpu output buffers!\n"); ++ sw_destroy_per_cpu_buffers(); ++ return -PW_ERROR; ++ } ++ for_each_output_buffer(cpu) { ++ sw_output_buffer_t *buffer = &per_cpu_output_buffers[cpu]; ++ char *buff = NULL; ++ int i = 0; ++ ++ buffer->mem_alloc_size = per_cpu_mem_size; ++ buffer->free_pages = ++ sw_allocate_pages(GFP_KERNEL | __GFP_ZERO, ++ (unsigned int)per_cpu_mem_size); ++ if (buffer->free_pages == 0) { ++ pw_pr_error("ERROR allocating pages for buffer [%d]!\n", ++ cpu); ++ sw_destroy_per_cpu_buffers(); ++ return -PW_ERROR; ++ } ++ buff = (char *)buffer->free_pages; ++ for_each_segment(i) { ++ buffer->buffers[i].buffer = (char *)buff; ++ buff += SW_SEG_DATA_SIZE; ++ } ++ } ++ pw_pr_debug("PER_CPU_MEM_SIZE = %lu, order = %u\n", ++ (unsigned long)per_cpu_mem_size, ++ get_order(per_cpu_mem_size)); ++ return PW_SUCCESS; ++}; ++ ++int sw_init_per_cpu_buffers(void) ++{ ++ unsigned int per_cpu_mem_size = sw_get_output_buffer_size(); ++ ++ pw_pr_debug("Buffer alloc size = %ld\n", sw_buffer_alloc_size); ++ ++ if (GET_NUM_OUTPUT_BUFFERS() <= 0) { ++ pw_pr_error("ERROR: max # output buffers= %d\n", ++ GET_NUM_OUTPUT_BUFFERS()); ++ return -PW_ERROR; ++ } ++ ++ pw_pr_debug("DEBUG: sw_max_num_cpus = %d, num output buffers = %d\n", ++ sw_max_num_cpus, GET_NUM_OUTPUT_BUFFERS()); ++ ++ /* ++ * Try to allocate per-cpu buffers. If allocation fails, ++ * decrease buffer size and retry. Stop trying if size ++ * drops below 2KB (which means 1KB for each buffer). 
++ */ ++ while (per_cpu_mem_size >= SW_MIN_OUTPUT_BUFFER_SIZE && ++ sw_init_per_cpu_buffers_i(per_cpu_mem_size)) { ++ pw_pr_debug( ++ "WARNING: couldn't allocate per-cpu buffers with size %u -- trying smaller size!\n", ++ per_cpu_mem_size); ++ sw_buffer_alloc_size >>= 1; ++ per_cpu_mem_size = sw_get_output_buffer_size(); ++ } ++ ++ if (unlikely(per_cpu_output_buffers == NULL)) { ++ pw_pr_error( ++ "ERROR: couldn't allocate space for per-cpu output buffers!\n"); ++ return -PW_ERROR; ++ } ++ /* ++ * Initialize our locks. ++ */ ++ SW_INIT_SPINLOCK(sw_polled_lock); ++ ++ pw_pr_debug("OK, allocated per-cpu buffers with size = %lu\n", ++ (unsigned long)per_cpu_mem_size); ++ ++ if (sw_init_reader_queue()) { ++ pw_pr_error("ERROR initializing reader subsys\n"); ++ return -PW_ERROR; ++ } ++ ++ return PW_SUCCESS; ++}; ++ ++void sw_destroy_per_cpu_buffers(void) ++{ ++ int cpu = -1; ++ ++ /* ++ * Perform lock finalization. ++ */ ++ SW_DESTROY_SPINLOCK(sw_polled_lock); ++ ++ if (per_cpu_output_buffers != NULL) { ++ for_each_output_buffer(cpu) { ++ sw_output_buffer_t *buffer = ++ &per_cpu_output_buffers[cpu]; ++ if (buffer->free_pages != 0) { ++ sw_release_pages(buffer->free_pages, ++ buffer->mem_alloc_size); ++ buffer->free_pages = 0; ++ } ++ } ++ sw_kfree(per_cpu_output_buffers); ++ per_cpu_output_buffers = NULL; ++ } ++}; ++ ++void sw_reset_per_cpu_buffers(void) ++{ ++ int cpu = 0, i = 0; ++ ++ for_each_output_buffer(cpu) { ++ sw_output_buffer_t *buffer = GET_OUTPUT_BUFFER(cpu); ++ ++ buffer->buff_index = buffer->dropped_samples = ++ buffer->produced_samples = 0; ++ buffer->last_seg_read = -1; ++ ++ for_each_segment(i) { ++ sw_data_buffer_t *seg = &buffer->buffers[i]; ++ ++ memset(seg->buffer, 0, SW_SEG_DATA_SIZE); ++ SEG_SET_EMPTY(seg); ++ } ++ } ++ sw_last_cpu_read = -1; ++ sw_last_mask = -1; ++ pw_pr_debug("OK, reset per-cpu output buffers!\n"); ++}; ++ ++bool sw_any_seg_full(u32 *val, bool is_flush_mode) ++{ ++ int num_visited = 0, i = 0; ++ ++ if (!val) { ++ 
pw_pr_error("ERROR: NULL ptrs in sw_any_seg_full!\n"); ++ return false; ++ } ++ ++ *val = SW_NO_DATA_AVAIL_MASK; ++ pw_pr_debug("Checking for full seg: val = %u, flush = %s\n", *val, ++ GET_BOOL_STRING(is_flush_mode)); ++ for_each_output_buffer(num_visited) { ++ int min_seg = EMPTY_SEG, non_empty_seg = EMPTY_SEG; ++ u64 min_tsc = EMPTY_TSC; ++ sw_output_buffer_t *buffer = NULL; ++ ++ if (++sw_last_cpu_read >= GET_NUM_OUTPUT_BUFFERS()) { ++ sw_last_cpu_read = 0; ++ } ++ buffer = GET_OUTPUT_BUFFER(sw_last_cpu_read); ++ for_each_segment(i) { ++ sw_data_buffer_t *seg = &buffer->buffers[i]; ++ u64 seg_tsc = seg->is_full; ++ ++ if (SEG_IS_EMPTY(seg)) { ++ continue; ++ } ++ non_empty_seg = i; ++ if (seg_tsc < min_tsc) { ++ /* ++ * Can only happen if seg was full, ++ * provided 'EMPTY_TSC' is set to "(u64)-1" ++ */ ++ min_tsc = seg_tsc; ++ min_seg = i; ++ } ++ } ++ if (min_seg != EMPTY_SEG) { ++ *val = (sw_last_cpu_read & 0xffff) << 16 | ++ (min_seg & 0xffff); ++ return true; ++ } else if (is_flush_mode && non_empty_seg != EMPTY_SEG) { ++ *val = (sw_last_cpu_read & 0xffff) << 16 | ++ (non_empty_seg & 0xffff); ++ return true; ++ } ++ } ++ /* ++ * Reaches here only if there's no data to be read. ++ */ ++ if (is_flush_mode) { ++ /* ++ * We've drained all buffers and need to tell the userspace ++ * application there isn't any data. Unfortunately, we can't ++ * just return a 'zero' value for the mask (because that could ++ * also indicate that segment # 0 of cpu #0 has data). ++ */ ++ *val = SW_ALL_WRITES_DONE_MASK; ++ return true; ++ } ++ return false; ++}; ++ ++/* ++ * Has semantics of 'copy_to_user()' -- returns # of bytes that could ++ * NOT be copied (On success ==> returns 0). 
++ */ ++size_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read) ++{ ++ int which_cpu = -1, which_seg = -1; ++ unsigned long bytes_not_copied = 0; ++ sw_output_buffer_t *buff = NULL; ++ sw_data_buffer_t *seg = NULL; ++ size_t bytes_read = 0; ++ ++ if (!sw_check_output_buffer_params(buffer, bytes_to_read, ++ SW_SEG_DATA_SIZE)) { ++ pw_pr_error("ERROR: invalid params to \"sw_consume_data\"!\n"); ++ return -PW_ERROR; ++ } ++ ++ which_cpu = mask >> 16; ++ which_seg = mask & 0xffff; ++ pw_pr_debug("CONSUME: cpu = %d, seg = %d\n", which_cpu, which_seg); ++ if (which_seg >= NUM_SEGS_PER_BUFFER) { ++ pw_pr_error( ++ "Error: which_seg (%d) >= NUM_SEGS_PER_BUFFER (%d)\n", ++ which_seg, NUM_SEGS_PER_BUFFER); ++ return bytes_to_read; ++ } ++ /* ++ * OK to access unlocked; either the segment is FULL, or no collection ++ * is ongoing. In either case, we're GUARANTEED no producer is touching ++ * this segment. ++ */ ++ buff = GET_OUTPUT_BUFFER(which_cpu); ++ seg = &buff->buffers[which_seg]; ++ ++ bytes_not_copied = sw_copy_to_user(buffer, seg->buffer, ++ seg->bytes_written); // dst, src ++ ++ // bytes_not_copied = ++ // copy_to_user(buffer, seg->buffer, seg->bytes_written); // dst,src ++ if (likely(bytes_not_copied == 0)) { ++ bytes_read = seg->bytes_written; ++ } else { ++ pw_pr_error("Warning: couldn't copy %lu bytes\n", ++ bytes_not_copied); ++ bytes_read = 0; ++ } ++ SEG_SET_EMPTY(seg); ++ return bytes_read; ++} ++ ++unsigned int sw_get_output_buffer_size(void) ++{ ++ return (sw_buffer_alloc_size * NUM_SEGS_PER_BUFFER); ++}; ++ ++void sw_count_samples_produced_dropped(void) ++{ ++ int cpu = 0; ++ ++ sw_num_samples_produced = sw_num_samples_dropped = 0; ++ ++ if (per_cpu_output_buffers == NULL) { ++ return; ++ } ++ for_each_output_buffer(cpu) { ++ sw_output_buffer_t *buff = GET_OUTPUT_BUFFER(cpu); ++ ++ sw_num_samples_dropped += buff->dropped_samples; ++ sw_num_samples_produced += buff->produced_samples; ++ } ++}; ++ ++void 
sw_print_output_buffer_overheads(void) ++{ ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_produce_generic_msg_i, ++ "PRODUCE_GENERIC_MSG"); ++ sw_print_reader_stats(); ++}; +diff --git a/drivers/platform/x86/socwatch/sw_reader.c b/drivers/platform/x86/socwatch/sw_reader.c +new file mode 100644 +index 000000000000..c94e7e8983db +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_reader.c +@@ -0,0 +1,163 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. 
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#include "sw_internal.h" ++#include "sw_output_buffer.h" ++#include "sw_kernel_defines.h" ++ ++#define SW_BUFFER_CLEANUP_TIMER_DELAY_NSEC \ ++ 1000000 /* delay buffer cleanup by 10^6 nsec i.e. 1 msec */ ++ ++/* ++ * The alarm queue. ++ */ ++wait_queue_head_t sw_reader_queue; ++/* ++ * Reader wakeup timer. ++ */ ++static struct hrtimer s_reader_wakeup_timer; ++/* ++ * Variable to track # timer fires. ++ */ ++static int s_num_timer_fires; ++ ++/* ++ * The alarm callback. ++ */ ++static enum hrtimer_restart sw_wakeup_callback_i(struct hrtimer *timer) ++{ ++ ++s_num_timer_fires; ++ wake_up_interruptible(&sw_reader_queue); ++ return HRTIMER_NORESTART; ++} ++ ++/* ++ * Init reader queue. ++ */ ++int sw_init_reader_queue(void) ++{ ++ init_waitqueue_head(&sw_reader_queue); ++ /* ++ * Also init wakeup timer (used in low-overhead mode). ++ */ ++ hrtimer_init(&s_reader_wakeup_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ s_reader_wakeup_timer.function = &sw_wakeup_callback_i; ++ ++ return PW_SUCCESS; ++} ++/* ++ * Destroy reader queue. ++ */ ++void sw_destroy_reader_queue(void) ++{ ++ /* NOP */ ++} ++/* ++ * Wakeup client waiting for a full buffer. 
++ */ ++void sw_wakeup_reader(enum sw_wakeup_action action) ++{ ++ if (!waitqueue_active(&sw_reader_queue)) { ++ return; ++ } ++ /* ++ * Direct mode? ++ */ ++ switch (action) { ++ case SW_WAKEUP_ACTION_DIRECT: ++ wake_up_interruptible(&sw_reader_queue); ++ break; ++ case SW_WAKEUP_ACTION_TIMER: ++ if (!hrtimer_active(&s_reader_wakeup_timer)) { ++ ktime_t ktime = ++ ns_to_ktime(SW_BUFFER_CLEANUP_TIMER_DELAY_NSEC); ++ // TODO: possible race here -- introduce locks? ++ hrtimer_start(&s_reader_wakeup_timer, ktime, ++ HRTIMER_MODE_REL); ++ } ++ break; ++ default: ++ break; ++ } ++ return; ++} ++/* ++ * Wakeup client waiting for a full buffer, and ++ * cancel any timers initialized by the reader ++ * subsys. ++ */ ++void sw_cancel_reader(void) ++{ ++ /* ++ * Cancel pending wakeup timer (used in low-overhead mode). ++ */ ++ if (hrtimer_active(&s_reader_wakeup_timer)) { ++ hrtimer_cancel(&s_reader_wakeup_timer); ++ } ++ /* ++ * There might be a reader thread blocked on a read: wake ++ * it up to give it a chance to respond to changed ++ * conditions. ++ */ ++ sw_wakeup_reader(SW_WAKEUP_ACTION_DIRECT); ++} ++ ++void sw_print_reader_stats(void) ++{ ++#if DO_OVERHEAD_MEASUREMENTS ++ printk(KERN_INFO "# reader queue timer fires = %d\n", ++ s_num_timer_fires); ++#endif // OVERHEAD ++} +diff --git a/drivers/platform/x86/socwatch/sw_telem.c b/drivers/platform/x86/socwatch/sw_telem.c +new file mode 100644 +index 000000000000..9f8beb57da68 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_telem.c +@@ -0,0 +1,493 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. 
++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++*/ ++ ++#include ++#include ++#include /* Definition of __weak */ ++#include /* LINUX_VERSION_CODE */ ++#include "sw_kernel_defines.h" /* pw_pr_debug */ ++#include "sw_mem.h" /* sw_kmalloc/free */ ++#include "sw_lock_defs.h" /* Various lock-related definitions */ ++#include "sw_telem.h" /* Signatures of fn's exported from here. */ ++ ++/* ++ * These functions and data structures are exported by the Telemetry ++ * driver. However, that file may not be available in the kernel for ++ * which this driver is being built, so we re-define many of the same ++ * things here. ++ */ ++/** ++ * struct telemetry_evtlog - The "event log" returned by the kernel's ++ * full-read telemetry driver. ++ * @telem_evtid: The 16-bit event ID. ++ * @telem_evtlog: The actual telemetry data. ++ */ ++struct telemetry_evtlog { ++ u32 telem_evtid; /* Event ID of a data item. */ ++ u64 telem_evtlog; /* Counter data */ ++}; ++ ++struct telemetry_evtconfig { ++ u32 *evtmap; /* Array of Event-IDs to Enable */ ++ u8 num_evts; /* Number of Events (<29) in evtmap */ ++ u8 period; /* Sampling period */ ++}; ++ ++#define MAX_TELEM_EVENTS 28 /* Max telem events per unit */ ++ ++/* The enable bit is set when programming events, but is returned ++ * cleared for queried events requests. ++ */ ++#define TELEM_EVENT_ENABLE 0x8000 /* Enabled when Event ID HIGH bit */ ++ ++/* ++ * Sampling Period values. 
++ * The sampling period is encoded in an 7-bit value, where ++ * Period = (Value * 16^Exponent) usec where: ++ * bits[6:3] -> Value; ++ * bits [0:2]-> Exponent; ++ * Here are some of the calculated possible values: ++ * | Value Val+Exp | Value | Exponent | Period (usec) | Period (msec) | ++ * |-----------------+-------+----------+---------------+---------------| ++ * | 0xA = 000 1+010 | 1 | 2 | 256 | 0.256 | ++ * | 0x12= 001 0+010 | 2 | 2 | 512 | 0.512 | ++ * | 0x22= 010 0+010 | 4 | 2 | 1024 | 1.024 | ++ * | 0xB = 000 1+011 | 1 | 3 | 4096 | 4.096 | ++ * | 0x13= 001 0+011 | 2 | 3 | 8192 | 8.192 | ++ * | 0x1B= 001 1+011 | 3 | 3 | 12288 | 12.288 | ++ * | 0x0C= 000 1+100 | 1 | 4 | 65536 | 65.536 | ++ * | 0x0D= 000 1+101 | 1 | 5 | 1048576 | 1048.576 | ++ */ ++#define TELEM_SAMPLING_1MS 0x22 /* Approximately 1 ms */ ++#define TELEM_SAMPLING_1S 0x0D /* Approximately 1 s */ ++ ++/* These functions make up the main APIs of the telemetry driver. We ++ * define all of them with weak linkage so that we can still compile ++ * and load into kernels which don't have a telemetry driver. 
++ */ ++extern int __weak telemetry_raw_read_eventlog(enum telemetry_unit telem_unit, ++ struct telemetry_evtlog *evtlog, ++ int evcount); ++extern int __weak telemetry_reset(void); ++extern int __weak telemetry_reset_events(void); ++extern int __weak telemetry_get_sampling_period(u8 *punit_min, u8 *punit_max, ++ u8 *pmc_min, u8 *pmc_max); ++extern int __weak telemetry_set_sampling_period(u8 punit_period, u8 pmc_period); ++extern int __weak telemetry_get_eventconfig( ++ struct telemetry_evtconfig *punit_config, ++ struct telemetry_evtconfig *pmc_config, int punit_len, int pmc_len); ++extern int __weak telemetry_add_events(u8 num_punit_evts, u8 num_pmc_evts, ++ u32 *punit_evtmap, u32 *pmc_evtmap); ++ ++extern int __weak ++telemetry_update_events(struct telemetry_evtconfig punit_config, ++ struct telemetry_evtconfig pmc_config); ++ ++/* ++ * Some telemetry IDs have multiple instances, indexed by cpu ID. We ++ * implement these by defining two types of IDs: 'regular' and 'scaled'. ++ * For Telemetry IDs with a single instance (the majority of them), the ++ * index into the system's telemetry table is stored in the ++ * sw_driver_io_descriptor.idx. At read time, the driver gets the telemetry ++ * "slot" from sw_driver_io_descriptor.idx, and reads that data. This case ++ * is illustrated by telem_desc_A in the illustration below, where idx 2 ++ * indicates that telem_data[2] contains the telem data for this descriptor. ++ * ++ * telem_desc_A telem_data ++ * scale_op: X |..|[0] ++ * idx : 2 -------------------- |..|[1] ++ * \------->|..|[2] ++ * Scaled_IDs |..|[3] ++ * telem_desc_B CPU#0 1 2 3 ------>|..|[4] ++ * scale_op: / [0]|.|.|.|.| / ++ * idx : 1---->[1]|4|4|5|5| / ++ * +----------/ ++ * ++ * Descriptors with scaled IDs contain a scale operation (scale_op) and ++ * value. They use a 'scaled_ids' table, which is indexed by descriptor ++ * number and CPU id, and stores the telem_data index. 
So in the ++ * illustration above, CPU 0 reading from telem_desc_B would fetch row 1 ++ * (from telem_desc_B.idx == 1), and column [0] yielding element 4, so ++ * that's the telemetry ID it looks up in the telemetry data. ++ * ++ * The scaled_ids table is populated at telemetry ID initialization time ++ * ++ */ ++static unsigned char *sw_telem_scaled_ids; /* Allocate on demand */ ++static unsigned int sw_telem_rows_alloced; /* Rows currently allocated */ ++static unsigned int sw_telem_rows_avail; /* Available rows */ ++ ++extern int sw_max_num_cpus; /* SoC Watch's copy of cpu count. */ ++ ++/* Macro for identifying telemetry IDs with either per-cpu, or per-module ++ * instances. These IDs need to be 'scaled' as per scale_op and scale_val. ++ */ ++#define IS_SCALED_ID(td) ((td)->scale_op != TELEM_OP_NONE) ++/* ++ * Event map that is populated with user-supplied IDs ++ */ ++static u32 s_event_map[2][MAX_TELEM_EVENTS]; ++/* ++ * Index into event map(s) ++ */ ++static size_t s_unit_idx[2] = { 0, 0 }; ++/* ++ * Used to decide if telemetry values need refreshing ++ */ ++static size_t s_unit_iters[2] = { 0, 0 }; ++/* ++ * Spinlock to guard updates to the 'iters' values. ++ */ ++static SW_DEFINE_SPINLOCK(sw_telem_lock); ++/* ++ * Macro to determine if socwatch telemetry system has been configured ++ */ ++#define SW_TELEM_CONFIGURED() (s_unit_idx[0] > 0 || s_unit_idx[1] > 0) ++ ++/** ++ * telemetry_available - Determine if telemetry driver is present ++ * ++ * Returns: 1 if telemetry driver is present, 0 if not. ++ */ ++static int telemetry_available(void) ++{ ++ int retval = 0; ++ struct telemetry_evtconfig punit_evtconfig; ++ struct telemetry_evtconfig pmc_evtconfig; ++ u32 punit_event_map[MAX_TELEM_EVENTS]; ++ u32 pmc_event_map[MAX_TELEM_EVENTS]; ++ ++ /* The symbol below is weak. We return 1 if we have a definition ++ * for this telemetry-driver-supplied symbol, or 0 if only the ++ * weak definition exists. 
This test will suffice to detect if ++ * the telemetry driver is loaded. ++ */ ++ if (telemetry_get_eventconfig == NULL) { ++ return 0; ++ } ++ /* OK, the telemetry driver is loaded. But it's possible it ++ * hasn't been configured properly. To check that, retrieve ++ * the number of events currently configured. This should never ++ * be zero since the telemetry driver reserves some SSRAM slots ++ * for its own use ++ */ ++ memset(&punit_evtconfig, 0, sizeof(punit_evtconfig)); ++ memset(&pmc_evtconfig, 0, sizeof(pmc_evtconfig)); ++ ++ punit_evtconfig.evtmap = (u32 *)&punit_event_map; ++ pmc_evtconfig.evtmap = (u32 *)&pmc_event_map; ++ ++ retval = telemetry_get_eventconfig(&punit_evtconfig, &pmc_evtconfig, ++ MAX_TELEM_EVENTS, MAX_TELEM_EVENTS); ++ return retval == 0 && punit_evtconfig.num_evts > 0 && ++ pmc_evtconfig.num_evts > 0; ++} ++ ++/** ++ * sw_get_instance_row -- Get the address of a 'row' of instance IDs. ++ * @rownum: The row number of the Instance ID table, whose address to return. ++ * Returns: The address of the appropriate row, or NULL if rownum is bad. ++ */ ++static unsigned char *sw_get_instance_row_addr(unsigned int rownum) ++{ ++ if (rownum >= (sw_telem_rows_alloced - sw_telem_rows_avail)) { ++ pw_pr_error("ERROR: Cannot retrieve row Instance ID row %d\n", ++ rownum); ++ return NULL; ++ } ++ return &sw_telem_scaled_ids[rownum * sw_max_num_cpus]; ++} ++ ++/** ++ * sw_free_telem_scaled_id_table - Free the allocated slots. ++ * Returns: Nothing ++ * ++ * Admittedly, a more symmetrical function name would be nice. ++ */ ++static void sw_telem_release_scaled_ids(void) ++{ ++ sw_telem_rows_alloced = 0; ++ sw_telem_rows_avail = 0; ++ if (sw_telem_scaled_ids) { ++ sw_kfree(sw_telem_scaled_ids); ++ } ++ sw_telem_scaled_ids = NULL; ++} ++ ++/** ++ * sw_telem_init_func - Set up the telemetry unit to retrieve a data item ++ * (e.g. counter). ++ * @descriptor: The IO descriptor containing the unit and ID ++ * of the telemetry info to gather. 
++ * ++ * Because we don't (currently) control all of the counters, we ++ * economize by seeing if it's already being collected before allocate ++ * a slot for it. ++ * ++ * Returns: PW_SUCCESS if the telem collector can collect the requested data. ++ * -PW_ERROR if the the addition of that item fails. ++ */ ++int sw_telem_init_func(struct sw_driver_io_descriptor *descriptor) ++{ ++ struct sw_driver_telem_io_descriptor *td = ++ &(descriptor->telem_descriptor); ++ u8 unit = td->unit; /* Telemetry unit to use. */ ++ u32 id; /* Event ID we want telemetry to track. */ ++ size_t idx; /* Index into telemetry data array of event ID to gather. */ ++ const char *unit_str = unit == TELEM_PUNIT ? "PUNIT" : "PMC"; ++ size_t *unit_idx = &s_unit_idx[unit]; ++ ++ if (!telemetry_available()) { ++ return -ENXIO; ++ } ++ ++ id = (u32)(td->id); ++ ++ /* Check if we've already added this ID */ ++ for (idx = 0; idx < *unit_idx && idx < MAX_TELEM_EVENTS; ++idx) { ++ if (s_event_map[unit][idx] == id) { ++ /* Invariant: idx contains the index of the new data item. */ ++ /* Save the index for later fast lookup. */ ++ td->idx = (u16)idx; ++ return 0; ++ } ++ } ++ ++ if (*unit_idx >= MAX_TELEM_EVENTS) { ++ pw_pr_error( ++ "Too many events %s units requested; max of %u available!\n", ++ unit_str, MAX_TELEM_EVENTS); ++ return -E2BIG; ++ } ++ s_event_map[unit][(*unit_idx)++] = id; ++ /* Invariant: idx contains the index of the new data item. */ ++ /* Save the index for later fast lookup. */ ++ td->idx = (u16)idx; ++ pw_pr_debug( ++ "OK, added id = 0x%x to unit %s at entry %zu; retrieved = 0x%x\n", ++ id, unit_str, *unit_idx - 1, s_event_map[unit][*unit_idx - 1]); ++ ++ return 0; ++} ++ ++/** ++ * sw_read_telem_info - Read a metric's data from the telemetry driver. 
++ * @dest: Destination (storage for the read data) ++ * @cpu: Which CPU to read from (not used) ++ * @descriptor: The descriptor containing the data ID to read ++ * @data_size_in_bytes: The # of bytes in the result (always 8) ++ * ++ * Returns: Nothing, but stores SW_TELEM_READ_FAIL_VALUE to dest if the read fails. ++ */ ++void sw_read_telem_info(char *dest, int cpu, ++ const sw_driver_io_descriptor_t *descriptor, ++ u16 data_size_in_bytes) ++{ ++ int len; ++ u64 *data_dest = (u64 *)dest; ++ int retry_count; ++ const struct sw_driver_telem_io_descriptor *td = ++ &(descriptor->telem_descriptor); ++ unsigned int idx; ++ u8 unit = td->unit; ++ bool needs_refresh = false; ++ ++#define TELEM_PKT_SIZE 16 /* sizeof(struct telemetry_evtlog) + padding */ ++ static struct telemetry_evtlog events[MAX_TELEM_EVENTS]; ++ ++ // Get the event index ++ if (IS_SCALED_ID(td)) { ++ unsigned char *scaled_ids; ++ ++ scaled_ids = sw_get_instance_row_addr(td->idx); ++ if (scaled_ids == NULL) { ++ pw_pr_error( ++ "Sw_read_telem_info_i: Illegal row index: *%p = %d", ++ &td->idx, td->idx); ++ *data_dest = SW_TELEM_READ_FAIL_VALUE; ++ return; /* Don't set the dest/data buffer. */ ++ } ++ idx = scaled_ids[RAW_CPU()]; /* Get per-cpu entry */ ++ } else { ++ idx = td->idx; ++ } ++ ++ /* ++ * Check if we need to refresh the list of values ++ */ ++ LOCK(sw_telem_lock); ++ { ++ if (s_unit_iters[unit] == 0) { ++ needs_refresh = true; ++ } ++ if (++s_unit_iters[unit] == s_unit_idx[unit]) { ++ s_unit_iters[unit] = 0; ++ } ++ } ++ UNLOCK(sw_telem_lock); ++ ++ /* ++ * Because of the enormous overhead of reading telemetry data from ++ * the current kernel driver, failure to read the data is not ++ * unheard of. As such, 3 times, should the read fail. Once we ++ * get a higher-performance read routine, we should be able to ++ * eliminate this retry (or maybe decrease it.) 
++ */ ++ retry_count = 3; ++ while (needs_refresh && retry_count--) { ++ len = telemetry_raw_read_eventlog( ++ unit, events, sizeof(events) / TELEM_PKT_SIZE); ++ ++ if ((len < 0) || (len < idx)) { ++ pw_pr_error( ++ "sw_read_telem_info_i: read failed: len=%d\n", ++ len); ++ } else { ++ break; ++ } ++ } ++ ++ if (retry_count) { ++ // TODO: Resolve if we should return something other than ++ // SW_TELEM_READ_FAIL_VALUE, if the actual data happens to be that. ++ *data_dest = events[idx].telem_evtlog; ++ } else { ++ *data_dest = SW_TELEM_READ_FAIL_VALUE; ++ } ++} ++ ++/** ++ * sw_reset_telem - Stop collecting telemetry info. ++ * @descriptor: Unused in this function ++ * ++ * Stop collecting anything extra, and give the driver back to ++ * debugfs. Because this driver increases the sampling rate, the ++ * kernel's telemetry driver can't successfully reset the driver unless ++ * we first drop the rate back down to a much slower rate. This is a ++ * temporary measure, since the reset operation will then reset the ++ * sampling interval to whatever the GMIN driver wants. ++ * ++ * Return: PW_SUCCESS. 
++ */ ++int sw_reset_telem(const struct sw_driver_io_descriptor *descriptor) ++{ ++ if (telemetry_available() && SW_TELEM_CONFIGURED()) { ++ telemetry_set_sampling_period(TELEM_SAMPLING_1S, ++ TELEM_SAMPLING_1S); ++ telemetry_reset_events(); ++ sw_telem_release_scaled_ids(); ++ memset(s_unit_idx, 0, sizeof(s_unit_idx)); ++ memset(s_unit_iters, 0, sizeof(s_unit_iters)); ++ } ++ return PW_SUCCESS; ++} ++ ++/** ++ * sw_available_telem -- Decide if the telemetry subsystem is available for use ++ */ ++bool sw_telem_available(void) ++{ ++ return telemetry_available(); ++}; ++ ++bool sw_telem_post_config(void) ++{ ++ bool retval = true; ++ size_t i = 0; ++ struct telemetry_evtconfig punit_evtconfig; ++ struct telemetry_evtconfig pmc_evtconfig; ++ ++ if (!SW_TELEM_CONFIGURED()) { ++ return true; ++ } ++ ++ memset(&punit_evtconfig, 0, sizeof(punit_evtconfig)); ++ memset(&pmc_evtconfig, 0, sizeof(pmc_evtconfig)); ++ ++ telemetry_set_sampling_period(TELEM_SAMPLING_1S, TELEM_SAMPLING_1S); ++ ++ punit_evtconfig.period = TELEM_SAMPLING_1S; ++ pmc_evtconfig.period = TELEM_SAMPLING_1S; ++ ++ /* Punit */ ++ punit_evtconfig.evtmap = (u32 *)&s_event_map[TELEM_PUNIT]; ++ punit_evtconfig.num_evts = s_unit_idx[TELEM_PUNIT]; ++ /* PMC */ ++ pmc_evtconfig.evtmap = (u32 *)&s_event_map[TELEM_PMC]; ++ pmc_evtconfig.num_evts = s_unit_idx[TELEM_PMC]; ++ ++ for (i = 0; i < punit_evtconfig.num_evts; ++i) { ++ pw_pr_debug("PUNIT[%zu] = 0x%x\n", i, ++ punit_evtconfig.evtmap[i]); ++ } ++ for (i = 0; i < pmc_evtconfig.num_evts; ++i) { ++ pw_pr_debug("PMC[%zu] = 0x%x\n", i, pmc_evtconfig.evtmap[i]); ++ } ++ ++ /* ++ * OK, everything done. 
Now update ++ */ ++ if (telemetry_update_events(punit_evtconfig, pmc_evtconfig)) { ++ pw_pr_error("telemetry_update_events error"); ++ retval = false; ++ } else { ++ pw_pr_debug("OK, telemetry_update_events success\n"); ++ } ++ ++ telemetry_set_sampling_period(TELEM_SAMPLING_1MS, TELEM_SAMPLING_1MS); ++ ++ return retval; ++} +diff --git a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +new file mode 100644 +index 000000000000..0c414423de09 +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +@@ -0,0 +1,2233 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. 
++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#include // "LINUX_VERSION_CODE" ++#include ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) ++#include ++#else ++#include ++#endif ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++#include // for the various APIC vector tracepoints (e.g. "thermal_apic", "local_timer" etc.) 
++#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++struct pool_workqueue; // Forward declaration to avoid compiler warnings ++struct cpu_workqueue_struct; // Forward declaration to avoid compiler warnings ++#include ++#include // for 'pm_notifier' ++#include // for "cpufreq_notifier" ++#include // for 'CPU_UP_PREPARE' etc ++ ++#include "sw_kernel_defines.h" ++#include "sw_collector.h" ++#include "sw_overhead_measurements.h" ++#include "sw_tracepoint_handlers.h" ++#include "sw_output_buffer.h" ++#include "sw_mem.h" ++#include "sw_trace_notifier_provider.h" ++ ++/* ------------------------------------------------- ++ * Compile time constants and useful macros. ++ * ------------------------------------------------- ++ */ ++#ifndef __get_cpu_var ++/* ++ * Kernels >= 3.19 don't include a definition ++ * of '__get_cpu_var'. Create one now. ++ */ ++#define __get_cpu_var(var) (*this_cpu_ptr(&var)) ++#endif // __get_cpu_var ++ ++#define BEGIN_LOCAL_IRQ_STATS_READ(p) \ ++ do { \ ++ p = &__get_cpu_var(irq_stat); ++ ++#define END_LOCAL_IRQ_STATS_READ(p) \ ++ } \ ++ while (0) ++/* ++ * CAS{32,64} ++ */ ++#define CAS32(p, o, n) (cmpxchg((p), (o), (n)) == (o)) ++#define CAS64(p, o, n) (cmpxchg64((p), (o), (n)) == (o)) ++/* ++ * Timer start pid accessor macros ++ */ ++#ifdef CONFIG_TIMER_STATS ++#define GET_TIMER_THREAD_ID(t) \ ++ ((t)->start_pid) /* 'start_pid' is actually the thread ID of the thread that initialized the timer */ ++#else ++#define GET_TIMER_THREAD_ID(t) (-1) ++#endif // CONFIG_TIMER_STATS ++/* ++ * Tracepoint probe register/unregister functions and ++ * helper macros. 
++ */ ++#ifdef CONFIG_TRACEPOINTS ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++#define DO_REGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ ++ WARN_ON(register_trace_##name(probe)) ++#define DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ ++ unregister_trace_##name(probe) ++#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) ++#define DO_REGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ ++ WARN_ON(register_trace_##name(probe, NULL)) ++#define DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ ++ unregister_trace_##name(probe, NULL) ++#else ++#define DO_REGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ ++ WARN_ON(tracepoint_probe_register(node->tp, probe, NULL)) ++#define DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ ++ tracepoint_probe_unregister(node->tp, probe, NULL) ++#endif ++#else // CONFIG_TRACEPOINTS ++#define DO_REGISTER_SW_TRACEPOINT_PROBE(...) /* NOP */ ++#define DO_UNREGISTER_SW_TRACEPOINT_PROBE(...) /* NOP */ ++#endif // CONFIG_TRACEPOINTS ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++#define _DEFINE_PROBE_FUNCTION(name, ...) static void name(__VA_ARGS__) ++#else ++#define _DEFINE_PROBE_FUNCTION(name, ...) \ ++ static void name(void *ignore, __VA_ARGS__) ++#endif ++#define DEFINE_PROBE_FUNCTION(x) _DEFINE_PROBE_FUNCTION(x) ++ ++/* ++ * Tracepoint probe function parameters. ++ * These tracepoint signatures depend on kernel version. 
++ */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) ++#define PROBE_TPS_PARAMS \ ++ sw_probe_power_start_i, unsigned int type, unsigned int state ++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++#define PROBE_TPS_PARAMS \ ++ sw_probe_power_start_i, unsigned int type, unsigned int state, \ ++ unsigned int cpu_id ++#else ++#define PROBE_TPS_PARAMS \ ++ sw_probe_cpu_idle_i, unsigned int state, unsigned int cpu_id ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++#define PROBE_TPF_PARAMS \ ++ sw_probe_power_frequency_i, unsigned int type, unsigned int state ++#else ++#define PROBE_TPF_PARAMS \ ++ sw_probe_cpu_frequency_i, unsigned int new_freq, unsigned int cpu ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++#define PROBE_SCHED_WAKEUP_PARAMS \ ++ sw_probe_sched_wakeup_i, struct rq *rq, struct task_struct *task, \ ++ int success ++#else ++#define PROBE_SCHED_WAKEUP_PARAMS \ ++ sw_probe_sched_wakeup_i, struct task_struct *task, int success ++#endif ++ ++#if IS_ENABLED(CONFIG_ANDROID) ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++#define PROBE_WAKE_LOCK_PARAMS sw_probe_wake_lock_i, struct wake_lock *lock ++#define PROBE_WAKE_UNLOCK_PARAMS \ ++ sw_probe_wake_unlock_i, struct wake_unlock *unlock ++#else ++#define PROBE_WAKE_LOCK_PARAMS \ ++ sw_probe_wakeup_source_activate_i, const char *name, unsigned int state ++#define PROBE_WAKE_UNLOCK_PARAMS \ ++ sw_probe_wakeup_source_deactivate_i, const char *name, \ ++ unsigned int state ++#endif // version ++#endif // CONFIG_ANDROID ++ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) ++#define PROBE_WORKQUEUE_PARAMS \ ++ sw_probe_workqueue_execution_i, struct task_struct *wq_thread, \ ++ struct work_struct *work ++#else ++#define PROBE_WORKQUEUE_PARAMS \ ++ sw_probe_workqueue_execute_start_i, struct work_struct *work ++#endif ++ ++#define PROBE_SCHED_SWITCH_PARAMS \ ++ sw_probe_sched_switch_i, struct task_struct *prev, \ ++ struct task_struct *next ++/* ++ * These tracepoint 
signatures are independent of kernel version. ++ */ ++#define PROBE_IRQ_PARAMS \ ++ sw_probe_irq_handler_entry_i, int irq, struct irqaction *action ++#define PROBE_TIMER_ARGS sw_probe_timer_expire_entry_i, struct timer_list *t ++#define PROBE_HRTIMER_PARAMS \ ++ sw_probe_hrtimer_expire_entry_i, struct hrtimer *hrt, ktime_t *now ++#define PROBE_PROCESS_FORK_PARAMS \ ++ sw_probe_sched_process_fork_i, struct task_struct *parent, \ ++ struct task_struct *child ++#define PROBE_SCHED_PROCESS_EXIT_PARAMS \ ++ sw_probe_sched_process_exit_i, struct task_struct *task ++#define PROBE_THERMAL_APIC_ENTRY_PARAMS \ ++ sw_probe_thermal_apic_entry_i, int vector ++#define PROBE_THERMAL_APIC_EXIT_PARAMS sw_probe_thermal_apic_exit_i, int vector ++ ++#define IS_VALID_WAKEUP_EVENT(cpu) \ ++ ({ \ ++ bool *per_cpu_event = \ ++ &per_cpu(sw_is_valid_wakeup_event, (cpu)); \ ++ bool old_value = \ ++ CAS32(per_cpu_event, true, sw_wakeup_event_flag); \ ++ old_value; \ ++ }) ++#define SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu) (IS_VALID_WAKEUP_EVENT(cpu)) ++#define RESET_VALID_WAKEUP_EVENT_COUNTER(cpu) \ ++ (per_cpu(sw_is_valid_wakeup_event, (cpu)) = true) ++ ++#define NUM_TRACEPOINT_NODES SW_ARRAY_SIZE(s_trace_collector_lists) ++#define NUM_VALID_TRACEPOINTS (NUM_TRACEPOINT_NODES - 1) /* "-1" for IPI */ ++#define FOR_EACH_TRACEPOINT_NODE(idx, node) \ ++ for (idx = 0; idx < NUM_TRACEPOINT_NODES && \ ++ (node = &s_trace_collector_lists[idx]); \ ++ ++idx) ++ ++#define FOR_EACH_NOTIFIER_NODE(idx, node) \ ++ for (idx = 0; idx < SW_ARRAY_SIZE(s_notifier_collector_lists) && \ ++ (node = &s_notifier_collector_lists[idx]); \ ++ ++idx) ++/* ++ * Use these macros if all tracepoint ID numbers ARE contiguous from 0 -- max tracepoint ID # ++ */ ++#if 0 ++#define IS_VALID_TRACE_NOTIFIER_ID(id) \ ++ ((id) >= 0 && (id) < SW_ARRAY_SIZE(s_trace_collector_lists)) ++#define GET_COLLECTOR_TRACE_NODE(id) (&s_trace_collector_lists[id]) ++#define FOR_EACH_trace_notifier_id(idx) \ ++ for (idx = 0; idx < 
SW_ARRAY_SIZE(s_trace_collector_lists); ++idx) ++#endif // if 0 ++/* ++ * Use these macros if all tracepoint ID numbers are NOT contiguous from 0 -- max tracepoint ID # ++ */ ++#define GET_COLLECTOR_TRACE_NODE(idx) \ ++ ({ \ ++ int __idx = 0; \ ++ struct sw_trace_notifier_data *__node = NULL, \ ++ *__retVal = NULL; \ ++ FOR_EACH_TRACEPOINT_NODE(__idx, __node) \ ++ { \ ++ if ((idx) == GET_TRACE_NOTIFIER_ID(__node)) { \ ++ __retVal = __node; \ ++ break; \ ++ } \ ++ } \ ++ __retVal; \ ++ }) ++#define IS_VALID_TRACE_NOTIFIER_ID(idx) (GET_COLLECTOR_TRACE_NODE(idx) != NULL) ++ ++#define GET_COLLECTOR_NOTIFIER_NODE(idx) \ ++ ({ \ ++ int __idx = 0; \ ++ struct sw_trace_notifier_data *__node = NULL, \ ++ *__retVal = NULL; \ ++ FOR_EACH_NOTIFIER_NODE(__idx, __node) \ ++ { \ ++ if ((idx) == GET_TRACE_NOTIFIER_ID(__node)) { \ ++ __retVal = __node; \ ++ break; \ ++ } \ ++ } \ ++ __retVal; \ ++ }) ++#define IS_VALID_NOTIFIER_ID(idx) (GET_COLLECTOR_NOTIFIER_NODE(idx) != NULL) ++ ++/* ------------------------------------------------- ++ * Local function declarations. ++ * ------------------------------------------------- ++ */ ++/* ++ * The tracepoint registration functions. 
++ */ ++int sw_register_trace_cpu_idle_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_cpu_idle_i(struct sw_trace_notifier_data *node); ++int sw_register_trace_cpu_frequency_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_cpu_frequency_i(struct sw_trace_notifier_data *node); ++int sw_register_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node); ++int sw_register_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_timer_expire_entry_i( ++ struct sw_trace_notifier_data *node); ++int sw_register_trace_hrtimer_expire_entry_i( ++ struct sw_trace_notifier_data *node); ++int sw_unregister_trace_hrtimer_expire_entry_i( ++ struct sw_trace_notifier_data *node); ++int sw_register_trace_sched_wakeup_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_sched_wakeup_i(struct sw_trace_notifier_data *node); ++int sw_register_trace_sched_process_fork_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_sched_process_fork_i( ++ struct sw_trace_notifier_data *node); ++int sw_register_trace_sched_process_exit_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_sched_process_exit_i( ++ struct sw_trace_notifier_data *node); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++int sw_register_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_thermal_apic_entry_i( ++ struct sw_trace_notifier_data *node); ++int sw_register_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node); ++#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++#if IS_ENABLED(CONFIG_ANDROID) ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++int sw_register_trace_wake_lock_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_wake_lock_i(struct 
sw_trace_notifier_data *node); ++int sw_register_trace_wake_unlock_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_wake_unlock_i(struct sw_trace_notifier_data *node); ++#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++int sw_register_trace_wakeup_source_activate_i( ++ struct sw_trace_notifier_data *node); ++int sw_unregister_trace_wakeup_source_activate_i( ++ struct sw_trace_notifier_data *node); ++int sw_register_trace_wakeup_source_deactivate_i( ++ struct sw_trace_notifier_data *node); ++int sw_unregister_trace_wakeup_source_deactivate_i( ++ struct sw_trace_notifier_data *node); ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ++#endif // CONFIG_ANDROID ++int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_workqueue_execution_i( ++ struct sw_trace_notifier_data *node); ++int sw_register_trace_sched_switch_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_sched_switch_i(struct sw_trace_notifier_data *node); ++int sw_register_pm_notifier_i(struct sw_trace_notifier_data *node); ++int sw_unregister_pm_notifier_i(struct sw_trace_notifier_data *node); ++int sw_register_cpufreq_notifier_i(struct sw_trace_notifier_data *node); ++int sw_unregister_cpufreq_notifier_i(struct sw_trace_notifier_data *node); ++int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node); ++int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node); ++void sw_handle_sched_wakeup_i(struct sw_collector_data *node, int source_cpu, ++ int target_cpu); ++void sw_handle_timer_wakeup_helper_i(struct sw_collector_data *curr, ++ struct sw_trace_notifier_data *node, ++ pid_t tid); ++void sw_handle_apic_timer_wakeup_i(struct sw_collector_data *node); ++void sw_handle_workqueue_wakeup_helper_i(int cpu, ++ struct sw_collector_data *node); ++void sw_handle_sched_switch_helper_i(void); ++void sw_tps_apic_i(int cpu); ++void sw_tps_tps_i(int cpu); ++void sw_tps_wakeup_i(int cpu); ++void 
sw_tps_i(void); ++void sw_tpf_i(int cpu, struct sw_trace_notifier_data *node); ++void sw_process_fork_exit_helper_i(struct sw_collector_data *node, ++ struct task_struct *task, bool is_fork); ++void sw_produce_wakelock_msg_i(int cpu, struct sw_collector_data *node, ++ const char *name, int type, u64 timeout, int pid, ++ int tid, const char *proc_name); ++u64 sw_my_local_arch_irq_stats_cpu_i(void); ++ ++/* ++ * The tracepoint probes. ++ */ ++/* ++ * The tracepoint handlers. ++ */ ++void sw_handle_trace_notifier_i(struct sw_trace_notifier_data *node); ++void sw_handle_trace_notifier_on_cpu_i(int cpu, ++ struct sw_trace_notifier_data *node); ++void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node); ++ ++/* ------------------------------------------------- ++ * Variable definitions. ++ * ------------------------------------------------- ++ */ ++/* ++ * For overhead measurements. ++ */ ++DECLARE_OVERHEAD_VARS( ++ sw_handle_timer_wakeup_helper_i); // for the "timer_expire" family of probes ++DECLARE_OVERHEAD_VARS(sw_handle_irq_wakeup_i); // for IRQ wakeups ++DECLARE_OVERHEAD_VARS(sw_handle_sched_wakeup_i); // for SCHED ++DECLARE_OVERHEAD_VARS(sw_tps_i); // for TPS ++DECLARE_OVERHEAD_VARS(sw_tpf_i); // for TPF ++DECLARE_OVERHEAD_VARS(sw_process_fork_exit_helper_i); ++#if IS_ENABLED(CONFIG_ANDROID) ++DECLARE_OVERHEAD_VARS(sw_handle_wakelock_i); // for wake lock/unlock ++#endif // CONFIG_ANDROID ++DECLARE_OVERHEAD_VARS(sw_handle_workqueue_wakeup_helper_i); ++DECLARE_OVERHEAD_VARS(sw_handle_sched_switch_helper_i); ++/* ++ * Per-cpu wakeup counters. ++ * Used to decide which wakeup event is the first to occur after a ++ * core wakes up from a C-state. ++ * Set to 'true' in TPS probe ++ */ ++static DEFINE_PER_CPU(bool, sw_is_valid_wakeup_event) = { true }; ++/* ++ * Per-cpu counts of the number of times the local APIC fired. 
++ * We need a separate count because some apic timer fires don't seem ++ * to result in hrtimer/timer expires ++ */ ++static DEFINE_PER_CPU(u64, sw_num_local_apic_timer_inters); ++/* ++ * Flag value to use to decide if the event is a valid wakeup event. ++ * Set to 'false' in TPS probe. ++ */ ++static bool sw_wakeup_event_flag = true; ++/* ++ * Scheduler-based polling emulation. ++ */ ++static DEFINE_PER_CPU(unsigned long, sw_pcpu_polling_jiff); ++pw_u16_t sw_min_polling_interval_msecs; ++ ++/* ++ * IDs for supported tracepoints. ++ */ ++enum sw_trace_id { ++ SW_TRACE_ID_CPU_IDLE, ++ SW_TRACE_ID_CPU_FREQUENCY, ++ SW_TRACE_ID_IRQ_HANDLER_ENTRY, ++ SW_TRACE_ID_TIMER_EXPIRE_ENTRY, ++ SW_TRACE_ID_HRTIMER_EXPIRE_ENTRY, ++ SW_TRACE_ID_SCHED_WAKEUP, ++ SW_TRACE_ID_IPI, ++ SW_TRACE_ID_SCHED_PROCESS_FORK, ++ SW_TRACE_ID_SCHED_PROCESS_EXIT, ++ SW_TRACE_ID_THERMAL_APIC_ENTRY, ++ SW_TRACE_ID_THERMAL_APIC_EXIT, ++ SW_TRACE_ID_WAKE_LOCK, ++ SW_TRACE_ID_WAKE_UNLOCK, ++ SW_TRACE_ID_WORKQUEUE_EXECUTE_START, ++ SW_TRACE_ID_SCHED_SWITCH, ++}; ++/* ++ * IDs for supported notifiers. ++ */ ++enum sw_notifier_id { ++ SW_NOTIFIER_ID_SUSPEND, // TODO: change name? ++ SW_NOTIFIER_ID_SUSPEND_ENTER, ++ SW_NOTIFIER_ID_SUSPEND_EXIT, ++ SW_NOTIFIER_ID_HIBERNATE, ++ SW_NOTIFIER_ID_HIBERNATE_ENTER, ++ SW_NOTIFIER_ID_HIBERNATE_EXIT, ++ SW_NOTIFIER_ID_COUNTER_RESET, ++ SW_NOTIFIER_ID_CPUFREQ, ++ SW_NOTIFIER_ID_HOTCPU, ++}; ++/* ++ * Names for supported tracepoints. A tracepoint ++ * 'name' consists of two strings: a "kernel" string ++ * that is used to locate the tracepoint within the kernel ++ * and an "abstract" string, that is used by Ring-3 to ++ * specify which tracepoints to use during a collection. 
++ */ ++static const struct sw_trace_notifier_name s_trace_names[] = { ++ [SW_TRACE_ID_CPU_IDLE] = { "cpu_idle", "CPU-IDLE" }, ++ [SW_TRACE_ID_CPU_FREQUENCY] = { "cpu_frequency", "CPU-FREQUENCY" }, ++ [SW_TRACE_ID_IRQ_HANDLER_ENTRY] = { "irq_handler_entry", "IRQ-ENTRY" }, ++ [SW_TRACE_ID_TIMER_EXPIRE_ENTRY] = { "timer_expire_entry", ++ "TIMER-ENTRY" }, ++ [SW_TRACE_ID_HRTIMER_EXPIRE_ENTRY] = { "hrtimer_expire_entry", ++ "HRTIMER-ENTRY" }, ++ [SW_TRACE_ID_SCHED_WAKEUP] = { "sched_wakeup", "SCHED-WAKEUP" }, ++ [SW_TRACE_ID_IPI] = { NULL, "IPI" }, ++ [SW_TRACE_ID_SCHED_PROCESS_FORK] = { "sched_process_fork", ++ "PROCESS-FORK" }, ++ [SW_TRACE_ID_SCHED_PROCESS_EXIT] = { "sched_process_exit", ++ "PROCESS-EXIT" }, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++ [SW_TRACE_ID_THERMAL_APIC_ENTRY] = { "thermal_apic_entry", ++ "THERMAL-THROTTLE-ENTRY" }, ++ [SW_TRACE_ID_THERMAL_APIC_EXIT] = { "thermal_apic_exit", ++ "THERMAL-THROTTLE-EXIT" }, ++#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++#if IS_ENABLED(CONFIG_ANDROID) ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++ [SW_TRACE_ID_WAKE_LOCK] = { "wake_lock", "WAKE-LOCK" }, ++ [SW_TRACE_ID_WAKE_UNLOCK] = { "wake_unlock", "WAKE-UNLOCK" }, ++#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++ [SW_TRACE_ID_WAKE_LOCK] = { "wakeup_source_activate", "WAKE-LOCK" }, ++ [SW_TRACE_ID_WAKE_UNLOCK] = { "wakeup_source_deactivate", ++ "WAKE-UNLOCK" }, ++#endif ++#endif ++ [SW_TRACE_ID_WORKQUEUE_EXECUTE_START] = { "workqueue_execute_start", ++ "WORKQUEUE-START" }, ++ [SW_TRACE_ID_SCHED_SWITCH] = { "sched_switch", "CONTEXT-SWITCH" }, ++}; ++ ++/* ++ * Names for supported notifiers. A notifier ++ * 'name' consists of two strings: an unused "kernel" string ++ * and an "abstract" string, that is used by Ring-3 to ++ * specify which notifiers to use during a collection. 
++ */ ++static const struct sw_trace_notifier_name s_notifier_names[] = { ++ [SW_NOTIFIER_ID_SUSPEND] = { "suspend_notifier" /* don't care */, ++ "SUSPEND-NOTIFIER" }, ++ [SW_NOTIFIER_ID_SUSPEND_ENTER] = { NULL, "SUSPEND-ENTER" }, ++ [SW_NOTIFIER_ID_SUSPEND_EXIT] = { NULL, "SUSPEND-EXIT" }, ++ [SW_NOTIFIER_ID_HIBERNATE] = { "hibernate_notifier" /* don't care */, ++ "HIBERNATE-NOTIFIER" }, ++ [SW_NOTIFIER_ID_HIBERNATE_ENTER] = { NULL, "HIBERNATE-ENTER" }, ++ [SW_NOTIFIER_ID_HIBERNATE_EXIT] = { NULL, "HIBERNATE-EXIT" }, ++ [SW_NOTIFIER_ID_COUNTER_RESET] = { NULL, "COUNTER-RESET" }, ++ [SW_NOTIFIER_ID_CPUFREQ] = { "cpufreq_notifier" /* don't care */, ++ "CPUFREQ-NOTIFIER" }, ++ [SW_NOTIFIER_ID_HOTCPU] = { "hotcpu_notifier" /* don't care */, ++ "HOTCPU-NOTIFIER" }, ++}; ++ ++#ifdef CONFIG_TRACEPOINTS ++/* ++ * A list of supported tracepoints. ++ */ ++static struct sw_trace_notifier_data s_trace_collector_lists[] = { ++ { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_CPU_IDLE], ++ &sw_register_trace_cpu_idle_i, &sw_unregister_trace_cpu_idle_i, ++ NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_CPU_FREQUENCY], ++ &sw_register_trace_cpu_frequency_i, ++ &sw_unregister_trace_cpu_frequency_i, NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_IRQ_HANDLER_ENTRY], ++ &sw_register_trace_irq_handler_entry_i, ++ &sw_unregister_trace_irq_handler_entry_i, NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_TIMER_EXPIRE_ENTRY], ++ &sw_register_trace_timer_expire_entry_i, ++ &sw_unregister_trace_timer_expire_entry_i, NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_HRTIMER_EXPIRE_ENTRY], ++ &sw_register_trace_hrtimer_expire_entry_i, ++ &sw_unregister_trace_hrtimer_expire_entry_i, NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_SCHED_WAKEUP], ++ &sw_register_trace_sched_wakeup_i, ++ &sw_unregister_trace_sched_wakeup_i, NULL }, ++ /* Placeholder for IPI 
-- no tracepoints associated with it! */ ++ { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_IPI], NULL, ++ NULL, NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_SCHED_PROCESS_FORK], ++ &sw_register_trace_sched_process_fork_i, ++ &sw_unregister_trace_sched_process_fork_i, NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_SCHED_PROCESS_EXIT], ++ &sw_register_trace_sched_process_exit_i, ++ &sw_unregister_trace_sched_process_exit_i, NULL }, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++ /* ++ * For thermal throttling. ++ * We probably only need one of either 'entry' or 'exit'. Use ++ * both, until we decide which one to keep. Note that ++ * tracepoint IDs for these, and subsequent tracepoints ++ * (e.g. 'wake_lock') will change once we've picked which ++ * one to use. ++ */ ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_THERMAL_APIC_ENTRY], ++ &sw_register_trace_thermal_apic_entry_i, ++ &sw_unregister_trace_thermal_apic_entry_i, NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_THERMAL_APIC_EXIT], ++ &sw_register_trace_thermal_apic_exit_i, ++ &sw_unregister_trace_thermal_apic_exit_i, NULL }, ++#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++/* Wakelocks have multiple tracepoints, depending on kernel version */ ++#if IS_ENABLED(CONFIG_ANDROID) ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++ { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_WAKE_LOCK], ++ &sw_register_trace_wake_lock_i, &sw_unregister_trace_wake_lock_i, ++ NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_WAKE_UNLOCK], ++ &sw_register_trace_wake_unlock_i, &sw_unregister_trace_wake_unlock_i, ++ NULL }, ++#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++ { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_WAKE_LOCK], ++ &sw_register_trace_wakeup_source_activate_i, ++ &sw_unregister_trace_wakeup_source_activate_i, NULL }, ++ { 
SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_WAKE_UNLOCK], ++ &sw_register_trace_wakeup_source_deactivate_i, ++ &sw_unregister_trace_wakeup_source_deactivate_i, NULL }, ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ++#endif // CONFIG_ANDROID ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_WORKQUEUE_EXECUTE_START], ++ &sw_register_trace_workqueue_execution_i, ++ &sw_unregister_trace_workqueue_execution_i, NULL }, ++ { SW_TRACE_COLLECTOR_TRACEPOINT, ++ &s_trace_names[SW_TRACE_ID_SCHED_SWITCH], ++ &sw_register_trace_sched_switch_i, ++ &sw_unregister_trace_sched_switch_i, NULL }, ++}; ++/* ++ * List of supported notifiers. ++ */ ++static struct sw_trace_notifier_data s_notifier_collector_lists[] = { ++ { SW_TRACE_COLLECTOR_NOTIFIER, ++ &s_notifier_names[SW_NOTIFIER_ID_SUSPEND], &sw_register_pm_notifier_i, ++ &sw_unregister_pm_notifier_i, NULL, true /* always register */ }, ++ /* Placeholder for suspend enter/exit -- these will be called ++ from within the pm notifier */ ++ { SW_TRACE_COLLECTOR_NOTIFIER, ++ &s_notifier_names[SW_NOTIFIER_ID_SUSPEND_ENTER], NULL, NULL, NULL }, ++ { SW_TRACE_COLLECTOR_NOTIFIER, ++ &s_notifier_names[SW_NOTIFIER_ID_SUSPEND_EXIT], NULL, NULL, NULL }, ++ /* Placeholder for hibernate enter/exit -- these will be called ++ from within the pm notifier */ ++ { SW_TRACE_COLLECTOR_NOTIFIER, ++ &s_notifier_names[SW_NOTIFIER_ID_HIBERNATE], NULL, NULL, NULL }, ++ { SW_TRACE_COLLECTOR_NOTIFIER, ++ &s_notifier_names[SW_NOTIFIER_ID_HIBERNATE_ENTER], NULL, NULL, NULL }, ++ { SW_TRACE_COLLECTOR_NOTIFIER, ++ &s_notifier_names[SW_NOTIFIER_ID_HIBERNATE_EXIT], NULL, NULL, NULL }, ++ { SW_TRACE_COLLECTOR_NOTIFIER, ++ &s_notifier_names[SW_NOTIFIER_ID_COUNTER_RESET], NULL, NULL, NULL }, ++ { SW_TRACE_COLLECTOR_NOTIFIER, ++ &s_notifier_names[SW_NOTIFIER_ID_CPUFREQ], ++ &sw_register_cpufreq_notifier_i, &sw_unregister_cpufreq_notifier_i }, ++}; ++/* ++ * Special entry for CPU notifier (i.e. 
"hotplug" notifier) ++ * We don't want these to be visible to the user. ++ */ ++static struct sw_trace_notifier_data s_hotplug_notifier_data = { ++ SW_TRACE_COLLECTOR_NOTIFIER, ++ &s_notifier_names[SW_NOTIFIER_ID_HOTCPU], ++ &sw_register_hotcpu_notifier_i, ++ &sw_unregister_hotcpu_notifier_i, ++ NULL, ++ true /* always register */ ++}; ++ ++#else // !CONFIG_TRACEPOINTS ++/* ++ * A list of supported tracepoints. ++ */ ++static struct sw_trace_notifier_data s_trace_collector_lists[] = { ++ /* EMPTY */}; ++/* ++ * List of supported notifiers. ++ */ ++static struct sw_trace_notifier_data s_notifier_collector_lists[] = { ++ /* EMPTY */ }; ++ ++#endif // CONFIG_TRACEPOINTS ++ ++/* ++ * Macros to retrieve tracepoint and notifier IDs. ++ */ ++#define GET_TRACE_ID_FROM_NODE(node) ((node)->name - s_trace_names) ++#define GET_NOTIFIER_ID_FROM_NODE(node) ((node)->name - s_notifier_names) ++ ++#define GET_TRACE_NOTIFIER_ID(node) \ ++ (int)(((node)->type == SW_TRACE_COLLECTOR_TRACEPOINT) ? \ ++ GET_TRACE_ID_FROM_NODE(node) : \ ++ GET_NOTIFIER_ID_FROM_NODE(node)) ++ ++/* ------------------------------------------------- ++ * Function definitions. 
++ * ------------------------------------------------- ++ */ ++/* ++ * Retrieve a TSC value ++ */ ++static inline u64 sw_tscval(void) ++{ ++ unsigned int low, high; ++ ++ asm volatile("rdtsc" : "=a"(low), "=d"(high)); ++ return low | ((unsigned long long)high) << 32; ++}; ++u64 sw_timestamp(void) ++{ ++ struct timespec ts; ++ ++ getnstimeofday(&ts); ++ return (ts.tv_sec * 1000000000ULL + ts.tv_nsec); ++} ++/* ++ * Basically the same as arch/x86/kernel/irq.c --> "arch_irq_stat_cpu(cpu)" ++ */ ++u64 sw_my_local_arch_irq_stats_cpu_i(void) ++{ ++ u64 sum = 0; ++ irq_cpustat_t *stats; ++#ifdef __arm__ ++ int i = 0; ++#endif ++ BEGIN_LOCAL_IRQ_STATS_READ(stats); ++ { ++#ifndef __arm__ ++ sum += stats->__nmi_count; ++ // #ifdef CONFIG_X86_LOCAL_APIC ++ sum += stats->apic_timer_irqs; ++// #endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34) ++ sum += stats->x86_platform_ipis; ++#endif // 2,6,34 ++ sum += stats->apic_perf_irqs; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) ++ sum += stats->apic_irq_work_irqs; ++#endif // 3,5,0 ++#ifdef CONFIG_SMP ++ sum += stats->irq_call_count; ++ sum += stats->irq_resched_count; ++ sum += stats->irq_tlb_count; ++#endif ++#ifdef CONFIG_X86_THERMAL_VECTOR ++ sum += stats->irq_thermal_count; ++#endif ++ sum += stats->irq_spurious_count; // should NEVER be non-zero!!! ++#else ++ sum += stats->__softirq_pending; ++#ifdef CONFIG_SMP ++ for (i = 0; i < NR_IPI; ++i) { ++ sum += stats->ipi_irqs[i]; ++ } ++#endif ++#ifdef CONFIG_X86_MCE ++ sum += stats->mce_exception_count; ++ sum += stats->mce_poll_count; ++#endif ++#endif ++ } ++ END_LOCAL_IRQ_STATS_READ(stats); ++ return sum; ++}; ++ ++/* ++ * Generic tracepoint/notifier handling function. 
++ */ ++void sw_handle_trace_notifier_i(struct sw_trace_notifier_data *node) ++{ ++ struct sw_collector_data *curr = NULL; ++ ++ if (!node) { ++ return; ++ } ++ list_for_each_entry(curr, &node->list, list) { ++ pw_pr_debug("DEBUG: handling message\n"); ++ sw_handle_per_cpu_msg(curr); ++ } ++}; ++/* ++ * Generic tracepoint/notifier handling function. ++ */ ++void sw_handle_trace_notifier_on_cpu_i(int cpu, ++ struct sw_trace_notifier_data *node) ++{ ++ struct sw_collector_data *curr = NULL; ++ ++ if (!node) { ++ return; ++ } ++ list_for_each_entry(curr, &node->list, list) { ++ sw_handle_per_cpu_msg_on_cpu(cpu, curr); ++ } ++}; ++void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node) ++{ ++ struct sw_collector_data *curr = NULL; ++ ++ if (!node) { ++ return; ++ } ++ list_for_each_entry(curr, &node->list, list) { ++ pw_pr_debug("Handling message of unknown cpumask on cpu %d\n", ++ RAW_CPU()); ++ sw_schedule_work(&curr->cpumask, &sw_handle_per_cpu_msg, curr); ++ } ++} ++/* ++ * Tracepoint helpers. ++ */ ++/* ++ * IRQ wakeup handling function. ++ */ ++static void sw_handle_irq_wakeup_i(struct sw_collector_data *node, int irq) ++{ ++ int cpu = RAW_CPU(); ++ sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, ++ node->per_msg_payload_size); ++ // char *dst_vals = (char *)(unsigned long)msg->p_payload; ++ char *dst_vals = msg->p_payload; ++ ++ // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer ++ msg->cpuidx = cpu; ++ ++ /* ++ * IRQ handling ==> only return the irq number ++ */ ++ *((int *)dst_vals) = irq; ++ ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ pw_pr_warn("WARNING: could NOT produce message!\n"); ++ } ++}; ++/* ++ * TIMER wakeup handling function. 
++ */ ++static void sw_handle_timer_wakeup_i(struct sw_collector_data *node, pid_t pid, ++ pid_t tid) ++{ ++ int cpu = RAW_CPU(); ++ sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, ++ node->per_msg_payload_size); ++ // char *dst_vals = (char *)(unsigned long)msg->p_payload; ++ char *dst_vals = msg->p_payload; ++ ++ // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer ++ msg->cpuidx = cpu; ++ ++ /* ++ * TIMER handling ==> only return the pid, tid ++ */ ++ *((int *)dst_vals) = pid; ++ dst_vals += sizeof(pid); ++ *((int *)dst_vals) = tid; ++ ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ pw_pr_warn("WARNING: could NOT produce message!\n"); ++ } ++ pw_pr_debug("HANDLED timer expire for %d, %d\n", pid, tid); ++}; ++/* ++ * Helper function for {hr}timer expires. Required for overhead tracking. ++ */ ++void sw_handle_timer_wakeup_helper_i(struct sw_collector_data *curr, ++ struct sw_trace_notifier_data *node, ++ pid_t tid) ++{ ++ pid_t pid = -1; ++ ++ if (tid == 0) { ++ pid = 0; ++ } else { ++ struct task_struct *task = ++ pid_task(find_pid_ns(tid, &init_pid_ns), PIDTYPE_PID); ++ if (likely(task)) { ++ pid = task->tgid; ++ } ++ } ++ list_for_each_entry(curr, &node->list, list) { ++ sw_handle_timer_wakeup_i(curr, pid, tid); ++ } ++}; ++/* ++ * SCHED wakeup handling function. 
++ */ ++void sw_handle_sched_wakeup_i(struct sw_collector_data *node, int source_cpu, ++ int target_cpu) ++{ ++ int cpu = source_cpu; ++ sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, ++ node->per_msg_payload_size); ++ // char *dst_vals = (char *)(unsigned long)msg->p_payload; ++ char *dst_vals = msg->p_payload; ++ ++ // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer ++ msg->cpuidx = source_cpu; ++ ++ /* ++ * sched handling ==> only return the source, target CPUs ++ */ ++ *((int *)dst_vals) = source_cpu; ++ dst_vals += sizeof(source_cpu); ++ *((int *)dst_vals) = target_cpu; ++ ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_NONE)) { ++ pw_pr_warn("WARNING: could NOT produce message!\n"); ++ } ++}; ++/* ++ * APIC timer wakeup ++ */ ++void sw_handle_apic_timer_wakeup_i(struct sw_collector_data *node) ++{ ++ /* ++ * Send an empty message back to Ring-3 ++ */ ++ int cpu = RAW_CPU(); ++ sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, ++ node->per_msg_payload_size); ++ // char *dst_vals = (char *)(unsigned long)msg->p_payload; ++ ++ // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer ++ msg->cpuidx = cpu; ++ ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ pw_pr_warn("WARNING: could NOT produce message!\n"); ++ } ++ pw_pr_debug("HANDLED APIC timer wakeup for cpu = %d\n", cpu); ++}; ++/* ++ * Helper function for workqueue executions. Required for overhead tracking. ++ */ ++void sw_handle_workqueue_wakeup_helper_i(int cpu, ++ struct sw_collector_data *node) ++{ ++ sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, ++ node->per_msg_payload_size); ++ ++ // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer ++ msg->cpuidx = cpu; ++ ++ /* ++ * Workqueue wakeup ==> empty message. 
++ */ ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ pw_pr_error("WARNING: could NOT produce message!\n"); ++ } ++}; ++/* ++ * Helper function for sched_switch. Required for overhead tracking. ++ */ ++void sw_handle_sched_switch_helper_i(void) ++{ ++ static struct sw_trace_notifier_data *node; ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_SWITCH); ++ pw_pr_debug("SCHED SWITCH NODE = %p\n", node); ++ } ++ if (!node) { ++ return; ++ } ++ preempt_disable(); ++ { ++ struct sw_collector_data *curr; ++ ++ list_for_each_entry(curr, &node->list, list) { ++ unsigned long curr_jiff = jiffies, ++ prev_jiff = curr->last_update_jiffies; ++ unsigned long delta_msecs = ++ jiffies_to_msecs(curr_jiff) - ++ jiffies_to_msecs(prev_jiff); ++ struct cpumask *mask = &curr->cpumask; ++ u16 timeout = curr->info->sampling_interval_msec; ++ ++ if (!timeout) { ++ timeout = sw_min_polling_interval_msecs; ++ } ++ /* Has there been enough time since the last ++ collection point? */ ++ if (delta_msecs < timeout) { ++ continue; ++ } ++ /* Update timestamp and handle message */ ++ if (cpumask_test_cpu( ++ RAW_CPU(), ++ mask) /* This msg must be handled on ++ the current CPU */ ++ || ++ cpumask_empty( ++ mask) /* This msg may be handled by ++ any CPU */) { ++ if (!CAS64(&curr->last_update_jiffies, ++ prev_jiff, curr_jiff)) { ++ /* ++ * CAS failure should only be possible ++ * for messages that can be handled ++ * on any CPU, in which case it ++ * indicates a different CPU already ++ * handled this message. ++ */ ++ continue; ++ } ++ sw_handle_per_cpu_msg_no_sched(curr); ++ } ++ } ++ } ++ preempt_enable(); ++}; ++ ++/* ++ * Probe functions. ++ */ ++/* ++ * 1. TPS ++ */ ++/* ++ * Check IPI wakeups within the cpu_idle tracepoint. 
++ */ ++void sw_tps_apic_i(int cpu) ++{ ++ static struct sw_trace_notifier_data *apic_timer_node; ++ ++ if (unlikely(apic_timer_node == NULL)) { ++ apic_timer_node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_IPI); ++ pw_pr_debug("apic NODE = %p\n", apic_timer_node); ++ } ++ if (apic_timer_node) { ++ bool local_apic_timer_fired = false; ++ u64 curr_num_local_apic = sw_my_local_arch_irq_stats_cpu_i(); ++ u64 *old_num_local_apic = ++ &__get_cpu_var(sw_num_local_apic_timer_inters); ++ ++ if (*old_num_local_apic && ++ (*old_num_local_apic != curr_num_local_apic)) { ++ local_apic_timer_fired = true; ++ } ++ *old_num_local_apic = curr_num_local_apic; ++ ++ if (local_apic_timer_fired && ++ SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { ++ struct sw_collector_data *curr = NULL; ++ list_for_each_entry(curr, &apic_timer_node->list, ++ list) { ++ sw_handle_apic_timer_wakeup_i(curr); ++ } ++ } ++ } ++}; ++/* ++ * Perform any user-defined tasks within the ++ * cpu_idle tracepoint. ++ */ ++void sw_tps_tps_i(int cpu) ++{ ++ static struct sw_trace_notifier_data *tps_node; ++ ++ if (unlikely(tps_node == NULL)) { ++ tps_node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_CPU_IDLE); ++ pw_pr_debug("TPS NODE = %p\n", tps_node); ++ } ++ sw_handle_trace_notifier_i(tps_node); ++}; ++/* ++ * Perform any wakeup-related tasks within the ++ * cpu_idle tracepoint. ++ */ ++void sw_tps_wakeup_i(int cpu) ++{ ++ /* ++ * For now, assume we will always have to ++ * do some wakeup book keeping. Later, we'll ++ * need to detect if the user requested wakeups. 
++ */ ++ sw_wakeup_event_flag = false; ++ RESET_VALID_WAKEUP_EVENT_COUNTER(cpu); ++}; ++void sw_tps_i(void) ++{ ++ /* ++ * Update: FIRST handle IPI wakeups ++ * THEN handle TPS ++ */ ++ int cpu = RAW_CPU(); ++ ++ sw_tps_apic_i(cpu); ++ sw_tps_tps_i(cpu); ++ sw_tps_wakeup_i(cpu); ++}; ++ ++DEFINE_PROBE_FUNCTION(PROBE_TPS_PARAMS) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) ++ if (state == PWR_EVENT_EXIT) { ++ return; ++ } ++#endif ++ DO_PER_CPU_OVERHEAD_FUNC(sw_tps_i); ++}; ++ ++/* ++ * 2. TPF ++ */ ++/* ++ * Helper function for overhead measurements. ++ */ ++void sw_tpf_i(int cpu, struct sw_trace_notifier_data *node) ++{ ++ sw_handle_trace_notifier_on_cpu_i((int)cpu, node); ++}; ++ ++DEFINE_PROBE_FUNCTION(PROBE_TPF_PARAMS) ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++ int cpu = RAW_CPU(); ++#endif // version < 2.6.38 ++ static struct sw_trace_notifier_data *node; ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_CPU_FREQUENCY); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); ++}; ++ ++/* ++ * 3. IRQ handler entry ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_IRQ_PARAMS) ++{ ++ int cpu = RAW_CPU(); ++ static struct sw_trace_notifier_data *node; ++ ++ struct sw_collector_data *curr = NULL; ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_IRQ_HANDLER_ENTRY); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { ++ return; ++ } ++ list_for_each_entry(curr, &node->list, list) { ++ DO_PER_CPU_OVERHEAD_FUNC(sw_handle_irq_wakeup_i, curr, irq); ++ } ++}; ++/* ++ * 4. 
TIMER expire ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_TIMER_ARGS) ++{ ++ int cpu = RAW_CPU(); ++ static struct sw_trace_notifier_data *node; ++ ++ struct sw_collector_data *curr = NULL; ++ pid_t tid = GET_TIMER_THREAD_ID(t); ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_TIMER_EXPIRE_ENTRY); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { ++ return; ++ } ++ DO_PER_CPU_OVERHEAD_FUNC(sw_handle_timer_wakeup_helper_i, curr, node, ++ tid); ++}; ++/* ++ * 5. HRTIMER expire ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_HRTIMER_PARAMS) ++{ ++ int cpu = RAW_CPU(); ++ static struct sw_trace_notifier_data *node; ++ struct sw_collector_data *curr = NULL; ++ pid_t tid = GET_TIMER_THREAD_ID(hrt); ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE( ++ SW_TRACE_ID_HRTIMER_EXPIRE_ENTRY); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { ++ return; ++ } ++ DO_PER_CPU_OVERHEAD_FUNC(sw_handle_timer_wakeup_helper_i, curr, node, ++ tid); ++}; ++/* ++ * 6. SCHED wakeup ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_SCHED_WAKEUP_PARAMS) ++{ ++ static struct sw_trace_notifier_data *node; ++ struct sw_collector_data *curr = NULL; ++ int target_cpu = task_cpu(task), source_cpu = RAW_CPU(); ++ /* ++ * "Self-sched" samples are "don't care". ++ */ ++ if (target_cpu == source_cpu) { ++ return; ++ } ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_WAKEUP); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ /* ++ * Unlike other wakeup sources, we check the per-cpu flag ++ * of the TARGET cpu to decide if we should produce a sample. ++ */ ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(target_cpu)) { ++ return; ++ } ++ list_for_each_entry(curr, &node->list, list) { ++ // sw_handle_sched_wakeup_i(curr, source_cpu, target_cpu); ++ DO_PER_CPU_OVERHEAD_FUNC(sw_handle_sched_wakeup_i, curr, ++ source_cpu, target_cpu); ++ } ++}; ++/* ++ * 8. 
PROCESS fork ++ */ ++/* ++ * Helper for PROCESS fork, PROCESS exit ++ */ ++void sw_process_fork_exit_helper_i(struct sw_collector_data *node, ++ struct task_struct *task, bool is_fork) ++{ ++ int cpu = RAW_CPU(); ++ pid_t pid = task->tgid, tid = task->pid; ++ const char *name = task->comm; ++ sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, ++ node->per_msg_payload_size); ++ char *dst_vals = msg->p_payload; ++ ++ msg->cpuidx = cpu; ++ ++ /* ++ * Fork/Exit ==> return pid, tid ++ * Fork ==> also return name ++ */ ++ *((int *)dst_vals) = pid; ++ dst_vals += sizeof(pid); ++ *((int *)dst_vals) = tid; ++ dst_vals += sizeof(tid); ++ if (is_fork) { ++ memcpy(dst_vals, name, SW_MAX_PROC_NAME_SIZE); ++ } ++ ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ pw_pr_warn("WARNING: could NOT produce message!\n"); ++ } ++ pw_pr_debug( ++ "HANDLED process %s event for task: pid = %d, tid = %d, name = %s\n", ++ is_fork ? "FORK" : "EXIT", pid, tid, name); ++}; ++ ++DEFINE_PROBE_FUNCTION(PROBE_PROCESS_FORK_PARAMS) ++{ ++ static struct sw_trace_notifier_data *node; ++ struct sw_collector_data *curr = NULL; ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_PROCESS_FORK); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ if (!node) { ++ return; ++ } ++ list_for_each_entry(curr, &node->list, list) { ++ DO_PER_CPU_OVERHEAD_FUNC(sw_process_fork_exit_helper_i, curr, ++ child, true /* true ==> fork */); ++ } ++}; ++/* ++ * 9. 
PROCESS exit ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_SCHED_PROCESS_EXIT_PARAMS) ++{ ++ static struct sw_trace_notifier_data *node; ++ struct sw_collector_data *curr = NULL; ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_PROCESS_EXIT); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ if (!node) { ++ return; ++ } ++ list_for_each_entry(curr, &node->list, list) { ++ DO_PER_CPU_OVERHEAD_FUNC(sw_process_fork_exit_helper_i, curr, ++ task, false /* false ==> exit */); ++ } ++}; ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++/* ++ * 10. THERMAL_APIC entry ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_THERMAL_APIC_ENTRY_PARAMS) ++{ ++ int cpu = RAW_CPU(); ++ static struct sw_trace_notifier_data *node; ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_THERMAL_APIC_ENTRY); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); ++}; ++/* ++ * 10. THERMAL_APIC exit ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_THERMAL_APIC_EXIT_PARAMS) ++{ ++ int cpu = RAW_CPU(); ++ static struct sw_trace_notifier_data *node; ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_THERMAL_APIC_EXIT); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); ++}; ++#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++ ++#if IS_ENABLED(CONFIG_ANDROID) ++/* ++ * 11. WAKE lock / WAKEUP source activate. ++ */ ++/* ++ * Helper function to produce wake lock/unlock messages. 
++ */ ++void sw_produce_wakelock_msg_i(int cpu, struct sw_collector_data *node, ++ const char *name, int type, u64 timeout, int pid, ++ int tid, const char *proc_name) ++{ ++ sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, ++ node->per_msg_payload_size); ++ char *dst_vals = msg->p_payload; ++ ++ msg->cpuidx = cpu; ++ ++ /* ++ * Protocol: ++ * wakelock_timeout, wakelock_type, wakelock_name, ++ * proc_pid, proc_tid, proc_name ++ */ ++ *((u64 *)dst_vals) = timeout; ++ dst_vals += sizeof(timeout); ++ *((int *)dst_vals) = type; ++ dst_vals += sizeof(type); ++ strncpy(dst_vals, name, SW_MAX_KERNEL_WAKELOCK_NAME_SIZE); ++ dst_vals += SW_MAX_KERNEL_WAKELOCK_NAME_SIZE; ++ ++ *((int *)dst_vals) = pid; ++ dst_vals += sizeof(pid); ++ *((int *)dst_vals) = tid; ++ dst_vals += sizeof(tid); ++ strncpy(dst_vals, proc_name, SW_MAX_PROC_NAME_SIZE); ++ dst_vals += SW_MAX_PROC_NAME_SIZE; ++ ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ pw_pr_warn("WARNING: could NOT produce message!\n"); ++ } ++}; ++/* ++ * Helper function to handle wake lock/unlock callbacks. 
++ */ ++void sw_handle_wakelock_i(int cpu, struct sw_trace_notifier_data *node, ++ const char *name, int type, u64 timeout) ++{ ++ int pid = PID(), tid = TID(); ++ const char *proc_name = NAME(); ++ struct sw_collector_data *curr = NULL; ++ ++ if (!node) { ++ return; ++ } ++ ++ list_for_each_entry(curr, &node->list, list) { ++ sw_produce_wakelock_msg_i(cpu, curr, name, type, timeout, pid, ++ tid, proc_name); ++ } ++}; ++DEFINE_PROBE_FUNCTION(PROBE_WAKE_LOCK_PARAMS) ++{ ++ int cpu = RAW_CPU(); ++ static struct sw_trace_notifier_data *node; ++ enum sw_kernel_wakelock_type type = SW_WAKE_LOCK; ++ u64 timeout = 0; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++ const char *name = lock->name; ++#endif ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_WAKE_LOCK); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++ /* ++ * Was this wakelock acquired with a timeout i.e. ++ * is this an auto expire wakelock? ++ */ ++ if (lock->flags & (1U << 10)) { ++ type = SW_WAKE_LOCK_TIMEOUT; ++ timeout = jiffies_to_msecs(lock->expires - jiffies); ++ } ++#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ++ DO_PER_CPU_OVERHEAD_FUNC(sw_handle_wakelock_i, cpu, node, name, ++ (int)type, timeout); ++}; ++/* ++ * 11. WAKE unlock / WAKEUP source deactivate. ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_WAKE_UNLOCK_PARAMS) ++{ ++ int cpu = RAW_CPU(); ++ static struct sw_trace_notifier_data *node; ++ enum sw_kernel_wakelock_type type = SW_WAKE_UNLOCK; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++ const char *name = lock->name; ++#endif ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_WAKE_UNLOCK); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ DO_PER_CPU_OVERHEAD_FUNC(sw_handle_wakelock_i, cpu, node, name, ++ (int)type, 0 /*timeout*/); ++}; ++#endif // CONFIG_ANDROID ++ ++/* ++ * 12. 
WORKQUEUE ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_WORKQUEUE_PARAMS) ++{ ++ int cpu = RAW_CPU(); ++ static struct sw_trace_notifier_data *node; ++ struct sw_collector_data *curr = NULL; ++ ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_TRACE_NODE( ++ SW_TRACE_ID_WORKQUEUE_EXECUTE_START); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { ++ return; ++ } ++ list_for_each_entry(curr, &node->list, list) { ++ DO_PER_CPU_OVERHEAD_FUNC(sw_handle_workqueue_wakeup_helper_i, ++ cpu, curr); ++ } ++}; ++ ++/* ++ * 13. SCHED switch ++ */ ++DEFINE_PROBE_FUNCTION(PROBE_SCHED_SWITCH_PARAMS) ++{ ++ DO_PER_CPU_OVERHEAD_FUNC(sw_handle_sched_switch_helper_i); ++}; ++ ++/* ++ * 1. SUSPEND notifier ++ */ ++static void sw_send_pm_notification_i(int value) ++{ ++ struct sw_driver_msg *msg = NULL; ++ size_t buffer_len = sizeof(*msg) + sizeof(value); ++ char *buffer = vmalloc(buffer_len); ++ ++ if (!buffer) { ++ pw_pr_error( ++ "couldn't allocate memory when sending suspend notification!\n"); ++ return; ++ } ++ msg = (struct sw_driver_msg *)buffer; ++ msg->tsc = sw_timestamp(); ++ msg->cpuidx = RAW_CPU(); ++ msg->plugin_id = 0; // "0" indicates a system message ++ msg->metric_id = 1; // "1" indicates a suspend/resume message (TODO) ++ msg->msg_id = ++ 0; /* don't care; TODO: use the 'msg_id' to encode the 'value'? */ ++ msg->payload_len = sizeof(value); ++ msg->p_payload = buffer + sizeof(*msg); ++ *((int *)msg->p_payload) = value; ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ pw_pr_error("couldn't produce generic message!\n"); ++ } ++ vfree(buffer); ++} ++ ++static u64 sw_pm_enter_tsc; ++static bool sw_is_reset_i(void) ++{ ++ /* ++ * TODO: rely on checking the IA32_FIXED_CTR2 instead? ++ */ ++ u64 curr_tsc = sw_tscval(); ++ bool is_reset = sw_pm_enter_tsc > curr_tsc; ++ ++ pw_pr_force("DEBUG: curr tsc = %llu, prev tsc = %llu, is reset = %s\n", ++ curr_tsc, sw_pm_enter_tsc, is_reset ? 
"true" : "false"); ++ ++ return is_reset; ++} ++static void sw_probe_pm_helper_i(int id, int both_id, bool is_enter, ++ enum sw_pm_action action, enum sw_pm_mode mode) ++{ ++ struct sw_trace_notifier_data *node = GET_COLLECTOR_NOTIFIER_NODE(id); ++ struct sw_trace_notifier_data *both_node = ++ GET_COLLECTOR_NOTIFIER_NODE(both_id); ++ struct sw_trace_notifier_data *reset_node = ++ GET_COLLECTOR_NOTIFIER_NODE(SW_NOTIFIER_ID_COUNTER_RESET); ++ if (is_enter) { ++ /* ++ * Entering HIBERNATION/SUSPEND ++ */ ++ sw_pm_enter_tsc = sw_tscval(); ++ } else { ++ /* ++ * Exitting HIBERNATION/SUSPEND ++ */ ++ if (sw_is_reset_i() && reset_node) { ++ sw_handle_reset_messages_i(reset_node); ++ } ++ } ++ if (node) { ++ sw_handle_trace_notifier_i(node); ++ } ++ if (both_node) { ++ sw_handle_trace_notifier_i(both_node); ++ } ++ /* Send the suspend-resume notification */ ++ sw_send_pm_notification_i(SW_PM_VALUE(mode, action)); ++} ++ ++static bool sw_is_suspend_via_firmware(void) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) ++ /* 'pm_suspend_via_firmware' only available in kernel >= 4.4 */ ++ return pm_suspend_via_firmware(); ++#endif ++ return true; ++} ++ ++static int sw_probe_pm_notifier_i(struct notifier_block *block, unsigned long state, ++ void *dummy) ++{ ++ static const struct { ++ enum sw_pm_action action; ++ int node_id; ++ int both_id; ++ bool is_enter; ++ } pm_data[PM_POST_RESTORE] = { ++ [PM_HIBERNATION_PREPARE] = { SW_PM_ACTION_HIBERNATE_ENTER, ++ SW_NOTIFIER_ID_HIBERNATE_ENTER, ++ SW_NOTIFIER_ID_HIBERNATE, true }, ++ [PM_POST_HIBERNATION] = { SW_PM_ACTION_HIBERNATE_EXIT, ++ SW_NOTIFIER_ID_HIBERNATE_EXIT, ++ SW_NOTIFIER_ID_HIBERNATE, false }, ++ [PM_SUSPEND_PREPARE] = { SW_PM_ACTION_SUSPEND_ENTER, ++ SW_NOTIFIER_ID_SUSPEND_ENTER, ++ SW_NOTIFIER_ID_SUSPEND, true }, ++ [PM_POST_SUSPEND] = { SW_PM_ACTION_SUSPEND_EXIT, ++ SW_NOTIFIER_ID_SUSPEND_EXIT, ++ SW_NOTIFIER_ID_SUSPEND, false }, ++ }; ++ enum sw_pm_action action = pm_data[state].action; ++ enum sw_pm_mode 
mode = sw_is_suspend_via_firmware() ? ++ SW_PM_MODE_FIRMWARE : ++ SW_PM_MODE_NONE; ++ if (action != SW_PM_ACTION_NONE) { ++ int node_id = pm_data[state].node_id, ++ both_id = pm_data[state].both_id; ++ bool is_enter = pm_data[state].is_enter; ++ ++ sw_probe_pm_helper_i(node_id, both_id, is_enter, action, mode); ++ } else { ++ /* Not supported */ ++ pw_pr_error( ++ "ERROR: unknown state %lu passed to SWA pm notifier!\n", ++ state); ++ } ++ return NOTIFY_DONE; ++} ++ ++static void sw_store_topology_change_i(enum cpu_action type, int cpu, int core_id, ++ int pkg_id) ++{ ++ struct sw_topology_node *node = sw_kmalloc(sizeof(*node), GFP_ATOMIC); ++ ++ if (!node) { ++ pw_pr_error( ++ "couldn't allocate a node for topology change tracking!\n"); ++ return; ++ } ++ node->change.timestamp = sw_timestamp(); ++ node->change.type = type; ++ node->change.cpu = cpu; ++ node->change.core = core_id; ++ node->change.pkg = pkg_id; ++ ++ SW_LIST_ADD(&sw_topology_list, node, list); ++ ++sw_num_topology_entries; ++} ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) ++int sw_probe_hotplug_notifier_i(struct notifier_block *block, ++ unsigned long action, void *pcpu) ++{ ++ unsigned int cpu = (unsigned long)pcpu; ++ unsigned int pkg_id = topology_physical_package_id(cpu); ++ unsigned int core_id = topology_core_id(cpu); ++ ++ switch (action) { ++ case CPU_UP_PREPARE: ++ case CPU_UP_PREPARE_FROZEN: ++ /* CPU is coming online -- store top change */ ++ sw_store_topology_change_i(SW_CPU_ACTION_ONLINE_PREPARE, cpu, ++ core_id, pkg_id); ++ pw_pr_debug( ++ "DEBUG: SoC Watch has cpu %d (phys = %d, core = %d) preparing to come online at tsc = %llu! 
Current cpu = %d\n", ++ cpu, pkg_id, core_id, sw_timestamp(), RAW_CPU()); ++ break; ++ case CPU_ONLINE: ++ case CPU_ONLINE_FROZEN: ++ /* CPU is online -- first store top change ++ then take BEGIN snapshot */ ++ sw_store_topology_change_i(SW_CPU_ACTION_ONLINE, cpu, core_id, ++ pkg_id); ++ sw_process_snapshot_on_cpu(SW_WHEN_TYPE_BEGIN, cpu); ++ pw_pr_debug( ++ "DEBUG: SoC Watch has cpu %d (phys = %d, core = %d) online at tsc = %llu! Current cpu = %d\n", ++ cpu, pkg_id, core_id, sw_timestamp(), RAW_CPU()); ++ break; ++ case CPU_DOWN_PREPARE: ++ case CPU_DOWN_PREPARE_FROZEN: ++ /* CPU is going offline -- take END snapshot */ ++ sw_process_snapshot_on_cpu(SW_WHEN_TYPE_END, cpu); ++ pw_pr_debug( ++ "DEBUG: SoC Watch has cpu %d preparing to go offline at tsc = %llu! Current cpu = %d\n", ++ cpu, sw_timestamp(), RAW_CPU()); ++ break; ++ case CPU_DEAD: ++ case CPU_DEAD_FROZEN: ++ /* CPU is offline -- store top change */ ++ sw_store_topology_change_i(SW_CPU_ACTION_OFFLINE, cpu, core_id, ++ pkg_id); ++ pw_pr_debug( ++ "DEBUG: SoC Watch has cpu %d offlined at tsc = %llu! 
Current cpu = %d\n", ++ cpu, sw_timestamp(), RAW_CPU()); ++ break; ++ default: ++ break; ++ } ++ return NOTIFY_OK; ++}; ++#else ++static void sw_probe_cpuhp_helper_i(unsigned int cpu, enum cpu_action action) ++{ ++ unsigned int pkg_id = topology_physical_package_id(cpu); ++ unsigned int core_id = topology_core_id(cpu); ++ ++ switch (action) { ++ case SW_CPU_ACTION_ONLINE_PREPARE: ++ /* CPU is coming online -- store top change */ ++ sw_store_topology_change_i(action, cpu, core_id, pkg_id); ++ break; ++ case SW_CPU_ACTION_ONLINE: ++ /* CPU is online -- first store top change ++ then take BEGIN snapshot */ ++ sw_store_topology_change_i(action, cpu, core_id, pkg_id); ++ sw_process_snapshot_on_cpu(SW_WHEN_TYPE_BEGIN, cpu); ++ break; ++ case SW_CPU_ACTION_OFFLINE: ++ /* CPU is preparing to go offline -- take ++ END snapshot then store top change */ ++ sw_process_snapshot_on_cpu(SW_WHEN_TYPE_END, cpu); ++ sw_store_topology_change_i(action, cpu, core_id, pkg_id); ++ break; ++ default: ++ break; ++ } ++} ++static int sw_probe_cpu_offline_i(unsigned int cpu) ++{ ++ printk(KERN_INFO "DEBUG: offline notification for cpu %u at %llu\n", ++ cpu, sw_tscval()); ++ sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_OFFLINE); ++ return 0; ++} ++static int sw_probe_cpu_online_i(unsigned int cpu) ++{ ++ printk(KERN_INFO "DEBUG: online notification for cpu %u at %llu\n", cpu, ++ sw_tscval()); ++ sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_ONLINE_PREPARE); ++ sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_ONLINE); ++ return 0; ++} ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++ ++/* ++ * 2. 
CPUFREQ notifier ++ */ ++static int sw_probe_cpufreq_notifier_i(struct notifier_block *block, ++ unsigned long state, void *data) ++{ ++ struct cpufreq_freqs *freqs = data; ++ static struct sw_trace_notifier_data *node; ++ int cpu = freqs->cpu; ++ ++ if (state == CPUFREQ_PRECHANGE) { ++ pw_pr_debug( ++ "CPU %d reports a CPUFREQ_PRECHANGE for target CPU %d at TSC = %llu\n", ++ RAW_CPU(), cpu, sw_timestamp()); ++ if (unlikely(node == NULL)) { ++ node = GET_COLLECTOR_NOTIFIER_NODE( ++ SW_NOTIFIER_ID_CPUFREQ); ++ pw_pr_debug("NODE = %p\n", node); ++ } ++ /* Force an atomic context by disabling preemption */ ++ get_cpu(); ++ DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, cpu, node); ++ put_cpu(); ++ } ++ return NOTIFY_DONE; ++} ++/* ++ * 1. TPS. ++ */ ++int sw_register_trace_cpu_idle_i(struct sw_trace_notifier_data *node) ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, power_start, ++ sw_probe_power_start_i); ++#else // kernel version >= 2.6.38 ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, cpu_idle, sw_probe_cpu_idle_i); ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_cpu_idle_i(struct sw_trace_notifier_data *node) ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, power_start, ++ sw_probe_power_start_i); ++#else // kernel version >= 2.6.38 ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, cpu_idle, sw_probe_cpu_idle_i); ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ++ return PW_SUCCESS; ++}; ++/* ++ * 2. 
TPF ++ */ ++int sw_register_trace_cpu_frequency_i(struct sw_trace_notifier_data *node) ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, power_frequency, ++ sw_probe_power_frequency_i); ++#else // kernel version >= 2.6.38 ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, cpu_frequency, ++ sw_probe_cpu_frequency_i); ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_cpu_frequency_i(struct sw_trace_notifier_data *node) ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, power_frequency, ++ sw_probe_power_frequency_i); ++#else // kernel version >= 2.6.38 ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, cpu_frequency, ++ sw_probe_cpu_frequency_i); ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ++ return PW_SUCCESS; ++}; ++/* ++ * 3. IRQ handler entry ++ */ ++int sw_register_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, irq_handler_entry, ++ sw_probe_irq_handler_entry_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, irq_handler_entry, ++ sw_probe_irq_handler_entry_i); ++ return PW_SUCCESS; ++}; ++/* ++ * 4. TIMER expire. ++ */ ++int sw_register_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, timer_expire_entry, ++ sw_probe_timer_expire_entry_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, timer_expire_entry, ++ sw_probe_timer_expire_entry_i); ++ return PW_SUCCESS; ++}; ++/* ++ * 5. HRTIMER expire. 
++ */ ++int sw_register_trace_hrtimer_expire_entry_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, hrtimer_expire_entry, ++ sw_probe_hrtimer_expire_entry_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_hrtimer_expire_entry_i( ++ struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, hrtimer_expire_entry, ++ sw_probe_hrtimer_expire_entry_i); ++ return PW_SUCCESS; ++}; ++/* ++ * 6. SCHED wakeup ++ */ ++int sw_register_trace_sched_wakeup_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, sched_wakeup, ++ sw_probe_sched_wakeup_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_sched_wakeup_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_wakeup, ++ sw_probe_sched_wakeup_i); ++ return PW_SUCCESS; ++}; ++/* ++ * 8. PROCESS fork ++ */ ++int sw_register_trace_sched_process_fork_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, sched_process_fork, ++ sw_probe_sched_process_fork_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_sched_process_fork_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_process_fork, ++ sw_probe_sched_process_fork_i); ++ return PW_SUCCESS; ++}; ++/* ++ * 9. PROCESS exit ++ */ ++int sw_register_trace_sched_process_exit_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, sched_process_exit, ++ sw_probe_sched_process_exit_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_sched_process_exit_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_process_exit, ++ sw_probe_sched_process_exit_i); ++ return PW_SUCCESS; ++}; ++/* ++ * 10. 
THERMAL_APIC entry ++ */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++int sw_register_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_entry, ++ sw_probe_thermal_apic_entry_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_entry, ++ sw_probe_thermal_apic_entry_i); ++ return PW_SUCCESS; ++}; ++/* ++ * 10. THERMAL_APIC exit ++ */ ++int sw_register_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_exit, ++ sw_probe_thermal_apic_exit_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_exit, ++ sw_probe_thermal_apic_exit_i); ++ return PW_SUCCESS; ++}; ++#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++/* ++ * 11. WAKE lock / WAKEUP source activate. 
++ */ ++#if IS_ENABLED(CONFIG_ANDROID) ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++int sw_register_trace_wake_lock_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, wake_lock, sw_probe_wake_lock_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_wake_lock_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wake_lock, ++ sw_probe_wake_lock_i); ++ return PW_SUCCESS; ++}; ++#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++int sw_register_trace_wakeup_source_activate_i( ++ struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, wakeup_source_activate, ++ sw_probe_wakeup_source_activate_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_wakeup_source_activate_i( ++ struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wakeup_source_activate, ++ sw_probe_wakeup_source_activate_i); ++ return PW_SUCCESS; ++}; ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ++/* ++ * 11. WAKE unlock / WAKEUP source deactivate. 
++ */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++int sw_register_trace_wake_unlock_i(struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, wake_unlock, ++ sw_probe_wake_unlock_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_wake_unlock_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wake_unlock, ++ sw_probe_wake_unlock_i); ++ return PW_SUCCESS; ++}; ++#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++int sw_register_trace_wakeup_source_deactivate_i( ++ struct sw_trace_notifier_data *node) ++{ ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, wakeup_source_deactivate, ++ sw_probe_wakeup_source_deactivate_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_wakeup_source_deactivate_i( ++ struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wakeup_source_deactivate, ++ sw_probe_wakeup_source_deactivate_i); ++ return PW_SUCCESS; ++}; ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ++#endif // CONFIG_ANDROID ++/* ++ * 12. WORKQUEUE execution. ++ */ ++int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data *node) ++{ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execution, ++ sw_probe_workqueue_execution_i); ++#else ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execute_start, ++ sw_probe_workqueue_execute_start_i); ++#endif ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_workqueue_execution_i( ++ struct sw_trace_notifier_data *node) ++{ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execution, ++ sw_probe_workqueue_execution_i); ++#else ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execute_start, ++ sw_probe_workqueue_execute_start_i); ++#endif ++ return PW_SUCCESS; ++}; ++/* ++ * 13. 
SCHED switch ++ */ ++int sw_register_trace_sched_switch_i(struct sw_trace_notifier_data *node) ++{ ++ /* ++ * Set polling tick time, in jiffies. ++ * Used by the context switch tracepoint to decide ++ * if enough time has elapsed since the last ++ * collection point to read resources again. ++ */ ++ { ++ int cpu = 0; ++ for_each_present_cpu(cpu) { ++ *(&per_cpu(sw_pcpu_polling_jiff, cpu)) = jiffies; ++ } ++ } ++ DO_REGISTER_SW_TRACEPOINT_PROBE(node, sched_switch, ++ sw_probe_sched_switch_i); ++ return PW_SUCCESS; ++}; ++int sw_unregister_trace_sched_switch_i(struct sw_trace_notifier_data *node) ++{ ++ DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_switch, ++ sw_probe_sched_switch_i); ++ return PW_SUCCESS; ++}; ++/* ++ * Notifier register/unregister functions. ++ */ ++/* ++ * 1. SUSPEND notifier. ++ */ ++static struct notifier_block sw_pm_notifier = { ++ .notifier_call = &sw_probe_pm_notifier_i, ++}; ++int sw_register_pm_notifier_i(struct sw_trace_notifier_data *node) ++{ ++ register_pm_notifier(&sw_pm_notifier); ++ return PW_SUCCESS; ++}; ++int sw_unregister_pm_notifier_i(struct sw_trace_notifier_data *node) ++{ ++ unregister_pm_notifier(&sw_pm_notifier); ++ return PW_SUCCESS; ++}; ++/* ++ * 2. CPUFREQ notifier. ++ */ ++static struct notifier_block sw_cpufreq_notifier = { ++ .notifier_call = &sw_probe_cpufreq_notifier_i, ++}; ++int sw_register_cpufreq_notifier_i(struct sw_trace_notifier_data *node) ++{ ++ cpufreq_register_notifier(&sw_cpufreq_notifier, ++ CPUFREQ_TRANSITION_NOTIFIER); ++ return PW_SUCCESS; ++}; ++int sw_unregister_cpufreq_notifier_i(struct sw_trace_notifier_data *node) ++{ ++ cpufreq_unregister_notifier(&sw_cpufreq_notifier, ++ CPUFREQ_TRANSITION_NOTIFIER); ++ return PW_SUCCESS; ++}; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) ++/* ++ * 3. CPU hot plug notifier. 
++ */ ++struct notifier_block sw_cpu_hotplug_notifier = { ++ .notifier_call = &sw_probe_hotplug_notifier_i, ++}; ++ ++int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node) ++{ ++ register_hotcpu_notifier(&sw_cpu_hotplug_notifier); ++ return PW_SUCCESS; ++}; ++int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node) ++{ ++ unregister_hotcpu_notifier(&sw_cpu_hotplug_notifier); ++ return PW_SUCCESS; ++}; ++#else // LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) ++static int sw_cpuhp_state = -1; ++int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node) ++{ ++ sw_cpuhp_state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, ++ "socwatch:online", ++ &sw_probe_cpu_online_i, ++ &sw_probe_cpu_offline_i); ++ if (sw_cpuhp_state < 0) { ++ pw_pr_error("couldn't register socwatch hotplug callbacks!\n"); ++ return -EIO; ++ } ++ return 0; ++}; ++int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node) ++{ ++ if (sw_cpuhp_state >= 0) { ++ cpuhp_remove_state_nocalls((enum cpuhp_state)sw_cpuhp_state); ++ } ++ return 0; ++}; ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++ ++/* ++ * Tracepoint extraction routines. ++ * Required for newer kernels (>=3.15) ++ */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) ++static void sw_extract_tracepoint_callback(struct tracepoint *tp, void *priv) ++{ ++ struct sw_trace_notifier_data *node = NULL; ++ int i = 0; ++ int *numStructsFound = (int *)priv; ++ ++ if (*numStructsFound == NUM_VALID_TRACEPOINTS) { ++ /* ++ * We've found all the tracepoints we need. 
++ */ ++ return; ++ } ++ if (tp) { ++ FOR_EACH_TRACEPOINT_NODE(i, node) ++ { ++ if (node->tp == NULL && node->name) { ++ const char *name = ++ sw_get_trace_notifier_kernel_name(node); ++ if (name && !strcmp(tp->name, name)) { ++ node->tp = tp; ++ ++*numStructsFound; ++ pw_pr_debug("OK, found TP %s\n", ++ tp->name); ++ } ++ } ++ } ++ } ++}; ++#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) ++ ++/* ++ * Retrieve the list of tracepoint structs to use when registering and unregistering ++ * tracepoint handlers. ++ */ ++int sw_extract_trace_notifier_providers(void) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) && \ ++ defined(CONFIG_TRACEPOINTS) ++ int numCallbacks = 0; ++ ++ for_each_kernel_tracepoint(&sw_extract_tracepoint_callback, ++ &numCallbacks); ++ /* ++ * Did we get the complete list? ++ */ ++ if (numCallbacks != NUM_VALID_TRACEPOINTS) { ++ printk(KERN_WARNING ++ "WARNING: Could NOT find tracepoint structs for some tracepoints!\n"); ++ } ++#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) ++ return PW_SUCCESS; ++}; ++ ++void sw_reset_trace_notifier_providers(void) ++{ ++ /* ++ * Reset the wakeup flag. Not strictly required if we aren't probing ++ * any of the wakeup tracepoints. ++ */ ++ { ++ int cpu = 0; ++ ++ for_each_online_cpu(cpu) { ++ RESET_VALID_WAKEUP_EVENT_COUNTER(cpu); ++ } ++ } ++ /* ++ * Reset the wakeup event flag. Not strictly required if we ++ * aren't probing any of the wakeup tracepoints. Will be reset ++ * in the power_start tracepoint if user requested a c-state ++ * collection. 
++ */ ++ sw_wakeup_event_flag = true; ++}; ++ ++void sw_print_trace_notifier_provider_overheads(void) ++{ ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_tps_i, "TPS"); ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_tpf_i, "TPF"); ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_irq_wakeup_i, "IRQ"); ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_timer_wakeup_helper_i, ++ "TIMER_EXPIRE"); ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_sched_wakeup_i, ++ "SCHED WAKEUP"); ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_process_fork_exit_helper_i, ++ "PROCESS FORK/EXIT"); ++#if IS_ENABLED(CONFIG_ANDROID) ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_wakelock_i, ++ "WAKE LOCK/UNLOCK"); ++#endif // CONFIG_ANDROID ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_workqueue_wakeup_helper_i, ++ "WORKQUEUE"); ++ PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_sched_switch_helper_i, ++ "SCHED SWITCH"); ++}; ++/* ++ * Add all trace/notifier providers. ++ */ ++int sw_add_trace_notifier_providers(void) ++{ ++ struct sw_trace_notifier_data *node = NULL; ++ int i = 0; ++ ++ FOR_EACH_TRACEPOINT_NODE(i, node) ++ { ++ if (sw_register_trace_notify_provider(node)) { ++ pw_pr_error("ERROR: couldn't add a trace provider!\n"); ++ return -EIO; ++ } ++ } ++ FOR_EACH_NOTIFIER_NODE(i, node) ++ { ++ if (sw_register_trace_notify_provider(node)) { ++ pw_pr_error( ++ "ERROR: couldn't add a notifier provider!\n"); ++ return -EIO; ++ } ++ } ++ /* ++ * Add the cpu hot plug notifier. ++ */ ++ { ++ if (sw_register_trace_notify_provider( ++ &s_hotplug_notifier_data)) { ++ pw_pr_error( ++ "ERROR: couldn't add cpu notifier provider!\n"); ++ return -EIO; ++ } ++ } ++ return PW_SUCCESS; ++} ++/* ++ * Remove previously added providers. 
++ */ ++void sw_remove_trace_notifier_providers(void) ++{ /* NOP */ ++} +diff --git a/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c b/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c +new file mode 100644 +index 000000000000..bc335ce9a65e +--- /dev/null ++++ b/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c +@@ -0,0 +1,399 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. 
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#include "sw_structs.h" ++#include "sw_kernel_defines.h" ++#include "sw_types.h" ++#include "sw_tracepoint_handlers.h" ++#include "sw_trace_notifier_provider.h" ++#include "sw_mem.h" ++ ++/* ------------------------------------------------- ++ * Data structures and variable definitions. ++ * ------------------------------------------------- ++ */ ++struct sw_trace_list_node { ++ struct sw_trace_notifier_data *data; ++ int id; ++ ++ SW_LIST_ENTRY(list, sw_trace_list_node); ++}; ++static SW_DEFINE_LIST_HEAD(s_trace_list, sw_trace_list_node) = ++ SW_LIST_HEAD_INITIALIZER(s_trace_list); ++static SW_DEFINE_LIST_HEAD(s_notifier_list, sw_trace_list_node) = ++ SW_LIST_HEAD_INITIALIZER(s_notifier_list); ++static int s_trace_idx = -1, s_notifier_idx = -1; ++ ++SW_DEFINE_LIST_HEAD(sw_topology_list, sw_topology_node) = ++ SW_LIST_HEAD_INITIALIZER(sw_topology_list); ++size_t sw_num_topology_entries = 0; ++ ++/* ------------------------------------------------- ++ * Function definitions. 
++ * ------------------------------------------------- ++ */ ++int sw_extract_tracepoints(void) ++{ ++ return sw_extract_trace_notifier_providers(); ++} ++ ++void sw_reset_trace_notifier_lists(void) ++{ ++ sw_reset_trace_notifier_providers(); ++} ++ ++void sw_print_trace_notifier_overheads(void) ++{ ++ sw_print_trace_notifier_provider_overheads(); ++} ++ ++static int sw_for_each_node_i(void *list_head, ++ int (*func)(struct sw_trace_notifier_data *node, ++ void *priv), ++ void *priv, bool return_on_error) { ++ SW_LIST_HEAD_VAR(sw_trace_list_node) * head = list_head; ++ int retval = PW_SUCCESS; ++ struct sw_trace_list_node *lnode = NULL; ++ ++ SW_LIST_FOR_EACH_ENTRY(lnode, head, list) ++ { ++ if ((*func)(lnode->data, priv)) { ++ retval = -EIO; ++ if (return_on_error) { ++ break; ++ } ++ } ++ } ++ return retval; ++} ++ ++int sw_for_each_tracepoint_node(int (*func)(struct sw_trace_notifier_data *node, ++ void *priv), ++ void *priv, bool return_on_error) { ++ if (func) { ++ return sw_for_each_node_i(&s_trace_list, func, priv, ++ return_on_error); ++ } ++ return PW_SUCCESS; ++} ++ ++int sw_for_each_notifier_node(int (*func)(struct sw_trace_notifier_data *node, ++ void *priv), ++ void *priv, bool return_on_error) { ++ if (func) { ++ return sw_for_each_node_i(&s_notifier_list, func, priv, ++ return_on_error); ++ } ++ return PW_SUCCESS; ++} ++ ++/* ++ * Retrieve the ID for the corresponding tracepoint/notifier. 
++ */ ++int sw_get_trace_notifier_id(struct sw_trace_notifier_data *tnode) ++{ ++ struct sw_trace_list_node *lnode = NULL; ++ ++ SW_LIST_HEAD_VAR(sw_trace_list_node) * head = (void *)&s_trace_list; ++ if (!tnode) { ++ pw_pr_error( ++ "ERROR: cannot get ID for NULL trace/notifier data!\n"); ++ return -EIO; ++ } ++ if (!(tnode->type == SW_TRACE_COLLECTOR_TRACEPOINT || ++ tnode->type == SW_TRACE_COLLECTOR_NOTIFIER)) { ++ pw_pr_error( ++ "ERROR: cannot get ID for invalid trace/notifier data!\n"); ++ return -EIO; ++ } ++ if (!tnode->name || !tnode->name->abstract_name) { ++ pw_pr_error( ++ "ERROR: cannot get ID for trace/notifier data without valid name!\n"); ++ return -EIO; ++ } ++#ifdef LINUX_VERSION_CODE ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) && \ ++ defined(CONFIG_TRACEPOINTS) ++ if (tnode->type == SW_TRACE_COLLECTOR_TRACEPOINT && ++ tnode->name->kernel_name && !tnode->tp) { ++ /* No tracepoint structure found so no ID possible */ ++ return -EIO; ++ } ++#endif ++#endif ++ if (tnode->type == SW_TRACE_COLLECTOR_NOTIFIER) { ++ head = (void *)&s_notifier_list; ++ } ++ SW_LIST_FOR_EACH_ENTRY(lnode, head, list) ++ { ++ struct sw_trace_notifier_data *data = lnode->data; ++ ++ if (!strcmp(data->name->abstract_name, ++ tnode->name->abstract_name)) { ++ return lnode->id; ++ } ++ } ++ return -1; ++} ++/* ++ * Retrieve the "kernel" name for this tracepoint/notifier. ++ */ ++const char * ++sw_get_trace_notifier_kernel_name(struct sw_trace_notifier_data *node) ++{ ++ return node->name->kernel_name; ++}; ++/* ++ * Retrieve the "abstract" name for this tracepoint/notifier. ++ */ ++const char * ++sw_get_trace_notifier_abstract_name(struct sw_trace_notifier_data *node) ++{ ++ return node->name->abstract_name; ++}; ++ ++/* ++ * Add a single TRACE/NOTIFY provider. 
++ */ ++int sw_register_trace_notify_provider(struct sw_trace_notifier_data *data) ++{ ++ struct sw_trace_list_node *lnode = NULL; ++ ++ if (!data) { ++ pw_pr_error( ++ "ERROR: cannot add NULL trace/notifier provider!\n"); ++ return -EIO; ++ } ++ if (!(data->type == SW_TRACE_COLLECTOR_TRACEPOINT || ++ data->type == SW_TRACE_COLLECTOR_NOTIFIER)) { ++ pw_pr_error("ERROR: cannot add invalid trace/notifier data!\n"); ++ return -EIO; ++ } ++ /* ++ * Kernel name is allowed to be NULL, but abstract name MUST be present! ++ */ ++ if (!data->name || !data->name->abstract_name) { ++ pw_pr_error( ++ "ERROR: cannot add trace/notifier provider without an abstract name!\n"); ++ pw_pr_error("ERROR: data->name = %p\n", data->name); ++ return -EIO; ++ } ++ lnode = sw_kmalloc(sizeof(*lnode), GFP_KERNEL); ++ if (!lnode) { ++ pw_pr_error( ++ "ERROR: couldn't allocate a list node when adding a trace/notifier provider!\n"); ++ return -ENOMEM; ++ } ++ lnode->data = data; ++ SW_LIST_ENTRY_INIT(lnode, list); ++ if (data->type == SW_TRACE_COLLECTOR_TRACEPOINT) { ++ lnode->id = ++s_trace_idx; ++ SW_LIST_ADD(&s_trace_list, lnode, list); ++ } else { ++ lnode->id = ++s_notifier_idx; ++ SW_LIST_ADD(&s_notifier_list, lnode, list); ++ } ++ return PW_SUCCESS; ++} ++/* ++ * Add all TRACE/NOTIFY providers. ++ */ ++int sw_add_trace_notify(void) ++{ ++ return sw_add_trace_notifier_providers(); ++} ++ ++static void sw_free_trace_notifier_list_i(void *list_head) ++{ ++ SW_LIST_HEAD_VAR(sw_trace_list_node) * head = list_head; ++ while (!SW_LIST_EMPTY(head)) { ++ struct sw_trace_list_node *lnode = ++ SW_LIST_GET_HEAD_ENTRY(head, sw_trace_list_node, list); ++ SW_LIST_UNLINK(lnode, list); ++ sw_kfree(lnode); ++ } ++} ++/* ++ * Remove TRACE/NOTIFY providers. ++ */ ++void sw_remove_trace_notify(void) ++{ ++ /* ++ * Free all nodes. ++ */ ++ sw_free_trace_notifier_list_i(&s_trace_list); ++ sw_free_trace_notifier_list_i(&s_notifier_list); ++ /* ++ * Call our providers to deallocate resources. 
++ */ ++ sw_remove_trace_notifier_providers(); ++ /* ++ * Clear out the topology list ++ */ ++ sw_clear_topology_list(); ++} ++ ++#define REG_FLAG (void *)1 ++#define UNREG_FLAG (void *)2 ++static int sw_reg_unreg_node_i(struct sw_trace_notifier_data *node, ++ void *is_reg) ++{ ++ if (is_reg == REG_FLAG) { ++ /* ++ * Do we have anything to collect? ++ * Update: or were we asked to always register? ++ */ ++ if (SW_LIST_EMPTY(&node->list) && !node->always_register) { ++ return PW_SUCCESS; ++ } ++ /* ++ * Sanity: ensure we have a register AND an ++ * unregister function before proceeding! ++ */ ++ if (node->probe_register == NULL || ++ node->probe_unregister == NULL) { ++ pw_pr_debug( ++ "WARNING: invalid trace/notifier register/unregister function for %s\n", ++ sw_get_trace_notifier_kernel_name(node)); ++ /* ++ * Don't flag this as an error -- ++ * some socwatch trace providers don't have a ++ * register/unregister function ++ */ ++ return PW_SUCCESS; ++ } ++ if ((*node->probe_register)(node)) { ++ return -EIO; ++ } ++ node->was_registered = true; ++ return PW_SUCCESS; ++ } else if (is_reg == UNREG_FLAG) { ++ if (node->was_registered) { ++ /* ++ * No need to check for validity of probe ++ * unregister function -- 'sw_register_notifiers_i()' ++ * would already have done so! ++ */ ++ WARN_ON((*node->probe_unregister)(node)); ++ node->was_registered = false; ++ pw_pr_debug("OK, unregistered trace/notifier for %s\n", ++ sw_get_trace_notifier_kernel_name(node)); ++ } ++ return PW_SUCCESS; ++ } ++ pw_pr_error("ERROR: invalid reg/unreg flag value 0x%lx\n", ++ (unsigned long)is_reg); ++ return -EIO; ++} ++/* ++ * Register all required tracepoints and notifiers. ++ */ ++int sw_register_trace_notifiers(void) ++{ ++ /* ++ * First, the tracepoints. ++ */ ++ if (sw_for_each_tracepoint_node(&sw_reg_unreg_node_i, REG_FLAG, ++ true /* return on error */)) { ++ pw_pr_error("ERROR registering some tracepoints\n"); ++ return -EIO; ++ } ++ /* ++ * And then the notifiers. 
++ */
++	if (sw_for_each_notifier_node(&sw_reg_unreg_node_i, REG_FLAG,
++				      true /* return on error */)) {
++		pw_pr_error("ERROR registering some notifiers\n");
++		return -EIO;
++	}
++	return PW_SUCCESS;
++};
++/*
++ * Unregister all previously registered tracepoints and notifiers.
++ */
++int sw_unregister_trace_notifiers(void)
++{
++	/*
++	 * First, the notifiers.
++	 */
++	if (sw_for_each_notifier_node(&sw_reg_unreg_node_i, UNREG_FLAG,
++				      true /* return on error */)) {
++		pw_pr_error("ERROR unregistering some notifiers\n");
++		return -EIO;
++	}
++	/*
++	 * And then the tracepoints.
++	 */
++	if (sw_for_each_tracepoint_node(&sw_reg_unreg_node_i, UNREG_FLAG,
++					true /* return on error */)) {
++		pw_pr_error("ERROR unregistering some tracepoints\n");
++		return -EIO;
++	}
++	return PW_SUCCESS;
++};
++
++void sw_clear_topology_list(void)
++{
++	SW_LIST_HEAD_VAR(sw_topology_node) * head = &sw_topology_list;
++	while (!SW_LIST_EMPTY(head)) {
++		struct sw_topology_node *lnode =
++			SW_LIST_GET_HEAD_ENTRY(head, sw_topology_node, list);
++		pw_pr_debug("Clearing topology node for cpu %d\n",
++			    lnode->change.cpu);
++		SW_LIST_UNLINK(lnode, list);
++		sw_kfree(lnode);
++	}
++	sw_num_topology_entries = 0;
++}
+diff --git a/drivers/platform/x86/socwatchhv/Kconfig b/drivers/platform/x86/socwatchhv/Kconfig
+new file mode 100644
+index 000000000000..3226632de1fc
+--- /dev/null
++++ b/drivers/platform/x86/socwatchhv/Kconfig
+@@ -0,0 +1,6 @@
++menuconfig INTEL_SOCWATCH_HV
++	depends on X86 && ACRN_VHM && ACRN_SHARED_BUFFER
++	tristate "SocWatch Hypervisor Driver Support"
++	default m
++	help
++	  Say Y here to enable SocWatch hypervisor driver
+diff --git a/drivers/platform/x86/socwatchhv/Makefile b/drivers/platform/x86/socwatchhv/Makefile
+new file mode 100644
+index 000000000000..bd4b58a61f06
+--- /dev/null
++++ b/drivers/platform/x86/socwatchhv/Makefile
+@@ -0,0 +1,20 @@
++#
++# Makefile for the socwatch hv driver. 
++# ++ ++DRIVER_BASE=socwatchhv ++DRIVER_MAJOR=2 ++DRIVER_MINOR=0 ++# basic name of driver ++DRIVER_NAME=${DRIVER_BASE}${DRIVER_MAJOR}_${DRIVER_MINOR} ++ ++HYPERVISOR=2 # ACRN ++ ++ccflags-y += -Idrivers/ \ ++ -Idrivers/platform/x86/socwatchhv/inc/ \ ++ -DHYPERVISOR=$(HYPERVISOR) ++ ++obj-$(CONFIG_INTEL_SOCWATCH_HV) += $(DRIVER_NAME).o ++ ++$(DRIVER_NAME)-objs := swhv_driver.o \ ++ swhv_acrn.o +diff --git a/drivers/platform/x86/socwatchhv/control.c b/drivers/platform/x86/socwatchhv/control.c +new file mode 100644 +index 000000000000..4d1c384b1fe8 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/control.c +@@ -0,0 +1,141 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. 
++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#include ++ ++#include "control.h" ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) ++#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \ ++ smp_call_function((func), (ctx), (wait)) ++#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \ ++ smp_call_function_single((cpuid), (func), (ctx), (wait)) ++#define ON_EACH_CPU(func, ctx, retry, wait) on_each_cpu((func), (ctx), (wait)) ++#else ++#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \ ++ smp_call_function((func), (ctx), (retry), (wait)) ++#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \ ++ smp_call_function_single((cpuid), (func), (ctx), (retry), (wait)) ++#define ON_EACH_CPU(func, ctx, retry, wait) \ ++ on_each_cpu((func), (ctx), (retry), (wait)) ++#endif ++ ++extern int num_CPUs; ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID CONTROL_Invoke_Cpu (func, ctx, arg) ++ * ++ * @brief Set up a DPC call and insert it into the queue ++ * ++ * @param IN cpu_idx - the core id to dispatch this function to ++ * IN func - function to be invoked by the specified core(s) ++ * IN ctx - pointer to the parameter block for each function ++ * invocation ++ * ++ * @return None ++ * ++ * Special Notes: ++ * ++ */ ++extern void CONTROL_Invoke_Cpu(int cpu_idx, void (*func)(pvoid), pvoid ctx) ++{ ++ SMP_CALL_FUNCTION_SINGLE(cpu_idx, func, ctx, 0, 1); ++ ++ return; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * @param blocking - Wait for invoked function to complete ++ * @param exclude - exclude the current core from executing the code ++ * ++ * @returns None ++ * ++ * @brief Service routine to handle all kinds of parallel invoke on all CPU calls ++ * ++ * Special Notes: ++ * Invoke the function provided in parallel in either a blocking or ++ * non-blocking mode. The current core may be excluded if desired. ++ * NOTE - Do not call this function directly from source code. ++ * Use the aliases CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), ++ * or CONTROL_Invoke_Parallel_XS(). 
++ * ++ */ ++extern void CONTROL_Invoke_Parallel_Service(void (*func)(pvoid), pvoid ctx, ++ int blocking, int exclude) ++{ ++ if (num_CPUs == 1) { ++ if (!exclude) { ++ func(ctx); ++ } ++ return; ++ } ++ if (!exclude) { ++ ON_EACH_CPU(func, ctx, 0, blocking); ++ return; ++ } ++ ++ preempt_disable(); ++ SMP_CALL_FUNCTION(func, ctx, 0, blocking); ++ preempt_enable(); ++ ++ return; ++} +diff --git a/drivers/platform/x86/socwatchhv/inc/asm_helper.h b/drivers/platform/x86/socwatchhv/inc/asm_helper.h +new file mode 100644 +index 000000000000..d09a3bbd19cb +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/asm_helper.h +@@ -0,0 +1,158 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. 
++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++*/ ++ ++#ifndef _ASM_HELPER_H_ ++#define _ASM_HELPER_H_ ++ ++#include ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) ++ ++#include ++#include ++ ++#else ++ ++#ifdef CONFIG_AS_CFI ++ ++#define CFI_STARTPROC (.cfi_startproc) ++#define CFI_ENDPROC (.cfi_endproc) ++#define CFI_ADJUST_CFA_OFFSET (.cfi_adjust_cfa_offset) ++#define CFI_REL_OFFSET (.cfi_rel_offset) ++#define CFI_RESTORE (.cfi_restore) ++ ++#else ++ ++.macro cfi_ignore a = 0, b = 0, c = 0, d = 0.endm ++ ++#define CFI_STARTPROC cfi_ignore ++#define CFI_ENDPROC cfi_ignore ++#define CFI_ADJUST_CFA_OFFSET cfi_ignore ++#define CFI_REL_OFFSET cfi_ignore ++#define CFI_RESTORE cfi_ignore ++#endif ++ ++#ifdef CONFIG_X86_64 ++.macro SAVE_C_REGS_HELPER ++ offset = 0 rax = 1 rcx = 1 r8910 = 1 r11 = 1.if \r11 movq % r11, ++ 6 * 8 +\offset(% rsp) CFI_REL_OFFSET r11, \offset.endif.if \r8910 movq ++ % r10, ++ 7 * 8 +\offset(% rsp) CFI_REL_OFFSET r10, \offset movq % r9, ++ 8 * 8 +\offset(% rsp) CFI_REL_OFFSET r9, \offset movq % r8, ++ 9 * 8 +\offset(% rsp) CFI_REL_OFFSET r8, \offset.endif.if \rax movq ++ % rax, ++ 10 * 8 +\offset(% rsp) CFI_REL_OFFSET rax, \offset.endif.if \rcx movq ++ % rcx, ++ 11 * 8 +\offset(% rsp) CFI_REL_OFFSET rcx, \offset.endif movq % rdx, ++ 12 * 8 +\offset(% rsp) CFI_REL_OFFSET rdx, \offset movq % rsi, ++ 13 * 8 +\offset(% rsp) CFI_REL_OFFSET rsi, \offset movq % rdi, ++ 14 * 8 +\offset(% rsp) CFI_REL_OFFSET rdi, \offset.endm.macro ++ SAVE_C_REGS offset = ++ 0 SAVE_C_REGS_HELPER \offset ++ , ++ 1, 1, 1, 1.endm.macro SAVE_EXTRA_REGS offset = 0 movq % r15, ++ 0 * 8 +\offset(% rsp) CFI_REL_OFFSET r15, \offset movq % r14, ++ 1 * 8 +\offset(% rsp) CFI_REL_OFFSET r14, \offset movq % r13, ++ 2 * 8 +\offset(% rsp) CFI_REL_OFFSET r13, \offset movq % r12, ++ 3 * 8 +\offset(% rsp) CFI_REL_OFFSET r12, \offset movq % rbp, ++ 4 * 8 +\offset(% rsp) CFI_REL_OFFSET rbp, \offset movq % rbx, ++ 5 * 8 +\offset(% rsp) CFI_REL_OFFSET rbx, \offset.endm ++ ++ .macro ++ RESTORE_EXTRA_REGS offset = ++ 0 movq 0 * 
8 +\offset( ++ % rsp), ++ % r15 CFI_RESTORE r15 movq 1 * 8 +\offset(% rsp), ++ % r14 CFI_RESTORE r14 movq 2 * 8 +\offset(% rsp), ++ % r13 CFI_RESTORE r13 movq 3 * 8 +\offset(% rsp), ++ % r12 CFI_RESTORE r12 movq 4 * 8 +\offset(% rsp), ++ % rbp CFI_RESTORE rbp movq 5 * 8 +\offset(% rsp), ++ % rbx CFI_RESTORE rbx.endm.macro RESTORE_C_REGS_HELPER rstor_rax = 1, ++ rstor_rcx = 1, rstor_r11 = 1, ++ rstor_r8910 = 1, rstor_rdx = 1.if \rstor_r11 movq 6 * 8(% rsp), ++ % r11 CFI_RESTORE r11.endif.if \rstor_r8910 movq 7 * 8(% rsp), ++ % r10 CFI_RESTORE r10 movq 8 * 8(% rsp), ++ % r9 CFI_RESTORE r9 movq 9 * 8(% rsp), ++ % r8 CFI_RESTORE r8.endif.if \rstor_rax movq 10 * 8(% rsp), ++ % rax CFI_RESTORE rax.endif.if \rstor_rcx movq 11 * 8(% rsp), ++ % rcx CFI_RESTORE rcx.endif.if \rstor_rdx movq 12 * 8(% rsp), ++ % rdx CFI_RESTORE rdx.endif movq 13 * 8(% rsp), ++ % rsi CFI_RESTORE rsi movq 14 * 8(% rsp), ++ % rdi CFI_RESTORE rdi.endm.macro RESTORE_C_REGS RESTORE_C_REGS_HELPER 1, ++ 1, 1, 1, ++ 1.endm ++ ++ .macro ALLOC_PT_GPREGS_ON_STACK addskip = 0 subq $15 * ++ 8 +\addskip, ++ % rsp CFI_ADJUST_CFA_OFFSET ++ 15 * 8 +\addskip.endm ++ ++ .macro REMOVE_PT_GPREGS_FROM_STACK ++ addskip = 0 addq $15 * 8 +\addskip, ++ % rsp CFI_ADJUST_CFA_OFFSET - ++ (15 * 8 +\addskip) ++ .endm ++ ++ .macro SAVE_ALL ALLOC_PT_GPREGS_ON_STACK SAVE_C_REGS ++ SAVE_EXTRA_REGS ++ .endm ++ ++ .macro RESTORE_ALL RESTORE_EXTRA_REGS RESTORE_C_REGS ++ REMOVE_PT_GPREGS_FROM_STACK.endm ++#endif //CONFIG_X86_64 ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/socwatchhv/inc/control.h b/drivers/platform/x86/socwatchhv/inc/control.h +new file mode 100644 +index 000000000000..7403150dd679 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/control.h +@@ -0,0 +1,194 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef _CONTROL_H_ ++#define _CONTROL_H_ ++ ++#include ++#include ++#include ++#include ++ ++#include "swhv_driver.h" ++/**************************************************************************** ++ ** Handy Short cuts ++ ***************************************************************************/ ++ ++typedef void *pvoid; ++#define TRUE 1 ++#define FALSE 0 ++/* ++ * These routines have macros defined in asm/system.h ++ */ ++#define SYS_Local_Irq_Enable() local_irq_enable() ++#define SYS_Local_Irq_Disable() local_irq_disable() ++#define SYS_Local_Irq_Save(flags) local_irq_save(flags) ++#define SYS_Local_Irq_Restore(flags) local_irq_restore(flags) ++ ++/* ++ * CONTROL_THIS_CPU() ++ * Parameters ++ * None ++ * Returns ++ * CPU number of the processor being executed on ++ * ++ */ ++#define CONTROL_THIS_CPU() smp_processor_id() ++ ++/**************************************************************************** ++ ** Interface definitions ++ ***************************************************************************/ ++ ++/* ++ * Execution Control Functions ++ */ ++ ++extern void CONTROL_Invoke_Cpu(s32 cpuid, void (*func)(pvoid), pvoid ctx); ++ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function ++ * invocation ++ * @param blocking - Wait for invoked function to 
complete ++ * @param exclude - exclude the current core from executing the code ++ * ++ * @returns none ++ * ++ * @brief Service routine to handle all kinds of parallel invoke on ++ * all CPU calls ++ * ++ * Special Notes: ++ * Invoke the function provided in parallel in either a ++ * blocking/non-blocking mode. ++ * The current core may be excluded if desired. ++ * NOTE - Do not call this function directly from source code. ++ * Use the aliases ++ * CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), ++ * CONTROL_Invoke_Parallel_XS(). ++ * ++ */ ++extern void CONTROL_Invoke_Parallel_Service(void (*func)(pvoid), pvoid ctx, ++ s32 blocking, s32 exclude); ++ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel(func, ctx) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function ++ * invocation ++ * ++ * @returns none ++ * ++ * @brief Invoke the named function in parallel. Wait for all the ++ * functions to complete. ++ * ++ * Special Notes: ++ * Invoke the function named in parallel, including the CPU ++ * that the control is being invoked on ++ * ++ * Macro built on the service routine ++ * ++ */ ++#define CONTROL_Invoke_Parallel(a, b) \ ++ CONTROL_Invoke_Parallel_Service((a), (b), TRUE, FALSE) ++ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel_NB(func, ctx) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function ++ * invocation ++ * ++ * @returns none ++ * ++ * @brief Invoke the named function in parallel. DO NOT Wait for all ++ * the functions to complete. 
++ * ++ * Special Notes: ++ * Invoke the function named in parallel, including the CPU ++ * that the control is being invoked on ++ * ++ * Macro built on the service routine ++ * ++ */ ++#define CONTROL_Invoke_Parallel_NB(a, b) \ ++ CONTROL_Invoke_Parallel_Service((a), (b), FALSE, FALSE) ++ ++/* ++ * @fn VOID CONTROL_Invoke_Parallel_XS(func, ctx) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function ++ * invocation ++ * ++ * @returns none ++ * ++ * @brief Invoke the named function in parallel. Wait for all ++ * the functions to complete. ++ * ++ * Special Notes: ++ * Invoke the function named in parallel, excluding the CPU ++ * that the control is being invoked on ++ * ++ * Macro built on the service routine ++ * ++ */ ++#define CONTROL_Invoke_Parallel_XS(a, b) \ ++ CONTROL_Invoke_Parallel_Service((a), (b), TRUE, TRUE) ++ ++#endif +diff --git a/drivers/platform/x86/socwatchhv/inc/pw_types.h b/drivers/platform/x86/socwatchhv/inc/pw_types.h +new file mode 100644 +index 000000000000..b8a3ac855e53 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/pw_types.h +@@ -0,0 +1,132 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. 
++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef _PW_TYPES_H_ ++#define _PW_TYPES_H_ ++ ++#if defined(__linux__) || defined(__QNX__) ++ ++#ifndef __KERNEL__ ++/* ++ * Called from Ring-3. ++ */ ++#include // Grab 'uint64_t' etc. ++#include // Grab 'pid_t' ++/* ++ * UNSIGNED types... 
++ */ ++typedef uint8_t u8; ++typedef uint16_t u16; ++typedef uint32_t u32; ++typedef uint64_t u64; ++/* ++ * SIGNED types... ++ */ ++typedef int8_t s8; ++typedef int16_t s16; ++typedef int32_t s32; ++typedef int64_t s64; ++ ++#endif // __KERNEL__ ++ ++#elif defined(_WIN32) ++/* ++ * UNSIGNED types... ++ */ ++typedef unsigned char u8; ++typedef unsigned short u16; ++typedef unsigned int u32; ++typedef unsigned long long u64; ++/* ++ * SIGNED types... ++ */ ++typedef signed char s8; ++typedef signed short s16; ++typedef signed int s32; ++typedef signed long long s64; ++typedef s32 pid_t; ++typedef s32 ssize_t; ++ ++#endif // _WIN32 ++ ++/* ************************************ ++ * Common to both operating systems. ++ * ************************************ ++ */ ++/* ++ * UNSIGNED types... ++ */ ++typedef u8 pw_u8_t; ++typedef u16 pw_u16_t; ++typedef u32 pw_u32_t; ++typedef u64 pw_u64_t; ++ ++/* ++ * SIGNED types... ++ */ ++typedef s8 pw_s8_t; ++typedef s16 pw_s16_t; ++typedef s32 pw_s32_t; ++typedef s64 pw_s64_t; ++ ++typedef pid_t pw_pid_t; ++ ++typedef void *pvoid; ++ ++#define TRUE 1 ++#define FALSE 0 ++ ++#endif // _PW_TYPES_H_ +diff --git a/drivers/platform/x86/socwatchhv/inc/pw_version.h b/drivers/platform/x86/socwatchhv/inc/pw_version.h +new file mode 100644 +index 000000000000..8e1cf1cc4d62 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/pw_version.h +@@ -0,0 +1,67 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. 
++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++*/ ++ ++#ifndef _PW_VERSION_H_ ++#define _PW_VERSION_H_ 1 ++ ++/* ++ * SOCWatch driver version ++ */ ++#define SWHVDRV_VERSION_MAJOR 2 ++#define SWHVDRV_VERSION_MINOR 0 ++#define SWHVDRV_VERSION_OTHER 0 ++ ++#endif // _PW_VERSION_H_ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_defines.h b/drivers/platform/x86/socwatchhv/inc/sw_defines.h +new file mode 100644 +index 000000000000..9c8995805464 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/sw_defines.h +@@ -0,0 +1,156 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. 
++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef _PW_DEFINES_H_ ++#define _PW_DEFINES_H_ 1 ++ ++#include "sw_version.h" ++ ++/* *************************************************** ++ * Common to kernel and userspace. ++ * *************************************************** ++ */ ++#define PW_SUCCESS 0 ++#define PW_ERROR 1 ++#define PW_SUCCESS_NO_COLLECT 2 ++ ++/* ++ * Helper macro to convert 'u64' to 'unsigned long long' to avoid gcc warnings. ++ */ ++#define TO_ULL(x) (unsigned long long)(x) ++/* ++* Convert an arg to 'long long' ++*/ ++#define TO_LL(x) (long long)(x) ++/* ++ * Convert an arg to 'unsigned long' ++ */ ++#define TO_UL(x) (unsigned long)(x) ++/* ++ * Helper macro for string representation of a boolean value. ++ */ ++#define GET_BOOL_STRING(b) ((b) ? "TRUE" : "FALSE") ++ ++/* ++ * Circularly increment 'i' MODULO 'l'. ++ * ONLY WORKS IF 'l' is (power of 2 - 1) ie. 
++ * l == (2 ^ x) - 1 ++ */ ++#define CIRCULAR_INC(index, mask) (((index) + 1) & (mask)) ++#define CIRCULAR_ADD(index, val, mask) (((index) + (val)) & (mask)) ++/* ++ * Circularly decrement 'i'. ++ */ ++#define CIRCULAR_DEC(i, m) \ ++ ({ \ ++ int __tmp1 = (i); \ ++ if (--__tmp1 < 0) \ ++ __tmp1 = (m); \ ++ __tmp1; \ ++ }) ++/* ++ * Retrieve size of an array. ++ */ ++#define SW_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) ++/* ++ * Should the driver count number of dropped samples? ++ */ ++#define DO_COUNT_DROPPED_SAMPLES 1 ++/* ++ * Extract F/W major, minor versions. ++ * Assumes version numbers are 8b unsigned ints. ++ */ ++#define SW_GET_SCU_FW_VERSION_MAJOR(ver) (((ver) >> 8) & 0xff) ++#define SW_GET_SCU_FW_VERSION_MINOR(ver) ((ver)&0xff) ++/* ++ * Max size of process name retrieved from kernel. ++ */ ++#define SW_MAX_PROC_NAME_SIZE 16 ++ ++/* ++ * Number of SOCPERF counters. ++ * Needed by both Ring-0 and Ring-3 ++ */ ++#define SW_NUM_SOCPERF_COUNTERS 9 ++ ++/* ++ * Max size of process name retrieved from kernel space. ++ */ ++#define SW_MAX_PROC_NAME_SIZE 16 ++/* ++ * Max size of kernel wakelock name. ++ */ ++#define SW_MAX_KERNEL_WAKELOCK_NAME_SIZE 100 ++ ++/* Data value read when a telemetry data read fails. 
*/ ++#define SW_TELEM_READ_FAIL_VALUE 0xF00DF00DF00DF00D ++ ++#ifdef SWW_MERGE ++typedef enum { ++ SW_STOP_EVENT = 0, ++ SW_CS_EXIT_EVENT, ++ SW_COUNTER_RESET_EVENT, ++ SW_COUNTER_HOTKEY_EVENT, ++ SW_MAX_COLLECTION_EVENT ++} collector_stop_event_t; ++#endif // SWW_MERGE ++ ++#define MAX_UNSIGNED_16_BIT_VALUE 0xFFFF ++#define MAX_UNSIGNED_24_BIT_VALUE 0xFFFFFF ++#define MAX_UNSIGNED_32_BIT_VALUE 0xFFFFFFFF ++#define MAX_UNSIGNED_64_BIT_VALUE 0xFFFFFFFFFFFFFFFF ++ ++#endif // _PW_DEFINES_H_ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h b/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h +new file mode 100644 +index 000000000000..baf93058c5c5 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h +@@ -0,0 +1,303 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. 
++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_IOCTL_H__ ++#define __SW_IOCTL_H__ 1 ++ ++#if defined(__linux__) || defined(__QNX__) ++#if __KERNEL__ ++#include ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++#include ++#include ++#endif // COMPAT && x64 ++#else // !__KERNEL__ ++#include ++#endif // __KERNEL__ ++#endif // __linux__ ++/* ++ * Ensure we pull in definition of 'DO_COUNT_DROPPED_SAMPLES'! ++ */ ++#include "sw_defines.h" ++ ++#ifdef ONECORE ++#ifndef __KERNEL__ ++#include ++#endif //__KERNEL__ ++#endif // ONECORE ++ ++/* ++ * The APWR-specific IOCTL magic ++ * number -- used to ensure IOCTLs ++ * are delivered to the correct ++ * driver. 
++ */ ++// #define APWR_IOCTL_MAGIC_NUM 0xdead ++#define APWR_IOCTL_MAGIC_NUM 100 ++ ++/* ++ * The name of the device file ++ */ ++// #define DEVICE_FILE_NAME "/dev/pw_driver_char_dev" ++#define PW_DEVICE_FILE_NAME "/dev/apwr_driver_char_dev" ++#define PW_DEVICE_NAME "apwr_driver_char_dev" ++ ++enum sw_ioctl_cmd { ++ sw_ioctl_cmd_none = 0, ++ sw_ioctl_cmd_config, ++ sw_ioctl_cmd_cmd, ++ sw_ioctl_cmd_poll, ++ sw_ioctl_cmd_immediate_io, ++ sw_ioctl_cmd_scu_version, ++ sw_ioctl_cmd_read_immediate, ++ sw_ioctl_cmd_driver_version, ++ sw_ioctl_cmd_avail_trace, ++ sw_ioctl_cmd_avail_notify, ++ sw_ioctl_cmd_avail_collect, ++ sw_ioctl_cmd_topology_changes, ++}; ++/* ++ * The actual IOCTL commands. ++ * ++ * From the kernel documentation: ++ * "_IOR" ==> Read IOCTL ++ * "_IOW" ==> Write IOCTL ++ * "_IOWR" ==> Read/Write IOCTL ++ * ++ * Where "Read" and "Write" are from the user's perspective ++ * (similar to the file "read" and "write" calls). ++ */ ++#ifdef SWW_MERGE // Windows ++// ++// Device type -- in the "User Defined" range." ++// ++#define POWER_I_CONF_TYPE 40000 ++ ++// List assigned tracepoint id ++#define CSIR_TRACEPOINT_ID_MASK 1 ++#define DEVICE_STATE_TRACEPOINT_ID_MASK 2 ++#define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 ++#define RESET_TRACEPOINT_ID_MASK 4 ++#define DISPLAY_ON_TRACEPOINT_ID_MASK 5 ++ ++#ifdef SWW_MERGE ++// ++// TELEM BAR CONFIG ++// ++#define MAX_TELEM_BAR_CFG 3 ++#define TELEM_MCHBAR_CFG 0 ++#define TELEM_IPC1BAR_CFG 1 ++#define TELEM_SSRAMBAR_CFG 2 ++#endif ++ ++// ++// The IOCTL function codes from 0x800 to 0xFFF are for customer use. 
++// ++#define PW_IOCTL_CONFIG \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x900, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_START_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x901, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_STOP_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x902, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ ++// TODO: pause, resume, cancel not supported yet ++#define PW_IOCTL_PAUSE_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_RESUME_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x904, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_CANCEL_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x905, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ ++#define PW_IOCTL_GET_PROCESSOR_GROUP_TOPOLOGY \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x906, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_TOPOLOGY \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x907, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x908, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_IMMEDIATE_IO \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x909, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_DRV_CLEANUP \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90A, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_SET_COLLECTION_EVENT \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90B, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_TRY_STOP_EVENT \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90C, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_SET_PCH_ACTIVE_INTERVAL \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90D, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_SET_TELEM_BAR \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90E, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_METADATA \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x90F, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_SET_GBE_INTERVAL \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x910, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_ENABLE_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 
0x911, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_DISABLE_COLLECTION \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x912, METHOD_BUFFERED, FILE_ANY_ACCESS) ++#define PW_IOCTL_DRIVER_BUILD_DATE \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x913, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ ++#elif !defined(__APPLE__) ++#define PW_IOCTL_CONFIG \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ ++ struct sw_driver_ioctl_arg *) ++#if DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_CMD \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ ++ struct sw_driver_ioctl_arg *) ++#else ++#define PW_IOCTL_CMD \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ ++ struct sw_driver_ioctl_arg *) ++#endif // DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++#define PW_IOCTL_IMMEDIATE_IO \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_SCU_FW_VERSION \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_READ_IMMEDIATE \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_DRIVER_VERSION \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ ++ struct sw_driver_ioctl_arg *) ++#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ ++ struct sw_driver_ioctl_arg *) ++#else // __APPLE__ ++#define PW_IOCTL_CONFIG \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ ++ struct sw_driver_ioctl_arg) ++#if 
DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_CMD \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ ++ struct sw_driver_ioctl_arg) ++#else ++#define PW_IOCTL_CMD \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) ++#endif // DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++#define PW_IOCTL_IMMEDIATE_IO \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_SCU_FW_VERSION \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_READ_IMMEDIATE \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_DRIVER_VERSION \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ ++ struct sw_driver_ioctl_arg) ++#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ ++ struct sw_driver_ioctl_arg) ++#endif // __APPLE__ ++ ++/* ++ * 32b-compatible version of the above ++ * IOCTL numbers. Required ONLY for ++ * 32b compatibility on 64b systems, ++ * and ONLY by the driver. 
++ */ ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++#define PW_IOCTL_CONFIG32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, compat_uptr_t) ++#if DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_CMD32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) ++#else ++#define PW_IOCTL_CMD32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) ++#endif // DO_COUNT_DROPPED_SAMPLES ++#define PW_IOCTL_POLL32 _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++#define PW_IOCTL_IMMEDIATE_IO32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) ++#define PW_IOCTL_GET_SCU_FW_VERSION32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, compat_uptr_t) ++#define PW_IOCTL_READ_IMMEDIATE32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, compat_uptr_t) ++#define PW_IOCTL_GET_DRIVER_VERSION32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, compat_uptr_t) ++#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, compat_uptr_t) ++#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, compat_uptr_t) ++#define PW_IOCTL_GET_AVAILABLE_COLLECTORS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) ++#define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) ++#endif // defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++#endif // __SW_IOCTL_H__ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h b/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h +new file mode 100644 +index 000000000000..23e939a732c7 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h +@@ -0,0 +1,164 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. 
++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef _SW_KERNEL_DEFINES_H_ ++#define _SW_KERNEL_DEFINES_H_ 1 ++ ++#include "sw_defines.h" ++ ++#if defined(__APPLE__) ++#define likely(x) (x) ++#define unlikely(x) (x) ++#endif // __APPLE__ ++ ++#if !defined(__APPLE__) ++#define CPU() (raw_smp_processor_id()) ++#define RAW_CPU() (raw_smp_processor_id()) ++#else ++#define CPU() (cpu_number()) ++#define RAW_CPU() (cpu_number()) ++#endif // __APPLE__ ++ ++#define TID() (current->pid) ++#define PID() (current->tgid) ++#define NAME() (current->comm) ++#define PKG(c) (cpu_data(c).phys_proc_id) ++#define IT_REAL_INCR() (current->signal->it_real_incr.tv64) ++ ++#define ATOMIC_CAS(ptr, old_val, new_val) \ ++ (cmpxchg((ptr), (old_val), (new_val)) == (old_val)) ++ ++/* ++ * Should we measure overheads? ++ * '1' ==> YES ++ * '0' ==> NO ++ */ ++#define DO_OVERHEAD_MEASUREMENTS 0 ++/* ++ * Should we track memory usage? ++ * '1' ==> YES ++ * '0' ==> NO ++ */ ++#define DO_TRACK_MEMORY_USAGE 0 ++/* ++ * Are we compiling with driver profiling support ++ * turned ON? If YES then force 'DO_OVERHEAD_MEASUREMENTS' ++ * and 'DO_TRACK_MEMORY_USAGE' to be TRUE. 
++ */ ++#if IS_ENABLED(CONFIG_SOCWATCH_DRIVER_PROFILING) ++#if !DO_OVERHEAD_MEASUREMENTS ++#undef DO_OVERHEAD_MEASUREMENTS ++#define DO_OVERHEAD_MEASUREMENTS 1 ++#endif // DO_OVERHEAD_MEASUREMENTS ++#if !DO_TRACK_MEMORY_USAGE ++#undef DO_TRACK_MEMORY_USAGE ++#define DO_TRACK_MEMORY_USAGE 1 ++#endif // DO_TRACK_MEMORY_USAGE ++#endif // CONFIG_SOCWATCH_DRIVER_PROFILING ++/* ++ * Should we allow debug output. ++ * Set to: "1" ==> 'OUTPUT' is enabled. ++ * "0" ==> 'OUTPUT' is disabled. ++ */ ++#define DO_DEBUG_OUTPUT 0 ++/* ++ * Control whether to output driver ERROR messages. ++ * These are independent of the 'OUTPUT' macro ++ * (which controls debug messages). ++ * Set to '1' ==> Print driver error messages (to '/var/log/messages') ++ * '0' ==> Do NOT print driver error messages ++ */ ++#define DO_PRINT_DRIVER_ERROR_MESSAGES 1 ++/* ++ * Macros to control output printing. ++ */ ++#if !defined(__APPLE__) ++#if DO_DEBUG_OUTPUT ++#define pw_pr_debug(...) printk(KERN_INFO __VA_ARGS__) ++#define pw_pr_warn(...) printk(KERN_WARNING __VA_ARGS__) ++#else ++#define pw_pr_debug(...) ++#define pw_pr_warn(...) ++#endif ++#define pw_pr_force(...) printk(KERN_INFO __VA_ARGS__) ++#else ++#if DO_DEBUG_OUTPUT ++#define pw_pr_debug(...) IOLog(__VA_ARGS__) ++#define pw_pr_warn(...) IOLog(__VA_ARGS__) ++#else ++#define pw_pr_debug(...) ++#define pw_pr_warn(...) ++#endif ++#define pw_pr_force(...) IOLog(__VA_ARGS__) ++#endif // __APPLE__ ++ ++/* ++ * Macro for driver error messages. ++ */ ++#if !defined(__APPLE__) ++#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) ++#define pw_pr_error(...) printk(KERN_ERR __VA_ARGS__) ++#else ++#define pw_pr_error(...) ++#endif ++#else ++#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) ++#define pw_pr_error(...) IOLog(__VA_ARGS__) ++#else ++#define pw_pr_error(...) 
++#endif ++#endif // __APPLE__ ++ ++#endif // _SW_KERNEL_DEFINES_H_ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_structs.h b/drivers/platform/x86/socwatchhv/inc/sw_structs.h +new file mode 100644 +index 000000000000..94e58b5244f4 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/sw_structs.h +@@ -0,0 +1,501 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. 
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SW_STRUCTS_H__ ++#define __SW_STRUCTS_H__ 1 ++ ++#include "sw_types.h" ++ ++/* ++ * An enumeration of MSR types. ++ * Required if we want to differentiate ++ * between different types of MSRs. ++ */ ++enum sw_msr_type { ++ SW_MSR_TYPE_THREAD, ++ SW_MSR_TYPE_CORE, ++ SW_MSR_TYPE_MODULE, ++ SW_MSR_TYPE_PACKAGE, ++ SW_MSR_TYPE_SOC, ++ SW_MSR_TYPE_MAX, ++}; ++ ++/* ++ * Convenience for a 'string' data type. ++ * Not strictly required. 
++ */ ++#pragma pack(push, 1) ++typedef struct sw_string_type { ++ pw_u16_t len; ++ char data[1]; ++} sw_string_type_t; ++#pragma pack(pop) ++#define SW_STRING_TYPE_HEADER_SIZE() \ ++ (sizeof(struct sw_string_type) - sizeof(char[1])) ++ ++#pragma pack(push, 1) ++struct sw_key_value_payload { ++ pw_u16_t m_numKeyValuePairs; ++ char data[1]; ++}; ++#pragma pack(pop) ++#define SW_KEY_VALUE_PAYLOAD_HEADER_SIZE() \ ++ (sizeof(struct sw_key_value_payload) - sizeof(char[1])) ++ ++typedef enum sw_kernel_wakelock_type { ++ SW_WAKE_LOCK = 0, // A kernel wakelock was acquired ++ SW_WAKE_UNLOCK = 1, // A kernel wakelock was released ++ SW_WAKE_LOCK_TIMEOUT = ++ 2, // A kernel wakelock was acquired with a timeout ++ SW_WAKE_LOCK_INITIAL = 3, // A kernel wakelock was acquired before the ++ // collection started ++ SW_WAKE_UNLOCK_ALL = 4, // All previously held kernel wakelocks were ++ // released -- used in ACPI S3 notifications ++} sw_kernel_wakelock_type_t; ++ ++typedef enum sw_when_type { ++ SW_WHEN_TYPE_BEGIN = 0, /* Start snapshot */ ++ SW_WHEN_TYPE_POLL, ++ SW_WHEN_TYPE_NOTIFIER, ++ SW_WHEN_TYPE_TRACEPOINT, ++ SW_WHEN_TYPE_END, /* Stop snapshot */ ++ SW_WHEN_TYPE_NONE ++} sw_when_type_t; ++ ++/** ++ * trigger_bits is defined to use type pw_u8_t that makes only up ++ * to 8 types possible ++ */ ++#define SW_TRIGGER_BEGIN_MASK() (1U << SW_WHEN_TYPE_BEGIN) ++#define SW_TRIGGER_END_MASK() (1U << SW_WHEN_TYPE_END) ++#define SW_TRIGGER_POLL_MASK() (1U << SW_WHEN_TYPE_POLL) ++#define SW_TRIGGER_TRACEPOINT_MASK() (1U << SW_WHEN_TYPE_TRACEPOINT) ++#define SW_TRIGGER_NOTIFIER_MASK() (1U << SW_WHEN_TYPE_NOTIFIER) ++#define SW_GET_TRIGGER_MASK_VALUE(m) (1U << (m)) ++#define SW_TRIGGER_MASK_ALL() (0xFF) ++ ++enum sw_io_cmd { SW_IO_CMD_READ = 0, SW_IO_CMD_WRITE, SW_IO_CMD_MAX }; ++ ++#pragma pack(push, 1) ++struct sw_driver_msr_io_descriptor { ++ pw_u64_t address; ++ enum sw_msr_type type; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_ipc_mmio_io_descriptor { ++ 
union { ++#ifdef SWW_MERGE ++#pragma warning(push) ++#pragma warning( \ ++ disable : 4201) // disable C4201: nonstandard extension used: nameless struct/union ++#endif ++ struct { ++ pw_u16_t command; ++ pw_u16_t sub_command; ++ }; ++#ifdef SWW_MERGE ++#pragma warning(pop) // enable C4201 ++#endif ++ union { ++ pw_u32_t ipc_command; // (sub_command << 12) | (command) ++ pw_u8_t is_gbe; // Used only for GBE MMIO ++ }; ++ }; ++ // TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' ++ union { ++ pw_u64_t data_address; // Will be "io_remapped" ++ pw_u64_t data_remapped_address; ++ }; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_pci_io_descriptor { ++ pw_u32_t bus; ++ pw_u32_t device; ++ pw_u32_t function; ++#ifdef __QNX__ ++ union { ++ pw_u32_t offset; ++ pw_u32_t index; ++ }; ++#else /* __QNX__ */ ++ pw_u32_t offset; ++#endif /* __QNX__ */ ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_configdb_io_descriptor { ++ // pw_u32_t port; ++ // pw_u32_t offset; ++ pw_u32_t address; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_trace_args_io_descriptor { ++ pw_u8_t num_args; // Number of valid entries in the 'args' array, below; 1 <= num_args <= 7 ++ pw_u8_t args[7]; // Max of 7 args can be recorded ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++/** ++ * struct - sw_driver_telem_io_descriptor - Telemetry Metric descriptor ++ * ++ * @id: (Client & Driver) Telemetry ID of the counter to read. ++ * @idx: (Driver only) index into telem array to read, or the row ++ * of the telem_indirect table to lookup the telem array index. ++ * @unit: Unit from which to collect: 0 = PMC, 1 = PUNIT ++ * Values come from the telemetry_unit enum. ++ * @scale_op: When there are multiple instances of a telem value (e.g. ++ * module C-states) the operation to use when scaling the CPU ID ++ * and adding it to the telemetry data ID. ++ * @scale_val: Amount to scale an ID (when scaling one.) 
++ * ++ * Like all hardware mechanism descriptors, the client uses this to pass ++ * metric hardware properties (unit and ID) to the driver. The driver ++ * uses it to program the telemetry unit. ++ * ++ * Users can specify that IDs should be scaled based on the CPU id, using ++ * the equation: ID = ID_value + (cpuid ) ++ * where is one of +, *, /, or %, and scaling_val is an integer ++ * value. This gives you: ++ * Operation scale_op scale_val ++ * Single instance of an ID * 0 ++ * Sequentially increasing ++ * CPU-specific values * 1 ++ * Per module cpu-specific ++ * values (2 cores/module) / 2 ++ * Round Robin assignment % cpu_count ++ * ++ * Note that scaling_value of 0 implies that no scaling should be ++ * applied. While (*, 1) is equivalent to (+, 0), the scaling value of 0 ++ * is reserved/defined to mean "no scaling", and is disallowed. ++ * ++ * If you're really tight on space, you could always fold unit and ++ * scale_op into a single byte without a lot of pain or even effort. ++ */ ++struct sw_driver_telem_io_descriptor { ++ union { ++ pw_u16_t id; ++ pw_u8_t idx; ++ }; ++ pw_u8_t unit; ++ pw_u8_t scale_op; ++ pw_u16_t scale_val; ++}; ++#pragma pack(pop) ++enum telemetry_unit { TELEM_PUNIT = 0, TELEM_PMC, TELEM_UNIT_NONE }; ++#define TELEM_MAX_ID 0xFFFF /* Maximum value of a Telemtry event ID. */ ++#define TELEM_MAX_SCALE 0xFFFF /* Maximum ID scaling value. 
*/ ++#define TELEM_OP_ADD '+' /* Addition operator */ ++#define TELEM_OP_MULT '*' /* Multiplication operator */ ++#define TELEM_OP_DIV '/' /* Division operator */ ++#define TELEM_OP_MOD '%' /* Modulus operator */ ++#define TELEM_OP_NONE 'X' /* No operator--Not a scaled ID */ ++ ++#pragma pack(push, 1) ++struct sw_driver_mailbox_io_descriptor { ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t interface_address; ++ pw_u64_t interface_remapped_address; ++ }; ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t data_address; ++ pw_u64_t data_remapped_address; ++ }; ++ pw_u64_t command; ++ pw_u64_t command_mask; ++ pw_u16_t run_busy_bit; ++ pw_u16_t is_msr_type; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct sw_driver_pch_mailbox_io_descriptor { ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t mtpmc_address; ++ pw_u64_t mtpmc_remapped_address; ++ }; ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t msg_full_sts_address; ++ pw_u64_t msg_full_sts_remapped_address; ++ }; ++ union { ++ /* ++ * Will be "io_remapped" ++ */ ++ pw_u64_t mfpmc_address; ++ pw_u64_t mfpmc_remapped_address; ++ }; ++ pw_u32_t data_address; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++typedef struct sw_driver_io_descriptor { ++ pw_u16_t collection_type; ++ // TODO: specify READ/WRITE ++ pw_s16_t collection_command; // One of 'enum sw_io_cmd' ++ pw_u16_t counter_size_in_bytes; // The number of bytes to READ or WRITE ++ union { ++ struct sw_driver_msr_io_descriptor msr_descriptor; ++ struct sw_driver_ipc_mmio_io_descriptor ipc_descriptor; ++ struct sw_driver_ipc_mmio_io_descriptor mmio_descriptor; ++ struct sw_driver_pci_io_descriptor pci_descriptor; ++ struct sw_driver_configdb_io_descriptor configdb_descriptor; ++ struct sw_driver_trace_args_io_descriptor trace_args_descriptor; ++ struct sw_driver_telem_io_descriptor telem_descriptor; ++ struct sw_driver_pch_mailbox_io_descriptor ++ pch_mailbox_descriptor; ++ struct 
sw_driver_mailbox_io_descriptor mailbox_descriptor; ++ }; ++ pw_u64_t write_value; // The value to WRITE ++} sw_driver_io_descriptor_t; ++#pragma pack(pop) ++ ++/** ++ * sw_driver_interface_info is used to map data collected by kernel-level ++ * collectors to metrics. The client passes one of these structs to the ++ * driver for each metric the driver should collect. The driver tags the ++ * collected data (messages) using info from this struct. When processing ++ * data from the driver, the client uses its copy of this data to ++ * identify the plugin, metric, and message IDs of each message. ++ */ ++#pragma pack(push, 1) ++struct sw_driver_interface_info { ++ pw_u64_t tracepoint_id_mask; ++ pw_u64_t notifier_id_mask; ++ pw_s16_t cpu_mask; // On which CPU(s) should the driver read the data? ++ // Currently: -2 ==> read on ALL CPUs, ++ // -1 ==> read on ANY CPU, ++ // >= 0 ==> the specific CPU to read on ++ pw_s16_t plugin_id; // Metric Plugin SID ++ pw_s16_t metric_id; // Domain-specific ID assigned by each Metric Plugin ++ pw_s16_t msg_id; // Msg ID retrieved from the SoC Watch config file ++ pw_u16_t num_io_descriptors; // Number of descriptors in the array, below. ++ pw_u8_t trigger_bits; // Mask of 'when bits' to fire this collector. ++ pw_u16_t sampling_interval_msec; // Sampling interval, in msecs ++ pw_u8_t descriptors[1]; // Array of sw_driver_io_descriptor structs. 
++}; ++#pragma pack(pop) ++ ++#define SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ ++ (sizeof(struct sw_driver_interface_info) - sizeof(pw_u8_t[1])) ++ ++#pragma pack(push, 1) ++struct sw_driver_interface_msg { ++ pw_u16_t num_infos; // Number of 'sw_driver_interface_info' structs contained within the 'infos' variable, below ++ pw_u16_t min_polling_interval_msecs; // Min time to wait before polling; used exclusively ++ // with the low overhead, context-switch based ++ // polling mode ++ // pw_u16_t infos_size_bytes; // Size of data inlined within the 'infos' variable, below ++ pw_u8_t infos[1]; ++}; ++#pragma pack(pop) ++#define SW_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ ++ (sizeof(struct sw_driver_interface_msg) - sizeof(pw_u8_t[1])) ++ ++typedef enum sw_name_id_type { ++ SW_NAME_TYPE_TRACEPOINT, ++ SW_NAME_TYPE_NOTIFIER, ++ SW_NAME_TYPE_COLLECTOR, ++ SW_NAME_TYPE_MAX, ++} sw_name_id_type_t; ++ ++#pragma pack(push, 1) ++struct sw_name_id_pair { ++ pw_u16_t id; ++ pw_u16_t type; // One of 'sw_name_id_type' ++ struct sw_string_type name; ++}; ++#pragma pack(pop) ++#define SW_NAME_ID_HEADER_SIZE() \ ++ (sizeof(struct sw_name_id_pair) - sizeof(struct sw_string_type)) ++ ++#pragma pack(push, 1) ++struct sw_name_info_msg { ++ pw_u16_t num_name_id_pairs; ++ pw_u16_t payload_len; ++ pw_u8_t pairs[1]; ++}; ++#pragma pack(pop) ++ ++/** ++ * This is the basic data structure for passing data collected by the ++ * kernel-level collectors up to the client. In addition to the data ++ * (payload), it contains the minimum metadata required for the client ++ * to identify the source of that data. 
++ */ ++#pragma pack(push, 1) ++typedef struct sw_driver_msg { ++ pw_u64_t tsc; ++ pw_u16_t cpuidx; ++ pw_u8_t plugin_id; // Cannot have more than 256 plugins ++ pw_u8_t metric_id; // Each plugin cannot handle more than 256 metrics ++ pw_u8_t msg_id; // Each metric cannot have more than 256 components ++ pw_u16_t payload_len; ++ // pw_u64_t p_payload; // Ptr to payload ++ union { ++ pw_u64_t __dummy; // Ensure size of struct is consistent on x86, x64 ++ char *p_payload; // Ptr to payload (collected data values). ++ }; ++} sw_driver_msg_t; ++#pragma pack(pop) ++#define SW_DRIVER_MSG_HEADER_SIZE() \ ++ (sizeof(struct sw_driver_msg) - sizeof(pw_u64_t)) ++ ++typedef enum sw_driver_collection_cmd { ++ SW_DRIVER_START_COLLECTION = 1, ++ SW_DRIVER_STOP_COLLECTION = 2, ++ SW_DRIVER_PAUSE_COLLECTION = 3, ++ SW_DRIVER_RESUME_COLLECTION = 4, ++ SW_DRIVER_CANCEL_COLLECTION = 5, ++} sw_driver_collection_cmd_t; ++ ++#pragma pack(push, 1) ++struct sw_driver_version_info { ++ pw_u16_t major; ++ pw_u16_t minor; ++ pw_u16_t other; ++}; ++#pragma pack(pop) ++ ++enum cpu_action { ++ SW_CPU_ACTION_NONE, ++ SW_CPU_ACTION_OFFLINE, ++ SW_CPU_ACTION_ONLINE_PREPARE, ++ SW_CPU_ACTION_ONLINE, ++ SW_CPU_ACTION_MAX, ++}; ++#pragma pack(push, 1) ++struct sw_driver_topology_change { ++ pw_u64_t timestamp; // timestamp ++ enum cpu_action type; // One of 'enum cpu_action' ++ pw_u16_t cpu; // logical cpu ++ pw_u16_t core; // core id ++ pw_u16_t pkg; // pkg/physical id ++}; ++struct sw_driver_topology_msg { ++ pw_u16_t num_entries; ++ pw_u8_t topology_entries[1]; ++}; ++#pragma pack(pop) ++ ++/** ++ * An enumeration of possible pm states that ++ * SoC Watch is interested in ++ */ ++enum sw_pm_action { ++ SW_PM_ACTION_NONE, ++ SW_PM_ACTION_SUSPEND_ENTER, ++ SW_PM_ACTION_SUSPEND_EXIT, ++ SW_PM_ACTION_HIBERNATE_ENTER, ++ SW_PM_ACTION_HIBERNATE_EXIT, ++ SW_PM_ACTION_MAX, ++}; ++ ++/** ++ * An enumeration of possible actions that trigger ++ * the power notifier ++ */ ++enum sw_pm_mode { ++ 
SW_PM_MODE_FIRMWARE, ++ SW_PM_MODE_NONE, ++}; ++ ++#define SW_PM_VALUE(mode, action) ((mode) << 16 | (action)) ++ ++/* ++ * Wrapper for ioctl arguments. ++ * EVERY ioctl MUST use this struct! ++ */ ++#pragma pack(push, 1) ++struct sw_driver_ioctl_arg { ++ pw_s32_t in_len; ++ pw_s32_t out_len; ++ // pw_u64_t p_in_arg; // Pointer to input arg ++ // pw_u64_t p_out_arg; // Pointer to output arg ++ char *in_arg; ++ char *out_arg; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++typedef struct sw_driver_msg_interval { ++ pw_u8_t plugin_id; // Cannot have more than 256 plugins ++ pw_u8_t metric_id; // Each plugin cannot handle more than 256 metrics ++ pw_u8_t msg_id; // Each metric cannot have more than 256 components ++ pw_u16_t interval; // collection interval ++} sw_driver_msg_interval_t; ++#pragma pack(pop) ++ ++#endif // __SW_STRUCTS_H__ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_types.h b/drivers/platform/x86/socwatchhv/inc/sw_types.h +new file mode 100644 +index 000000000000..914ce9806965 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/sw_types.h +@@ -0,0 +1,152 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef _PW_TYPES_H_ ++#define _PW_TYPES_H_ ++ ++#if defined(__linux__) || defined(__APPLE__) || defined(__QNX__) ++ ++#ifndef __KERNEL__ ++/* ++ * Called from Ring-3. ++ */ ++#include // Grab 'uint64_t' etc. ++#include // Grab 'pid_t' ++/* ++ * UNSIGNED types... ++ */ ++typedef uint8_t u8; ++typedef uint16_t u16; ++typedef uint32_t u32; ++typedef uint64_t u64; ++/* ++ * SIGNED types... 
++ */ ++typedef int8_t s8; ++typedef int16_t s16; ++typedef int32_t s32; ++typedef int64_t s64; ++ ++#else // __KERNEL__ ++#if !defined(__APPLE__) ++#include ++#else // __APPLE__ ++#include ++#include // Grab 'uint64_t' etc. ++ ++typedef uint8_t u8; ++typedef uint16_t u16; ++typedef uint32_t u32; ++typedef uint64_t u64; ++/* ++* SIGNED types... ++*/ ++typedef int8_t s8; ++typedef int16_t s16; ++typedef int32_t s32; ++typedef int64_t s64; ++#endif // __APPLE__ ++#endif // __KERNEL__ ++ ++#elif defined(_WIN32) ++typedef __int32 int32_t; ++typedef unsigned __int32 uint32_t; ++typedef __int64 int64_t; ++typedef unsigned __int64 uint64_t; ++ ++/* ++ * UNSIGNED types... ++ */ ++typedef unsigned char u8; ++typedef unsigned short u16; ++typedef unsigned int u32; ++typedef unsigned long long u64; ++ ++/* ++ * SIGNED types... ++ */ ++typedef signed char s8; ++typedef signed short s16; ++typedef signed int s32; ++typedef signed long long s64; ++typedef s32 pid_t; ++typedef s32 ssize_t; ++ ++#endif // _WIN32 ++ ++/* ************************************ ++ * Common to both operating systems. ++ * ************************************ ++ */ ++/* ++ * UNSIGNED types... ++ */ ++typedef u8 pw_u8_t; ++typedef u16 pw_u16_t; ++typedef u32 pw_u32_t; ++typedef u64 pw_u64_t; ++ ++/* ++ * SIGNED types... ++ */ ++typedef s8 pw_s8_t; ++typedef s16 pw_s16_t; ++typedef s32 pw_s32_t; ++typedef s64 pw_s64_t; ++ ++typedef pid_t pw_pid_t; ++ ++#endif // _PW_TYPES_H_ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_version.h b/drivers/platform/x86/socwatchhv/inc/sw_version.h +new file mode 100644 +index 000000000000..5797edffa64d +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/sw_version.h +@@ -0,0 +1,74 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef __SW_VERSION_H__ ++#define __SW_VERSION_H__ 1 ++ ++/* ++ * SOCWatch driver version ++ */ ++#define SW_DRIVER_VERSION_MAJOR 2 ++#define SW_DRIVER_VERSION_MINOR 6 ++#define SW_DRIVER_VERSION_OTHER 2 ++ ++/* ++ * Every SOC Watch userspace component shares the same version number. ++ */ ++#define SOCWATCH_VERSION_MAJOR 2 ++#define SOCWATCH_VERSION_MINOR 8 ++#define SOCWATCH_VERSION_OTHER 0 ++ ++#endif // __SW_VERSION_H__ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h b/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h +new file mode 100644 +index 000000000000..06a9e090932b +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h +@@ -0,0 +1,117 @@ ++#ifndef _SWHV_ACRN_H_ ++#define _SWHV_ACRN_H_ 1 ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include // LINUX_VERSION_CODE ++#include // for struct list_head ++ ++#include "swhv_defines.h" ++#include "pw_version.h" ++ ++#define SW_DEFINE_LIST_HEAD(name, dummy) struct list_head name ++#define SW_DECLARE_LIST_HEAD(name, dummy) extern struct list_head name ++#define SW_LIST_ENTRY(name, dummy) struct list_head name ++#define SW_LIST_HEAD_VAR(dummy) struct list_head ++#define SW_LIST_HEAD_INIT(head) INIT_LIST_HEAD(head) ++#define SW_LIST_ENTRY_INIT(node, field) INIT_LIST_HEAD(&node->field) ++#define SW_LIST_ADD(head, node, field) list_add_tail(&node->field, head) ++#define 
SW_LIST_GET_HEAD_ENTRY(head, type, field) \ ++ list_first_entry(head, struct type, field) ++#define SW_LIST_UNLINK(node, field) list_del(&node->field) ++#define SW_LIST_FOR_EACH_ENTRY(node, head, field) \ ++ list_for_each_entry(node, head, field) ++#define SW_LIST_EMPTY(head) list_empty(head) ++#define SW_LIST_HEAD_INITIALIZER(head) LIST_HEAD_INIT(head) ++ ++int device_open_i(struct inode *inode, struct file *file); ++ ++ssize_t device_read_i(struct file *file, /* see include/linux/fs.h */ ++ char __user *buffer, /* buffer to be filled with data */ ++ size_t length, /* length of the buffer */ ++ loff_t *offset); ++ ++long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, ++ int local_len); ++long swhv_start(void); ++long swhv_stop(void); ++long swhv_get_cpu_count(u32 __user *remote_args); ++long swhv_get_clock(u32 __user *remote_in_args, u64 __user *remote_args); ++long swhv_get_topology(u64 __user *remote_args); ++long swhv_get_hypervisor_type(u32 __user *remote_args); ++int swhv_load_driver_i(void); ++void swhv_unload_driver_i(void); ++void cleanup_error_i(void); ++long swhv_msr_read(u32 __user *remote_in_args, u64 __user *remote_args); ++long swhv_collection_poll(void); ++ ++enum MSR_CMD_TYPE { ++ MSR_OP_NONE = 0, ++ MSR_OP_READ, ++ MSR_OP_WRITE, ++ MSR_OP_READ_CLEAR ++}; ++ ++enum MSR_CMD_STATUS { MSR_OP_READY = 0, MSR_OP_REQUESTED, MSR_OP_HANDLED }; ++ ++struct profiling_msr_op { ++ /* value to write or location to write into */ ++ uint64_t value; ++ /* MSR address to read/write; last entry will have value of -1 */ ++ uint32_t msr_id; ++ /* parameter; usage depends on operation */ ++ uint16_t param; ++ uint8_t msr_op_type; ++ uint8_t reg_type; ++}; ++ ++#define MAX_MSR_LIST_NUM 15 ++struct profiling_msr_ops_list { ++ int32_t collector_id; ++ uint32_t num_entries; ++ int32_t msr_op_state; // enum value from 'MSR_CMD_STATUS' ++ struct profiling_msr_op entries[MAX_MSR_LIST_NUM]; ++}; ++ ++#define COLLECTOR_SOCWATCH 1 ++ ++struct 
profiling_control { ++ int32_t collector_id; ++ int32_t reserved; ++ uint64_t switches; ++}; ++ ++/** ++ * struct - sw_collector_data ++ * Information about the collector to be invoked at collection time. ++ * ++ * The collector_lists array holds linked lists of collectors to ++ * be exercised at specific points in time during the collection ++ * (e.g. begin, poll, end, etc.). At a trigger time, the driver walks ++ * that time's list of nodes, and exercises the collectors on that list. ++ * ++ * @list: List/link implementation ++ * @cpumask: Collect if cpu matches mask ++ * @info: Ptr to metric info ++ * @ops: Ptr to collector's operations ++ * @last_update_jiffies: Indicates when this node was last exercised. ++ * @per_msg_payload_size: Data size ++ * @msg: Ptr to collected data ++ */ ++typedef struct swhv_acrn_msr_collector_data { ++ SW_LIST_ENTRY(list, swhv_acrn_msr_collector_data); ++ pw_s16_t cpu_mask; ++ pw_s16_t sample_id; ++ struct profiling_msr_ops_list *msr_ops_list; ++ size_t per_msg_payload_size; ++} swhv_acrn_msr_collector_data_t; ++#endif // _SWHV_ACRN_H_ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h b/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h +new file mode 100644 +index 000000000000..c5a08d1025ae +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h +@@ -0,0 +1,186 @@ ++#ifndef _SWHV_ACRN_SBUF_H_ ++#define _SWHV_ACRN_SBUF_H_ 1 ++ ++#include ++ ++/* ++ * Checks if the passed sbuf is empty. ++ */ ++static inline bool sbuf_is_empty(struct shared_buf *sbuf) ++{ ++ return (sbuf->head == sbuf->tail); ++} ++ ++static inline uint32_t sbuf_next_ptr(uint32_t pos, uint32_t span, ++ uint32_t scope) ++{ ++ pos += span; ++ pos = (pos >= scope) ? (pos - scope) : pos; ++ return pos; ++} ++ ++/* ++ * This function returns the available free space in the ++ * passed sbuf. 
++ */ ++inline uint32_t sbuf_available_space(struct shared_buf *sbuf) ++{ ++ uint32_t remaining_space; ++ /* ++ * if tail isn't wrapped around ++ * subtract difference of tail and head from size ++ * otherwise ++ * difference between head and tail ++ */ ++ if (sbuf->tail >= sbuf->head) ++ remaining_space = sbuf->size - (sbuf->tail - sbuf->head); ++ else ++ remaining_space = sbuf->head - sbuf->tail; ++ ++ return remaining_space; ++} ++ ++/* ++ * This function retrieves the requested 'size' amount of data from ++ * the passed buffer. ++ * This is a much more efficient implementation than the default ++ * 'sbuf_get()' which retrieves one 'element' size at a time. ++ */ ++int sbuf_get_variable(struct shared_buf *sbuf, void **data, uint32_t size) ++{ ++ /* ++ * 1. Check if buffer isn't empty and non-zero 'size' ++ * 2. check if enough ('size' bytes) data to be read is present. ++ * 3. Continue if buffer has enough data ++ * 4. Copy data from buffer ++ * 4a. copy data in 2 parts if there is a wrap-around ++ * 4b. Otherwise do a simple copy ++ */ ++ const void *from; ++ uint32_t current_data_size, offset = 0, next_head; ++ ++ if ((sbuf == NULL) || (*data == NULL)) ++ return -EINVAL; ++ ++ if (sbuf_is_empty(sbuf) || (size == 0)) { ++ /* no data available */ ++ return 0; ++ } ++ ++ current_data_size = sbuf->size - sbuf_available_space(sbuf); ++ ++ /* ++ * TODO If requested data size is greater than current buffer size, ++ * consider at least copying the current buffer size. 
++ */ ++ if (size > current_data_size) { ++ pw_pr_warn( ++ "Requested data size is greater than the current buffer size!"); ++ /* not enough data to be read */ ++ return 0; ++ } ++ ++ next_head = sbuf_next_ptr(sbuf->head, size, sbuf->size); ++ ++ from = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->head; ++ ++ if (next_head < sbuf->head) { // wrap-around ++ /* copy first part */ ++ offset = sbuf->size - sbuf->head; ++ memcpy(*data, from, offset); ++ ++ from = (void *)sbuf + SBUF_HEAD_SIZE; ++ } ++ memcpy((void *)*data + offset, from, size - offset); ++ ++ sbuf->head = next_head; ++ ++ return size; ++} ++ ++/* ++ * This API can be used to retrieve complete samples at a time from the ++ * sbuf. It internally uses the sbuf_get() which retrieves 1 'element' ++ * at a time and is probably not very efficient for reading large amount ++ * of data. ++ * Note: Not used currently. ++ */ ++int sbuf_get_wrapper(struct shared_buf *sbuf, uint8_t **data) ++{ ++ uint8_t *sample; ++ uint8_t sample_offset; ++ acrn_msg_header *header; ++ uint32_t payload_size, sample_size, _size; ++ ++ /* ++ * Assumption: A partial variable sample will not be written ++ * to the buffer. 
++ * do while buf isn't empty ++ * Read header from the buffer ++ * write to data ++ * get size of payload ++ * check if the size of 'data' is enough for the ++ * variable sample to be read to ++ * Read the payload ++ * Keep reading ele_size chunks till available and write to data ++ * if the last chunk is less than ele_size, do a partial copy to ++ * data ++ * ++ * ++ */ ++ if ((sbuf == NULL) || (data == NULL)) ++ return -EINVAL; ++ ++ if (sbuf_is_empty(sbuf)) { ++ /* no data available */ ++ return 0; ++ } ++ ++ sample_offset = 0; ++ ++ header = vmalloc(sizeof(ACRN_MSG_HEADER_SIZE)); ++ memset(header, 0, sizeof(ACRN_MSG_HEADER_SIZE)); ++ //read header ++ sbuf_get(sbuf, (uint8_t *)header); ++ ++ payload_size = header->payload_size; ++ ++ sample_size = ACRN_MSG_HEADER_SIZE + header->payload_size; ++ ++ sample = vmalloc(sample_size); ++ ++ //copy header ++ memcpy((void *)sample, (void *)header, ACRN_MSG_HEADER_SIZE); ++ ++ sample_offset += ACRN_MSG_HEADER_SIZE; ++ ++ _size = payload_size; ++ while (_size) { ++ if (_size >= sbuf->ele_size) { ++ sbuf_get(sbuf, (uint8_t *)(sample + sample_offset)); ++ sample_offset += sbuf->ele_size; ++ _size -= sbuf->ele_size; ++ } else { ++ pw_pr_error( ++ "error: payload has to be multiple of 32\n"); ++ return 0; ++ /* ++ * This code can be enabled when support for variable ++ * sized samples needs to be added. ++ */ ++#if 0 ++ chunk = malloc(sbuf->ele_size); ++ sbuf_get(sbuf, chunk); ++ memcpys((void *)(sample + sample_offset), _size, chunk); ++ _size -= _size; ++ free(chunk); ++#endif ++ } ++ } ++ ++ *data = sample; ++ ++ vfree(header); ++ return sample_size; ++} ++#endif // _SWHV_ACRN_SBUF_H_ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_defines.h b/drivers/platform/x86/socwatchhv/inc/swhv_defines.h +new file mode 100644 +index 000000000000..65239d566ae1 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_defines.h +@@ -0,0 +1,111 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. 
When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef _SWHV_DEFINES_H_ ++#define _SWHV_DEFINES_H_ ++ ++/* *************************************************** ++ * Common to kernel and userspace. ++ * *************************************************** ++ */ ++#define PW_SUCCESS 0 ++#define PW_ERROR 1 ++#define PW_SUCCESS_NO_COLLECT 2 ++ ++// ++// Start off with none of the OS'es are defined ++// ++#undef SWDRV_OS_LINUX ++#undef SWDRV_OS_ANDROID ++#undef SWDRV_OS_UNIX ++ ++// ++// Make sure none of the architectures is defined here ++// ++#undef SWDRV_IA32 ++#undef SWDRV_EM64T ++ ++// ++// Make sure one (and only one) of the OS'es gets defined here ++// ++// Unfortunately entirex defines _WIN32 so we need to check for linux ++// first. The definition of these flags is one and only one ++// _OS_xxx is allowed to be defined. 
++// ++#if defined(__ANDROID__) ++#define SWDRV_OS_ANDROID ++#define SWDRV_OS_UNIX ++#elif defined(__linux__) ++#define SWDRV_OS_LINUX ++#define SWDRV_OS_UNIX ++#else ++#error "Compiling for an unknown OS" ++#endif ++ ++// ++// Make sure one (and only one) architecture is defined here ++// as well as one (and only one) pointer__ size ++// ++#if defined(_M_IX86) || defined(__i386__) ++#define SWDRV_IA32 ++#elif defined(_M_AMD64) || defined(__x86_64__) ++#define SWDRV_EM64T ++#else ++#error "Unknown architecture for compilation" ++#endif ++ ++#endif // _SWHV_DEFINES_H_ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_driver.h b/drivers/platform/x86/socwatchhv/inc/swhv_driver.h +new file mode 100644 +index 000000000000..8ad0d672f095 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_driver.h +@@ -0,0 +1,109 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. 
++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++*/ ++ ++#ifndef _SWHV_DRIVER_H_ ++#define _SWHV_DRIVER_H_ 1 ++ ++#include // LINUX_VERSION_CODE ++#include // vmalloc ++#include "swhv_defines.h" ++#include "sw_kernel_defines.h" ++#include "pw_version.h" ++ ++#define MAX_CORE_COUNT 8 ++ ++#define MOBILEVISOR 1 ++#define ACRN 2 ++ ++// define this flag to have IDT entry programmed for SoCWatch IRQ handler ++#define SOCWATCH_IDT_IRQ 1 ++ ++extern void SYS_Perfvec_Handler(void); ++extern short SYS_Get_cs(void); ++ ++#if defined(SWDRV_IA32) && (SOCWATCH_IDT_IRQ) ++extern void *SYS_Get_IDT_Base_HWR(void); /// IDT base from hardware IDTR ++ ++#define SYS_Get_IDT_Base SYS_Get_IDT_Base_HWR ++#endif // defined(SWDRV_IA32) && (SOCWATCH_IDT_IRQ) ++ ++#if defined(SWDRV_EM64T) && (SOCWATCH_IDT_IRQ) ++extern void SYS_Get_IDT_Base(void **); ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25) ++typedef struct gate_struct gate_struct_t; ++#else ++typedef struct gate_struct64 gate_struct_t; ++#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ++#endif // defined(SWDRV_EM64T) && (SOCWATCH_IDT_IRQ) ++ ++// miscellaneous defines ++#define CPU() (raw_smp_processor_id()) ++#define GET_BOOL_STRING(b) ((b) ? "TRUE" : "FALSE") ++ ++#define _STRINGIFY(x) #x ++#define STRINGIFY(x) _STRINGIFY(x) ++#define _STRINGIFY_W(x) (L#x) ++#define STRINGIFY_W(x) _STRINGIFY_W(x) ++ ++/* ++ * 64bit Compare-and-swap. ++ */ ++#define CAS64(p, o, n) (cmpxchg64((p), (o), (n)) == (o)) ++ ++typedef struct PWCollector_msg PWCollector_msg_t; ++ ++#endif // _SWHV_DRIVER_H_ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h b/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h +new file mode 100644 +index 000000000000..690bbcd5ccba +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h +@@ -0,0 +1,164 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. 
++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. ++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#ifndef __SWHV_IOCTL_H__ ++#define __SWHV_IOCTL_H__ ++ ++#include "pw_types.h" ++ ++#if defined(__linux__) || defined(__QNX__) ++#if __KERNEL__ ++#include ++#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) ++#include ++#endif // COMPAT && x64 ++#else // !__KERNEL__ ++#include ++#endif // __KERNEL__ ++#endif // __linux__ ++/* ++ * Path to the Hypervisor driver device file. ++ */ ++#define SWHV_DEVICE_NAME "swhypervdrv" ++#define SWHV_DEVICE_PATH "/dev/" SWHV_DEVICE_NAME ++ ++/* ++ * The SoFIA-specific IOCTL magic ++ * number -- used to ensure IOCTLs ++ * are delivered to the correct ++ * driver. ++ */ ++#define SP_IOC_MAGIC 99 ++/* ++ * CONSTANTS that define the various operations. ++ * TODO: convert to enum? 
++ */ ++#define SWHVDRV_OPERATION_CONFIGURE 1 /* configure a collection */ ++#define SWHVDRV_OPERATION_CMD 2 /* control a collection */ ++#define SWHVDRV_OPERATION_VERSION 3 /* retrieve driver version info */ ++#define SWHVDRV_OPERATION_CLOCK 4 /* retrieve STM clock */ ++#define SWHVDRV_OPERATION_TOPOLOGY 5 /* retrieve CPU topology */ ++#define SWHVDRV_OPERATION_CPUCOUNT 6 /* retrieve CPU count */ ++#define SWHVDRV_OPERATION_HYPERVISOR_TYPE 7 /* retrieve hypervisor type */ ++#define SWHVDRV_OPERATION_MSR_READ 8 /* retrieve MSR value */ ++#define SWHVDRV_OPERATION_POLL 9 /* Polling tick */ ++ ++enum swhv_ioctl_cmd { ++ swhv_ioctl_cmd_none = 0, ++ swhv_ioctl_cmd_config, ++ swhv_ioctl_cmd_cmd, ++ swhv_ioctl_cmd_version, ++ swhv_ioctl_cmd_clock, ++ swhv_ioctl_cmd_topology, ++ swhv_ioctl_cmd_cpucount, ++ swhv_ioctl_cmd_hypervisor_type, ++}; ++/* ++ * The actual IOCTL commands. ++ * ++ * From the kernel documentation: ++ * "_IOR" ==> Read IOCTL ++ * "_IOW" ==> Write IOCTL ++ * "_IOWR" ==> Read/Write IOCTL ++ * ++ * Where "Read" and "Write" are from the user's perspective ++ * (similar to the file "read" and "write" calls). 
++ */ ++#define SWHVDRV_IOCTL_CONFIGURE \ ++ _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CONFIGURE, \ ++ struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_CMD \ ++ _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CMD, struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_VERSION \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_VERSION, struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_CLOCK \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CLOCK, struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_TOPOLOGY \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_TOPOLOGY, struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_CPUCOUNT \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CPUCOUNT, struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_HYPERVISOR_TYPE \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_HYPERVISOR_TYPE, \ ++ struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_MSR_READ \ ++ _IOWR(SP_IOC_MAGIC, SWHVDRV_OPERATION_MSR_READ, \ ++ struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_POLL \ ++ _IO(SP_IOC_MAGIC, SWHVDRV_OPERATION_POLL, struct spdrv_ioctl_arg *) ++ ++#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) ++#include ++ ++#define SWHVDRV_IOCTL_CONFIGURE32 \ ++ _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CONFIGURE, compat_uptr_t) ++#define SWHVDRV_IOCTL_CMD32 \ ++ _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CMD, compat_uptr_t) ++#define SWHVDRV_IOCTL_VERSION32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_VERSION, compat_uptr_t) ++#define SWHVDRV_IOCTL_CLOCK32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CLOCK, compat_uptr_t) ++#define SWHVDRV_IOCTL_TOPOLOGY32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_TOPOLOGY, compat_uptr_t) ++#define SWHVDRV_IOCTL_CPUCOUNT32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CPUCOUNT, compat_uptr_t) ++#define SWHVDRV_IOCTL_HYPERVISOR_TYPE32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_HYPERVISOR_TYPE, compat_uptr_t) ++#define SWHVDRV_IOCTL_MSR_READ32 \ ++ _IOWR(SP_IOC_MAGIC, SWHVDRV_OPERATION_MSR_READ, compat_uptr_t) ++#define SWHVDRV_IOCTL_POLL32 \ ++ _IO(SP_IOC_MAGIC, SWHVDRV_OPERATION_POLL, compat_uptr_t) 
++#endif // COMPAT && x64 ++ ++#endif // __SWHV_IOCTL_H__ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_structs.h b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h +new file mode 100644 +index 000000000000..67bac8e36ad4 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h +@@ -0,0 +1,234 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. 
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++ ++#ifndef _SWHV_STRUCTS_H_ ++#define _SWHV_STRUCTS_H_ 1 ++ ++#include "sw_structs.h" ++ ++enum swhv_hypervisor_type { ++ swhv_hypervisor_none = 0, ++ swhv_hypervisor_mobilevisor, ++ swhv_hypervisor_acrn, ++}; ++ ++/* ++ * Structure to return version information. ++ */ ++#pragma pack(push) ++#pragma pack(1) ++struct sp_driver_version_info { ++ pw_s32_t major; ++ pw_s32_t minor; ++ pw_s32_t other; ++}; ++ ++struct spdrv_ioctl_arg { ++ pw_s32_t in_len; ++ pw_s32_t out_len; ++ char *in_arg; ++ char *out_arg; ++}; ++#pragma pack(pop) ++ ++/* ++ * Various commands to control a collection. 
++ */ ++enum swhvdrv_cmd { ++ SWHVDRV_CMD_START, ++ SWHVDRV_CMD_STOP, ++ /* others here when appropriate */ ++ SVHVDRV_CMD_MAX ++}; ++ ++enum swhv_collector_type { ++ SWHV_COLLECTOR_TYPE_NONE, ++ SWHV_COLLECTOR_TYPE_SWITCH, ++ SWHV_COLLECTOR_TYPE_MSR, ++}; ++ ++enum swhv_io_cmd { SWHV_IO_CMD_READ = 0, SWHV_IO_CMD_WRITE, SWHV_IO_CMD_MAX }; ++ ++#pragma pack(push, 1) ++struct swhv_driver_msr_io_descriptor { ++ pw_u64_t address; ++ enum sw_msr_type type; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct swhv_driver_switch_io_descriptor { ++ pw_u32_t switch_bitmask; ++}; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++typedef struct swhv_driver_io_descriptor { ++ pw_u16_t collection_type; // One of 'enum swhv_collector_type' ++ pw_s16_t collection_command; // One of 'enum swhv_io_cmd' ++ pw_u16_t counter_size_in_bytes; // The number of bytes to READ or WRITE ++ union { ++ struct swhv_driver_msr_io_descriptor msr_descriptor; ++ struct swhv_driver_switch_io_descriptor switch_descriptor; ++ }; ++ pw_u64_t write_value; // The value to WRITE ++} swhv_driver_io_descriptor_t; ++#pragma pack(pop) ++ ++#pragma pack(push, 1) ++struct swhv_driver_interface_info { ++ pw_s16_t cpu_mask; // On which CPU(s) should the driver read the data? ++ // Currently: -2 ==> read on ALL CPUs, ++ // -1 ==> read on ANY CPU, ++ // >= 0 ==> the specific CPU to read on ++ pw_s16_t sample_id; // Sample ID, used to map it back to Metric Plugin, Metric and Msg ID combo ++ pw_u16_t num_io_descriptors; // Number of descriptors in the array, below. ++ pw_u8_t descriptors[1]; // Array of swhv_driver_io_descriptor structs. 
++}; ++#pragma pack(pop) ++#define SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ ++ (sizeof(struct swhv_driver_interface_info) - sizeof(pw_u8_t[1])) ++ ++#pragma pack(push, 1) ++struct swhv_driver_interface_msg { ++ pw_u16_t num_infos; // Number of 'swhv_driver_interface_info' structs contained within the 'infos' variable, below ++ // pw_u16_t infos_size_bytes; // Size of data inlined within the 'infos' variable, below ++ pw_u8_t infos[1]; ++}; ++#pragma pack(pop) ++#define SWHV_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ ++ (sizeof(struct swhv_driver_interface_msg) - sizeof(pw_u8_t[1])) ++ ++/* ++ * ACRN specific structs, copied from the ACRN profiling service ++ * DO NOT modify these below stucts ++ */ ++#define SBUF_HEAD_SIZE 64 /* bytes */ ++ ++typedef enum PROFILING_SOCWATCH_FEATURE { ++ SOCWATCH_COMMAND = 0, ++ SOCWATCH_VM_SWITCH_TRACING, ++ MAX_SOCWATCH_FEATURE_ID, ++} profiling_socwatch_feature; ++ ++typedef enum PROFILING_SOCWATCH_FEATURE acrn_type; ++ ++/* ++ * current default ACRN header ++ */ ++struct data_header { ++ uint32_t collector_id; ++ uint16_t cpu_id; ++ uint16_t data_type; ++ uint64_t tsc; ++ uint64_t payload_size; ++ uint64_t reserved; ++} __attribute__((aligned(32))); ++#define ACRN_MSG_HEADER_SIZE (sizeof(struct data_header)) ++ ++struct vm_switch_trace { ++ int32_t os_id; ++ uint64_t vmenter_tsc; ++ uint64_t vmexit_tsc; ++ uint64_t vmexit_reason; ++} __attribute__((aligned(32))); ++#define VM_SWITCH_TRACE_SIZE (sizeof(struct vm_switch_trace)) ++ ++#define MAX_NR_VCPUS 8 ++#define MAX_NR_VMS 6 ++ ++struct profiling_vcpu_pcpu_map { ++ int32_t vcpu_id; ++ int32_t pcpu_id; ++ int32_t apic_id; ++} __attribute__((aligned(8))); ++ ++struct profiling_vm_info { ++ int32_t vm_id; ++ unsigned char guid[16]; ++ char vm_name[16]; ++ int32_t num_vcpus; ++ struct profiling_vcpu_pcpu_map cpu_map[MAX_NR_VCPUS]; ++} __attribute__((aligned(8))); ++ ++struct profiling_vm_info_list { ++ int32_t num_vms; ++ struct profiling_vm_info vm_list[MAX_NR_VMS]; ++} 
__attribute__((aligned(8))); ++ ++/* ++ * End of ACRN specific structs, copied from the ACRN profiling service ++ */ ++typedef struct data_header acrn_msg_header; ++typedef struct vm_switch_trace vmswitch_trace_t; ++ ++/* ++ * ACRN specific constants shared between the driver and user-mode ++ */ ++// Per CPU buffer size ++#define ACRN_BUF_SIZE ((4 * 1024 * 1024) - SBUF_HEAD_SIZE /* 64 bytes */) ++// Size of buffer at which data should be transferred to user-mode ++#define ACRN_BUF_TRANSFER_SIZE (ACRN_BUF_SIZE / 2) ++/* ++ * The ACRN 'sbuf' buffers consist of fixed size elements. ++ * This is how they are intended to be used, though SoCWatch only uses it to ++ * allocate the correct buffer size. ++ */ ++#define ACRN_BUF_ELEMENT_SIZE 32 /* byte */ ++#define ACRN_BUF_ELEMENT_NUM (ACRN_BUF_SIZE / ACRN_BUF_ELEMENT_SIZE) ++#define ACRN_BUF_FILLED_SIZE(sbuf) (sbuf->size - sbuf_available_space(sbuf)) ++ ++#endif // _SWHV_STRUCTS_H_ +diff --git a/drivers/platform/x86/socwatchhv/swhv_acrn.c b/drivers/platform/x86/socwatchhv/swhv_acrn.c +new file mode 100644 +index 000000000000..926ff09819a7 +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/swhv_acrn.c +@@ -0,0 +1,747 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "swhv_defines.h" ++#include "swhv_driver.h" ++#include "swhv_ioctl.h" ++#include "swhv_structs.h" ++#include "control.h" ++#include "swhv_acrn.h" ++#include "swhv_acrn_sbuf.h" ++ ++/* ******************************************* ++ * Compile-time constants ++ * ******************************************* ++ */ ++#define foreach_cpu(cpu, cpu_num) for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) ++ ++/* actual physical cpu number, initialized by module init */ ++static int pcpu_num; ++bool flush_mode; ++ ++wait_queue_head_t read_queue; ++ ++//TODO is this needed? 
++//module_param(nr_cpus, int, S_IRUSR | S_IWUSR); ++ ++static struct shared_buf **sbuf_per_cpu; ++ ++static pw_u64_t global_collection_switch; ++static SW_DEFINE_LIST_HEAD(swhv_msr_collector, swhv_acrn_msr_collector_data); ++ ++/* used by the MSR read IOCTL */ ++struct profiling_msr_ops_list *msr_read_ops_list; ++ ++bool buffer_not_ready(int *cpu); ++ ++struct swhv_acrn_msr_collector_data *swhv_alloc_msr_collector_node(void) ++{ ++ struct swhv_acrn_msr_collector_data *node = ++ (struct swhv_acrn_msr_collector_data *)kmalloc( ++ sizeof(struct swhv_acrn_msr_collector_data), ++ GFP_KERNEL); ++ if (node) { ++ node->per_msg_payload_size = 0x0; ++ node->sample_id = 0x0; ++ node->msr_ops_list = kmalloc( ++ pcpu_num * sizeof(struct profiling_msr_ops_list), ++ GFP_KERNEL); ++ memset(node->msr_ops_list, 0, ++ pcpu_num * sizeof(struct profiling_msr_ops_list)); ++ SW_LIST_ENTRY_INIT(node, list); ++ } ++ return node; ++} ++struct swhv_acrn_msr_collector_data * ++swhv_add_driver_msr_info(void *list_head, ++ const struct swhv_driver_interface_info *info) ++{ ++ int cpu; ++ ++ SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; ++ ++ struct swhv_acrn_msr_collector_data *node = ++ swhv_alloc_msr_collector_node(); ++ if (!node) { ++ pw_pr_error("ERROR allocating MSR collector node!\n"); ++ return NULL; ++ } ++ ++ node->sample_id = info->sample_id; ++ node->cpu_mask = info->cpu_mask; ++ foreach_cpu(cpu, pcpu_num) ++ { ++ node->msr_ops_list[cpu].collector_id = COLLECTOR_SOCWATCH; ++ node->msr_ops_list[cpu].msr_op_state = MSR_OP_REQUESTED; ++ } ++ ++ SW_LIST_ADD(head, node, list); ++ return node; ++} ++ ++int swhv_add_driver_msr_io_desc(struct swhv_acrn_msr_collector_data *node, ++ struct swhv_driver_io_descriptor *info) ++{ ++ int idx, cpu; ++ pw_u16_t num_entries; ++ struct profiling_msr_op *msr_op = NULL; ++ ++ // Confirm this is an MSR IO descriptor ++ if (info->collection_type != SWHV_COLLECTOR_TYPE_MSR) { ++ pw_pr_error( ++ "ERROR trying to configure MSR collector 
with other data!\n"); ++ return -EINVAL; ++ } ++ ++ foreach_cpu(cpu, pcpu_num) ++ { ++ num_entries = node->msr_ops_list[cpu].num_entries; ++ if (num_entries >= MAX_MSR_LIST_NUM) { ++ pw_pr_error( ++ "ERROR trying to add too many MSRs to collect!\n"); ++ return -PW_ERROR; ++ } ++ ++ idx = num_entries; ++ ++ msr_op = &(node->msr_ops_list[cpu].entries[idx]); ++ ++ msr_op->msr_id = info->msr_descriptor.address; ++ if (info->collection_command == SWHV_IO_CMD_READ) { ++ msr_op->msr_op_type = MSR_OP_READ; ++ } else if (info->collection_command == SWHV_IO_CMD_WRITE) { ++ msr_op->msr_op_type = MSR_OP_WRITE; ++ } ++ ++ /* ++ * Use the param field to set sample id. ++ * This'll be used in the hypervisor to ++ * set the id in the samples ++ */ ++ msr_op->param = (uint16_t)node->sample_id; ++ ++ num_entries++; ++ ++ if (num_entries < MAX_MSR_LIST_NUM) { ++ node->msr_ops_list[cpu].entries[num_entries].msr_id = ++ -1; ++ } ++ node->msr_ops_list[cpu].num_entries = num_entries; ++ } ++ return PW_SUCCESS; ++} ++ ++int swhv_init_per_cpu_buffers(void) ++{ ++ int i, ret, cpu; ++ ++ sbuf_per_cpu = vmalloc(pcpu_num * sizeof(struct shared_buf *)); ++ ++ foreach_cpu(cpu, pcpu_num) ++ { ++ /* allocate shared_buf */ ++ sbuf_per_cpu[cpu] = sbuf_allocate(ACRN_BUF_ELEMENT_NUM, ++ ACRN_BUF_ELEMENT_SIZE); ++ if (!sbuf_per_cpu[cpu]) { ++ pw_pr_error("Failed to allocate buffer for cpu %d\n", ++ cpu); ++ ret = -ENOMEM; ++ goto out_free; ++ } ++ } ++ ++ //TODO understand the use of this API ++ foreach_cpu(cpu, pcpu_num) ++ { ++ ret = sbuf_share_setup(cpu, ACRN_SOCWATCH, sbuf_per_cpu[cpu]); ++ if (ret < 0) { ++ pw_pr_error("Failed to setup buffer for cpu %d\n", cpu); ++ goto out_sbuf; ++ } ++ } ++ ++ return PW_SUCCESS; ++out_sbuf: ++ for (i = --cpu; i >= 0; i--) { ++ sbuf_share_setup(i, ACRN_SOCWATCH, NULL); ++ } ++ cpu = pcpu_num; ++ ++out_free: ++ for (i = --cpu; i >= 0; i--) { ++ sbuf_free(sbuf_per_cpu[i]); ++ } ++ ++ vfree(sbuf_per_cpu); ++ return ret; ++} ++ ++void 
swhv_destroy_per_cpu_buffers(void) ++{ ++ int cpu; ++ ++ pw_pr_debug("%s, pcpu_num: %d\n", __func__, pcpu_num); ++ ++ foreach_cpu(cpu, pcpu_num) ++ { ++ //TODO anything else to de-register? ++ /* deregister devices */ ++ ++ /* set sbuf pointer to NULL in HV */ ++ sbuf_share_setup(cpu, ACRN_SOCWATCH, NULL); ++ ++ /* free sbuf, sbuf_per_cpu[cpu] should be set NULL */ ++ sbuf_free(sbuf_per_cpu[cpu]); ++ } ++ vfree(sbuf_per_cpu); ++} ++ ++void swhv_free_msr_collector_node(struct swhv_acrn_msr_collector_data *node) ++{ ++ if (!node) { ++ return; ++ } ++ ++ kfree(node->msr_ops_list); ++ kfree(node); ++ return; ++} ++ ++void swhv_init_msr_collector_list(void) ++{ ++ void *list_head = &swhv_msr_collector; ++ ++ SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; ++ SW_LIST_HEAD_INIT(head); ++} ++ ++void swhv_destroy_msr_collector_list(void) ++{ ++ void *list_head = &swhv_msr_collector; ++ ++ SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; ++ while (!SW_LIST_EMPTY(head)) { ++ struct swhv_acrn_msr_collector_data *curr = ++ SW_LIST_GET_HEAD_ENTRY( ++ head, swhv_acrn_msr_collector_data, list); ++ SW_LIST_UNLINK(curr, list); ++ swhv_free_msr_collector_node(curr); ++ } ++} ++ ++void swhv_handle_hypervisor_collector(uint32_t control_cmd) ++{ ++ struct profiling_control *acrn_profiling_control; ++ ++ acrn_profiling_control = ++ kmalloc(sizeof(struct profiling_control), GFP_KERNEL); ++ memset(acrn_profiling_control, 0, sizeof(struct profiling_control)); ++ ++ acrn_profiling_control->collector_id = COLLECTOR_SOCWATCH; ++ ++ if (control_cmd == 1) { // start collection + send switch bitmask ++ pw_pr_debug("STARTING ACRN PROFILING SERVICE\n"); ++ global_collection_switch |= ++ control_cmd; // first bit controls start/stop ++ // of collection ++ } else if (control_cmd == 0) { // stop collection + reset switch bitmask ++ pw_pr_debug("STOPPING ACRN PROFILING SERVICE\n"); ++ global_collection_switch = control_cmd; ++ } ++ acrn_profiling_control->switches = 
global_collection_switch; ++ ++ // send collection command + switch bitmask ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, ++ virt_to_phys(acrn_profiling_control)); ++ kfree(acrn_profiling_control); ++} ++ ++int swhv_handle_msr_collector_list(void) ++{ ++ void *list_head = &swhv_msr_collector; ++ ++ SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; ++ int retVal = PW_SUCCESS; ++ int dummy_cpu = 0; ++ struct swhv_acrn_msr_collector_data *curr = NULL; ++ ++ if (SW_LIST_EMPTY(&swhv_msr_collector)) { ++ pw_pr_debug("DEBUG: EMPTY MSR COLLECTOR LIST\n"); ++ return retVal; ++ } ++ ++ if (!head) { ++ return -PW_ERROR; ++ } ++ SW_LIST_FOR_EACH_ENTRY(curr, head, list) ++ { ++ pw_pr_debug("HANDLING MSR NODE\n"); ++ ++ //hypervisor call to do immediate MSR read ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, ++ virt_to_phys(curr->msr_ops_list)); ++ } ++ if (buffer_not_ready(&dummy_cpu) == false) { ++ /* ++ * force the device_read function to check if any buffers are ++ * filled with data above 'ACRN_BUF_TRANSFER_SIZE' size and ++ * if yes, copy to userspace ++ */ ++ wake_up_interruptible(&read_queue); ++ } ++ return retVal; ++} ++ ++long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, ++ int local_len) ++{ ++ struct swhv_driver_interface_info *local_info = NULL; ++ struct swhv_driver_io_descriptor *local_io_desc = NULL; ++ struct swhv_driver_interface_msg *local_msg = vmalloc(local_len); ++ pw_u16_t num_infos = 0, num_io_desc = 0; ++ pw_u32_t local_config_bitmap = 0; ++ int done = 0; ++ bool driver_info_added = false; ++ ++ char *__data = (char *)local_msg->infos; ++ size_t dst_idx = 0, desc_idx = 0; ++ struct swhv_acrn_msr_collector_data *msr_collector_node = NULL; ++ ++ if (!local_msg) { ++ pw_pr_error("ERROR allocating space for local message!\n"); ++ return -EFAULT; ++ } ++ if (copy_from_user(local_msg, remote_msg, local_len)) { ++ pw_pr_error("ERROR copying message from user space!\n"); ++ vfree(local_msg); 
++ return -EFAULT; ++ } ++ ++ flush_mode = false; ++ ++ pw_pr_debug("local_len: %d\n", local_len); ++ /* ++ * We aren't allowed to config the driver multiple times between ++ * collections. Clear out any previous config values. ++ */ ++ swhv_destroy_msr_collector_list(); ++ ++ // clear the collection bitmask ++ global_collection_switch = 0; ++ ++ num_infos = local_msg->num_infos; ++ pw_pr_debug("LOCAL NUM INFOS = %u\n", num_infos); ++ for (; num_infos > 0 && !done; --num_infos) { ++ local_info = ++ (struct swhv_driver_interface_info *)&__data[dst_idx]; ++ desc_idx = dst_idx + SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE(); ++ dst_idx += (SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE() + ++ local_info->num_io_descriptors * ++ sizeof(struct swhv_driver_io_descriptor)); ++ pw_pr_debug("# msrs = %u\n", ++ (unsigned int)local_info->num_io_descriptors); ++ ++ num_io_desc = local_info->num_io_descriptors; ++ pw_pr_debug("LOCAL NUM IO DESC = %u\n", num_io_desc); ++ ++ driver_info_added = false; ++ for (; num_io_desc > 0; --num_io_desc) { ++ local_io_desc = (struct swhv_driver_io_descriptor ++ *)&__data[desc_idx]; ++ desc_idx += sizeof(struct swhv_driver_io_descriptor); ++ if (local_io_desc->collection_type == ++ SWHV_COLLECTOR_TYPE_MSR) { ++ if (!driver_info_added) { ++ msr_collector_node = ++ swhv_add_driver_msr_info( ++ &swhv_msr_collector, ++ local_info); ++ if (msr_collector_node == NULL) { ++ return -PW_ERROR; ++ } ++ driver_info_added = true; ++ } ++ ++ pw_pr_debug( ++ "MSR - addr: 0x%llx, type: %u, read/write: %u\n", ++ local_io_desc->msr_descriptor.address, ++ local_io_desc->msr_descriptor.type, ++ local_io_desc->collection_command); ++ swhv_add_driver_msr_io_desc(msr_collector_node, ++ local_io_desc); ++ } else if (local_io_desc->collection_type == ++ SWHV_COLLECTOR_TYPE_SWITCH) { ++ local_config_bitmap = ++ local_io_desc->switch_descriptor ++ .switch_bitmask; ++ pw_pr_debug("local bitmask = %u\n", ++ local_config_bitmap); ++ ++ global_collection_switch = local_config_bitmap; 
++ ++ // only one set of collection switches are ++ // expected, we are done configuring ++ done = 1; ++ break; ++ } else { ++ pw_pr_error( ++ "WARNING: unknown collector configuration requested, collector id: %u!\n", ++ local_io_desc->collection_type); ++ } ++ } ++ driver_info_added = false; ++ } ++ vfree(local_msg); ++ return PW_SUCCESS; ++} ++ ++long swhv_stop(void) ++{ ++ uint32_t control = 0; // stop collection command ++ ++ pw_pr_debug("socwatch: stop called\n"); ++ ++ //If MSR ops are present, perform them to get begin snapshot data. ++ swhv_handle_msr_collector_list(); ++ ++ // stop collection + reset switch bitmask ++ swhv_handle_hypervisor_collector(control); ++ ++ // flush partially filled hypervisor buffers ++ flush_mode = true; ++ ++ // force the device_read function to check if any ++ // buffers are partially filled with data ++ wake_up_interruptible(&read_queue); ++ ++ /* ++ * Clear out the MSR collector list. ++ */ ++ swhv_destroy_msr_collector_list(); ++ ++ return PW_SUCCESS; ++} ++ ++long swhv_start(void) ++{ ++ uint32_t control = 1; // start collection command ++#if 0 ++ struct profiling_vm_info_list *vm_info_list = NULL; ++ int i; ++#endif ++ pw_pr_debug("socwatch: start called\n"); ++ ++ flush_mode = false; ++ ++ // start collection + send switch bitmask ++ swhv_handle_hypervisor_collector(control); ++ ++ //If MSR ops are present, perform them to get begin snapshot data. ++ swhv_handle_msr_collector_list(); ++ ++#if 0 ++ // Expand this eventually to retrieve VM-realted info from the hypervisor ++ // Leaving it here for now. 
++ vm_info_list = kmalloc(sizeof(struct profiling_vm_info_list), ++ GFP_KERNEL); ++ memset(vm_info_list, 0, sizeof(struct profiling_vm_info_list)); ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, ++ virt_to_phys(vm_info_list)); ++ ++ pw_pr_debug("Number of VMs: %d\n", vm_info_list->num_vms); ++ for (i = 0; i < vm_info_list->num_vms; ++i) { ++ pw_pr_debug("VM id: %d\n", vm_info_list->vm_list[i].vm_id_num); ++ pw_pr_debug("VM name: %s\n", vm_info_list->vm_list[i].vm_name); ++ } ++#endif ++ return PW_SUCCESS; ++} ++ ++long swhv_get_cpu_count(u32 __user *remote_args) ++{ ++ uint32_t num_CPUs = pcpu_num; ++ ++ return copy_to_user(remote_args, &num_CPUs, sizeof(num_CPUs)); ++}; ++ ++int device_open_i(struct inode *inode, struct file *file) ++{ ++ pw_pr_debug("socwatch: device_open_i() called\n"); ++ return PW_SUCCESS; ++} ++ ++long swhv_get_clock(u32 __user *remote_in_args, u64 __user *remote_args) ++{ ++ return -1; ++} ++ ++long swhv_get_topology(u64 __user *remote_args) ++{ ++ return -1; ++} ++ ++long swhv_get_hypervisor_type(u32 __user *remote_args) ++{ ++ uint32_t hypervisor_type = swhv_hypervisor_acrn; ++ ++ return copy_to_user(remote_args, &hypervisor_type, ++ sizeof(hypervisor_type)); ++} ++ ++long swhv_msr_read(u32 __user *remote_in_args, u64 __user *remote_args) ++{ ++ int cpu; ++ uint64_t msr_addr = 0, value; ++ int ret = PW_SUCCESS; ++ ++ if (get_user(msr_addr, remote_in_args)) { ++ pw_pr_error( ++ "ERROR: couldn't copy remote args for read MSR IOCTL!\n"); ++ return -1; ++ } ++ ++ if (!msr_read_ops_list) { ++ msr_read_ops_list = kmalloc( ++ pcpu_num * sizeof(struct profiling_msr_ops_list), ++ GFP_KERNEL); ++ if (!msr_read_ops_list) { ++ pw_pr_error( ++ "couldn't allocate memory for doing an MSR read!\n"); ++ return -1; ++ } ++ memset(msr_read_ops_list, 0, ++ pcpu_num * sizeof(struct profiling_msr_ops_list)); ++ } ++ ++ /* ++ * The hypercall is set in such a way that the MSR read will occur on ++ * all CPUs and as a result we have to set up 
structures for each CPU. ++ */ ++ foreach_cpu(cpu, pcpu_num) ++ { ++ msr_read_ops_list[cpu].collector_id = COLLECTOR_SOCWATCH; ++ msr_read_ops_list[cpu].msr_op_state = MSR_OP_REQUESTED; ++ msr_read_ops_list[cpu].num_entries = 1; ++ msr_read_ops_list[cpu].entries[0].msr_id = msr_addr; ++ msr_read_ops_list[cpu].entries[0].msr_op_type = MSR_OP_READ; ++ msr_read_ops_list[cpu].entries[1].msr_id = ++ -1; // the next entry is expected to be set to -1 ++ msr_read_ops_list[cpu].entries[1].param = ++ 0; // set to 0 to not generate sample in hypervisor ++ } ++ ++ //hypervisor call to do immediate MSR read ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, ++ virt_to_phys(msr_read_ops_list)); ++ ++ // copy value to remote args, pick from any CPU ++ value = msr_read_ops_list[0].entries[0].value; ++ ++ if (copy_to_user(remote_args, &value, sizeof(value))) { ++ pw_pr_error("ERROR: unable to copy MSR value to userspace!\n"); ++ ret = -PW_ERROR; ++ } ++ ++ return ret; ++} ++ ++long swhv_collection_poll(void) ++{ ++ int ret = PW_SUCCESS; ++ /* ++ * Handle 'POLL' timer expirations. ++ */ ++ if (SW_LIST_EMPTY(&swhv_msr_collector)) { ++ pw_pr_debug("DEBUG: EMPTY MSR COLLECTOR POLL LIST\n"); ++ } ++ ++ if (swhv_handle_msr_collector_list()) { ++ pw_pr_error("ERROR: unable to copy MSR value to userspace!\n"); ++ ret = -PW_ERROR; ++ } ++ return ret; ++} ++ ++ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, ++ size_t bytes_to_read) ++{ ++ unsigned long bytes_not_copied; ++ ssize_t bytes_read; ++ ssize_t ret = 0; ++ void *data_read = NULL; ++ ++ if (bytes_to_read == 0) { ++ pw_pr_debug( ++ "%s - 0 bytes requested to transfer! 
Returning...\n", ++ __func__); ++ ++ return bytes_to_read; ++ } ++ ++ data_read = vmalloc(bytes_to_read); ++ if (!data_read) { ++ pw_pr_error( ++ "couldn't allocate memory when trying to transfer data to userspace!\n"); ++ return 0; ++ } ++ ++ pw_pr_debug("%s - bytes to transfer %zu\n", __func__, bytes_to_read); ++ ++ if (sbuf_to_copy) { ++ bytes_read = sbuf_get_variable(sbuf_to_copy, &data_read, ++ bytes_to_read); ++ ++ if (bytes_read != bytes_to_read) { ++ pw_pr_warn("%s - bytes read (%zu bytes) are not equal to expected bytes (%zu bytes) to be read!", __func__, bytes_read, bytes_to_read); ++ } ++ ++ if (bytes_read < 0) { ++ pw_pr_error("Error reading this buffer\n"); ++ ret = -PW_ERROR; ++ goto ret_free; ++ } ++ if (bytes_read) { ++ // copy data to device file ++ if (bytes_read > bytes_to_read) { ++ pw_pr_error("user buffer is too small\n"); ++ ret = -PW_ERROR; ++ goto ret_free; ++ } ++ ++ bytes_not_copied = copy_to_user(user_buffer, data_read, ++ bytes_read); ++ //TODO check if this is meaningful enough to have ++ //*offset += bytes_read - bytes_not_copied; ++ ++ if (bytes_not_copied) { ++ pw_pr_error( ++ "transferring data to user mode failed, bytes %ld\n", ++ bytes_not_copied); ++ // copy_to_user returns an unsigned ++ ret = -EIO; ++ goto ret_free; ++ } ++ ret = bytes_read; ++ goto ret_free; ++ } else { ++ pw_pr_debug( ++ "Buffer empty! nothing more to read from this buffer\n"); ++ } ++ } ++ ++ret_free: ++ vfree(data_read); ++ return ret; ++} ++ ++bool buffer_not_ready(int *cpu) ++{ ++ // cycle through and confirm buffers on all CPUs ++ // are less than ACRN_BUF_TRANSFER_SIZE ++ // as well as flush mode has not been requested ++ int i = 0; ++ bool not_enough_data = true; ++ ++ pw_pr_debug( ++ "checking if a buffer is ready to be copied to the device file\n"); ++ /* ++ * It's possible that the buffer from cpu0 may always have ++ * data to transfer and can potentially prevent buffers from ++ * other cpus from ever being serviced. 
++ * TODO Consider adding an optimization to check for last cpu read. ++ */ ++ for (i = 0; i < pcpu_num; ++i) { ++ if (ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[i]) >= ++ ACRN_BUF_TRANSFER_SIZE || ++ (flush_mode && ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[i]))) { ++ not_enough_data = false; ++ *cpu = i; ++ pw_pr_debug( ++ "buffer ready (flush_mode=%d) on cpu %d, waking up read queue\n", ++ flush_mode, *cpu); ++ break; ++ } ++ } ++ return not_enough_data && !flush_mode; ++} ++ ++ssize_t device_read_i(struct file *file, char __user *user_buffer, ++ size_t length, loff_t *offset) ++{ ++ ssize_t bytes_read = 0; ++ int cpu = 0; ++ ++ pw_pr_debug("%s - usermode attempting to read device file\n", __func__); ++ ++ if (wait_event_interruptible(read_queue, !buffer_not_ready(&cpu))) { ++ pw_pr_error("%s - wait_event_interruptible failed\n", __func__); ++ return -ERESTARTSYS; ++ } ++ pw_pr_debug("%s - wait_event cleared\n", __func__); ++ ++ if (flush_mode) { ++ pw_pr_debug("flush mode on, ready to flush a buffer\n"); ++ } ++ length = ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[cpu]); ++ pw_pr_debug("on cpu %d, buffer size is %zu bytes\n", cpu, length); ++ ++ bytes_read = swhv_transfer_data(user_buffer, sbuf_per_cpu[cpu], length); ++ ++ return bytes_read; ++} ++ ++void cleanup_error_i(void) ++{ ++ // NOP for acrn ++} ++ ++int swhv_load_driver_i(void) ++{ ++ int ret = PW_SUCCESS; ++ ++ if (x86_hyper_type != X86_HYPER_ACRN) { ++ pw_pr_error("Non-ACRN hypervisor not supported!\n"); ++ return -EINVAL; ++ } ++ ++ /* TODO: we could get the cpu count by querying the hypervisor later */ ++ pcpu_num = num_present_cpus(); ++ pw_pr_debug("%s, pcpu_num: %d\n", __func__, pcpu_num); ++ ++ ret = swhv_init_per_cpu_buffers(); ++ if (ret < 0) { ++ return ret; ++ } ++ ++ // initialize a work queue to be used for signalling when ++ // data is ready to copy to usermode ++ init_waitqueue_head(&read_queue); ++ ++ swhv_init_msr_collector_list(); ++ ++ return ret; ++} ++ ++void swhv_unload_driver_i(void) ++{ ++ 
swhv_destroy_per_cpu_buffers(); ++ ++ /* used by the MSR read IOCTL */ ++ kfree(msr_read_ops_list); ++} +diff --git a/drivers/platform/x86/socwatchhv/swhv_driver.c b/drivers/platform/x86/socwatchhv/swhv_driver.c +new file mode 100644 +index 000000000000..369d8a69158f +--- /dev/null ++++ b/drivers/platform/x86/socwatchhv/swhv_driver.c +@@ -0,0 +1,375 @@ ++/* ++ ++ This file is provided under a dual BSD/GPLv2 license. When using or ++ redistributing this file, you may do so under either license. ++ ++ GPL LICENSE SUMMARY ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ Contact Information: ++ SoC Watch Developer Team ++ Intel Corporation, ++ 1300 S Mopac Expwy, ++ Austin, TX 78746 ++ ++ BSD LICENSE ++ ++ Copyright(c) 2014 - 2018 Intel Corporation. ++ ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ * Neither the name of Intel Corporation nor the names of its ++ contributors may be used to endorse or promote products derived ++ from this software without specific prior written permission. 
++ ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++*/ ++#define MOD_AUTHOR "SoCWatch Team" ++#define MOD_DESC "SoCWatch kernel module to communicate with hypervisors" ++ ++#include "swhv_defines.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "swhv_driver.h" ++#include "swhv_ioctl.h" ++#include "swhv_structs.h" ++#if HYPERVISOR == MOBILEVISOR ++#include "swhv_mobilevisor.h" ++#include "swhv_mobilevisor_buffer.h" ++#elif HYPERVISOR == ACRN ++#include "swhv_acrn.h" ++#endif ++ ++/* ******************************************* ++ * Compile-time constants ++ * ******************************************* ++ */ ++/* ******************************************* ++ * Local data structures. ++ * ******************************************* ++ */ ++#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) ++#include ++/* ++ * Helper struct used to translate IOCTLs ++ * from 32b user programs in 64b kernels. 
++ */ ++struct spdrv_ioctl_arg32 { ++ pw_s32_t in_len; ++ pw_s32_t out_len; ++ compat_caddr_t in_arg; ++ compat_caddr_t out_arg; ++}; ++#endif // COMPAT && x64 ++ ++static int sp_dev_major_num = -1; ++static dev_t sp_dev; ++static struct cdev *sp_cdev; ++static struct class *sp_class; ++ ++/* ******************************************* ++ * Variables. ++ * ******************************************* ++ */ ++ ++/* Per-CPU variable containing the currently running vcpu. */ ++//static DEFINE_PER_CPU(int, curr_vcpu) = 0; ++ ++/* ******************************************* ++ * Function definitions. ++ * ******************************************* ++ */ ++ ++static long swhv_handle_cmd(u32 __user *remote_cmd) ++{ ++ u32 local_cmd = 0; ++ long status = 0; ++ ++ if (get_user(local_cmd, remote_cmd)) { ++ pw_pr_error("ERROR: couldn't copy in remote command!\n"); ++ return -1; ++ } ++ switch (local_cmd) { ++ case SWHVDRV_CMD_START: ++ pw_pr_debug("RECEIVED CMD START!\n"); ++ status = swhv_start(); ++ break; ++ case SWHVDRV_CMD_STOP: ++ pw_pr_debug("RECEIVED CMD STOP!\n"); ++ status = swhv_stop(); ++ break; ++ default: ++ pw_pr_error( ++ "ERROR: invalid command %d passed to the SoFIA driver!\n", ++ local_cmd); ++ status = -1; ++ break; ++ } ++ return status; ++}; ++ ++long swhv_get_version(u64 __user *remote_args) ++{ ++ u64 local_version = (u64)SWHVDRV_VERSION_MAJOR << 32 | ++ (u64)SWHVDRV_VERSION_MINOR << 16 | ++ (u64)SWHVDRV_VERSION_OTHER; ++ ++ return put_user(local_version, remote_args); ++}; ++ ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++#define MATCH_IOCTL(num, pred) ((num) == (pred) || (num) == (pred##32)) ++#else ++#define MATCH_IOCTL(num, pred) ((num) == (pred)) ++#endif ++ ++static long handle_ioctl(unsigned int ioctl_num, ++ struct spdrv_ioctl_arg __user *remote_args) ++{ ++ long status = 0; ++ struct spdrv_ioctl_arg local_args; ++ ++ int local_in_len, local_out_len; ++ ++ if (copy_from_user(&local_args, remote_args, sizeof(local_args))) { ++ 
pw_pr_error("ERROR: couldn't copy in remote args!\n"); ++ return -1; ++ } ++ pw_pr_debug("Invoking IOCTL!\n"); ++ ++ local_in_len = local_args.in_len; ++ local_out_len = local_args.out_len; ++ ++ switch (ioctl_num) { ++ case SWHVDRV_OPERATION_CMD: ++ status = swhv_handle_cmd((u32 __user *)local_args.in_arg); ++ break; ++ ++ case SWHVDRV_OPERATION_CONFIGURE: ++ pw_pr_debug("Trying to configure driver!\n"); ++ status = swhv_configure( ++ (struct swhv_driver_interface_msg __user *) ++ local_args.in_arg, ++ local_in_len); ++ break; ++ ++ case SWHVDRV_OPERATION_VERSION: ++ pw_pr_debug("Trying to get driver version!\n"); ++ status = swhv_get_version((u64 __user *)local_args.out_arg); ++ break; ++ ++ case SWHVDRV_OPERATION_CLOCK: ++ pw_pr_debug("Trying to get hypervisor type!\n"); ++ status = swhv_get_clock((u32 __user *)local_args.in_arg, ++ (u64 __user *)local_args.out_arg); ++ break; ++ ++ case SWHVDRV_OPERATION_TOPOLOGY: ++ pw_pr_debug("Trying to get CPU topology!\n"); ++ status = swhv_get_topology((u64 __user *)local_args.out_arg); ++ break; ++ ++ case SWHVDRV_OPERATION_CPUCOUNT: ++ pw_pr_debug("Trying to get CPU count!\n"); ++ status = swhv_get_cpu_count((u32 __user *)local_args.out_arg); ++ break; ++ ++ case SWHVDRV_OPERATION_HYPERVISOR_TYPE: ++ pw_pr_debug("Trying to get hypervisor type!\n"); ++ status = swhv_get_hypervisor_type( ++ (u32 __user *)local_args.out_arg); ++ break; ++ ++ case SWHVDRV_OPERATION_MSR_READ: ++ pw_pr_debug("Trying to do MSR read!\n"); ++ status = swhv_msr_read((u32 __user *)local_args.in_arg, ++ (u64 __user *)local_args.out_arg); ++ break; ++ case SWHVDRV_OPERATION_POLL: ++ pw_pr_debug("Polling tick!\n"); ++ status = swhv_collection_poll(); ++ break; ++ } ++ return status; ++} ++ ++static long device_unlocked_ioctl(struct file *filep, unsigned int ioctl_num, ++ unsigned long ioctl_param) ++{ ++ return handle_ioctl(_IOC_NR(ioctl_num), ++ (struct spdrv_ioctl_arg __user *)ioctl_param); ++}; ++ ++#if defined(HAVE_COMPAT_IOCTL) && 
defined(CONFIG_X86_64) ++static long device_compat_ioctl(struct file *file, unsigned int ioctl_num, ++ unsigned long ioctl_param) ++{ ++ struct spdrv_ioctl_arg32 __user *remote_args32 = ++ compat_ptr(ioctl_param); ++ struct spdrv_ioctl_arg __user *remote_args = ++ compat_alloc_user_space(sizeof(*remote_args)); ++ int tmp; ++ u32 data; ++ ++ if (!remote_args) { ++ return -1; ++ } ++ if (get_user(tmp, &remote_args32->in_len) || ++ put_user(tmp, &remote_args->in_len)) { ++ return -1; ++ } ++ if (get_user(tmp, &remote_args32->out_len) || ++ put_user(tmp, &remote_args->out_len)) { ++ return -1; ++ } ++ if (get_user(data, &remote_args32->in_arg) || ++ put_user(compat_ptr(data), &remote_args->in_arg)) { ++ return -1; ++ } ++ if (get_user(data, &remote_args32->out_arg) || ++ put_user(compat_ptr(data), &remote_args->out_arg)) { ++ return -1; ++ } ++ return handle_ioctl(_IOC_NR(ioctl_num), remote_args); ++}; ++#endif // COMPAT && x64 ++ ++static int device_open(struct inode *inode, struct file *file) ++{ ++ return device_open_i(inode, file); ++} ++ ++static ssize_t ++device_read(struct file *file, /* see include/linux/fs.h */ ++ char __user *buffer, /* buffer to be filled with data */ ++ size_t length, /* length of the buffer */ ++ loff_t *offset) ++{ ++ return device_read_i(file, buffer, length, offset); ++} ++ ++static struct file_operations s_fops = { ++ .open = &device_open, ++ .read = &device_read, ++ .unlocked_ioctl = &device_unlocked_ioctl, ++#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) ++ .compat_ioctl = &device_compat_ioctl, ++#endif // COMPAT && x64 ++}; ++ ++static void cleanup_error(void) ++{ ++ unregister_chrdev(sp_dev_major_num, SWHV_DEVICE_NAME); ++ device_destroy(sp_class, sp_dev); ++ class_destroy(sp_class); ++ unregister_chrdev_region(sp_dev, 1); ++ cdev_del(sp_cdev); ++} ++ ++int __init swhv_load_driver(void) ++{ ++ int error; ++ struct device *dev; ++ ++ // create the char device "sp" ++ alloc_chrdev_region(&sp_dev, 0, 1, SWHV_DEVICE_NAME); ++ 
sp_dev_major_num = MAJOR(sp_dev); ++ sp_class = class_create(THIS_MODULE, SWHV_DEVICE_NAME); ++ if (IS_ERR(sp_class)) { ++ error = PTR_ERR(sp_class); ++ pw_pr_error("Error registering sp class\n"); ++ goto cleanup_return_error; ++ } ++ ++ dev = device_create(sp_class, NULL, sp_dev, NULL, SWHV_DEVICE_NAME); ++ if (dev == NULL) { ++ error = PTR_ERR(dev); ++ pw_pr_error("Error during call to device_create\n"); ++ goto cleanup_return_error; ++ } ++ ++ sp_cdev = cdev_alloc(); ++ if (sp_cdev == NULL) { ++ error = -ENOMEM; ++ pw_pr_error("Error allocating character device\n"); ++ goto cleanup_return_error; ++ } ++ sp_cdev->owner = THIS_MODULE; ++ sp_cdev->ops = &s_fops; ++ if (cdev_add(sp_cdev, sp_dev, 1) < 0) { ++ error = -1; ++ pw_pr_error("Error registering device driver\n"); ++ goto cleanup_return_error; ++ } ++ ++ error = swhv_load_driver_i(); ++ if (error < 0) { ++ pw_pr_error("Error initializing device driver\n"); ++ goto cleanup_return_error; ++ } ++ ++ return 0; ++ ++cleanup_return_error: ++ cleanup_error_i(); ++ ++ // release char device ++ cleanup_error(); ++ return error; ++} ++ ++static void __exit swhv_unload_driver(void) ++{ ++ swhv_unload_driver_i(); ++ ++ // release char device ++ cleanup_error(); ++} ++ ++module_init(swhv_load_driver); ++module_exit(swhv_unload_driver); ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR(MOD_AUTHOR); ++MODULE_DESCRIPTION(MOD_DESC); +-- +2.17.1 + diff --git a/patches/0001-test-configs-use-for-clean-and-android-bare-metal-BA.patch b/patches/0001-test-configs-use-for-clean-and-android-bare-metal-BA.patch new file mode 100644 index 0000000000..6a2ebfe56b --- /dev/null +++ b/patches/0001-test-configs-use-for-clean-and-android-bare-metal-BA.patch @@ -0,0 +1,13830 @@ +From fcd94173d571a2e97945e71c5a871d3dc221072d Mon Sep 17 00:00:00 2001 +From: sys_oak +Date: Mon, 28 Oct 2019 00:46:42 -0700 +Subject: [PATCH] test configs use for clean and android bare metal BAT testing + for kernel version: v5.4-rc4 + +--- + 
arch/x86/configs/android_test_defconfig | 6752 +++++++++++++++++++++++++++++ + arch/x86/configs/test_defconfig | 7050 +++++++++++++++++++++++++++++++ + 2 files changed, 13802 insertions(+) + create mode 100644 arch/x86/configs/android_test_defconfig + create mode 100644 arch/x86/configs/test_defconfig + +diff --git a/arch/x86/configs/android_test_defconfig b/arch/x86/configs/android_test_defconfig +new file mode 100644 +index 00000000..5d1900e +--- /dev/null ++++ b/arch/x86/configs/android_test_defconfig +@@ -0,0 +1,6752 @@ ++# ++# Automatically generated file; DO NOT EDIT. ++# Linux/x86_64 5.4.0-rc4 Kernel Configuration ++# ++ ++# ++# Compiler: x86_64-poky-linux-gcc (GCC) 7.3.0 ++# ++CONFIG_CC_IS_GCC=y ++CONFIG_GCC_VERSION=70300 ++CONFIG_CLANG_VERSION=0 ++CONFIG_CC_HAS_ASM_GOTO=y ++CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y ++CONFIG_IRQ_WORK=y ++CONFIG_BUILDTIME_EXTABLE_SORT=y ++CONFIG_THREAD_INFO_IN_TASK=y ++ ++# ++# General setup ++# ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++# CONFIG_COMPILE_TEST is not set ++# CONFIG_HEADER_TEST is not set ++CONFIG_LOCALVERSION="-quilt-2e5dc0ac" ++CONFIG_LOCALVERSION_AUTO=y ++CONFIG_BUILD_SALT="" ++CONFIG_HAVE_KERNEL_GZIP=y ++CONFIG_HAVE_KERNEL_BZIP2=y ++CONFIG_HAVE_KERNEL_LZMA=y ++CONFIG_HAVE_KERNEL_XZ=y ++CONFIG_HAVE_KERNEL_LZO=y ++CONFIG_HAVE_KERNEL_LZ4=y ++CONFIG_KERNEL_GZIP=y ++# CONFIG_KERNEL_BZIP2 is not set ++# CONFIG_KERNEL_LZMA is not set ++# CONFIG_KERNEL_XZ is not set ++# CONFIG_KERNEL_LZO is not set ++# CONFIG_KERNEL_LZ4 is not set ++CONFIG_DEFAULT_HOSTNAME="localhost" ++CONFIG_SWAP=y ++# CONFIG_SYSVIPC is not set ++CONFIG_POSIX_MQUEUE=y ++CONFIG_POSIX_MQUEUE_SYSCTL=y ++CONFIG_CROSS_MEMORY_ATTACH=y ++# CONFIG_USELIB is not set ++CONFIG_AUDIT=y ++CONFIG_HAVE_ARCH_AUDITSYSCALL=y ++CONFIG_AUDITSYSCALL=y ++ ++# ++# IRQ subsystem ++# ++CONFIG_GENERIC_IRQ_PROBE=y ++CONFIG_GENERIC_IRQ_SHOW=y ++CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y ++CONFIG_GENERIC_PENDING_IRQ=y ++CONFIG_GENERIC_IRQ_MIGRATION=y ++CONFIG_IRQ_DOMAIN=y 
++CONFIG_IRQ_DOMAIN_HIERARCHY=y ++CONFIG_GENERIC_MSI_IRQ=y ++CONFIG_GENERIC_MSI_IRQ_DOMAIN=y ++CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y ++CONFIG_GENERIC_IRQ_RESERVATION_MODE=y ++CONFIG_IRQ_FORCED_THREADING=y ++CONFIG_SPARSE_IRQ=y ++# CONFIG_GENERIC_IRQ_DEBUGFS is not set ++# end of IRQ subsystem ++ ++CONFIG_CLOCKSOURCE_WATCHDOG=y ++CONFIG_ARCH_CLOCKSOURCE_DATA=y ++CONFIG_ARCH_CLOCKSOURCE_INIT=y ++CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y ++CONFIG_GENERIC_TIME_VSYSCALL=y ++CONFIG_GENERIC_CLOCKEVENTS=y ++CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y ++CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y ++CONFIG_GENERIC_CMOS_UPDATE=y ++ ++# ++# Timers subsystem ++# ++CONFIG_TICK_ONESHOT=y ++CONFIG_NO_HZ_COMMON=y ++# CONFIG_HZ_PERIODIC is not set ++CONFIG_NO_HZ_IDLE=y ++# CONFIG_NO_HZ_FULL is not set ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++# end of Timers subsystem ++ ++CONFIG_PREEMPT_NONE=y ++# CONFIG_PREEMPT_VOLUNTARY is not set ++# CONFIG_PREEMPT is not set ++CONFIG_PREEMPT_COUNT=y ++ ++# ++# CPU/Task time and stats accounting ++# ++CONFIG_TICK_CPU_ACCOUNTING=y ++# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set ++# CONFIG_IRQ_TIME_ACCOUNTING is not set ++CONFIG_BSD_PROCESS_ACCT=y ++# CONFIG_BSD_PROCESS_ACCT_V3 is not set ++CONFIG_TASKSTATS=y ++CONFIG_TASK_DELAY_ACCT=y ++CONFIG_TASK_XACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++# CONFIG_PSI is not set ++# end of CPU/Task time and stats accounting ++ ++# CONFIG_CPU_ISOLATION is not set ++ ++# ++# RCU Subsystem ++# ++CONFIG_TREE_RCU=y ++# CONFIG_RCU_EXPERT is not set ++CONFIG_SRCU=y ++CONFIG_TREE_SRCU=y ++CONFIG_RCU_STALL_COMMON=y ++CONFIG_RCU_NEED_SEGCBLIST=y ++# end of RCU Subsystem ++ ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++# CONFIG_IKHEADERS is not set ++CONFIG_LOG_BUF_SHIFT=18 ++CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 ++CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 ++CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y ++ ++# ++# Scheduler features ++# ++# end of Scheduler features ++ ++CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y ++CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y 
++CONFIG_ARCH_SUPPORTS_INT128=y ++CONFIG_CGROUPS=y ++CONFIG_PAGE_COUNTER=y ++CONFIG_MEMCG=y ++CONFIG_MEMCG_SWAP=y ++CONFIG_MEMCG_SWAP_ENABLED=y ++CONFIG_MEMCG_KMEM=y ++# CONFIG_BLK_CGROUP is not set ++CONFIG_CGROUP_SCHED=y ++CONFIG_FAIR_GROUP_SCHED=y ++# CONFIG_CFS_BANDWIDTH is not set ++CONFIG_RT_GROUP_SCHED=y ++# CONFIG_CGROUP_PIDS is not set ++# CONFIG_CGROUP_RDMA is not set ++CONFIG_CGROUP_FREEZER=y ++# CONFIG_CGROUP_HUGETLB is not set ++CONFIG_CPUSETS=y ++CONFIG_PROC_PID_CPUSET=y ++# CONFIG_CGROUP_DEVICE is not set ++CONFIG_CGROUP_CPUACCT=y ++# CONFIG_CGROUP_PERF is not set ++CONFIG_CGROUP_BPF=y ++CONFIG_CGROUP_DEBUG=y ++CONFIG_SOCK_CGROUP_DATA=y ++CONFIG_NAMESPACES=y ++CONFIG_UTS_NS=y ++CONFIG_IPC_NS=y ++# CONFIG_USER_NS is not set ++CONFIG_PID_NS=y ++CONFIG_NET_NS=y ++# CONFIG_CHECKPOINT_RESTORE is not set ++CONFIG_SCHED_AUTOGROUP=y ++# CONFIG_SYSFS_DEPRECATED is not set ++CONFIG_RELAY=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="" ++CONFIG_RD_GZIP=y ++CONFIG_RD_BZIP2=y ++CONFIG_RD_LZMA=y ++CONFIG_RD_XZ=y ++CONFIG_RD_LZO=y ++CONFIG_RD_LZ4=y ++CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_SYSCTL=y ++CONFIG_HAVE_UID16=y ++CONFIG_SYSCTL_EXCEPTION_TRACE=y ++CONFIG_HAVE_PCSPKR_PLATFORM=y ++CONFIG_BPF=y ++CONFIG_EXPERT=y ++CONFIG_UID16=y ++CONFIG_MULTIUSER=y ++# CONFIG_SGETMASK_SYSCALL is not set ++# CONFIG_SYSFS_SYSCALL is not set ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_FHANDLE is not set ++CONFIG_POSIX_TIMERS=y ++CONFIG_PRINTK=y ++CONFIG_PRINTK_NMI=y ++# CONFIG_BUG is not set ++CONFIG_ELF_CORE=y ++CONFIG_PCSPKR_PLATFORM=y ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_FUTEX_PI=y ++CONFIG_EPOLL=y ++CONFIG_SIGNALFD=y ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++CONFIG_IO_URING=y ++CONFIG_ADVISE_SYSCALLS=y ++CONFIG_MEMBARRIER=y ++CONFIG_KALLSYMS=y ++CONFIG_KALLSYMS_ALL=y ++CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y ++CONFIG_KALLSYMS_BASE_RELATIVE=y ++CONFIG_BPF_SYSCALL=y ++# 
CONFIG_USERFAULTFD is not set ++CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y ++CONFIG_RSEQ=y ++# CONFIG_DEBUG_RSEQ is not set ++CONFIG_EMBEDDED=y ++CONFIG_HAVE_PERF_EVENTS=y ++# CONFIG_PC104 is not set ++ ++# ++# Kernel Performance Events And Counters ++# ++CONFIG_PERF_EVENTS=y ++# CONFIG_DEBUG_PERF_USE_VMALLOC is not set ++# end of Kernel Performance Events And Counters ++ ++CONFIG_VM_EVENT_COUNTERS=y ++# CONFIG_COMPAT_BRK is not set ++CONFIG_SLAB=y ++# CONFIG_SLUB is not set ++# CONFIG_SLOB is not set ++CONFIG_SLAB_MERGE_DEFAULT=y ++CONFIG_SLAB_FREELIST_RANDOM=y ++# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set ++CONFIG_SYSTEM_DATA_VERIFICATION=y ++CONFIG_PROFILING=y ++CONFIG_TRACEPOINTS=y ++# end of General setup ++ ++CONFIG_64BIT=y ++CONFIG_X86_64=y ++CONFIG_X86=y ++CONFIG_INSTRUCTION_DECODER=y ++CONFIG_OUTPUT_FORMAT="elf64-x86-64" ++CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_MMU=y ++CONFIG_ARCH_MMAP_RND_BITS_MIN=28 ++CONFIG_ARCH_MMAP_RND_BITS_MAX=32 ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 ++CONFIG_GENERIC_ISA_DMA=y ++CONFIG_ARCH_MAY_HAVE_PC_FDC=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_ARCH_HAS_CPU_RELAX=y ++CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y ++CONFIG_ARCH_HAS_FILTER_PGPROT=y ++CONFIG_HAVE_SETUP_PER_CPU_AREA=y ++CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y ++CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y ++CONFIG_ARCH_HIBERNATION_POSSIBLE=y ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++CONFIG_ARCH_WANT_GENERAL_HUGETLB=y ++CONFIG_ZONE_DMA32=y ++CONFIG_AUDIT_ARCH=y ++CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y ++CONFIG_HAVE_INTEL_TXT=y ++CONFIG_X86_64_SMP=y ++CONFIG_ARCH_SUPPORTS_UPROBES=y ++CONFIG_FIX_EARLYCON_MEM=y ++CONFIG_PGTABLE_LEVELS=4 ++CONFIG_CC_HAS_SANE_STACKPROTECTOR=y ++ ++# ++# Processor type and features ++# ++CONFIG_ZONE_DMA=y ++CONFIG_SMP=y ++CONFIG_X86_FEATURE_NAMES=y ++# CONFIG_X86_X2APIC is not set ++CONFIG_X86_MPPARSE=y ++# CONFIG_GOLDFISH is not set 
++CONFIG_RETPOLINE=y ++# CONFIG_X86_CPU_RESCTRL is not set ++# CONFIG_X86_EXTENDED_PLATFORM is not set ++CONFIG_X86_INTEL_LPSS=y ++# CONFIG_X86_AMD_PLATFORM_DEVICE is not set ++CONFIG_IOSF_MBI=y ++# CONFIG_IOSF_MBI_DEBUG is not set ++CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y ++CONFIG_SCHED_OMIT_FRAME_POINTER=y ++CONFIG_HYPERVISOR_GUEST=y ++# CONFIG_PARAVIRT is not set ++# CONFIG_ARCH_CPUIDLE_HALTPOLL is not set ++# CONFIG_PVH is not set ++# CONFIG_JAILHOUSE_GUEST is not set ++# CONFIG_ACRN_GUEST is not set ++# CONFIG_MK8 is not set ++# CONFIG_MPSC is not set ++CONFIG_MCORE2=y ++# CONFIG_MATOM is not set ++# CONFIG_GENERIC_CPU is not set ++CONFIG_X86_INTERNODE_CACHE_SHIFT=6 ++CONFIG_X86_L1_CACHE_SHIFT=6 ++CONFIG_X86_INTEL_USERCOPY=y ++CONFIG_X86_USE_PPRO_CHECKSUM=y ++CONFIG_X86_P6_NOP=y ++CONFIG_X86_TSC=y ++CONFIG_X86_CMPXCHG64=y ++CONFIG_X86_CMOV=y ++CONFIG_X86_MINIMUM_CPU_FAMILY=64 ++CONFIG_X86_DEBUGCTLMSR=y ++# CONFIG_PROCESSOR_SELECT is not set ++CONFIG_CPU_SUP_INTEL=y ++CONFIG_CPU_SUP_AMD=y ++CONFIG_CPU_SUP_HYGON=y ++CONFIG_CPU_SUP_CENTAUR=y ++CONFIG_CPU_SUP_ZHAOXIN=y ++CONFIG_HPET_TIMER=y ++CONFIG_HPET_EMULATE_RTC=y ++CONFIG_DMI=y ++CONFIG_GART_IOMMU=y ++# CONFIG_CALGARY_IOMMU is not set ++# CONFIG_MAXSMP is not set ++CONFIG_NR_CPUS_RANGE_BEGIN=2 ++CONFIG_NR_CPUS_RANGE_END=512 ++CONFIG_NR_CPUS_DEFAULT=64 ++CONFIG_NR_CPUS=32 ++CONFIG_SCHED_SMT=y ++CONFIG_SCHED_MC=y ++CONFIG_SCHED_MC_PRIO=y ++CONFIG_X86_LOCAL_APIC=y ++CONFIG_X86_IO_APIC=y ++CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y ++CONFIG_X86_MCE=y ++# CONFIG_X86_MCELOG_LEGACY is not set ++CONFIG_X86_MCE_INTEL=y ++# CONFIG_X86_MCE_AMD is not set ++CONFIG_X86_MCE_THRESHOLD=y ++# CONFIG_X86_MCE_INJECT is not set ++CONFIG_X86_THERMAL_VECTOR=y ++ ++# ++# Performance monitoring ++# ++CONFIG_PERF_EVENTS_INTEL_UNCORE=y ++CONFIG_PERF_EVENTS_INTEL_RAPL=y ++CONFIG_PERF_EVENTS_INTEL_CSTATE=y ++# CONFIG_PERF_EVENTS_AMD_POWER is not set ++# end of Performance monitoring ++ ++CONFIG_X86_VSYSCALL_EMULATION=y ++CONFIG_I8K=m 
++CONFIG_MICROCODE=y ++CONFIG_MICROCODE_INTEL=y ++# CONFIG_MICROCODE_AMD is not set ++CONFIG_MICROCODE_OLD_INTERFACE=y ++# CONFIG_X86_MSR is not set ++CONFIG_X86_CPUID=y ++# CONFIG_X86_5LEVEL is not set ++CONFIG_X86_DIRECT_GBPAGES=y ++# CONFIG_X86_CPA_STATISTICS is not set ++# CONFIG_AMD_MEM_ENCRYPT is not set ++# CONFIG_NUMA is not set ++CONFIG_ARCH_SPARSEMEM_ENABLE=y ++CONFIG_ARCH_SPARSEMEM_DEFAULT=y ++CONFIG_ARCH_SELECT_MEMORY_MODEL=y ++CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 ++# CONFIG_X86_PMEM_LEGACY is not set ++CONFIG_X86_CHECK_BIOS_CORRUPTION=y ++CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y ++CONFIG_X86_RESERVE_LOW=64 ++CONFIG_MTRR=y ++CONFIG_MTRR_SANITIZER=y ++CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 ++CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 ++CONFIG_X86_PAT=y ++CONFIG_ARCH_USES_PG_UNCACHED=y ++CONFIG_ARCH_RANDOM=y ++CONFIG_X86_SMAP=y ++CONFIG_X86_INTEL_UMIP=y ++# CONFIG_X86_INTEL_MPX is not set ++CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y ++CONFIG_EFI=y ++CONFIG_EFI_STUB=y ++# CONFIG_EFI_MIXED is not set ++CONFIG_SECCOMP=y ++# CONFIG_HZ_100 is not set ++# CONFIG_HZ_250 is not set ++# CONFIG_HZ_300 is not set ++CONFIG_HZ_1000=y ++CONFIG_HZ=1000 ++CONFIG_SCHED_HRTICK=y ++# CONFIG_KEXEC is not set ++# CONFIG_KEXEC_FILE is not set ++# CONFIG_CRASH_DUMP is not set ++CONFIG_PHYSICAL_START=0x1000000 ++CONFIG_RELOCATABLE=y ++CONFIG_RANDOMIZE_BASE=y ++CONFIG_X86_NEED_RELOCS=y ++CONFIG_PHYSICAL_ALIGN=0x1000000 ++CONFIG_DYNAMIC_MEMORY_LAYOUT=y ++CONFIG_RANDOMIZE_MEMORY=y ++CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0x0 ++CONFIG_HOTPLUG_CPU=y ++# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set ++# CONFIG_DEBUG_HOTPLUG_CPU0 is not set ++# CONFIG_COMPAT_VDSO is not set ++# CONFIG_LEGACY_VSYSCALL_EMULATE is not set ++# CONFIG_LEGACY_VSYSCALL_XONLY is not set ++CONFIG_LEGACY_VSYSCALL_NONE=y ++# CONFIG_CMDLINE_BOOL is not set ++# CONFIG_MODIFY_LDT_SYSCALL is not set ++CONFIG_HAVE_LIVEPATCH=y ++# CONFIG_LIVEPATCH is not set ++# end of Processor type and features ++ 
++CONFIG_ARCH_HAS_ADD_PAGES=y ++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y ++CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y ++CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y ++CONFIG_ARCH_ENABLE_THP_MIGRATION=y ++ ++# ++# Power management and ACPI options ++# ++CONFIG_SUSPEND=y ++CONFIG_SUSPEND_FREEZER=y ++# CONFIG_SUSPEND_SKIP_SYNC is not set ++# CONFIG_HIBERNATION is not set ++CONFIG_PM_SLEEP=y ++CONFIG_PM_SLEEP_SMP=y ++CONFIG_PM_AUTOSLEEP=y ++CONFIG_PM_WAKELOCKS=y ++CONFIG_PM_WAKELOCKS_LIMIT=100 ++CONFIG_PM_WAKELOCKS_GC=y ++CONFIG_PM=y ++CONFIG_PM_DEBUG=y ++CONFIG_PM_ADVANCED_DEBUG=y ++# CONFIG_PM_TEST_SUSPEND is not set ++CONFIG_PM_SLEEP_DEBUG=y ++# CONFIG_DPM_WATCHDOG is not set ++CONFIG_PM_TRACE=y ++CONFIG_PM_TRACE_RTC=y ++CONFIG_PM_CLK=y ++CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y ++# CONFIG_ENERGY_MODEL is not set ++CONFIG_ARCH_SUPPORTS_ACPI=y ++CONFIG_ACPI=y ++CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y ++CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y ++CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y ++# CONFIG_ACPI_DEBUGGER is not set ++CONFIG_ACPI_SPCR_TABLE=y ++CONFIG_ACPI_LPIT=y ++CONFIG_ACPI_SLEEP=y ++# CONFIG_ACPI_PROCFS_POWER is not set ++CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y ++# CONFIG_ACPI_EC_DEBUGFS is not set ++CONFIG_ACPI_AC=y ++CONFIG_ACPI_BATTERY=y ++CONFIG_ACPI_BUTTON=y ++CONFIG_ACPI_VIDEO=y ++CONFIG_ACPI_FAN=y ++# CONFIG_ACPI_TAD is not set ++CONFIG_ACPI_DOCK=y ++CONFIG_ACPI_CPU_FREQ_PSS=y ++CONFIG_ACPI_PROCESSOR_CSTATE=y ++CONFIG_ACPI_PROCESSOR_IDLE=y ++CONFIG_ACPI_CPPC_LIB=y ++CONFIG_ACPI_PROCESSOR=y ++CONFIG_ACPI_HOTPLUG_CPU=y ++# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set ++CONFIG_ACPI_THERMAL=y ++CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y ++CONFIG_ACPI_TABLE_UPGRADE=y ++# CONFIG_ACPI_DEBUG is not set ++# CONFIG_ACPI_PCI_SLOT is not set ++CONFIG_ACPI_CONTAINER=y ++CONFIG_ACPI_HOTPLUG_IOAPIC=y ++# CONFIG_ACPI_SBS is not set ++# CONFIG_ACPI_HED is not set ++# CONFIG_ACPI_CUSTOM_METHOD is not set ++# CONFIG_ACPI_BGRT is not set ++# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set ++# CONFIG_ACPI_NFIT is not 
set ++CONFIG_HAVE_ACPI_APEI=y ++CONFIG_HAVE_ACPI_APEI_NMI=y ++CONFIG_ACPI_APEI=y ++# CONFIG_ACPI_APEI_GHES is not set ++# CONFIG_ACPI_APEI_PCIEAER is not set ++# CONFIG_ACPI_APEI_EINJ is not set ++# CONFIG_ACPI_APEI_ERST_DEBUG is not set ++# CONFIG_DPTF_POWER is not set ++CONFIG_PMIC_OPREGION=y ++CONFIG_CRC_PMIC_OPREGION=y ++CONFIG_BXT_WC_PMIC_OPREGION=y ++# CONFIG_ACPI_CONFIGFS is not set ++CONFIG_X86_PM_TIMER=y ++# CONFIG_SFI is not set ++ ++# ++# CPU Frequency scaling ++# ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_GOV_ATTR_SET=y ++CONFIG_CPU_FREQ_GOV_COMMON=y ++CONFIG_CPU_FREQ_STAT=y ++CONFIG_CPU_FREQ_TIMES=y ++# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set ++CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y ++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set ++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y ++CONFIG_CPU_FREQ_GOV_POWERSAVE=y ++# CONFIG_CPU_FREQ_GOV_USERSPACE is not set ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set ++# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set ++ ++# ++# CPU frequency scaling drivers ++# ++CONFIG_X86_INTEL_PSTATE=y ++CONFIG_X86_PCC_CPUFREQ=y ++CONFIG_X86_ACPI_CPUFREQ=y ++CONFIG_X86_ACPI_CPUFREQ_CPB=y ++# CONFIG_X86_POWERNOW_K8 is not set ++# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set ++# CONFIG_X86_SPEEDSTEP_CENTRINO is not set ++# CONFIG_X86_P4_CLOCKMOD is not set ++ ++# ++# shared options ++# ++# end of CPU Frequency scaling ++ ++# ++# CPU Idle ++# ++CONFIG_CPU_IDLE=y ++CONFIG_CPU_IDLE_GOV_LADDER=y ++CONFIG_CPU_IDLE_GOV_MENU=y ++# CONFIG_CPU_IDLE_GOV_TEO is not set ++# end of CPU Idle ++ ++CONFIG_INTEL_IDLE=y ++# end of Power management and ACPI options ++ ++# ++# Bus options (PCI etc.) 
++# ++CONFIG_PCI_DIRECT=y ++CONFIG_PCI_MMCONFIG=y ++CONFIG_MMCONF_FAM10H=y ++# CONFIG_PCI_CNB20LE_QUIRK is not set ++# CONFIG_ISA_BUS is not set ++CONFIG_ISA_DMA_API=y ++CONFIG_AMD_NB=y ++CONFIG_X86_SYSFB=y ++# end of Bus options (PCI etc.) ++ ++# ++# Binary Emulations ++# ++CONFIG_IA32_EMULATION=y ++# CONFIG_X86_X32 is not set ++CONFIG_COMPAT_32=y ++CONFIG_COMPAT=y ++CONFIG_COMPAT_FOR_U64_ALIGNMENT=y ++# end of Binary Emulations ++ ++# ++# Firmware Drivers ++# ++CONFIG_EDD=y ++# CONFIG_EDD_OFF is not set ++CONFIG_FIRMWARE_MEMMAP=y ++CONFIG_DMIID=y ++CONFIG_DMI_SYSFS=y ++CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y ++# CONFIG_ISCSI_IBFT is not set ++# CONFIG_FW_CFG_SYSFS is not set ++# CONFIG_GOOGLE_FIRMWARE is not set ++ ++# ++# EFI (Extensible Firmware Interface) Support ++# ++CONFIG_EFI_VARS=y ++CONFIG_EFI_ESRT=y ++# CONFIG_EFI_VARS_PSTORE is not set ++# CONFIG_EFI_FAKE_MEMMAP is not set ++CONFIG_EFI_RUNTIME_WRAPPERS=y ++CONFIG_EFI_BOOTLOADER_CONTROL=y ++# CONFIG_EFI_CAPSULE_LOADER is not set ++# CONFIG_EFI_TEST is not set ++# CONFIG_APPLE_PROPERTIES is not set ++# CONFIG_RESET_ATTACK_MITIGATION is not set ++# CONFIG_EFI_RCI2_TABLE is not set ++# end of EFI (Extensible Firmware Interface) Support ++ ++CONFIG_UEFI_CPER=y ++CONFIG_UEFI_CPER_X86=y ++CONFIG_EFI_EARLYCON=y ++ ++# ++# Tegra firmware driver ++# ++# end of Tegra firmware driver ++# end of Firmware Drivers ++ ++CONFIG_HAVE_KVM=y ++CONFIG_VIRTUALIZATION=y ++# CONFIG_KVM is not set ++# CONFIG_VHOST_NET is not set ++# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set ++ ++# ++# General architecture-dependent options ++# ++CONFIG_HOTPLUG_SMT=y ++# CONFIG_OPROFILE is not set ++CONFIG_HAVE_OPROFILE=y ++CONFIG_OPROFILE_NMI_TIMER=y ++# CONFIG_KPROBES is not set ++CONFIG_JUMP_LABEL=y ++# CONFIG_STATIC_KEYS_SELFTEST is not set ++CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y ++CONFIG_ARCH_USE_BUILTIN_BSWAP=y ++CONFIG_HAVE_IOREMAP_PROT=y ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++CONFIG_HAVE_OPTPROBES=y 
++CONFIG_HAVE_KPROBES_ON_FTRACE=y ++CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y ++CONFIG_HAVE_NMI=y ++CONFIG_HAVE_ARCH_TRACEHOOK=y ++CONFIG_HAVE_DMA_CONTIGUOUS=y ++CONFIG_GENERIC_SMP_IDLE_THREAD=y ++CONFIG_ARCH_HAS_FORTIFY_SOURCE=y ++CONFIG_ARCH_HAS_SET_MEMORY=y ++CONFIG_ARCH_HAS_SET_DIRECT_MAP=y ++CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y ++CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y ++CONFIG_HAVE_ASM_MODVERSIONS=y ++CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y ++CONFIG_HAVE_RSEQ=y ++CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y ++CONFIG_HAVE_CLK=y ++CONFIG_HAVE_HW_BREAKPOINT=y ++CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y ++CONFIG_HAVE_USER_RETURN_NOTIFIER=y ++CONFIG_HAVE_PERF_EVENTS_NMI=y ++CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y ++CONFIG_HAVE_PERF_REGS=y ++CONFIG_HAVE_PERF_USER_STACK_DUMP=y ++CONFIG_HAVE_ARCH_JUMP_LABEL=y ++CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y ++CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y ++CONFIG_HAVE_CMPXCHG_LOCAL=y ++CONFIG_HAVE_CMPXCHG_DOUBLE=y ++CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y ++CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y ++CONFIG_HAVE_ARCH_SECCOMP_FILTER=y ++CONFIG_SECCOMP_FILTER=y ++CONFIG_HAVE_ARCH_STACKLEAK=y ++CONFIG_HAVE_STACKPROTECTOR=y ++CONFIG_CC_HAS_STACKPROTECTOR_NONE=y ++CONFIG_STACKPROTECTOR=y ++CONFIG_STACKPROTECTOR_STRONG=y ++CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y ++CONFIG_HAVE_CONTEXT_TRACKING=y ++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y ++CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y ++CONFIG_HAVE_MOVE_PMD=y ++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y ++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y ++CONFIG_HAVE_ARCH_HUGE_VMAP=y ++CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y ++CONFIG_HAVE_ARCH_SOFT_DIRTY=y ++CONFIG_HAVE_MOD_ARCH_SPECIFIC=y ++CONFIG_MODULES_USE_ELF_RELA=y ++CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y ++CONFIG_ARCH_HAS_ELF_RANDOMIZE=y ++CONFIG_HAVE_ARCH_MMAP_RND_BITS=y ++CONFIG_HAVE_EXIT_THREAD=y ++CONFIG_ARCH_MMAP_RND_BITS=32 ++CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 ++CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y ++CONFIG_HAVE_COPY_THREAD_TLS=y 
++CONFIG_HAVE_STACK_VALIDATION=y ++CONFIG_HAVE_RELIABLE_STACKTRACE=y ++CONFIG_OLD_SIGSUSPEND3=y ++CONFIG_COMPAT_OLD_SIGACTION=y ++CONFIG_64BIT_TIME=y ++CONFIG_COMPAT_32BIT_TIME=y ++CONFIG_HAVE_ARCH_VMAP_STACK=y ++CONFIG_VMAP_STACK=y ++CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y ++CONFIG_STRICT_KERNEL_RWX=y ++CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y ++CONFIG_STRICT_MODULE_RWX=y ++CONFIG_ARCH_HAS_REFCOUNT=y ++CONFIG_REFCOUNT_FULL=y ++CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y ++CONFIG_ARCH_USE_MEMREMAP_PROT=y ++# CONFIG_LOCK_EVENT_COUNTS is not set ++CONFIG_ARCH_HAS_MEM_ENCRYPT=y ++ ++# ++# GCOV-based kernel profiling ++# ++# CONFIG_GCOV_KERNEL is not set ++CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y ++# end of GCOV-based kernel profiling ++ ++CONFIG_PLUGIN_HOSTCC="" ++CONFIG_HAVE_GCC_PLUGINS=y ++# end of General architecture-dependent options ++ ++CONFIG_RT_MUTEXES=y ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULE_SIG_FORMAT=y ++CONFIG_MODULES=y ++# CONFIG_MODULE_FORCE_LOAD is not set ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++CONFIG_MODVERSIONS=y ++CONFIG_ASM_MODVERSIONS=y ++# CONFIG_MODULE_SRCVERSION_ALL is not set ++CONFIG_MODULE_SIG=y ++CONFIG_MODULE_SIG_FORCE=y ++CONFIG_MODULE_SIG_ALL=y ++# CONFIG_MODULE_SIG_SHA1 is not set ++# CONFIG_MODULE_SIG_SHA224 is not set ++# CONFIG_MODULE_SIG_SHA256 is not set ++# CONFIG_MODULE_SIG_SHA384 is not set ++CONFIG_MODULE_SIG_SHA512=y ++CONFIG_MODULE_SIG_HASH="sha512" ++# CONFIG_MODULE_COMPRESS is not set ++# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set ++# CONFIG_UNUSED_SYMBOLS is not set ++# CONFIG_TRIM_UNUSED_KSYMS is not set ++CONFIG_MODULES_TREE_LOOKUP=y ++CONFIG_BLOCK=y ++CONFIG_BLK_SCSI_REQUEST=y ++CONFIG_BLK_DEV_BSG=y ++# CONFIG_BLK_DEV_BSGLIB is not set ++# CONFIG_BLK_DEV_INTEGRITY is not set ++# CONFIG_BLK_DEV_ZONED is not set ++# CONFIG_BLK_CMDLINE_PARSER is not set ++# CONFIG_BLK_WBT is not set ++CONFIG_BLK_DEBUG_FS=y ++# CONFIG_BLK_SED_OPAL is not set ++ ++# ++# Partition Types ++# ++CONFIG_PARTITION_ADVANCED=y ++# 
CONFIG_ACORN_PARTITION is not set ++# CONFIG_AIX_PARTITION is not set ++# CONFIG_OSF_PARTITION is not set ++# CONFIG_AMIGA_PARTITION is not set ++# CONFIG_ATARI_PARTITION is not set ++# CONFIG_MAC_PARTITION is not set ++CONFIG_MSDOS_PARTITION=y ++# CONFIG_BSD_DISKLABEL is not set ++# CONFIG_MINIX_SUBPARTITION is not set ++# CONFIG_SOLARIS_X86_PARTITION is not set ++# CONFIG_UNIXWARE_DISKLABEL is not set ++# CONFIG_LDM_PARTITION is not set ++# CONFIG_SGI_PARTITION is not set ++# CONFIG_ULTRIX_PARTITION is not set ++# CONFIG_SUN_PARTITION is not set ++# CONFIG_KARMA_PARTITION is not set ++CONFIG_EFI_PARTITION=y ++# CONFIG_SYSV68_PARTITION is not set ++# CONFIG_CMDLINE_PARTITION is not set ++# end of Partition Types ++ ++CONFIG_BLOCK_COMPAT=y ++CONFIG_BLK_MQ_PCI=y ++CONFIG_BLK_MQ_VIRTIO=y ++CONFIG_BLK_PM=y ++ ++# ++# IO Schedulers ++# ++CONFIG_MQ_IOSCHED_DEADLINE=y ++CONFIG_MQ_IOSCHED_KYBER=y ++# CONFIG_IOSCHED_BFQ is not set ++# end of IO Schedulers ++ ++CONFIG_ASN1=y ++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y ++CONFIG_INLINE_READ_UNLOCK=y ++CONFIG_INLINE_READ_UNLOCK_IRQ=y ++CONFIG_INLINE_WRITE_UNLOCK=y ++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y ++CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y ++CONFIG_MUTEX_SPIN_ON_OWNER=y ++CONFIG_RWSEM_SPIN_ON_OWNER=y ++CONFIG_LOCK_SPIN_ON_OWNER=y ++CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y ++CONFIG_QUEUED_SPINLOCKS=y ++CONFIG_ARCH_USE_QUEUED_RWLOCKS=y ++CONFIG_QUEUED_RWLOCKS=y ++CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y ++CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y ++CONFIG_FREEZER=y ++ ++# ++# Executable file formats ++# ++CONFIG_BINFMT_ELF=y ++CONFIG_COMPAT_BINFMT_ELF=y ++CONFIG_ELFCORE=y ++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y ++CONFIG_BINFMT_SCRIPT=y ++# CONFIG_BINFMT_MISC is not set ++CONFIG_COREDUMP=y ++# end of Executable file formats ++ ++# ++# Memory Management options ++# ++CONFIG_SELECT_MEMORY_MODEL=y ++CONFIG_SPARSEMEM_MANUAL=y ++CONFIG_SPARSEMEM=y ++CONFIG_HAVE_MEMORY_PRESENT=y ++CONFIG_SPARSEMEM_EXTREME=y ++CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y 
++CONFIG_SPARSEMEM_VMEMMAP=y ++CONFIG_HAVE_MEMBLOCK_NODE_MAP=y ++CONFIG_HAVE_FAST_GUP=y ++# CONFIG_MEMORY_HOTPLUG is not set ++CONFIG_SPLIT_PTLOCK_CPUS=4 ++CONFIG_COMPACTION=y ++CONFIG_MIGRATION=y ++CONFIG_PHYS_ADDR_T_64BIT=y ++CONFIG_BOUNCE=y ++CONFIG_VIRT_TO_BUS=y ++CONFIG_MMU_NOTIFIER=y ++CONFIG_KSM=y ++CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 ++CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y ++# CONFIG_MEMORY_FAILURE is not set ++CONFIG_TRANSPARENT_HUGEPAGE=y ++# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set ++CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y ++CONFIG_ARCH_WANTS_THP_SWAP=y ++CONFIG_THP_SWAP=y ++CONFIG_TRANSPARENT_HUGE_PAGECACHE=y ++# CONFIG_CLEANCACHE is not set ++# CONFIG_FRONTSWAP is not set ++# CONFIG_CMA is not set ++# CONFIG_ZPOOL is not set ++# CONFIG_ZBUD is not set ++CONFIG_ZSMALLOC=y ++# CONFIG_PGTABLE_MAPPING is not set ++# CONFIG_DEBUG_PANIC_ON_BAD_PAGE is not set ++# CONFIG_ZSMALLOC_STAT is not set ++CONFIG_GENERIC_EARLY_IOREMAP=y ++# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set ++# CONFIG_IDLE_PAGE_TRACKING is not set ++CONFIG_ARCH_HAS_PTE_DEVMAP=y ++CONFIG_FRAME_VECTOR=y ++CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y ++CONFIG_ARCH_HAS_PKEYS=y ++# CONFIG_PERCPU_STATS is not set ++# CONFIG_GUP_BENCHMARK is not set ++# CONFIG_READ_ONLY_THP_FOR_FS is not set ++CONFIG_ARCH_HAS_PTE_SPECIAL=y ++# end of Memory Management options ++ ++CONFIG_NET=y ++CONFIG_COMPAT_NETLINK_MESSAGES=y ++CONFIG_NET_INGRESS=y ++CONFIG_NET_EGRESS=y ++CONFIG_SKB_EXTENSIONS=y ++ ++# ++# Networking options ++# ++CONFIG_PACKET=y ++# CONFIG_PACKET_DIAG is not set ++CONFIG_UNIX=y ++CONFIG_UNIX_SCM=y ++# CONFIG_UNIX_DIAG is not set ++# CONFIG_TLS is not set ++CONFIG_XFRM=y ++CONFIG_XFRM_ALGO=y ++CONFIG_XFRM_USER=y ++CONFIG_XFRM_INTERFACE=y ++# CONFIG_XFRM_SUB_POLICY is not set ++# CONFIG_XFRM_MIGRATE is not set ++CONFIG_XFRM_STATISTICS=y ++CONFIG_XFRM_IPCOMP=y ++CONFIG_NET_KEY=y ++# CONFIG_NET_KEY_MIGRATE is not set ++# CONFIG_XDP_SOCKETS is not set ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y 
++CONFIG_IP_ADVANCED_ROUTER=y ++# CONFIG_IP_FIB_TRIE_STATS is not set ++CONFIG_IP_MULTIPLE_TABLES=y ++# CONFIG_IP_ROUTE_MULTIPATH is not set ++# CONFIG_IP_ROUTE_VERBOSE is not set ++# CONFIG_IP_PNP is not set ++# CONFIG_NET_IPIP is not set ++CONFIG_NET_IPGRE_DEMUX=y ++CONFIG_NET_IP_TUNNEL=y ++# CONFIG_NET_IPGRE is not set ++# CONFIG_IP_MROUTE is not set ++CONFIG_SYN_COOKIES=y ++CONFIG_NET_IPVTI=y ++CONFIG_NET_UDP_TUNNEL=y ++# CONFIG_NET_FOU is not set ++# CONFIG_NET_FOU_IP_TUNNELS is not set ++# CONFIG_INET_AH is not set ++CONFIG_INET_ESP=y ++# CONFIG_INET_ESP_OFFLOAD is not set ++# CONFIG_INET_IPCOMP is not set ++CONFIG_INET_TUNNEL=y ++CONFIG_INET_DIAG=y ++CONFIG_INET_TCP_DIAG=y ++CONFIG_INET_UDP_DIAG=y ++# CONFIG_INET_RAW_DIAG is not set ++CONFIG_INET_DIAG_DESTROY=y ++CONFIG_TCP_CONG_ADVANCED=y ++# CONFIG_TCP_CONG_BIC is not set ++CONFIG_TCP_CONG_CUBIC=y ++# CONFIG_TCP_CONG_WESTWOOD is not set ++# CONFIG_TCP_CONG_HTCP is not set ++# CONFIG_TCP_CONG_HSTCP is not set ++# CONFIG_TCP_CONG_HYBLA is not set ++# CONFIG_TCP_CONG_VEGAS is not set ++# CONFIG_TCP_CONG_NV is not set ++# CONFIG_TCP_CONG_SCALABLE is not set ++# CONFIG_TCP_CONG_LP is not set ++# CONFIG_TCP_CONG_VENO is not set ++# CONFIG_TCP_CONG_YEAH is not set ++# CONFIG_TCP_CONG_ILLINOIS is not set ++# CONFIG_TCP_CONG_DCTCP is not set ++# CONFIG_TCP_CONG_CDG is not set ++# CONFIG_TCP_CONG_BBR is not set ++CONFIG_DEFAULT_CUBIC=y ++# CONFIG_DEFAULT_RENO is not set ++CONFIG_DEFAULT_TCP_CONG="cubic" ++CONFIG_TCP_MD5SIG=y ++CONFIG_IPV6=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_ROUTE_INFO=y ++CONFIG_IPV6_OPTIMISTIC_DAD=y ++CONFIG_INET6_AH=y ++CONFIG_INET6_ESP=y ++# CONFIG_INET6_ESP_OFFLOAD is not set ++CONFIG_INET6_IPCOMP=y ++CONFIG_IPV6_MIP6=y ++# CONFIG_IPV6_ILA is not set ++CONFIG_INET6_XFRM_TUNNEL=y ++CONFIG_INET6_TUNNEL=y ++CONFIG_IPV6_VTI=y ++CONFIG_IPV6_SIT=y ++# CONFIG_IPV6_SIT_6RD is not set ++CONFIG_IPV6_NDISC_NODETYPE=y ++CONFIG_IPV6_TUNNEL=y ++# CONFIG_IPV6_GRE is not set 
++CONFIG_IPV6_MULTIPLE_TABLES=y ++# CONFIG_IPV6_SUBTREES is not set ++# CONFIG_IPV6_MROUTE is not set ++# CONFIG_IPV6_SEG6_LWTUNNEL is not set ++# CONFIG_IPV6_SEG6_HMAC is not set ++# CONFIG_NETLABEL is not set ++CONFIG_NETWORK_SECMARK=y ++CONFIG_NET_PTP_CLASSIFY=y ++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set ++CONFIG_NETFILTER=y ++CONFIG_NETFILTER_ADVANCED=y ++ ++# ++# Core Netfilter Configuration ++# ++CONFIG_NETFILTER_INGRESS=y ++CONFIG_NETFILTER_NETLINK=y ++CONFIG_NETFILTER_FAMILY_ARP=y ++# CONFIG_NETFILTER_NETLINK_ACCT is not set ++CONFIG_NETFILTER_NETLINK_QUEUE=y ++CONFIG_NETFILTER_NETLINK_LOG=y ++# CONFIG_NETFILTER_NETLINK_OSF is not set ++CONFIG_NF_CONNTRACK=y ++# CONFIG_NF_LOG_NETDEV is not set ++CONFIG_NETFILTER_CONNCOUNT=y ++CONFIG_NF_CONNTRACK_MARK=y ++CONFIG_NF_CONNTRACK_SECMARK=y ++# CONFIG_NF_CONNTRACK_ZONES is not set ++CONFIG_NF_CONNTRACK_PROCFS=y ++CONFIG_NF_CONNTRACK_EVENTS=y ++# CONFIG_NF_CONNTRACK_TIMEOUT is not set ++# CONFIG_NF_CONNTRACK_TIMESTAMP is not set ++# CONFIG_NF_CONNTRACK_LABELS is not set ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_GRE=y ++CONFIG_NF_CT_PROTO_SCTP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y ++CONFIG_NF_CONNTRACK_AMANDA=y ++CONFIG_NF_CONNTRACK_FTP=y ++CONFIG_NF_CONNTRACK_H323=y ++CONFIG_NF_CONNTRACK_IRC=y ++CONFIG_NF_CONNTRACK_BROADCAST=y ++CONFIG_NF_CONNTRACK_NETBIOS_NS=y ++# CONFIG_NF_CONNTRACK_SNMP is not set ++CONFIG_NF_CONNTRACK_PPTP=y ++CONFIG_NF_CONNTRACK_SANE=y ++CONFIG_NF_CONNTRACK_SIP=y ++CONFIG_NF_CONNTRACK_TFTP=y ++CONFIG_NF_CT_NETLINK=y ++# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set ++CONFIG_NF_NAT=y ++CONFIG_NF_NAT_AMANDA=y ++CONFIG_NF_NAT_FTP=y ++CONFIG_NF_NAT_IRC=y ++CONFIG_NF_NAT_SIP=y ++CONFIG_NF_NAT_TFTP=y ++CONFIG_NF_NAT_REDIRECT=y ++CONFIG_NF_NAT_MASQUERADE=y ++# CONFIG_NF_TABLES is not set ++CONFIG_NETFILTER_XTABLES=y ++ ++# ++# Xtables combined modules ++# ++CONFIG_NETFILTER_XT_MARK=y ++CONFIG_NETFILTER_XT_CONNMARK=y ++ ++# ++# Xtables targets ++# ++CONFIG_NETFILTER_XT_TARGET_AUDIT=m ++# 
CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set ++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y ++CONFIG_NETFILTER_XT_TARGET_CONNMARK=y ++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y ++CONFIG_NETFILTER_XT_TARGET_CT=y ++# CONFIG_NETFILTER_XT_TARGET_DSCP is not set ++# CONFIG_NETFILTER_XT_TARGET_HL is not set ++# CONFIG_NETFILTER_XT_TARGET_HMARK is not set ++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y ++# CONFIG_NETFILTER_XT_TARGET_LED is not set ++# CONFIG_NETFILTER_XT_TARGET_LOG is not set ++CONFIG_NETFILTER_XT_TARGET_MARK=y ++CONFIG_NETFILTER_XT_NAT=y ++CONFIG_NETFILTER_XT_TARGET_NETMAP=y ++CONFIG_NETFILTER_XT_TARGET_NFLOG=y ++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y ++# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set ++# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set ++CONFIG_NETFILTER_XT_TARGET_REDIRECT=y ++CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y ++# CONFIG_NETFILTER_XT_TARGET_TEE is not set ++CONFIG_NETFILTER_XT_TARGET_TPROXY=y ++CONFIG_NETFILTER_XT_TARGET_TRACE=y ++CONFIG_NETFILTER_XT_TARGET_SECMARK=y ++CONFIG_NETFILTER_XT_TARGET_TCPMSS=y ++# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set ++ ++# ++# Xtables matches ++# ++# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set ++CONFIG_NETFILTER_XT_MATCH_BPF=y ++# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set ++# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set ++CONFIG_NETFILTER_XT_MATCH_COMMENT=y ++# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set ++# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set ++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y ++CONFIG_NETFILTER_XT_MATCH_CONNMARK=y ++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y ++# CONFIG_NETFILTER_XT_MATCH_CPU is not set ++# CONFIG_NETFILTER_XT_MATCH_DCCP is not set ++# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set ++# CONFIG_NETFILTER_XT_MATCH_DSCP is not set ++CONFIG_NETFILTER_XT_MATCH_ECN=y ++# CONFIG_NETFILTER_XT_MATCH_ESP is not set ++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y ++CONFIG_NETFILTER_XT_MATCH_HELPER=y ++CONFIG_NETFILTER_XT_MATCH_HL=y ++# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set 
++CONFIG_NETFILTER_XT_MATCH_IPRANGE=y ++CONFIG_NETFILTER_XT_MATCH_L2TP=y ++CONFIG_NETFILTER_XT_MATCH_LENGTH=y ++CONFIG_NETFILTER_XT_MATCH_LIMIT=y ++CONFIG_NETFILTER_XT_MATCH_MAC=y ++CONFIG_NETFILTER_XT_MATCH_MARK=y ++# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set ++# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set ++# CONFIG_NETFILTER_XT_MATCH_OSF is not set ++CONFIG_NETFILTER_XT_MATCH_OWNER=y ++CONFIG_NETFILTER_XT_MATCH_POLICY=y ++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA2=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y ++# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set ++# CONFIG_NETFILTER_XT_MATCH_REALM is not set ++# CONFIG_NETFILTER_XT_MATCH_RECENT is not set ++# CONFIG_NETFILTER_XT_MATCH_SCTP is not set ++CONFIG_NETFILTER_XT_MATCH_SOCKET=y ++CONFIG_NETFILTER_XT_MATCH_STATE=y ++CONFIG_NETFILTER_XT_MATCH_STATISTIC=y ++CONFIG_NETFILTER_XT_MATCH_STRING=y ++# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set ++CONFIG_NETFILTER_XT_MATCH_TIME=y ++CONFIG_NETFILTER_XT_MATCH_U32=y ++# end of Core Netfilter Configuration ++ ++# CONFIG_IP_SET is not set ++# CONFIG_IP_VS is not set ++ ++# ++# IP: Netfilter Configuration ++# ++CONFIG_NF_DEFRAG_IPV4=y ++CONFIG_NF_SOCKET_IPV4=y ++CONFIG_NF_TPROXY_IPV4=y ++# CONFIG_NF_DUP_IPV4 is not set ++# CONFIG_NF_LOG_ARP is not set ++# CONFIG_NF_LOG_IPV4 is not set ++CONFIG_NF_REJECT_IPV4=y ++CONFIG_NF_NAT_PPTP=y ++CONFIG_NF_NAT_H323=y ++CONFIG_IP_NF_IPTABLES=y ++CONFIG_IP_NF_MATCH_AH=y ++CONFIG_IP_NF_MATCH_ECN=y ++# CONFIG_IP_NF_MATCH_RPFILTER is not set ++CONFIG_IP_NF_MATCH_TTL=y ++CONFIG_IP_NF_FILTER=y ++CONFIG_IP_NF_TARGET_REJECT=y ++# CONFIG_IP_NF_TARGET_SYNPROXY is not set ++CONFIG_IP_NF_NAT=y ++CONFIG_IP_NF_TARGET_MASQUERADE=y ++CONFIG_IP_NF_TARGET_NETMAP=y ++CONFIG_IP_NF_TARGET_REDIRECT=y ++CONFIG_IP_NF_MANGLE=y ++# CONFIG_IP_NF_TARGET_CLUSTERIP is not set ++# CONFIG_IP_NF_TARGET_ECN is not set ++# CONFIG_IP_NF_TARGET_TTL is not set ++CONFIG_IP_NF_RAW=y ++CONFIG_IP_NF_SECURITY=y 
++CONFIG_IP_NF_ARPTABLES=y ++CONFIG_IP_NF_ARPFILTER=y ++CONFIG_IP_NF_ARP_MANGLE=y ++# end of IP: Netfilter Configuration ++ ++# ++# IPv6: Netfilter Configuration ++# ++CONFIG_NF_SOCKET_IPV6=y ++CONFIG_NF_TPROXY_IPV6=y ++# CONFIG_NF_DUP_IPV6 is not set ++CONFIG_NF_REJECT_IPV6=y ++# CONFIG_NF_LOG_IPV6 is not set ++CONFIG_IP6_NF_IPTABLES=y ++# CONFIG_IP6_NF_MATCH_AH is not set ++# CONFIG_IP6_NF_MATCH_EUI64 is not set ++# CONFIG_IP6_NF_MATCH_FRAG is not set ++# CONFIG_IP6_NF_MATCH_OPTS is not set ++# CONFIG_IP6_NF_MATCH_HL is not set ++CONFIG_IP6_NF_MATCH_IPV6HEADER=y ++# CONFIG_IP6_NF_MATCH_MH is not set ++CONFIG_IP6_NF_MATCH_RPFILTER=y ++# CONFIG_IP6_NF_MATCH_RT is not set ++# CONFIG_IP6_NF_MATCH_SRH is not set ++# CONFIG_IP6_NF_TARGET_HL is not set ++CONFIG_IP6_NF_FILTER=y ++CONFIG_IP6_NF_TARGET_REJECT=y ++# CONFIG_IP6_NF_TARGET_SYNPROXY is not set ++CONFIG_IP6_NF_MANGLE=y ++CONFIG_IP6_NF_RAW=y ++# CONFIG_IP6_NF_SECURITY is not set ++CONFIG_IP6_NF_NAT=y ++CONFIG_IP6_NF_TARGET_MASQUERADE=y ++# CONFIG_IP6_NF_TARGET_NPT is not set ++# end of IPv6: Netfilter Configuration ++ ++CONFIG_NF_DEFRAG_IPV6=y ++# CONFIG_NF_CONNTRACK_BRIDGE is not set ++# CONFIG_BPFILTER is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_RDS is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++CONFIG_L2TP=y ++# CONFIG_L2TP_DEBUGFS is not set ++# CONFIG_L2TP_V3 is not set ++# CONFIG_BRIDGE is not set ++CONFIG_HAVE_NET_DSA=y ++# CONFIG_NET_DSA is not set ++CONFIG_VLAN_8021Q=y ++# CONFIG_VLAN_8021Q_GVRP is not set ++# CONFIG_VLAN_8021Q_MVRP is not set ++# CONFIG_DECNET is not set ++# CONFIG_LLC2 is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_PHONET is not set ++# CONFIG_6LOWPAN is not set ++# CONFIG_IEEE802154 is not set ++CONFIG_NET_SCHED=y ++ ++# ++# Queueing/Scheduling ++# ++# CONFIG_NET_SCH_CBQ is not set ++CONFIG_NET_SCH_HTB=y ++# CONFIG_NET_SCH_HFSC is not set ++# CONFIG_NET_SCH_PRIO is not set 
++# CONFIG_NET_SCH_MULTIQ is not set ++# CONFIG_NET_SCH_RED is not set ++# CONFIG_NET_SCH_SFB is not set ++# CONFIG_NET_SCH_SFQ is not set ++# CONFIG_NET_SCH_TEQL is not set ++# CONFIG_NET_SCH_TBF is not set ++# CONFIG_NET_SCH_CBS is not set ++CONFIG_NET_SCH_ETF=y ++# CONFIG_NET_SCH_TAPRIO is not set ++# CONFIG_NET_SCH_GRED is not set ++# CONFIG_NET_SCH_DSMARK is not set ++# CONFIG_NET_SCH_NETEM is not set ++# CONFIG_NET_SCH_DRR is not set ++CONFIG_NET_SCH_MQPRIO=y ++# CONFIG_NET_SCH_SKBPRIO is not set ++# CONFIG_NET_SCH_CHOKE is not set ++# CONFIG_NET_SCH_QFQ is not set ++# CONFIG_NET_SCH_CODEL is not set ++# CONFIG_NET_SCH_FQ_CODEL is not set ++# CONFIG_NET_SCH_CAKE is not set ++CONFIG_NET_SCH_FQ=y ++# CONFIG_NET_SCH_HHF is not set ++# CONFIG_NET_SCH_PIE is not set ++CONFIG_NET_SCH_INGRESS=y ++# CONFIG_NET_SCH_PLUG is not set ++# CONFIG_NET_SCH_DEFAULT is not set ++ ++# ++# Classification ++# ++CONFIG_NET_CLS=y ++# CONFIG_NET_CLS_BASIC is not set ++# CONFIG_NET_CLS_TCINDEX is not set ++# CONFIG_NET_CLS_ROUTE4 is not set ++# CONFIG_NET_CLS_FW is not set ++CONFIG_NET_CLS_U32=y ++# CONFIG_CLS_U32_PERF is not set ++# CONFIG_CLS_U32_MARK is not set ++# CONFIG_NET_CLS_RSVP is not set ++# CONFIG_NET_CLS_RSVP6 is not set ++# CONFIG_NET_CLS_FLOW is not set ++# CONFIG_NET_CLS_CGROUP is not set ++CONFIG_NET_CLS_BPF=y ++# CONFIG_NET_CLS_FLOWER is not set ++# CONFIG_NET_CLS_MATCHALL is not set ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_STACK=32 ++# CONFIG_NET_EMATCH_CMP is not set ++# CONFIG_NET_EMATCH_NBYTE is not set ++CONFIG_NET_EMATCH_U32=y ++# CONFIG_NET_EMATCH_META is not set ++# CONFIG_NET_EMATCH_TEXT is not set ++# CONFIG_NET_EMATCH_CANID is not set ++# CONFIG_NET_EMATCH_IPT is not set ++CONFIG_NET_CLS_ACT=y ++# CONFIG_NET_ACT_POLICE is not set ++# CONFIG_NET_ACT_GACT is not set ++# CONFIG_NET_ACT_MIRRED is not set ++# CONFIG_NET_ACT_SAMPLE is not set ++# CONFIG_NET_ACT_IPT is not set ++# CONFIG_NET_ACT_NAT is not set ++# CONFIG_NET_ACT_PEDIT is not set ++# 
CONFIG_NET_ACT_SIMP is not set ++# CONFIG_NET_ACT_SKBEDIT is not set ++# CONFIG_NET_ACT_CSUM is not set ++# CONFIG_NET_ACT_MPLS is not set ++# CONFIG_NET_ACT_VLAN is not set ++# CONFIG_NET_ACT_BPF is not set ++# CONFIG_NET_ACT_CONNMARK is not set ++# CONFIG_NET_ACT_CTINFO is not set ++# CONFIG_NET_ACT_SKBMOD is not set ++# CONFIG_NET_ACT_IFE is not set ++# CONFIG_NET_ACT_TUNNEL_KEY is not set ++# CONFIG_NET_ACT_CT is not set ++# CONFIG_NET_TC_SKB_EXT is not set ++CONFIG_NET_SCH_FIFO=y ++# CONFIG_DCB is not set ++CONFIG_DNS_RESOLVER=y ++# CONFIG_BATMAN_ADV is not set ++# CONFIG_OPENVSWITCH is not set ++# CONFIG_VSOCKETS is not set ++# CONFIG_NETLINK_DIAG is not set ++# CONFIG_MPLS is not set ++# CONFIG_NET_NSH is not set ++# CONFIG_HSR is not set ++# CONFIG_NET_SWITCHDEV is not set ++# CONFIG_NET_L3_MASTER_DEV is not set ++# CONFIG_NET_NCSI is not set ++CONFIG_RPS=y ++CONFIG_RFS_ACCEL=y ++CONFIG_XPS=y ++# CONFIG_CGROUP_NET_PRIO is not set ++# CONFIG_CGROUP_NET_CLASSID is not set ++CONFIG_NET_RX_BUSY_POLL=y ++CONFIG_BQL=y ++# CONFIG_BPF_JIT is not set ++# CONFIG_BPF_STREAM_PARSER is not set ++CONFIG_NET_FLOW_LIMIT=y ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# CONFIG_NET_DROP_MONITOR is not set ++# end of Network testing ++# end of Networking options ++ ++# CONFIG_HAMRADIO is not set ++CONFIG_CAN=y ++CONFIG_CAN_RAW=y ++CONFIG_CAN_BCM=y ++CONFIG_CAN_GW=y ++# CONFIG_CAN_J1939 is not set ++ ++# ++# CAN Device Drivers ++# ++# CONFIG_CAN_VCAN is not set ++# CONFIG_CAN_VXCAN is not set ++CONFIG_CAN_SLCAN=y ++CONFIG_CAN_DEV=y ++CONFIG_CAN_CALC_BITTIMING=y ++# CONFIG_CAN_KVASER_PCIEFD is not set ++# CONFIG_CAN_C_CAN is not set ++# CONFIG_CAN_CC770 is not set ++# CONFIG_CAN_IFI_CANFD is not set ++# CONFIG_CAN_M_CAN is not set ++# CONFIG_CAN_PEAK_PCIEFD is not set ++# CONFIG_CAN_SJA1000 is not set ++# CONFIG_CAN_SOFTING is not set ++ ++# ++# CAN SPI interfaces ++# ++# CONFIG_CAN_HI311X is not set ++# CONFIG_CAN_MCP251X is not set ++# end of CAN SPI 
interfaces ++ ++# ++# CAN USB interfaces ++# ++CONFIG_CAN_8DEV_USB=y ++# CONFIG_CAN_EMS_USB is not set ++# CONFIG_CAN_ESD_USB2 is not set ++# CONFIG_CAN_GS_USB is not set ++# CONFIG_CAN_KVASER_USB is not set ++# CONFIG_CAN_MCBA_USB is not set ++# CONFIG_CAN_PEAK_USB is not set ++# CONFIG_CAN_UCAN is not set ++# end of CAN USB interfaces ++ ++# CONFIG_CAN_DEBUG_DEVICES is not set ++# end of CAN Device Drivers ++ ++CONFIG_BT=m ++CONFIG_BT_BREDR=y ++CONFIG_BT_RFCOMM=m ++# CONFIG_BT_RFCOMM_TTY is not set ++CONFIG_BT_BNEP=m ++# CONFIG_BT_BNEP_MC_FILTER is not set ++# CONFIG_BT_BNEP_PROTO_FILTER is not set ++CONFIG_BT_HIDP=m ++CONFIG_BT_HS=y ++CONFIG_BT_LE=y ++# CONFIG_BT_LEDS is not set ++# CONFIG_BT_SELFTEST is not set ++CONFIG_BT_DEBUGFS=y ++ ++# ++# Bluetooth device drivers ++# ++CONFIG_BT_INTEL=m ++CONFIG_BT_BCM=m ++CONFIG_BT_RTL=m ++CONFIG_BT_HCIBTUSB=m ++# CONFIG_BT_HCIBTUSB_AUTOSUSPEND is not set ++CONFIG_BT_HCIBTUSB_BCM=y ++# CONFIG_BT_HCIBTUSB_MTK is not set ++CONFIG_BT_HCIBTUSB_RTL=y ++# CONFIG_BT_HCIBTSDIO is not set ++CONFIG_BT_HCIUART=m ++CONFIG_BT_HCIUART_H4=y ++# CONFIG_BT_HCIUART_BCSP is not set ++# CONFIG_BT_HCIUART_ATH3K is not set ++# CONFIG_BT_HCIUART_INTEL is not set ++# CONFIG_BT_HCIUART_AG6XX is not set ++# CONFIG_BT_HCIBCM203X is not set ++# CONFIG_BT_HCIBPA10X is not set ++# CONFIG_BT_HCIBFUSB is not set ++# CONFIG_BT_HCIVHCI is not set ++# CONFIG_BT_MRVL is not set ++CONFIG_BT_ATH3K=m ++# CONFIG_BT_MTKSDIO is not set ++# end of Bluetooth device drivers ++ ++# CONFIG_AF_RXRPC is not set ++# CONFIG_AF_KCM is not set ++CONFIG_FIB_RULES=y ++CONFIG_WIRELESS=y ++CONFIG_WIRELESS_EXT=y ++CONFIG_WEXT_CORE=y ++CONFIG_WEXT_PROC=y ++CONFIG_WEXT_PRIV=y ++CONFIG_CFG80211=m ++# CONFIG_NL80211_TESTMODE is not set ++# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set ++# CONFIG_CFG80211_CERTIFICATION_ONUS is not set ++CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y ++CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y ++CONFIG_CFG80211_DEFAULT_PS=y ++# CONFIG_CFG80211_DEBUGFS is not 
set ++CONFIG_CFG80211_CRDA_SUPPORT=y ++# CONFIG_CFG80211_WEXT is not set ++CONFIG_LIB80211=m ++# CONFIG_LIB80211_DEBUG is not set ++CONFIG_MAC80211=m ++CONFIG_MAC80211_HAS_RC=y ++CONFIG_MAC80211_RC_MINSTREL=y ++CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y ++CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" ++# CONFIG_MAC80211_MESH is not set ++CONFIG_MAC80211_LEDS=y ++# CONFIG_MAC80211_DEBUGFS is not set ++# CONFIG_MAC80211_MESSAGE_TRACING is not set ++# CONFIG_MAC80211_DEBUG_MENU is not set ++CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 ++# CONFIG_WIMAX is not set ++CONFIG_RFKILL=y ++CONFIG_RFKILL_LEDS=y ++CONFIG_RFKILL_INPUT=y ++CONFIG_RFKILL_GPIO=m ++# CONFIG_NET_9P is not set ++# CONFIG_CAIF is not set ++# CONFIG_CEPH_LIB is not set ++# CONFIG_NFC is not set ++# CONFIG_PSAMPLE is not set ++# CONFIG_NET_IFE is not set ++# CONFIG_LWTUNNEL is not set ++CONFIG_DST_CACHE=y ++CONFIG_GRO_CELLS=y ++# CONFIG_FAILOVER is not set ++CONFIG_HAVE_EBPF_JIT=y ++ ++# ++# Device Drivers ++# ++CONFIG_HAVE_EISA=y ++# CONFIG_EISA is not set ++CONFIG_HAVE_PCI=y ++CONFIG_PCI=y ++CONFIG_PCI_DOMAINS=y ++CONFIG_PCIEPORTBUS=y ++CONFIG_HOTPLUG_PCI_PCIE=y ++CONFIG_PCIEAER=y ++# CONFIG_PCIEAER_INJECT is not set ++# CONFIG_PCIE_ECRC is not set ++CONFIG_PCIEASPM=y ++# CONFIG_PCIEASPM_DEBUG is not set ++# CONFIG_PCIEASPM_DEFAULT is not set ++CONFIG_PCIEASPM_POWERSAVE=y ++# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set ++# CONFIG_PCIEASPM_PERFORMANCE is not set ++CONFIG_PCIE_PME=y ++# CONFIG_PCIE_DPC is not set ++# CONFIG_PCIE_PTM is not set ++# CONFIG_PCIE_BW is not set ++CONFIG_PCI_MSI=y ++CONFIG_PCI_MSI_IRQ_DOMAIN=y ++CONFIG_PCI_QUIRKS=y ++# CONFIG_PCI_DEBUG is not set ++# CONFIG_PCI_STUB is not set ++CONFIG_PCI_LOCKLESS_CONFIG=y ++# CONFIG_PCI_IOV is not set ++# CONFIG_PCI_PRI is not set ++# CONFIG_PCI_PASID is not set ++CONFIG_PCI_LABEL=y ++CONFIG_HOTPLUG_PCI=y ++CONFIG_HOTPLUG_PCI_ACPI=y ++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set ++# CONFIG_HOTPLUG_PCI_CPCI is not set ++# CONFIG_HOTPLUG_PCI_SHPC is not set ++ ++# 
++# PCI controller drivers ++# ++ ++# ++# Cadence PCIe controllers support ++# ++# end of Cadence PCIe controllers support ++ ++# CONFIG_VMD is not set ++ ++# ++# DesignWare PCI Core Support ++# ++# CONFIG_PCIE_DW_PLAT_HOST is not set ++# CONFIG_PCI_MESON is not set ++# end of DesignWare PCI Core Support ++# end of PCI controller drivers ++ ++# ++# PCI Endpoint ++# ++# CONFIG_PCI_ENDPOINT is not set ++# end of PCI Endpoint ++ ++# ++# PCI switch controller drivers ++# ++# CONFIG_PCI_SW_SWITCHTEC is not set ++# end of PCI switch controller drivers ++ ++# CONFIG_PCCARD is not set ++# CONFIG_RAPIDIO is not set ++ ++# ++# Generic Driver Options ++# ++CONFIG_UEVENT_HELPER=y ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_DEVTMPFS is not set ++CONFIG_STANDALONE=y ++CONFIG_PREVENT_FIRMWARE_BUILD=y ++ ++# ++# Firmware loader ++# ++CONFIG_FW_LOADER=y ++CONFIG_FW_LOADER_PAGED_BUF=y ++CONFIG_EXTRA_FIRMWARE="${EXTRA_FW}" ++CONFIG_EXTRA_FIRMWARE_DIR="${EXTRA_FW_DIR}" ++CONFIG_FW_LOADER_USER_HELPER=y ++CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y ++# CONFIG_FW_LOADER_COMPRESS is not set ++# end of Firmware loader ++ ++CONFIG_ALLOW_DEV_COREDUMP=y ++# CONFIG_DEBUG_DRIVER is not set ++CONFIG_DEBUG_DEVRES=y ++# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set ++# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set ++CONFIG_GENERIC_CPU_AUTOPROBE=y ++CONFIG_GENERIC_CPU_VULNERABILITIES=y ++CONFIG_REGMAP=y ++CONFIG_REGMAP_I2C=y ++CONFIG_REGMAP_SPI=y ++CONFIG_REGMAP_IRQ=y ++CONFIG_DMA_SHARED_BUFFER=y ++# CONFIG_DMA_FENCE_TRACE is not set ++# end of Generic Driver Options ++ ++# ++# Bus devices ++# ++# end of Bus devices ++ ++CONFIG_CONNECTOR=y ++CONFIG_PROC_EVENTS=y ++# CONFIG_GNSS is not set ++# CONFIG_MTD is not set ++# CONFIG_OF is not set ++CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y ++# CONFIG_PARPORT is not set ++CONFIG_PNP=y ++# CONFIG_PNP_DEBUG_MESSAGES is not set ++ ++# ++# Protocols ++# ++CONFIG_PNPACPI=y ++CONFIG_BLK_DEV=y ++# CONFIG_BLK_DEV_NULL_BLK is not set ++# CONFIG_BLK_DEV_FD is not set 
++CONFIG_CDROM=y ++# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set ++CONFIG_ZRAM=y ++# CONFIG_ZRAM_WRITEBACK is not set ++# CONFIG_ZRAM_MEMORY_TRACKING is not set ++# CONFIG_BLK_DEV_UMEM is not set ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 ++# CONFIG_BLK_DEV_CRYPTOLOOP is not set ++# CONFIG_BLK_DEV_DRBD is not set ++# CONFIG_BLK_DEV_NBD is not set ++# CONFIG_BLK_DEV_SKD is not set ++# CONFIG_BLK_DEV_SX8 is not set ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_COUNT=16 ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++# CONFIG_CDROM_PKTCDVD is not set ++# CONFIG_ATA_OVER_ETH is not set ++# CONFIG_VIRTIO_BLK is not set ++# CONFIG_BLK_DEV_RBD is not set ++# CONFIG_BLK_DEV_RSXX is not set ++ ++# ++# NVME Support ++# ++# CONFIG_BLK_DEV_NVME is not set ++# CONFIG_NVME_FC is not set ++# CONFIG_NVME_TARGET is not set ++# end of NVME Support ++ ++# ++# Misc devices ++# ++# CONFIG_AD525X_DPOT is not set ++# CONFIG_DUMMY_IRQ is not set ++# CONFIG_IBM_ASM is not set ++# CONFIG_PHANTOM is not set ++# CONFIG_TIFM_CORE is not set ++# CONFIG_ICS932S401 is not set ++# CONFIG_ENCLOSURE_SERVICES is not set ++# CONFIG_HP_ILO is not set ++# CONFIG_APDS9802ALS is not set ++# CONFIG_ISL29003 is not set ++# CONFIG_ISL29020 is not set ++# CONFIG_SENSORS_TSL2550 is not set ++# CONFIG_SENSORS_BH1770 is not set ++# CONFIG_SENSORS_APDS990X is not set ++# CONFIG_HMC6352 is not set ++# CONFIG_DS1682 is not set ++# CONFIG_LATTICE_ECP3_CONFIG is not set ++# CONFIG_SRAM is not set ++# CONFIG_PCI_ENDPOINT_TEST is not set ++# CONFIG_XILINX_SDFEC is not set ++# CONFIG_PVPANIC is not set ++CONFIG_UID_SYS_STATS=y ++# CONFIG_UID_SYS_STATS_DEBUG is not set ++# CONFIG_C2PORT is not set ++ ++# ++# EEPROM support ++# ++# CONFIG_EEPROM_AT24 is not set ++# CONFIG_EEPROM_AT25 is not set ++# CONFIG_EEPROM_LEGACY is not set ++# CONFIG_EEPROM_MAX6875 is not set ++CONFIG_EEPROM_93CX6=m ++# CONFIG_EEPROM_93XX46 is not set ++# CONFIG_EEPROM_IDT_89HPESX is not set ++# CONFIG_EEPROM_EE1004 is not set ++# end of EEPROM 
support ++ ++# CONFIG_CB710_CORE is not set ++ ++# ++# Texas Instruments shared transport line discipline ++# ++# CONFIG_TI_ST is not set ++# end of Texas Instruments shared transport line discipline ++ ++# CONFIG_SENSORS_LIS3_I2C is not set ++# CONFIG_ALTERA_STAPL is not set ++CONFIG_INTEL_MEI=m ++CONFIG_INTEL_MEI_ME=m ++CONFIG_INTEL_MEI_TXE=m ++# CONFIG_INTEL_MEI_HDCP is not set ++# CONFIG_INTEL_MEI_SPD is not set ++# CONFIG_INTEL_MEI_DAL is not set ++# CONFIG_VMWARE_VMCI is not set ++ ++# ++# Intel MIC & related support ++# ++ ++# ++# Intel MIC Bus Driver ++# ++# CONFIG_INTEL_MIC_BUS is not set ++ ++# ++# SCIF Bus Driver ++# ++# CONFIG_SCIF_BUS is not set ++ ++# ++# VOP Bus Driver ++# ++# CONFIG_VOP_BUS is not set ++ ++# ++# Intel MIC Host Driver ++# ++ ++# ++# Intel MIC Card Driver ++# ++ ++# ++# SCIF Driver ++# ++ ++# ++# Intel MIC Coprocessor State Management (COSM) Drivers ++# ++ ++# ++# VOP Driver ++# ++# end of Intel MIC & related support ++ ++# CONFIG_GENWQE is not set ++# CONFIG_ECHO is not set ++# CONFIG_MISC_ALCOR_PCI is not set ++# CONFIG_MISC_RTSX_PCI is not set ++# CONFIG_MISC_RTSX_USB is not set ++# CONFIG_HABANA_AI is not set ++# end of Misc devices ++ ++CONFIG_HAVE_IDE=y ++# CONFIG_IDE is not set ++ ++# ++# SCSI device support ++# ++CONFIG_SCSI_MOD=y ++# CONFIG_RAID_ATTRS is not set ++CONFIG_SCSI=y ++CONFIG_SCSI_DMA=y ++CONFIG_SCSI_PROC_FS=y ++ ++# ++# SCSI support type (disk, tape, CD-ROM) ++# ++CONFIG_BLK_DEV_SD=y ++# CONFIG_CHR_DEV_ST is not set ++CONFIG_BLK_DEV_SR=y ++CONFIG_BLK_DEV_SR_VENDOR=y ++CONFIG_CHR_DEV_SG=y ++# CONFIG_CHR_DEV_SCH is not set ++CONFIG_SCSI_CONSTANTS=y ++# CONFIG_SCSI_LOGGING is not set ++# CONFIG_SCSI_SCAN_ASYNC is not set ++ ++# ++# SCSI Transports ++# ++CONFIG_SCSI_SPI_ATTRS=y ++# CONFIG_SCSI_FC_ATTRS is not set ++# CONFIG_SCSI_ISCSI_ATTRS is not set ++# CONFIG_SCSI_SAS_ATTRS is not set ++# CONFIG_SCSI_SAS_LIBSAS is not set ++# CONFIG_SCSI_SRP_ATTRS is not set ++# end of SCSI Transports ++ ++CONFIG_SCSI_LOWLEVEL=y 
++# CONFIG_ISCSI_TCP is not set ++# CONFIG_ISCSI_BOOT_SYSFS is not set ++# CONFIG_SCSI_CXGB3_ISCSI is not set ++# CONFIG_SCSI_CXGB4_ISCSI is not set ++# CONFIG_SCSI_BNX2_ISCSI is not set ++# CONFIG_BE2ISCSI is not set ++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set ++# CONFIG_SCSI_HPSA is not set ++# CONFIG_SCSI_3W_9XXX is not set ++# CONFIG_SCSI_3W_SAS is not set ++# CONFIG_SCSI_ACARD is not set ++# CONFIG_SCSI_AACRAID is not set ++# CONFIG_SCSI_AIC7XXX is not set ++# CONFIG_SCSI_AIC79XX is not set ++# CONFIG_SCSI_AIC94XX is not set ++# CONFIG_SCSI_MVSAS is not set ++# CONFIG_SCSI_MVUMI is not set ++# CONFIG_SCSI_DPT_I2O is not set ++# CONFIG_SCSI_ADVANSYS is not set ++# CONFIG_SCSI_ARCMSR is not set ++# CONFIG_SCSI_ESAS2R is not set ++# CONFIG_MEGARAID_NEWGEN is not set ++# CONFIG_MEGARAID_LEGACY is not set ++# CONFIG_MEGARAID_SAS is not set ++# CONFIG_SCSI_MPT3SAS is not set ++# CONFIG_SCSI_MPT2SAS is not set ++# CONFIG_SCSI_SMARTPQI is not set ++CONFIG_SCSI_UFSHCD=y ++CONFIG_SCSI_UFSHCD_PCI=y ++# CONFIG_SCSI_UFS_DWC_TC_PCI is not set ++# CONFIG_SCSI_UFSHCD_PLATFORM is not set ++# CONFIG_SCSI_UFS_BSG is not set ++# CONFIG_SCSI_HPTIOP is not set ++# CONFIG_SCSI_BUSLOGIC is not set ++# CONFIG_SCSI_MYRB is not set ++# CONFIG_SCSI_MYRS is not set ++# CONFIG_VMWARE_PVSCSI is not set ++# CONFIG_SCSI_SNIC is not set ++# CONFIG_SCSI_DMX3191D is not set ++# CONFIG_SCSI_FDOMAIN_PCI is not set ++# CONFIG_SCSI_GDTH is not set ++# CONFIG_SCSI_ISCI is not set ++# CONFIG_SCSI_IPS is not set ++# CONFIG_SCSI_INITIO is not set ++# CONFIG_SCSI_INIA100 is not set ++# CONFIG_SCSI_STEX is not set ++# CONFIG_SCSI_SYM53C8XX_2 is not set ++# CONFIG_SCSI_IPR is not set ++# CONFIG_SCSI_QLOGIC_1280 is not set ++# CONFIG_SCSI_QLA_ISCSI is not set ++# CONFIG_SCSI_DC395x is not set ++# CONFIG_SCSI_AM53C974 is not set ++# CONFIG_SCSI_WD719X is not set ++# CONFIG_SCSI_DEBUG is not set ++# CONFIG_SCSI_PMCRAID is not set ++# CONFIG_SCSI_PM8001 is not set ++# CONFIG_SCSI_VIRTIO is not set ++# 
CONFIG_SCSI_DH is not set ++# end of SCSI device support ++ ++CONFIG_ATA=y ++CONFIG_ATA_VERBOSE_ERROR=y ++CONFIG_ATA_ACPI=y ++CONFIG_SATA_ZPODD=y ++CONFIG_SATA_PMP=y ++ ++# ++# Controllers with non-SFF native interface ++# ++CONFIG_SATA_AHCI=y ++CONFIG_SATA_MOBILE_LPM_POLICY=0 ++CONFIG_SATA_AHCI_PLATFORM=y ++# CONFIG_SATA_INIC162X is not set ++# CONFIG_SATA_ACARD_AHCI is not set ++# CONFIG_SATA_SIL24 is not set ++CONFIG_ATA_SFF=y ++ ++# ++# SFF controllers with custom DMA interface ++# ++# CONFIG_PDC_ADMA is not set ++# CONFIG_SATA_QSTOR is not set ++# CONFIG_SATA_SX4 is not set ++CONFIG_ATA_BMDMA=y ++ ++# ++# SATA SFF controllers with BMDMA ++# ++CONFIG_ATA_PIIX=y ++# CONFIG_SATA_DWC is not set ++# CONFIG_SATA_MV is not set ++# CONFIG_SATA_NV is not set ++# CONFIG_SATA_PROMISE is not set ++# CONFIG_SATA_SIL is not set ++# CONFIG_SATA_SIS is not set ++# CONFIG_SATA_SVW is not set ++# CONFIG_SATA_ULI is not set ++# CONFIG_SATA_VIA is not set ++# CONFIG_SATA_VITESSE is not set ++ ++# ++# PATA SFF controllers with BMDMA ++# ++# CONFIG_PATA_ALI is not set ++CONFIG_PATA_AMD=y ++# CONFIG_PATA_ARTOP is not set ++# CONFIG_PATA_ATIIXP is not set ++# CONFIG_PATA_ATP867X is not set ++# CONFIG_PATA_CMD64X is not set ++# CONFIG_PATA_CYPRESS is not set ++# CONFIG_PATA_EFAR is not set ++# CONFIG_PATA_HPT366 is not set ++# CONFIG_PATA_HPT37X is not set ++# CONFIG_PATA_HPT3X2N is not set ++# CONFIG_PATA_HPT3X3 is not set ++# CONFIG_PATA_IT8213 is not set ++# CONFIG_PATA_IT821X is not set ++# CONFIG_PATA_JMICRON is not set ++# CONFIG_PATA_MARVELL is not set ++# CONFIG_PATA_NETCELL is not set ++# CONFIG_PATA_NINJA32 is not set ++# CONFIG_PATA_NS87415 is not set ++CONFIG_PATA_OLDPIIX=y ++# CONFIG_PATA_OPTIDMA is not set ++# CONFIG_PATA_PDC2027X is not set ++# CONFIG_PATA_PDC_OLD is not set ++# CONFIG_PATA_RADISYS is not set ++# CONFIG_PATA_RDC is not set ++CONFIG_PATA_SCH=y ++# CONFIG_PATA_SERVERWORKS is not set ++# CONFIG_PATA_SIL680 is not set ++# CONFIG_PATA_SIS is not set ++# 
CONFIG_PATA_TOSHIBA is not set ++# CONFIG_PATA_TRIFLEX is not set ++# CONFIG_PATA_VIA is not set ++# CONFIG_PATA_WINBOND is not set ++ ++# ++# PIO-only SFF controllers ++# ++# CONFIG_PATA_CMD640_PCI is not set ++CONFIG_PATA_MPIIX=y ++# CONFIG_PATA_NS87410 is not set ++# CONFIG_PATA_OPTI is not set ++# CONFIG_PATA_PLATFORM is not set ++# CONFIG_PATA_RZ1000 is not set ++ ++# ++# Generic fallback / legacy drivers ++# ++# CONFIG_PATA_ACPI is not set ++CONFIG_ATA_GENERIC=y ++# CONFIG_PATA_LEGACY is not set ++CONFIG_MD=y ++# CONFIG_BLK_DEV_MD is not set ++CONFIG_BCACHE=m ++# CONFIG_BCACHE_DEBUG is not set ++# CONFIG_BCACHE_CLOSURES_DEBUG is not set ++CONFIG_BLK_DEV_DM_BUILTIN=y ++CONFIG_BLK_DEV_DM=y ++# CONFIG_DM_DEBUG is not set ++CONFIG_DM_BUFIO=y ++# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set ++# CONFIG_DM_UNSTRIPED is not set ++CONFIG_DM_CRYPT=y ++CONFIG_DM_SNAPSHOT=y ++# CONFIG_DM_THIN_PROVISIONING is not set ++# CONFIG_DM_CACHE is not set ++# CONFIG_DM_WRITECACHE is not set ++# CONFIG_DM_ERA is not set ++# CONFIG_DM_CLONE is not set ++CONFIG_DM_MIRROR=y ++# CONFIG_DM_LOG_USERSPACE is not set ++# CONFIG_DM_RAID is not set ++CONFIG_DM_ZERO=y ++# CONFIG_DM_MULTIPATH is not set ++# CONFIG_DM_DELAY is not set ++# CONFIG_DM_DUST is not set ++# CONFIG_DM_INIT is not set ++CONFIG_DM_UEVENT=y ++# CONFIG_DM_FLAKEY is not set ++CONFIG_DM_VERITY=y ++# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set ++# CONFIG_DM_VERITY_AVB is not set ++CONFIG_DM_VERITY_FEC=y ++# CONFIG_DM_SWITCH is not set ++# CONFIG_DM_LOG_WRITES is not set ++# CONFIG_DM_INTEGRITY is not set ++CONFIG_DM_BOW=y ++# CONFIG_TARGET_CORE is not set ++# CONFIG_FUSION is not set ++ ++# ++# IEEE 1394 (FireWire) support ++# ++# CONFIG_FIREWIRE is not set ++# CONFIG_FIREWIRE_NOSY is not set ++# end of IEEE 1394 (FireWire) support ++ ++# CONFIG_MACINTOSH_DRIVERS is not set ++CONFIG_NETDEVICES=y ++CONFIG_MII=y ++CONFIG_NET_CORE=y ++# CONFIG_BONDING is not set ++# CONFIG_DUMMY is not set ++# CONFIG_EQUALIZER is not 
set ++# CONFIG_NET_FC is not set ++# CONFIG_IFB is not set ++# CONFIG_NET_TEAM is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_IPVLAN is not set ++# CONFIG_VXLAN is not set ++# CONFIG_GENEVE is not set ++# CONFIG_GTP is not set ++# CONFIG_MACSEC is not set ++CONFIG_NETCONSOLE=y ++# CONFIG_NETCONSOLE_DYNAMIC is not set ++CONFIG_NETPOLL=y ++CONFIG_NET_POLL_CONTROLLER=y ++CONFIG_TUN=y ++# CONFIG_TUN_VNET_CROSS_LE is not set ++# CONFIG_VETH is not set ++# CONFIG_VIRTIO_NET is not set ++CONFIG_NLMON=m ++# CONFIG_ARCNET is not set ++ ++# ++# CAIF transport drivers ++# ++ ++# ++# Distributed Switch Architecture drivers ++# ++# end of Distributed Switch Architecture drivers ++ ++CONFIG_ETHERNET=y ++CONFIG_NET_VENDOR_3COM=y ++# CONFIG_VORTEX is not set ++# CONFIG_TYPHOON is not set ++CONFIG_NET_VENDOR_ADAPTEC=y ++# CONFIG_ADAPTEC_STARFIRE is not set ++# CONFIG_NET_VENDOR_AGERE is not set ++CONFIG_NET_VENDOR_ALACRITECH=y ++# CONFIG_SLICOSS is not set ++CONFIG_NET_VENDOR_ALTEON=y ++# CONFIG_ACENIC is not set ++# CONFIG_ALTERA_TSE is not set ++CONFIG_NET_VENDOR_AMAZON=y ++# CONFIG_ENA_ETHERNET is not set ++CONFIG_NET_VENDOR_AMD=y ++# CONFIG_AMD8111_ETH is not set ++# CONFIG_PCNET32 is not set ++# CONFIG_AMD_XGBE is not set ++CONFIG_NET_VENDOR_AQUANTIA=y ++# CONFIG_AQTION is not set ++# CONFIG_NET_VENDOR_ARC is not set ++CONFIG_NET_VENDOR_ATHEROS=y ++# CONFIG_ATL2 is not set ++# CONFIG_ATL1 is not set ++# CONFIG_ATL1E is not set ++# CONFIG_ATL1C is not set ++# CONFIG_ALX is not set ++# CONFIG_NET_VENDOR_AURORA is not set ++CONFIG_NET_VENDOR_BROADCOM=y ++# CONFIG_B44 is not set ++# CONFIG_BCMGENET is not set ++# CONFIG_BNX2 is not set ++# CONFIG_CNIC is not set ++# CONFIG_TIGON3 is not set ++# CONFIG_BNX2X is not set ++# CONFIG_SYSTEMPORT is not set ++# CONFIG_BNXT is not set ++CONFIG_NET_VENDOR_BROCADE=y ++# CONFIG_BNA is not set ++CONFIG_NET_VENDOR_CADENCE=y ++# CONFIG_MACB is not set ++CONFIG_NET_VENDOR_CAVIUM=y ++# CONFIG_THUNDER_NIC_PF is not set ++# 
CONFIG_THUNDER_NIC_VF is not set ++# CONFIG_THUNDER_NIC_BGX is not set ++# CONFIG_THUNDER_NIC_RGX is not set ++CONFIG_CAVIUM_PTP=y ++# CONFIG_LIQUIDIO is not set ++# CONFIG_LIQUIDIO_VF is not set ++CONFIG_NET_VENDOR_CHELSIO=y ++# CONFIG_CHELSIO_T1 is not set ++# CONFIG_CHELSIO_T3 is not set ++# CONFIG_CHELSIO_T4 is not set ++# CONFIG_CHELSIO_T4VF is not set ++CONFIG_NET_VENDOR_CISCO=y ++# CONFIG_ENIC is not set ++CONFIG_NET_VENDOR_CORTINA=y ++# CONFIG_CX_ECAT is not set ++# CONFIG_DNET is not set ++CONFIG_NET_VENDOR_DEC=y ++CONFIG_NET_TULIP=y ++# CONFIG_DE2104X is not set ++# CONFIG_TULIP is not set ++# CONFIG_DE4X5 is not set ++# CONFIG_WINBOND_840 is not set ++# CONFIG_DM9102 is not set ++# CONFIG_ULI526X is not set ++CONFIG_NET_VENDOR_DLINK=y ++# CONFIG_DL2K is not set ++# CONFIG_SUNDANCE is not set ++CONFIG_NET_VENDOR_EMULEX=y ++# CONFIG_BE2NET is not set ++CONFIG_NET_VENDOR_EZCHIP=y ++CONFIG_NET_VENDOR_GOOGLE=y ++# CONFIG_GVE is not set ++CONFIG_NET_VENDOR_HP=y ++# CONFIG_HP100 is not set ++CONFIG_NET_VENDOR_HUAWEI=y ++# CONFIG_HINIC is not set ++CONFIG_NET_VENDOR_I825XX=y ++CONFIG_NET_VENDOR_INTEL=y ++CONFIG_E100=y ++CONFIG_E1000=y ++CONFIG_E1000E=y ++CONFIG_E1000E_HWTS=y ++# CONFIG_IGB is not set ++# CONFIG_IGBVF is not set ++# CONFIG_IXGB is not set ++# CONFIG_IXGBE is not set ++# CONFIG_IXGBEVF is not set ++# CONFIG_I40E is not set ++# CONFIG_I40EVF is not set ++# CONFIG_ICE is not set ++# CONFIG_FM10K is not set ++# CONFIG_IGC is not set ++# CONFIG_JME is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MELLANOX is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++CONFIG_NET_VENDOR_MICROSEMI=y ++# CONFIG_NET_VENDOR_MYRI is not set ++# CONFIG_FEALNX is not set ++CONFIG_NET_VENDOR_NATSEMI=y ++# CONFIG_NATSEMI is not set ++# CONFIG_NS83820 is not set ++CONFIG_NET_VENDOR_NETERION=y ++# CONFIG_S2IO is not set ++# CONFIG_VXGE is not set ++CONFIG_NET_VENDOR_NETRONOME=y ++# CONFIG_NFP is not set 
++CONFIG_NET_VENDOR_NI=y ++# CONFIG_NI_XGE_MANAGEMENT_ENET is not set ++CONFIG_NET_VENDOR_8390=y ++CONFIG_NE2K_PCI=y ++CONFIG_NET_VENDOR_NVIDIA=y ++# CONFIG_FORCEDETH is not set ++# CONFIG_NET_VENDOR_OKI is not set ++# CONFIG_ETHOC is not set ++CONFIG_NET_VENDOR_PACKET_ENGINES=y ++# CONFIG_HAMACHI is not set ++# CONFIG_YELLOWFIN is not set ++CONFIG_NET_VENDOR_PENSANDO=y ++# CONFIG_IONIC is not set ++# CONFIG_NET_VENDOR_QLOGIC is not set ++# CONFIG_NET_VENDOR_QUALCOMM is not set ++# CONFIG_NET_VENDOR_RDC is not set ++# CONFIG_NET_VENDOR_REALTEK is not set ++CONFIG_NET_VENDOR_RENESAS=y ++# CONFIG_NET_VENDOR_ROCKER is not set ++# CONFIG_NET_VENDOR_SAMSUNG is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_NET_VENDOR_SOLARFLARE=y ++# CONFIG_SFC is not set ++# CONFIG_SFC_FALCON is not set ++# CONFIG_NET_VENDOR_SILAN is not set ++# CONFIG_NET_VENDOR_SIS is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++CONFIG_NET_VENDOR_SOCIONEXT=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_NET_VENDOR_SUN is not set ++CONFIG_NET_VENDOR_SYNOPSYS=y ++# CONFIG_DWC_XLGMAC is not set ++# CONFIG_NET_VENDOR_TEHUTI is not set ++# CONFIG_NET_VENDOR_TI is not set ++# CONFIG_NET_VENDOR_VIA is not set ++# CONFIG_NET_VENDOR_WIZNET is not set ++CONFIG_NET_VENDOR_XILINX=y ++# CONFIG_XILINX_AXI_EMAC is not set ++# CONFIG_XILINX_LL_TEMAC is not set ++# CONFIG_FDDI is not set ++# CONFIG_HIPPI is not set ++# CONFIG_NET_SB1000 is not set ++CONFIG_MDIO_DEVICE=y ++CONFIG_MDIO_BUS=y ++# CONFIG_MDIO_BCM_UNIMAC is not set ++CONFIG_MDIO_BITBANG=m ++# CONFIG_MDIO_GPIO is not set ++# CONFIG_MDIO_MSCC_MIIM is not set ++# CONFIG_MDIO_THUNDER is not set ++CONFIG_PHYLIB=y ++# CONFIG_LED_TRIGGER_PHY is not set ++ ++# ++# MII PHY device drivers ++# ++# CONFIG_ADIN_PHY is not set ++# CONFIG_AMD_PHY is not set ++# CONFIG_AQUANTIA_PHY is not set ++# CONFIG_AX88796B_PHY is not set ++# CONFIG_AT803X_PHY is not set ++# CONFIG_BCM7XXX_PHY is not set ++# CONFIG_BCM87XX_PHY is not set ++# CONFIG_BROADCOM_PHY is 
not set ++# CONFIG_CICADA_PHY is not set ++# CONFIG_CORTINA_PHY is not set ++# CONFIG_DAVICOM_PHY is not set ++# CONFIG_DP83822_PHY is not set ++# CONFIG_DP83TC811_PHY is not set ++# CONFIG_DP83848_PHY is not set ++# CONFIG_DP83867_PHY is not set ++# CONFIG_DWXPCS is not set ++# CONFIG_FIXED_PHY is not set ++# CONFIG_ICPLUS_PHY is not set ++# CONFIG_INTEL_XWAY_PHY is not set ++# CONFIG_LSI_ET1011C_PHY is not set ++# CONFIG_LXT_PHY is not set ++# CONFIG_MARVELL_PHY is not set ++# CONFIG_MARVELL_10G_PHY is not set ++# CONFIG_MICREL_PHY is not set ++# CONFIG_MICROCHIP_PHY is not set ++# CONFIG_MICROCHIP_T1_PHY is not set ++# CONFIG_MICROSEMI_PHY is not set ++# CONFIG_NATIONAL_PHY is not set ++# CONFIG_NXP_TJA11XX_PHY is not set ++# CONFIG_QSEMI_PHY is not set ++# CONFIG_REALTEK_PHY is not set ++# CONFIG_RENESAS_PHY is not set ++# CONFIG_ROCKCHIP_PHY is not set ++# CONFIG_SMSC_PHY is not set ++# CONFIG_STE10XP is not set ++# CONFIG_TERANETICS_PHY is not set ++# CONFIG_VITESSE_PHY is not set ++# CONFIG_XILINX_GMII2RGMII is not set ++# CONFIG_MICREL_KS8995MA is not set ++CONFIG_PPP=y ++CONFIG_PPP_BSDCOMP=y ++CONFIG_PPP_DEFLATE=y ++CONFIG_PPP_FILTER=y ++CONFIG_PPP_MPPE=y ++# CONFIG_PPP_MULTILINK is not set ++CONFIG_PPPOE=y ++CONFIG_PPTP=y ++CONFIG_PPPOL2TP=y ++# CONFIG_PPP_ASYNC is not set ++# CONFIG_PPP_SYNC_TTY is not set ++# CONFIG_SLIP is not set ++CONFIG_SLHC=y ++CONFIG_USB_NET_DRIVERS=y ++CONFIG_USB_CATC=y ++CONFIG_USB_KAWETH=y ++CONFIG_USB_PEGASUS=y ++CONFIG_USB_RTL8150=y ++# CONFIG_USB_RTL8152 is not set ++# CONFIG_USB_LAN78XX is not set ++CONFIG_USB_USBNET=y ++CONFIG_USB_NET_AX8817X=y ++CONFIG_USB_NET_AX88179_178A=y ++CONFIG_USB_NET_CDCETHER=y ++CONFIG_USB_NET_CDC_EEM=y ++CONFIG_USB_NET_CDC_NCM=y ++CONFIG_USB_NET_HUAWEI_CDC_NCM=m ++# CONFIG_USB_NET_CDC_MBIM is not set ++CONFIG_USB_NET_DM9601=y ++CONFIG_USB_NET_SR9700=m ++# CONFIG_USB_NET_SR9800 is not set ++CONFIG_USB_NET_SMSC75XX=y ++CONFIG_USB_NET_SMSC95XX=y ++# CONFIG_USB_NET_GL620A is not set 
++CONFIG_USB_NET_NET1080=y ++# CONFIG_USB_NET_PLUSB is not set ++CONFIG_USB_NET_MCS7830=y ++# CONFIG_USB_NET_RNDIS_HOST is not set ++CONFIG_USB_NET_CDC_SUBSET_ENABLE=y ++CONFIG_USB_NET_CDC_SUBSET=y ++# CONFIG_USB_ALI_M5632 is not set ++# CONFIG_USB_AN2720 is not set ++CONFIG_USB_BELKIN=y ++CONFIG_USB_ARMLINUX=y ++# CONFIG_USB_EPSON2888 is not set ++# CONFIG_USB_KC2190 is not set ++CONFIG_USB_NET_ZAURUS=y ++# CONFIG_USB_NET_CX82310_ETH is not set ++# CONFIG_USB_NET_KALMIA is not set ++# CONFIG_USB_NET_QMI_WWAN is not set ++# CONFIG_USB_HSO is not set ++# CONFIG_USB_NET_INT51X1 is not set ++CONFIG_USB_IPHETH=y ++CONFIG_USB_SIERRA_NET=y ++# CONFIG_USB_VL600 is not set ++# CONFIG_USB_NET_CH9200 is not set ++CONFIG_USB_NET_AQC111=y ++CONFIG_WLAN=y ++# CONFIG_WIRELESS_WDS is not set ++CONFIG_WLAN_VENDOR_ADMTEK=y ++# CONFIG_ADM8211 is not set ++CONFIG_WLAN_VENDOR_ATH=y ++# CONFIG_ATH_DEBUG is not set ++# CONFIG_ATH5K is not set ++# CONFIG_ATH5K_PCI is not set ++# CONFIG_ATH9K is not set ++# CONFIG_ATH9K_HTC is not set ++# CONFIG_CARL9170 is not set ++# CONFIG_ATH6KL is not set ++# CONFIG_AR5523 is not set ++# CONFIG_WIL6210 is not set ++# CONFIG_ATH10K is not set ++# CONFIG_WCN36XX is not set ++CONFIG_WLAN_VENDOR_ATMEL=y ++# CONFIG_ATMEL is not set ++# CONFIG_AT76C50X_USB is not set ++CONFIG_WLAN_VENDOR_BROADCOM=y ++# CONFIG_B43 is not set ++# CONFIG_B43LEGACY is not set ++# CONFIG_BRCMSMAC is not set ++# CONFIG_BRCMFMAC is not set ++CONFIG_WLAN_VENDOR_CISCO=y ++# CONFIG_AIRO is not set ++CONFIG_WLAN_VENDOR_INTEL=y ++# CONFIG_IPW2100 is not set ++# CONFIG_IPW2200 is not set ++# CONFIG_IWL4965 is not set ++# CONFIG_IWL3945 is not set ++# CONFIG_IWLWIFI is not set ++CONFIG_WLAN_VENDOR_INTERSIL=y ++# CONFIG_HOSTAP is not set ++# CONFIG_HERMES is not set ++# CONFIG_P54_COMMON is not set ++# CONFIG_PRISM54 is not set ++CONFIG_WLAN_VENDOR_MARVELL=y ++# CONFIG_LIBERTAS is not set ++# CONFIG_LIBERTAS_THINFIRM is not set ++# CONFIG_MWIFIEX is not set ++# CONFIG_MWL8K is not set 
++CONFIG_WLAN_VENDOR_MEDIATEK=y ++# CONFIG_MT7601U is not set ++# CONFIG_MT76x0U is not set ++# CONFIG_MT76x0E is not set ++# CONFIG_MT76x2E is not set ++# CONFIG_MT76x2U is not set ++# CONFIG_MT7603E is not set ++# CONFIG_MT7615E is not set ++CONFIG_WLAN_VENDOR_RALINK=y ++# CONFIG_RT2X00 is not set ++CONFIG_WLAN_VENDOR_REALTEK=y ++# CONFIG_RTL8180 is not set ++# CONFIG_RTL8187 is not set ++CONFIG_RTL_CARDS=m ++# CONFIG_RTL8192CE is not set ++# CONFIG_RTL8192SE is not set ++# CONFIG_RTL8192DE is not set ++# CONFIG_RTL8723AE is not set ++# CONFIG_RTL8723BE is not set ++# CONFIG_RTL8188EE is not set ++# CONFIG_RTL8192EE is not set ++# CONFIG_RTL8821AE is not set ++# CONFIG_RTL8192CU is not set ++# CONFIG_RTL8XXXU is not set ++# CONFIG_RTW88 is not set ++CONFIG_WLAN_VENDOR_RSI=y ++# CONFIG_RSI_91X is not set ++CONFIG_WLAN_VENDOR_ST=y ++# CONFIG_CW1200 is not set ++CONFIG_WLAN_VENDOR_TI=y ++# CONFIG_WL1251 is not set ++# CONFIG_WL12XX is not set ++# CONFIG_WL18XX is not set ++# CONFIG_WLCORE is not set ++CONFIG_WLAN_VENDOR_ZYDAS=y ++# CONFIG_USB_ZD1201 is not set ++# CONFIG_ZD1211RW is not set ++CONFIG_WLAN_VENDOR_QUANTENNA=y ++# CONFIG_QTNFMAC_PCIE is not set ++# CONFIG_MAC80211_HWSIM is not set ++# CONFIG_USB_NET_RNDIS_WLAN is not set ++# CONFIG_VIRT_WIFI is not set ++ ++# ++# Enable WiMAX (Networking options) to see the WiMAX drivers ++# ++# CONFIG_WAN is not set ++# CONFIG_VMXNET3 is not set ++# CONFIG_FUJITSU_ES is not set ++# CONFIG_NETDEVSIM is not set ++# CONFIG_NET_FAILOVER is not set ++# CONFIG_ISDN is not set ++# CONFIG_NVM is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++CONFIG_INPUT_LEDS=y ++CONFIG_INPUT_FF_MEMLESS=y ++CONFIG_INPUT_POLLDEV=y ++CONFIG_INPUT_SPARSEKMAP=y ++# CONFIG_INPUT_MATRIXKMAP is not set ++ ++# ++# Userland interfaces ++# ++CONFIG_INPUT_MOUSEDEV=y ++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set ++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 ++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 ++# CONFIG_INPUT_JOYDEV is not set ++CONFIG_INPUT_EVDEV=y 
++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++CONFIG_INPUT_KEYBOARD=y ++# CONFIG_KEYBOARD_ADC is not set ++# CONFIG_KEYBOARD_ADP5588 is not set ++# CONFIG_KEYBOARD_ADP5589 is not set ++# CONFIG_KEYBOARD_APPLESPI is not set ++# CONFIG_KEYBOARD_ATKBD is not set ++# CONFIG_KEYBOARD_QT1050 is not set ++# CONFIG_KEYBOARD_QT1070 is not set ++# CONFIG_KEYBOARD_QT2160 is not set ++# CONFIG_KEYBOARD_DLINK_DIR685 is not set ++# CONFIG_KEYBOARD_LKKBD is not set ++CONFIG_KEYBOARD_GPIO=y ++# CONFIG_KEYBOARD_GPIO_POLLED is not set ++# CONFIG_KEYBOARD_TCA6416 is not set ++# CONFIG_KEYBOARD_TCA8418 is not set ++# CONFIG_KEYBOARD_MATRIX is not set ++# CONFIG_KEYBOARD_LM8323 is not set ++# CONFIG_KEYBOARD_LM8333 is not set ++# CONFIG_KEYBOARD_MAX7359 is not set ++# CONFIG_KEYBOARD_MCS is not set ++# CONFIG_KEYBOARD_MPR121 is not set ++# CONFIG_KEYBOARD_NEWTON is not set ++# CONFIG_KEYBOARD_OPENCORES is not set ++# CONFIG_KEYBOARD_SAMSUNG is not set ++# CONFIG_KEYBOARD_STOWAWAY is not set ++# CONFIG_KEYBOARD_SUNKBD is not set ++# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set ++# CONFIG_KEYBOARD_XTKBD is not set ++CONFIG_INPUT_MOUSE=y ++# CONFIG_MOUSE_PS2 is not set ++# CONFIG_MOUSE_SERIAL is not set ++# CONFIG_MOUSE_APPLETOUCH is not set ++# CONFIG_MOUSE_BCM5974 is not set ++# CONFIG_MOUSE_CYAPA is not set ++# CONFIG_MOUSE_ELAN_I2C is not set ++# CONFIG_MOUSE_VSXXXAA is not set ++# CONFIG_MOUSE_GPIO is not set ++# CONFIG_MOUSE_SYNAPTICS_I2C is not set ++# CONFIG_MOUSE_SYNAPTICS_USB is not set ++CONFIG_INPUT_JOYSTICK=y ++# CONFIG_JOYSTICK_ANALOG is not set ++# CONFIG_JOYSTICK_A3D is not set ++# CONFIG_JOYSTICK_ADI is not set ++# CONFIG_JOYSTICK_COBRA is not set ++# CONFIG_JOYSTICK_GF2K is not set ++# CONFIG_JOYSTICK_GRIP is not set ++# CONFIG_JOYSTICK_GRIP_MP is not set ++# CONFIG_JOYSTICK_GUILLEMOT is not set ++# CONFIG_JOYSTICK_INTERACT is not set ++# CONFIG_JOYSTICK_SIDEWINDER is not set ++# CONFIG_JOYSTICK_TMDC is not set ++# CONFIG_JOYSTICK_IFORCE is not set 
++# CONFIG_JOYSTICK_WARRIOR is not set ++# CONFIG_JOYSTICK_MAGELLAN is not set ++# CONFIG_JOYSTICK_SPACEORB is not set ++# CONFIG_JOYSTICK_SPACEBALL is not set ++# CONFIG_JOYSTICK_STINGER is not set ++# CONFIG_JOYSTICK_TWIDJOY is not set ++# CONFIG_JOYSTICK_ZHENHUA is not set ++# CONFIG_JOYSTICK_AS5011 is not set ++# CONFIG_JOYSTICK_JOYDUMP is not set ++# CONFIG_JOYSTICK_XPAD is not set ++# CONFIG_JOYSTICK_PSXPAD_SPI is not set ++# CONFIG_JOYSTICK_PXRC is not set ++# CONFIG_JOYSTICK_FSIA6B is not set ++CONFIG_INPUT_TABLET=y ++# CONFIG_TABLET_USB_ACECAD is not set ++# CONFIG_TABLET_USB_AIPTEK is not set ++# CONFIG_TABLET_USB_GTCO is not set ++# CONFIG_TABLET_USB_HANWANG is not set ++# CONFIG_TABLET_USB_KBTAB is not set ++# CONFIG_TABLET_USB_PEGASUS is not set ++# CONFIG_TABLET_SERIAL_WACOM4 is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_PROPERTIES=y ++# CONFIG_TOUCHSCREEN_ADS7846 is not set ++# CONFIG_TOUCHSCREEN_AD7877 is not set ++# CONFIG_TOUCHSCREEN_AD7879 is not set ++# CONFIG_TOUCHSCREEN_ADC is not set ++CONFIG_TOUCHSCREEN_ATMEL_MXT=y ++# CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set ++# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set ++# CONFIG_TOUCHSCREEN_BU21013 is not set ++# CONFIG_TOUCHSCREEN_BU21029 is not set ++# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set ++# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set ++# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set ++CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m ++CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m ++CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m ++# CONFIG_TOUCHSCREEN_DYNAPRO is not set ++# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set ++# CONFIG_TOUCHSCREEN_EETI is not set ++# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set ++# CONFIG_TOUCHSCREEN_EXC3000 is not set ++# CONFIG_TOUCHSCREEN_FUJITSU is not set ++# CONFIG_TOUCHSCREEN_GOODIX is not set ++# CONFIG_TOUCHSCREEN_HIDEEP is not set ++# CONFIG_TOUCHSCREEN_ILI210X is not set ++# CONFIG_TOUCHSCREEN_S6SY761 is not set ++# CONFIG_TOUCHSCREEN_GUNZE is not set ++# CONFIG_TOUCHSCREEN_EKTF2127 is not set 
++# CONFIG_TOUCHSCREEN_ELAN is not set ++# CONFIG_TOUCHSCREEN_ELO is not set ++# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set ++# CONFIG_TOUCHSCREEN_WACOM_I2C is not set ++# CONFIG_TOUCHSCREEN_MAX11801 is not set ++# CONFIG_TOUCHSCREEN_MCS5000 is not set ++# CONFIG_TOUCHSCREEN_MMS114 is not set ++# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set ++# CONFIG_TOUCHSCREEN_MTOUCH is not set ++# CONFIG_TOUCHSCREEN_INEXIO is not set ++# CONFIG_TOUCHSCREEN_MK712 is not set ++# CONFIG_TOUCHSCREEN_PENMOUNT is not set ++# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set ++# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set ++# CONFIG_TOUCHSCREEN_TOUCHWIN is not set ++# CONFIG_TOUCHSCREEN_PIXCIR is not set ++# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set ++# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set ++# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set ++# CONFIG_TOUCHSCREEN_TSC_SERIO is not set ++# CONFIG_TOUCHSCREEN_TSC2004 is not set ++# CONFIG_TOUCHSCREEN_TSC2005 is not set ++# CONFIG_TOUCHSCREEN_TSC2007 is not set ++# CONFIG_TOUCHSCREEN_RM_TS is not set ++# CONFIG_TOUCHSCREEN_SILEAD is not set ++# CONFIG_TOUCHSCREEN_SIS_I2C is not set ++# CONFIG_TOUCHSCREEN_ST1232 is not set ++# CONFIG_TOUCHSCREEN_STMFTS is not set ++CONFIG_TOUCHSCREEN_SUR40=m ++# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set ++# CONFIG_TOUCHSCREEN_SX8654 is not set ++# CONFIG_TOUCHSCREEN_TPS6507X is not set ++# CONFIG_TOUCHSCREEN_ZET6223 is not set ++CONFIG_TOUCHSCREEN_ZFORCE=m ++# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set ++# CONFIG_TOUCHSCREEN_IQS5XX is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_INPUT_AD714X is not set ++# CONFIG_INPUT_ARIZONA_HAPTICS is not set ++# CONFIG_INPUT_BMA150 is not set ++# CONFIG_INPUT_E3X0_BUTTON is not set ++# CONFIG_INPUT_MSM_VIBRATOR is not set ++# CONFIG_INPUT_PCSPKR is not set ++# CONFIG_INPUT_MMA8450 is not set ++# CONFIG_INPUT_APANEL is not set ++# CONFIG_INPUT_GP2A is not set ++# CONFIG_INPUT_GPIO_BEEPER is not set ++# CONFIG_INPUT_GPIO_DECODER is not set ++# CONFIG_INPUT_GPIO_VIBRA is not set ++# 
CONFIG_INPUT_ATLAS_BTNS is not set ++# CONFIG_INPUT_ATI_REMOTE2 is not set ++# CONFIG_INPUT_KEYSPAN_REMOTE is not set ++# CONFIG_INPUT_KXTJ9 is not set ++# CONFIG_INPUT_POWERMATE is not set ++# CONFIG_INPUT_YEALINK is not set ++# CONFIG_INPUT_CM109 is not set ++# CONFIG_INPUT_REGULATOR_HAPTIC is not set ++CONFIG_INPUT_UINPUT=y ++# CONFIG_INPUT_PCF8574 is not set ++# CONFIG_INPUT_PWM_BEEPER is not set ++# CONFIG_INPUT_PWM_VIBRA is not set ++# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set ++# CONFIG_INPUT_ADXL34X is not set ++# CONFIG_INPUT_IMS_PCU is not set ++# CONFIG_INPUT_CMA3000 is not set ++CONFIG_INPUT_SOC_BUTTON_ARRAY=y ++# CONFIG_INPUT_DRV260X_HAPTICS is not set ++# CONFIG_INPUT_DRV2665_HAPTICS is not set ++# CONFIG_INPUT_DRV2667_HAPTICS is not set ++# CONFIG_RMI4_CORE is not set ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y ++# CONFIG_GAMEPORT is not set ++# end of Hardware I/O ports ++# end of Input device support ++ ++# ++# Character devices ++# ++CONFIG_TTY=y ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_VT_CONSOLE_SLEEP=y ++CONFIG_HW_CONSOLE=y ++CONFIG_VT_HW_CONSOLE_BINDING=y ++CONFIG_UNIX98_PTYS=y ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_SERIAL_NONSTANDARD=y ++# CONFIG_ROCKETPORT is not set ++# CONFIG_CYCLADES is not set ++# CONFIG_MOXA_INTELLIO is not set ++# CONFIG_MOXA_SMARTIO is not set ++# CONFIG_SYNCLINK is not set ++# CONFIG_SYNCLINKMP is not set ++# CONFIG_SYNCLINK_GT is not set ++# CONFIG_NOZOMI is not set ++# CONFIG_ISI is not set ++# CONFIG_N_HDLC is not set ++CONFIG_N_GSM=y ++CONFIG_TRACE_ROUTER=y ++CONFIG_TRACE_SINK=y ++# CONFIG_NULL_TTY is not set ++CONFIG_LDISC_AUTOLOAD=y ++# CONFIG_DEVMEM is not set ++# CONFIG_DEVKMEM is not set ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_EARLYCON=y ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y ++CONFIG_SERIAL_8250_PNP=y ++# CONFIG_SERIAL_8250_FINTEK is not set ++CONFIG_SERIAL_8250_CONSOLE=y 
++CONFIG_SERIAL_8250_DMA=y ++CONFIG_SERIAL_8250_PCI=y ++CONFIG_SERIAL_8250_EXAR=y ++CONFIG_SERIAL_8250_NR_UARTS=32 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=4 ++CONFIG_SERIAL_8250_EXTENDED=y ++CONFIG_SERIAL_8250_MANY_PORTS=y ++CONFIG_SERIAL_8250_SHARE_IRQ=y ++CONFIG_SERIAL_8250_DETECT_IRQ=y ++CONFIG_SERIAL_8250_RSA=y ++CONFIG_SERIAL_8250_DWLIB=y ++CONFIG_SERIAL_8250_DW=y ++# CONFIG_SERIAL_8250_RT288X is not set ++CONFIG_SERIAL_8250_LPSS=y ++CONFIG_SERIAL_8250_MID=y ++ ++# ++# Non-8250 serial port support ++# ++# CONFIG_SERIAL_MAX3100 is not set ++# CONFIG_SERIAL_MAX310X is not set ++# CONFIG_SERIAL_UARTLITE is not set ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++# CONFIG_SERIAL_JSM is not set ++# CONFIG_SERIAL_SCCNXP is not set ++# CONFIG_SERIAL_SC16IS7XX is not set ++# CONFIG_SERIAL_ALTERA_JTAGUART is not set ++# CONFIG_SERIAL_ALTERA_UART is not set ++# CONFIG_SERIAL_IFX6X60 is not set ++# CONFIG_SERIAL_ARC is not set ++# CONFIG_SERIAL_RP2 is not set ++# CONFIG_SERIAL_FSL_LPUART is not set ++# CONFIG_SERIAL_FSL_LINFLEXUART is not set ++# end of Serial drivers ++ ++CONFIG_SERIAL_MCTRL_GPIO=y ++# CONFIG_SERIAL_DEV_BUS is not set ++# CONFIG_TTY_PRINTK is not set ++# CONFIG_VIRTIO_CONSOLE is not set ++# CONFIG_IPMI_HANDLER is not set ++# CONFIG_IPMB_DEVICE_INTERFACE is not set ++CONFIG_HW_RANDOM=y ++# CONFIG_HW_RANDOM_TIMERIOMEM is not set ++CONFIG_HW_RANDOM_INTEL=y ++# CONFIG_HW_RANDOM_AMD is not set ++# CONFIG_HW_RANDOM_VIA is not set ++# CONFIG_HW_RANDOM_VIRTIO is not set ++CONFIG_NVRAM=y ++# CONFIG_APPLICOM is not set ++# CONFIG_MWAVE is not set ++# CONFIG_RAW_DRIVER is not set ++# CONFIG_HPET is not set ++# CONFIG_HANGCHECK_TIMER is not set ++CONFIG_TCG_TPM=m ++CONFIG_HW_RANDOM_TPM=y ++CONFIG_TCG_TIS_CORE=m ++CONFIG_TCG_TIS=m ++# CONFIG_TCG_TIS_SPI is not set ++CONFIG_TCG_TIS_I2C_ATMEL=m ++CONFIG_TCG_TIS_I2C_INFINEON=m ++CONFIG_TCG_TIS_I2C_NUVOTON=m ++CONFIG_TCG_NSC=m ++CONFIG_TCG_ATMEL=m ++CONFIG_TCG_INFINEON=m ++CONFIG_TCG_CRB=m ++# CONFIG_TCG_VTPM_PROXY is 
not set ++# CONFIG_TCG_TIS_ST33ZP24_I2C is not set ++# CONFIG_TCG_TIS_ST33ZP24_SPI is not set ++# CONFIG_TELCLOCK is not set ++# CONFIG_DEVPORT is not set ++# CONFIG_XILLYBUS is not set ++CONFIG_RPMB_SUPPORT=y ++CONFIG_RPMB=y ++# CONFIG_RPMB_INTF_DEV is not set ++# CONFIG_RPMB_SIM is not set ++# CONFIG_VIRTIO_RPMB is not set ++# CONFIG_RPMB_MUX is not set ++# CONFIG_RPMB_MUX_KEY is not set ++# end of Character devices ++ ++CONFIG_RANDOM_TRUST_CPU=y ++# CONFIG_RANDOM_TRUST_BOOTLOADER is not set ++ ++# ++# I2C support ++# ++CONFIG_I2C=y ++CONFIG_ACPI_I2C_OPREGION=y ++CONFIG_I2C_BOARDINFO=y ++CONFIG_I2C_COMPAT=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_MUX=y ++ ++# ++# Multiplexer I2C Chip support ++# ++# CONFIG_I2C_MUX_GPIO is not set ++# CONFIG_I2C_MUX_LTC4306 is not set ++# CONFIG_I2C_MUX_PCA9541 is not set ++# CONFIG_I2C_MUX_PCA954x is not set ++# CONFIG_I2C_MUX_REG is not set ++# CONFIG_I2C_MUX_MLXCPLD is not set ++# end of Multiplexer I2C Chip support ++ ++# CONFIG_I2C_HELPER_AUTO is not set ++CONFIG_I2C_SMBUS=y ++ ++# ++# I2C Algorithms ++# ++CONFIG_I2C_ALGOBIT=y ++# CONFIG_I2C_ALGOPCF is not set ++# CONFIG_I2C_ALGOPCA is not set ++# end of I2C Algorithms ++ ++# ++# I2C Hardware Bus support ++# ++ ++# ++# PC SMBus host controller drivers ++# ++# CONFIG_I2C_ALI1535 is not set ++# CONFIG_I2C_VIRTIO is not set ++# CONFIG_I2C_ALI1563 is not set ++# CONFIG_I2C_ALI15X3 is not set ++# CONFIG_I2C_AMD756 is not set ++# CONFIG_I2C_AMD8111 is not set ++# CONFIG_I2C_AMD_MP2 is not set ++CONFIG_I2C_I801=y ++CONFIG_I2C_ISCH=y ++# CONFIG_I2C_ISMT is not set ++# CONFIG_I2C_PIIX4 is not set ++# CONFIG_I2C_NFORCE2 is not set ++# CONFIG_I2C_NVIDIA_GPU is not set ++# CONFIG_I2C_SIS5595 is not set ++# CONFIG_I2C_SIS630 is not set ++# CONFIG_I2C_SIS96X is not set ++# CONFIG_I2C_VIA is not set ++# CONFIG_I2C_VIAPRO is not set ++ ++# ++# ACPI drivers ++# ++CONFIG_I2C_SCMI=y ++ ++# ++# I2C system bus drivers (mostly embedded / system-on-chip) ++# ++# CONFIG_I2C_CBUS_GPIO is not set 
++CONFIG_I2C_DESIGNWARE_CORE=y ++CONFIG_I2C_DESIGNWARE_PLATFORM=y ++# CONFIG_I2C_DESIGNWARE_SLAVE is not set ++CONFIG_I2C_DESIGNWARE_PCI=y ++CONFIG_I2C_DESIGNWARE_BAYTRAIL=y ++# CONFIG_I2C_EMEV2 is not set ++# CONFIG_I2C_GPIO is not set ++# CONFIG_I2C_OCORES is not set ++# CONFIG_I2C_PCA_PLATFORM is not set ++# CONFIG_I2C_SIMTEC is not set ++# CONFIG_I2C_XILINX is not set ++ ++# ++# External I2C/SMBus adapter drivers ++# ++# CONFIG_I2C_DIOLAN_U2C is not set ++# CONFIG_I2C_PARPORT_LIGHT is not set ++# CONFIG_I2C_ROBOTFUZZ_OSIF is not set ++# CONFIG_I2C_TAOS_EVM is not set ++# CONFIG_I2C_TINY_USB is not set ++ ++# ++# Other I2C/SMBus bus drivers ++# ++# CONFIG_I2C_MLXCPLD is not set ++# end of I2C Hardware Bus support ++ ++# CONFIG_I2C_STUB is not set ++CONFIG_I2C_SLAVE=y ++# CONFIG_I2C_SLAVE_EEPROM is not set ++# CONFIG_I2C_DEBUG_CORE is not set ++# CONFIG_I2C_DEBUG_ALGO is not set ++# CONFIG_I2C_DEBUG_BUS is not set ++# end of I2C support ++ ++# CONFIG_I3C is not set ++CONFIG_SPI=y ++# CONFIG_SPI_DEBUG is not set ++CONFIG_SPI_MASTER=y ++# CONFIG_SPI_MEM is not set ++ ++# ++# SPI Master Controller Drivers ++# ++# CONFIG_SPI_ALTERA is not set ++# CONFIG_SPI_AXI_SPI_ENGINE is not set ++# CONFIG_SPI_BITBANG is not set ++# CONFIG_SPI_CADENCE is not set ++# CONFIG_SPI_DESIGNWARE is not set ++# CONFIG_SPI_NXP_FLEXSPI is not set ++# CONFIG_SPI_GPIO is not set ++# CONFIG_SPI_OC_TINY is not set ++CONFIG_SPI_PXA2XX=y ++CONFIG_SPI_PXA2XX_PCI=y ++# CONFIG_SPI_ROCKCHIP is not set ++# CONFIG_SPI_SC18IS602 is not set ++# CONFIG_SPI_SIFIVE is not set ++# CONFIG_SPI_MXIC is not set ++# CONFIG_SPI_XCOMM is not set ++# CONFIG_SPI_XILINX is not set ++# CONFIG_SPI_ZYNQMP_GQSPI is not set ++ ++# ++# SPI Protocol Masters ++# ++# CONFIG_SPI_SPIDEV is not set ++# CONFIG_SPI_LOOPBACK_TEST is not set ++# CONFIG_SPI_TLE62X0 is not set ++# CONFIG_SPI_SLAVE is not set ++# CONFIG_SPMI is not set ++# CONFIG_HSI is not set ++CONFIG_PPS=y ++# CONFIG_PPS_DEBUG is not set ++ ++# ++# PPS clients 
support ++# ++# CONFIG_PPS_CLIENT_KTIMER is not set ++# CONFIG_PPS_CLIENT_LDISC is not set ++# CONFIG_PPS_CLIENT_GPIO is not set ++ ++# ++# PPS generators support ++# ++ ++# ++# PTP clock support ++# ++CONFIG_PTP_1588_CLOCK=y ++ ++# ++# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. ++# ++# CONFIG_PTP_INTEL_TGPIO is not set ++# CONFIG_PTP_INTEL_PMC_TGPIO is not set ++# end of PTP clock support ++ ++CONFIG_PINCTRL=y ++CONFIG_PINMUX=y ++CONFIG_PINCONF=y ++CONFIG_GENERIC_PINCONF=y ++# CONFIG_DEBUG_PINCTRL is not set ++# CONFIG_PINCTRL_AMD is not set ++# CONFIG_PINCTRL_MCP23S08 is not set ++# CONFIG_PINCTRL_SX150X is not set ++CONFIG_PINCTRL_BAYTRAIL=y ++CONFIG_PINCTRL_CHERRYVIEW=y ++CONFIG_PINCTRL_INTEL=y ++CONFIG_PINCTRL_BROXTON=y ++# CONFIG_PINCTRL_CANNONLAKE is not set ++# CONFIG_PINCTRL_CEDARFORK is not set ++# CONFIG_PINCTRL_DENVERTON is not set ++# CONFIG_PINCTRL_ELKHARTLAKE is not set ++# CONFIG_PINCTRL_GEMINILAKE is not set ++# CONFIG_PINCTRL_ICELAKE is not set ++# CONFIG_PINCTRL_LEWISBURG is not set ++# CONFIG_PINCTRL_SUNRISEPOINT is not set ++# CONFIG_PINCTRL_TIGERLAKE is not set ++# CONFIG_PINCTRL_WHITLEY is not set ++CONFIG_GPIOLIB=y ++CONFIG_GPIOLIB_FASTPATH_LIMIT=512 ++CONFIG_GPIO_ACPI=y ++CONFIG_GPIOLIB_IRQCHIP=y ++# CONFIG_DEBUG_GPIO is not set ++CONFIG_GPIO_SYSFS=y ++ ++# ++# Memory mapped GPIO drivers ++# ++# CONFIG_GPIO_AMDPT is not set ++# CONFIG_GPIO_DWAPB is not set ++# CONFIG_GPIO_EXAR is not set ++# CONFIG_GPIO_GENERIC_PLATFORM is not set ++# CONFIG_GPIO_ICH is not set ++CONFIG_GPIO_LYNXPOINT=y ++# CONFIG_GPIO_MB86S7X is not set ++# CONFIG_GPIO_VX855 is not set ++# CONFIG_GPIO_XILINX is not set ++# CONFIG_GPIO_AMD_FCH is not set ++# end of Memory mapped GPIO drivers ++ ++# ++# Port-mapped I/O GPIO drivers ++# ++# CONFIG_GPIO_F7188X is not set ++# CONFIG_GPIO_IT87 is not set ++# CONFIG_GPIO_SCH is not set ++# CONFIG_GPIO_SCH311X is not set ++# CONFIG_GPIO_WINBOND is not set ++# CONFIG_GPIO_WS16C48 is not set ++# end of 
Port-mapped I/O GPIO drivers ++ ++# ++# I2C GPIO expanders ++# ++# CONFIG_GPIO_ADP5588 is not set ++# CONFIG_GPIO_MAX7300 is not set ++# CONFIG_GPIO_MAX732X is not set ++# CONFIG_GPIO_PCA953X is not set ++# CONFIG_GPIO_PCF857X is not set ++# CONFIG_GPIO_TPIC2810 is not set ++# end of I2C GPIO expanders ++ ++# ++# MFD GPIO expanders ++# ++CONFIG_GPIO_ARIZONA=y ++# CONFIG_GPIO_CRYSTAL_COVE is not set ++CONFIG_GPIO_WHISKEY_COVE=y ++# end of MFD GPIO expanders ++ ++# ++# PCI GPIO expanders ++# ++# CONFIG_GPIO_AMD8111 is not set ++# CONFIG_GPIO_BT8XX is not set ++# CONFIG_GPIO_ML_IOH is not set ++# CONFIG_GPIO_PCI_IDIO_16 is not set ++# CONFIG_GPIO_PCIE_IDIO_24 is not set ++# CONFIG_GPIO_RDC321X is not set ++# end of PCI GPIO expanders ++ ++# ++# SPI GPIO expanders ++# ++# CONFIG_GPIO_MAX3191X is not set ++# CONFIG_GPIO_MAX7301 is not set ++# CONFIG_GPIO_MC33880 is not set ++# CONFIG_GPIO_PISOSR is not set ++# CONFIG_GPIO_XRA1403 is not set ++# end of SPI GPIO expanders ++ ++# ++# USB GPIO expanders ++# ++# end of USB GPIO expanders ++ ++# CONFIG_GPIO_MOCKUP is not set ++# CONFIG_W1 is not set ++# CONFIG_POWER_AVS is not set ++# CONFIG_POWER_RESET is not set ++CONFIG_POWER_SUPPLY=y ++# CONFIG_POWER_SUPPLY_DEBUG is not set ++CONFIG_POWER_SUPPLY_HWMON=y ++# CONFIG_PDA_POWER is not set ++CONFIG_GENERIC_ADC_BATTERY=m ++# CONFIG_TEST_POWER is not set ++# CONFIG_CHARGER_ADP5061 is not set ++# CONFIG_BATTERY_DS2780 is not set ++# CONFIG_BATTERY_DS2781 is not set ++# CONFIG_BATTERY_DS2782 is not set ++# CONFIG_BATTERY_SBS is not set ++# CONFIG_CHARGER_SBS is not set ++# CONFIG_MANAGER_SBS is not set ++# CONFIG_BATTERY_BQ27XXX is not set ++# CONFIG_BATTERY_MAX17040 is not set ++CONFIG_BATTERY_MAX17042=y ++CONFIG_CHARGER_ISP1704=m ++# CONFIG_CHARGER_MAX8903 is not set ++# CONFIG_CHARGER_LP8727 is not set ++# CONFIG_CHARGER_GPIO is not set ++# CONFIG_CHARGER_MANAGER is not set ++# CONFIG_CHARGER_LT3651 is not set ++# CONFIG_CHARGER_BQ2415X is not set ++# CONFIG_CHARGER_BQ24190 is 
not set ++# CONFIG_CHARGER_BQ24257 is not set ++# CONFIG_CHARGER_BQ24735 is not set ++CONFIG_CHARGER_BQ25890=y ++CONFIG_CHARGER_SMB347=y ++# CONFIG_BATTERY_GAUGE_LTC2941 is not set ++# CONFIG_CHARGER_RT9455 is not set ++# CONFIG_CHARGER_WCOVE is not set ++CONFIG_HWMON=y ++# CONFIG_HWMON_DEBUG_CHIP is not set ++ ++# ++# Native drivers ++# ++# CONFIG_SENSORS_ABITUGURU is not set ++# CONFIG_SENSORS_ABITUGURU3 is not set ++# CONFIG_SENSORS_AD7314 is not set ++# CONFIG_SENSORS_AD7414 is not set ++# CONFIG_SENSORS_AD7418 is not set ++# CONFIG_SENSORS_ADM1021 is not set ++# CONFIG_SENSORS_ADM1025 is not set ++# CONFIG_SENSORS_ADM1026 is not set ++# CONFIG_SENSORS_ADM1029 is not set ++# CONFIG_SENSORS_ADM1031 is not set ++# CONFIG_SENSORS_ADM9240 is not set ++# CONFIG_SENSORS_ADT7310 is not set ++# CONFIG_SENSORS_ADT7410 is not set ++# CONFIG_SENSORS_ADT7411 is not set ++# CONFIG_SENSORS_ADT7462 is not set ++# CONFIG_SENSORS_ADT7470 is not set ++# CONFIG_SENSORS_ADT7475 is not set ++# CONFIG_SENSORS_AS370 is not set ++# CONFIG_SENSORS_ASC7621 is not set ++# CONFIG_SENSORS_K8TEMP is not set ++# CONFIG_SENSORS_K10TEMP is not set ++# CONFIG_SENSORS_FAM15H_POWER is not set ++# CONFIG_SENSORS_APPLESMC is not set ++# CONFIG_SENSORS_ASB100 is not set ++# CONFIG_SENSORS_ASPEED is not set ++# CONFIG_SENSORS_ATXP1 is not set ++# CONFIG_SENSORS_DS620 is not set ++# CONFIG_SENSORS_DS1621 is not set ++CONFIG_SENSORS_DELL_SMM=m ++# CONFIG_SENSORS_I5K_AMB is not set ++# CONFIG_SENSORS_F71805F is not set ++# CONFIG_SENSORS_F71882FG is not set ++# CONFIG_SENSORS_F75375S is not set ++# CONFIG_SENSORS_FSCHMD is not set ++# CONFIG_SENSORS_FTSTEUTATES is not set ++# CONFIG_SENSORS_GL518SM is not set ++# CONFIG_SENSORS_GL520SM is not set ++# CONFIG_SENSORS_G760A is not set ++# CONFIG_SENSORS_G762 is not set ++# CONFIG_SENSORS_HIH6130 is not set ++CONFIG_SENSORS_IIO_HWMON=y ++# CONFIG_SENSORS_I5500 is not set ++CONFIG_SENSORS_CORETEMP=y ++# CONFIG_SENSORS_IT87 is not set ++# CONFIG_SENSORS_JC42 
is not set ++# CONFIG_SENSORS_POWR1220 is not set ++# CONFIG_SENSORS_LINEAGE is not set ++# CONFIG_SENSORS_LTC2945 is not set ++# CONFIG_SENSORS_LTC2990 is not set ++# CONFIG_SENSORS_LTC4151 is not set ++# CONFIG_SENSORS_LTC4215 is not set ++# CONFIG_SENSORS_LTC4222 is not set ++# CONFIG_SENSORS_LTC4245 is not set ++# CONFIG_SENSORS_LTC4260 is not set ++# CONFIG_SENSORS_LTC4261 is not set ++# CONFIG_SENSORS_MAX1111 is not set ++# CONFIG_SENSORS_MAX16065 is not set ++# CONFIG_SENSORS_MAX1619 is not set ++# CONFIG_SENSORS_MAX1668 is not set ++# CONFIG_SENSORS_MAX197 is not set ++# CONFIG_SENSORS_MAX31722 is not set ++# CONFIG_SENSORS_MAX6621 is not set ++# CONFIG_SENSORS_MAX6639 is not set ++# CONFIG_SENSORS_MAX6642 is not set ++# CONFIG_SENSORS_MAX6650 is not set ++# CONFIG_SENSORS_MAX6697 is not set ++# CONFIG_SENSORS_MAX31790 is not set ++# CONFIG_SENSORS_MCP3021 is not set ++# CONFIG_SENSORS_TC654 is not set ++# CONFIG_SENSORS_ADCXX is not set ++# CONFIG_SENSORS_LM63 is not set ++# CONFIG_SENSORS_LM70 is not set ++# CONFIG_SENSORS_LM73 is not set ++# CONFIG_SENSORS_LM75 is not set ++# CONFIG_SENSORS_LM77 is not set ++# CONFIG_SENSORS_LM78 is not set ++# CONFIG_SENSORS_LM80 is not set ++# CONFIG_SENSORS_LM83 is not set ++# CONFIG_SENSORS_LM85 is not set ++# CONFIG_SENSORS_LM87 is not set ++# CONFIG_SENSORS_LM90 is not set ++# CONFIG_SENSORS_LM92 is not set ++# CONFIG_SENSORS_LM93 is not set ++# CONFIG_SENSORS_LM95234 is not set ++# CONFIG_SENSORS_LM95241 is not set ++# CONFIG_SENSORS_LM95245 is not set ++# CONFIG_SENSORS_PC87360 is not set ++# CONFIG_SENSORS_PC87427 is not set ++# CONFIG_SENSORS_NTC_THERMISTOR is not set ++# CONFIG_SENSORS_NCT6683 is not set ++# CONFIG_SENSORS_NCT6775 is not set ++# CONFIG_SENSORS_NCT7802 is not set ++# CONFIG_SENSORS_NCT7904 is not set ++# CONFIG_SENSORS_NPCM7XX is not set ++# CONFIG_SENSORS_PCF8591 is not set ++# CONFIG_PMBUS is not set ++# CONFIG_SENSORS_SHT15 is not set ++# CONFIG_SENSORS_SHT21 is not set ++# 
CONFIG_SENSORS_SHT3x is not set ++# CONFIG_SENSORS_SHTC1 is not set ++# CONFIG_SENSORS_SIS5595 is not set ++# CONFIG_SENSORS_DME1737 is not set ++# CONFIG_SENSORS_EMC1403 is not set ++# CONFIG_SENSORS_EMC2103 is not set ++# CONFIG_SENSORS_EMC6W201 is not set ++# CONFIG_SENSORS_SMSC47M1 is not set ++# CONFIG_SENSORS_SMSC47M192 is not set ++# CONFIG_SENSORS_SMSC47B397 is not set ++CONFIG_SENSORS_SCH56XX_COMMON=m ++CONFIG_SENSORS_SCH5627=m ++CONFIG_SENSORS_SCH5636=m ++# CONFIG_SENSORS_STTS751 is not set ++# CONFIG_SENSORS_SMM665 is not set ++# CONFIG_SENSORS_ADC128D818 is not set ++# CONFIG_SENSORS_ADS7828 is not set ++# CONFIG_SENSORS_ADS7871 is not set ++# CONFIG_SENSORS_AMC6821 is not set ++# CONFIG_SENSORS_INA209 is not set ++# CONFIG_SENSORS_INA2XX is not set ++# CONFIG_SENSORS_INA3221 is not set ++# CONFIG_SENSORS_TC74 is not set ++# CONFIG_SENSORS_THMC50 is not set ++# CONFIG_SENSORS_TMP102 is not set ++# CONFIG_SENSORS_TMP103 is not set ++# CONFIG_SENSORS_TMP108 is not set ++# CONFIG_SENSORS_TMP401 is not set ++# CONFIG_SENSORS_TMP421 is not set ++# CONFIG_SENSORS_VIA_CPUTEMP is not set ++# CONFIG_SENSORS_VIA686A is not set ++# CONFIG_SENSORS_VT1211 is not set ++# CONFIG_SENSORS_VT8231 is not set ++# CONFIG_SENSORS_W83773G is not set ++# CONFIG_SENSORS_W83781D is not set ++# CONFIG_SENSORS_W83791D is not set ++# CONFIG_SENSORS_W83792D is not set ++# CONFIG_SENSORS_W83793 is not set ++# CONFIG_SENSORS_W83795 is not set ++# CONFIG_SENSORS_W83L785TS is not set ++# CONFIG_SENSORS_W83L786NG is not set ++# CONFIG_SENSORS_W83627HF is not set ++# CONFIG_SENSORS_W83627EHF is not set ++# CONFIG_SENSORS_XGENE is not set ++ ++# ++# ACPI drivers ++# ++# CONFIG_SENSORS_ACPI_POWER is not set ++# CONFIG_SENSORS_ATK0110 is not set ++CONFIG_THERMAL=y ++# CONFIG_THERMAL_STATISTICS is not set ++CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 ++CONFIG_THERMAL_HWMON=y ++CONFIG_THERMAL_WRITABLE_TRIPS=y ++CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y ++# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE 
is not set ++# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set ++# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set ++# CONFIG_THERMAL_GOV_FAIR_SHARE is not set ++CONFIG_THERMAL_GOV_STEP_WISE=y ++CONFIG_THERMAL_GOV_BANG_BANG=y ++CONFIG_THERMAL_GOV_USER_SPACE=y ++# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set ++# CONFIG_CLOCK_THERMAL is not set ++# CONFIG_DEVFREQ_THERMAL is not set ++# CONFIG_THERMAL_EMULATION is not set ++ ++# ++# Intel thermal drivers ++# ++CONFIG_INTEL_POWERCLAMP=y ++CONFIG_X86_PKG_TEMP_THERMAL=y ++CONFIG_INTEL_SOC_DTS_IOSF_CORE=y ++CONFIG_INTEL_SOC_DTS_THERMAL=m ++ ++# ++# ACPI INT340X thermal drivers ++# ++CONFIG_INT340X_THERMAL=y ++CONFIG_ACPI_THERMAL_REL=y ++# CONFIG_INT3406_THERMAL is not set ++CONFIG_PROC_THERMAL_MMIO_RAPL=y ++# end of ACPI INT340X thermal drivers ++ ++# CONFIG_INTEL_BXT_PMIC_THERMAL is not set ++# CONFIG_INTEL_PCH_THERMAL is not set ++# end of Intel thermal drivers ++ ++# CONFIG_GENERIC_ADC_THERMAL is not set ++ ++# ++# Trusty ++# ++CONFIG_TRUSTY=y ++CONFIG_TRUSTY_LOG=y ++CONFIG_TRUSTY_VIRTIO=y ++CONFIG_TRUSTY_VIRTIO_IPC=y ++CONFIG_TRUSTY_BACKUP_TIMER=m ++# end of Trusty ++ ++CONFIG_WATCHDOG=y ++CONFIG_WATCHDOG_CORE=y ++# CONFIG_WATCHDOG_NOWAYOUT is not set ++CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y ++CONFIG_WATCHDOG_OPEN_TIMEOUT=0 ++# CONFIG_WATCHDOG_SYSFS is not set ++ ++# ++# Watchdog Pretimeout Governors ++# ++# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set ++ ++# ++# Watchdog Device Drivers ++# ++# CONFIG_SOFT_WATCHDOG is not set ++# CONFIG_WDAT_WDT is not set ++# CONFIG_XILINX_WATCHDOG is not set ++# CONFIG_ZIIRAVE_WATCHDOG is not set ++# CONFIG_CADENCE_WATCHDOG is not set ++# CONFIG_DW_WATCHDOG is not set ++# CONFIG_MAX63XX_WATCHDOG is not set ++# CONFIG_ACQUIRE_WDT is not set ++# CONFIG_ADVANTECH_WDT is not set ++# CONFIG_ALIM1535_WDT is not set ++# CONFIG_ALIM7101_WDT is not set ++# CONFIG_EBC_C384_WDT is not set ++# CONFIG_F71808E_WDT is not set ++# CONFIG_SP5100_TCO is not set ++# CONFIG_SBC_FITPC2_WATCHDOG is 
not set ++# CONFIG_EUROTECH_WDT is not set ++# CONFIG_IB700_WDT is not set ++# CONFIG_IBMASR is not set ++# CONFIG_WAFER_WDT is not set ++# CONFIG_I6300ESB_WDT is not set ++# CONFIG_IE6XX_WDT is not set ++CONFIG_ITCO_WDT=y ++# CONFIG_ITCO_NO_NMI_INTR is not set ++# CONFIG_ITCO_VENDOR_SUPPORT is not set ++# CONFIG_IT8712F_WDT is not set ++# CONFIG_IT87_WDT is not set ++# CONFIG_HP_WATCHDOG is not set ++# CONFIG_SC1200_WDT is not set ++# CONFIG_PC87413_WDT is not set ++# CONFIG_NV_TCO is not set ++# CONFIG_60XX_WDT is not set ++# CONFIG_CPU5_WDT is not set ++# CONFIG_SMSC_SCH311X_WDT is not set ++# CONFIG_SMSC37B787_WDT is not set ++# CONFIG_TQMX86_WDT is not set ++# CONFIG_VIA_WDT is not set ++# CONFIG_W83627HF_WDT is not set ++# CONFIG_W83877F_WDT is not set ++# CONFIG_W83977F_WDT is not set ++# CONFIG_MACHZ_WDT is not set ++# CONFIG_SBC_EPX_C3_WATCHDOG is not set ++# CONFIG_INTEL_MEI_WDT is not set ++# CONFIG_NI903X_WDT is not set ++# CONFIG_NIC7018_WDT is not set ++# CONFIG_MEN_A21_WDT is not set ++ ++# ++# PCI-based Watchdog Cards ++# ++# CONFIG_PCIPCWATCHDOG is not set ++# CONFIG_WDTPCI is not set ++ ++# ++# USB-based Watchdog Cards ++# ++# CONFIG_USBPCWATCHDOG is not set ++CONFIG_SSB_POSSIBLE=y ++# CONFIG_SSB is not set ++CONFIG_BCMA_POSSIBLE=y ++CONFIG_BCMA=m ++CONFIG_BCMA_HOST_PCI_POSSIBLE=y ++CONFIG_BCMA_HOST_PCI=y ++# CONFIG_BCMA_HOST_SOC is not set ++CONFIG_BCMA_DRIVER_PCI=y ++# CONFIG_BCMA_DRIVER_GMAC_CMN is not set ++# CONFIG_BCMA_DRIVER_GPIO is not set ++# CONFIG_BCMA_DEBUG is not set ++ ++# ++# Multifunction device drivers ++# ++CONFIG_MFD_CORE=y ++# CONFIG_MFD_AS3711 is not set ++# CONFIG_PMIC_ADP5520 is not set ++# CONFIG_MFD_AAT2870_CORE is not set ++# CONFIG_MFD_BCM590XX is not set ++# CONFIG_MFD_BD9571MWV is not set ++# CONFIG_MFD_AXP20X_I2C is not set ++# CONFIG_MFD_MADERA is not set ++# CONFIG_PMIC_DA903X is not set ++# CONFIG_MFD_DA9052_SPI is not set ++# CONFIG_MFD_DA9052_I2C is not set ++# CONFIG_MFD_DA9055 is not set ++# CONFIG_MFD_DA9062 
is not set ++# CONFIG_MFD_DA9063 is not set ++# CONFIG_MFD_DA9150 is not set ++# CONFIG_MFD_DLN2 is not set ++# CONFIG_MFD_MC13XXX_SPI is not set ++# CONFIG_MFD_MC13XXX_I2C is not set ++# CONFIG_HTC_PASIC3 is not set ++# CONFIG_HTC_I2CPLD is not set ++# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set ++CONFIG_LPC_ICH=y ++CONFIG_LPC_SCH=y ++CONFIG_INTEL_SOC_PMIC=y ++CONFIG_INTEL_SOC_PMIC_BXTWC=y ++# CONFIG_INTEL_SOC_PMIC_CHTWC is not set ++# CONFIG_INTEL_SOC_PMIC_CHTDC_TI is not set ++CONFIG_MFD_INTEL_LPSS=y ++CONFIG_MFD_INTEL_LPSS_ACPI=y ++CONFIG_MFD_INTEL_LPSS_PCI=y ++# CONFIG_MFD_JANZ_CMODIO is not set ++# CONFIG_MFD_KEMPLD is not set ++# CONFIG_MFD_88PM800 is not set ++# CONFIG_MFD_88PM805 is not set ++# CONFIG_MFD_88PM860X is not set ++# CONFIG_MFD_MAX14577 is not set ++# CONFIG_MFD_MAX77693 is not set ++# CONFIG_MFD_MAX77843 is not set ++# CONFIG_MFD_MAX8907 is not set ++# CONFIG_MFD_MAX8925 is not set ++# CONFIG_MFD_MAX8997 is not set ++# CONFIG_MFD_MAX8998 is not set ++# CONFIG_MFD_MT6397 is not set ++# CONFIG_MFD_MENF21BMC is not set ++# CONFIG_EZX_PCAP is not set ++# CONFIG_MFD_VIPERBOARD is not set ++# CONFIG_MFD_RETU is not set ++# CONFIG_MFD_PCF50633 is not set ++# CONFIG_MFD_RDC321X is not set ++# CONFIG_MFD_RT5033 is not set ++# CONFIG_MFD_RC5T583 is not set ++# CONFIG_MFD_SEC_CORE is not set ++# CONFIG_MFD_SI476X_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_MFD_SKY81452 is not set ++# CONFIG_MFD_SMSC is not set ++# CONFIG_ABX500_CORE is not set ++# CONFIG_MFD_SYSCON is not set ++# CONFIG_MFD_TI_AM335X_TSCADC is not set ++# CONFIG_MFD_LP3943 is not set ++# CONFIG_MFD_LP8788 is not set ++# CONFIG_MFD_TI_LMU is not set ++# CONFIG_MFD_PALMAS is not set ++# CONFIG_TPS6105X is not set ++# CONFIG_TPS65010 is not set ++# CONFIG_TPS6507X is not set ++# CONFIG_MFD_TPS65086 is not set ++# CONFIG_MFD_TPS65090 is not set ++# CONFIG_MFD_TPS68470 is not set ++# CONFIG_MFD_TI_LP873X is not set ++# CONFIG_MFD_TPS6586X is not set ++# CONFIG_MFD_TPS65910 is 
not set ++# CONFIG_MFD_TPS65912_I2C is not set ++# CONFIG_MFD_TPS65912_SPI is not set ++# CONFIG_MFD_TPS80031 is not set ++# CONFIG_TWL4030_CORE is not set ++# CONFIG_TWL6040_CORE is not set ++# CONFIG_MFD_WL1273_CORE is not set ++# CONFIG_MFD_LM3533 is not set ++# CONFIG_MFD_TQMX86 is not set ++# CONFIG_MFD_VX855 is not set ++CONFIG_MFD_ARIZONA=y ++CONFIG_MFD_ARIZONA_I2C=m ++# CONFIG_MFD_ARIZONA_SPI is not set ++# CONFIG_MFD_CS47L24 is not set ++# CONFIG_MFD_WM5102 is not set ++CONFIG_MFD_WM5110=y ++# CONFIG_MFD_WM8997 is not set ++CONFIG_MFD_WM8998=y ++# CONFIG_MFD_WM8400 is not set ++# CONFIG_MFD_WM831X_I2C is not set ++# CONFIG_MFD_WM831X_SPI is not set ++# CONFIG_MFD_WM8350_I2C is not set ++# CONFIG_MFD_WM8994 is not set ++# end of Multifunction device drivers ++ ++CONFIG_REGULATOR=y ++# CONFIG_REGULATOR_DEBUG is not set ++CONFIG_REGULATOR_FIXED_VOLTAGE=y ++# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set ++# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set ++# CONFIG_REGULATOR_88PG86X is not set ++# CONFIG_REGULATOR_ACT8865 is not set ++# CONFIG_REGULATOR_AD5398 is not set ++# CONFIG_REGULATOR_ARIZONA_LDO1 is not set ++# CONFIG_REGULATOR_ARIZONA_MICSUPP is not set ++# CONFIG_REGULATOR_DA9210 is not set ++# CONFIG_REGULATOR_DA9211 is not set ++# CONFIG_REGULATOR_FAN53555 is not set ++CONFIG_REGULATOR_GPIO=y ++# CONFIG_REGULATOR_ISL9305 is not set ++# CONFIG_REGULATOR_ISL6271A is not set ++# CONFIG_REGULATOR_LP3971 is not set ++# CONFIG_REGULATOR_LP3972 is not set ++# CONFIG_REGULATOR_LP872X is not set ++# CONFIG_REGULATOR_LP8755 is not set ++# CONFIG_REGULATOR_LTC3589 is not set ++# CONFIG_REGULATOR_LTC3676 is not set ++# CONFIG_REGULATOR_MAX1586 is not set ++# CONFIG_REGULATOR_MAX8649 is not set ++# CONFIG_REGULATOR_MAX8660 is not set ++# CONFIG_REGULATOR_MAX8952 is not set ++# CONFIG_REGULATOR_MT6311 is not set ++# CONFIG_REGULATOR_PFUZE100 is not set ++# CONFIG_REGULATOR_PV88060 is not set ++# CONFIG_REGULATOR_PV88080 is not set ++# CONFIG_REGULATOR_PV88090 is 
not set ++# CONFIG_REGULATOR_PWM is not set ++# CONFIG_REGULATOR_SLG51000 is not set ++# CONFIG_REGULATOR_TPS51632 is not set ++# CONFIG_REGULATOR_TPS62360 is not set ++# CONFIG_REGULATOR_TPS65023 is not set ++# CONFIG_REGULATOR_TPS6507X is not set ++# CONFIG_REGULATOR_TPS65132 is not set ++# CONFIG_REGULATOR_TPS6524X is not set ++CONFIG_RC_CORE=y ++CONFIG_RC_MAP=y ++# CONFIG_LIRC is not set ++CONFIG_RC_DECODERS=y ++CONFIG_IR_NEC_DECODER=y ++CONFIG_IR_RC5_DECODER=y ++CONFIG_IR_RC6_DECODER=y ++CONFIG_IR_JVC_DECODER=y ++CONFIG_IR_SONY_DECODER=y ++CONFIG_IR_SANYO_DECODER=y ++CONFIG_IR_SHARP_DECODER=y ++CONFIG_IR_MCE_KBD_DECODER=y ++CONFIG_IR_XMP_DECODER=y ++# CONFIG_IR_IMON_DECODER is not set ++# CONFIG_IR_RCMM_DECODER is not set ++# CONFIG_RC_DEVICES is not set ++CONFIG_MEDIA_SUPPORT=y ++ ++# ++# Multimedia core support ++# ++CONFIG_MEDIA_CAMERA_SUPPORT=y ++# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set ++# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set ++CONFIG_MEDIA_RADIO_SUPPORT=y ++# CONFIG_MEDIA_SDR_SUPPORT is not set ++# CONFIG_MEDIA_CEC_SUPPORT is not set ++CONFIG_MEDIA_CONTROLLER=y ++CONFIG_VIDEO_DEV=y ++CONFIG_VIDEO_V4L2_SUBDEV_API=y ++CONFIG_VIDEO_V4L2=y ++CONFIG_VIDEO_V4L2_I2C=y ++# CONFIG_VIDEO_ADV_DEBUG is not set ++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set ++CONFIG_V4L2_FWNODE=m ++ ++# ++# Media drivers ++# ++CONFIG_MEDIA_USB_SUPPORT=y ++ ++# ++# Webcam devices ++# ++CONFIG_USB_VIDEO_CLASS=y ++CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y ++# CONFIG_USB_GSPCA is not set ++# CONFIG_USB_PWC is not set ++# CONFIG_VIDEO_CPIA2 is not set ++# CONFIG_USB_ZR364XX is not set ++# CONFIG_USB_STKWEBCAM is not set ++# CONFIG_USB_S2255 is not set ++# CONFIG_VIDEO_USBTV is not set ++ ++# ++# Webcam, TV (analog/digital) USB devices ++# ++# CONFIG_VIDEO_EM28XX is not set ++CONFIG_MEDIA_PCI_SUPPORT=y ++ ++# ++# Media capture support ++# ++# CONFIG_VIDEO_SOLO6X10 is not set ++# CONFIG_VIDEO_TW5864 is not set ++# CONFIG_VIDEO_TW68 is not set ++# CONFIG_VIDEO_TW686X is not set ++# 
CONFIG_VIDEO_IPU3_CIO2 is not set ++CONFIG_V4L_PLATFORM_DRIVERS=y ++# CONFIG_VIDEO_CAFE_CCIC is not set ++# CONFIG_VIDEO_CADENCE is not set ++# CONFIG_VIDEO_ASPEED is not set ++# CONFIG_V4L_MEM2MEM_DRIVERS is not set ++# CONFIG_V4L_TEST_DRIVERS is not set ++ ++# ++# Supported MMC/SDIO adapters ++# ++CONFIG_RADIO_ADAPTERS=y ++# CONFIG_RADIO_SI470X is not set ++# CONFIG_RADIO_SI4713 is not set ++# CONFIG_USB_MR800 is not set ++# CONFIG_USB_DSBR is not set ++# CONFIG_RADIO_MAXIRADIO is not set ++# CONFIG_RADIO_SHARK is not set ++# CONFIG_RADIO_SHARK2 is not set ++# CONFIG_USB_KEENE is not set ++# CONFIG_USB_RAREMONO is not set ++# CONFIG_USB_MA901 is not set ++# CONFIG_RADIO_TEA5764 is not set ++# CONFIG_RADIO_SAA7706H is not set ++# CONFIG_RADIO_TEF6862 is not set ++# CONFIG_RADIO_WL1273 is not set ++ ++# ++# Texas Instruments WL128x FM driver (ST based) ++# ++# end of Texas Instruments WL128x FM driver (ST based) ++ ++# CONFIG_CYPRESS_FIRMWARE is not set ++CONFIG_VIDEOBUF2_CORE=y ++CONFIG_VIDEOBUF2_V4L2=y ++CONFIG_VIDEOBUF2_MEMOPS=y ++CONFIG_VIDEOBUF2_VMALLOC=y ++CONFIG_VIDEOBUF2_DMA_SG=m ++ ++# ++# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) ++# ++# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set ++CONFIG_MEDIA_ATTACH=y ++CONFIG_VIDEO_IR_I2C=y ++ ++# ++# I2C Encoders, decoders, sensors and other helper chips ++# ++ ++# ++# Audio decoders, processors and mixers ++# ++# CONFIG_VIDEO_TVAUDIO is not set ++# CONFIG_VIDEO_TDA7432 is not set ++# CONFIG_VIDEO_TDA9840 is not set ++# CONFIG_VIDEO_TDA1997X is not set ++# CONFIG_VIDEO_TEA6415C is not set ++# CONFIG_VIDEO_TEA6420 is not set ++# CONFIG_VIDEO_MSP3400 is not set ++# CONFIG_VIDEO_CS3308 is not set ++# CONFIG_VIDEO_CS5345 is not set ++# CONFIG_VIDEO_CS53L32A is not set ++# CONFIG_VIDEO_TLV320AIC23B is not set ++# CONFIG_VIDEO_UDA1342 is not set ++# CONFIG_VIDEO_WM8775 is not set ++# CONFIG_VIDEO_WM8739 is not set ++# CONFIG_VIDEO_VP27SMPX is not set ++# CONFIG_VIDEO_SONY_BTF_MPX is not set ++ ++# ++# 
RDS decoders ++# ++# CONFIG_VIDEO_SAA6588 is not set ++ ++# ++# Video decoders ++# ++# CONFIG_VIDEO_ADV7180 is not set ++# CONFIG_VIDEO_ADV7183 is not set ++# CONFIG_VIDEO_ADV7604 is not set ++# CONFIG_VIDEO_ADV7842 is not set ++# CONFIG_VIDEO_BT819 is not set ++# CONFIG_VIDEO_BT856 is not set ++# CONFIG_VIDEO_BT866 is not set ++# CONFIG_VIDEO_KS0127 is not set ++# CONFIG_VIDEO_ML86V7667 is not set ++# CONFIG_VIDEO_SAA7110 is not set ++# CONFIG_VIDEO_SAA711X is not set ++# CONFIG_VIDEO_TC358743 is not set ++# CONFIG_VIDEO_TVP514X is not set ++# CONFIG_VIDEO_TVP5150 is not set ++# CONFIG_VIDEO_TVP7002 is not set ++# CONFIG_VIDEO_TW2804 is not set ++# CONFIG_VIDEO_TW9903 is not set ++# CONFIG_VIDEO_TW9906 is not set ++# CONFIG_VIDEO_TW9910 is not set ++# CONFIG_VIDEO_VPX3220 is not set ++ ++# ++# Video and audio decoders ++# ++# CONFIG_VIDEO_SAA717X is not set ++# CONFIG_VIDEO_CX25840 is not set ++ ++# ++# Video encoders ++# ++# CONFIG_VIDEO_SAA7127 is not set ++# CONFIG_VIDEO_SAA7185 is not set ++# CONFIG_VIDEO_ADV7170 is not set ++# CONFIG_VIDEO_ADV7175 is not set ++# CONFIG_VIDEO_ADV7343 is not set ++# CONFIG_VIDEO_ADV7393 is not set ++# CONFIG_VIDEO_ADV7511 is not set ++# CONFIG_VIDEO_AD9389B is not set ++# CONFIG_VIDEO_AK881X is not set ++# CONFIG_VIDEO_THS8200 is not set ++ ++# ++# Camera sensor devices ++# ++CONFIG_VIDEO_SMIAPP_PLL=m ++# CONFIG_VIDEO_IMX214 is not set ++# CONFIG_VIDEO_IMX258 is not set ++# CONFIG_VIDEO_IMX274 is not set ++# CONFIG_VIDEO_IMX319 is not set ++# CONFIG_VIDEO_IMX355 is not set ++# CONFIG_VIDEO_OV2640 is not set ++# CONFIG_VIDEO_OV2659 is not set ++# CONFIG_VIDEO_OV2680 is not set ++# CONFIG_VIDEO_OV2685 is not set ++# CONFIG_VIDEO_OV5647 is not set ++# CONFIG_VIDEO_OV6650 is not set ++# CONFIG_VIDEO_OV5670 is not set ++# CONFIG_VIDEO_OV5675 is not set ++# CONFIG_VIDEO_OV5695 is not set ++# CONFIG_VIDEO_OV7251 is not set ++# CONFIG_VIDEO_OV772X is not set ++# CONFIG_VIDEO_OV7640 is not set ++# CONFIG_VIDEO_OV7670 is not set ++# 
CONFIG_VIDEO_OV7740 is not set ++# CONFIG_VIDEO_OV8856 is not set ++# CONFIG_VIDEO_OV9640 is not set ++# CONFIG_VIDEO_OV9650 is not set ++# CONFIG_VIDEO_OV13858 is not set ++# CONFIG_VIDEO_VS6624 is not set ++# CONFIG_VIDEO_MT9M001 is not set ++# CONFIG_VIDEO_MT9M032 is not set ++# CONFIG_VIDEO_MT9M111 is not set ++# CONFIG_VIDEO_MT9P031 is not set ++# CONFIG_VIDEO_MT9T001 is not set ++# CONFIG_VIDEO_MT9T112 is not set ++# CONFIG_VIDEO_MT9V011 is not set ++# CONFIG_VIDEO_MT9V032 is not set ++# CONFIG_VIDEO_MT9V111 is not set ++# CONFIG_VIDEO_SR030PC30 is not set ++# CONFIG_VIDEO_NOON010PC30 is not set ++# CONFIG_VIDEO_M5MOLS is not set ++# CONFIG_VIDEO_RJ54N1 is not set ++# CONFIG_VIDEO_S5K6AA is not set ++# CONFIG_VIDEO_S5K6A3 is not set ++# CONFIG_VIDEO_S5K4ECGX is not set ++# CONFIG_VIDEO_S5K5BAF is not set ++CONFIG_VIDEO_SMIAPP=m ++# CONFIG_VIDEO_ET8EK8 is not set ++# CONFIG_VIDEO_S5C73M3 is not set ++ ++# ++# Lens drivers ++# ++# CONFIG_VIDEO_AD5820 is not set ++# CONFIG_VIDEO_AK7375 is not set ++# CONFIG_VIDEO_DW9714 is not set ++# CONFIG_VIDEO_DW9807_VCM is not set ++ ++# ++# Flash devices ++# ++# CONFIG_VIDEO_ADP1653 is not set ++# CONFIG_VIDEO_LM3560 is not set ++# CONFIG_VIDEO_LM3646 is not set ++ ++# ++# Video improvement chips ++# ++# CONFIG_VIDEO_UPD64031A is not set ++# CONFIG_VIDEO_UPD64083 is not set ++ ++# ++# Audio/Video compression chips ++# ++# CONFIG_VIDEO_SAA6752HS is not set ++ ++# ++# SDR tuner chips ++# ++ ++# ++# Miscellaneous helper chips ++# ++# CONFIG_VIDEO_THS7303 is not set ++# CONFIG_VIDEO_M52790 is not set ++# CONFIG_VIDEO_I2C is not set ++# CONFIG_VIDEO_ST_MIPID02 is not set ++# end of I2C Encoders, decoders, sensors and other helper chips ++ ++# ++# SPI helper chips ++# ++# CONFIG_VIDEO_GS1662 is not set ++# end of SPI helper chips ++ ++# ++# Media SPI Adapters ++# ++# end of Media SPI Adapters ++ ++CONFIG_MEDIA_TUNER=y ++ ++# ++# Customize TV tuners ++# ++# CONFIG_MEDIA_TUNER_SIMPLE is not set ++CONFIG_MEDIA_TUNER_TDA18250=m ++# 
CONFIG_MEDIA_TUNER_TDA8290 is not set ++# CONFIG_MEDIA_TUNER_TDA827X is not set ++# CONFIG_MEDIA_TUNER_TDA18271 is not set ++# CONFIG_MEDIA_TUNER_TDA9887 is not set ++# CONFIG_MEDIA_TUNER_TEA5761 is not set ++# CONFIG_MEDIA_TUNER_TEA5767 is not set ++# CONFIG_MEDIA_TUNER_MSI001 is not set ++# CONFIG_MEDIA_TUNER_MT20XX is not set ++# CONFIG_MEDIA_TUNER_MT2060 is not set ++# CONFIG_MEDIA_TUNER_MT2063 is not set ++# CONFIG_MEDIA_TUNER_MT2266 is not set ++# CONFIG_MEDIA_TUNER_MT2131 is not set ++# CONFIG_MEDIA_TUNER_QT1010 is not set ++# CONFIG_MEDIA_TUNER_XC2028 is not set ++# CONFIG_MEDIA_TUNER_XC5000 is not set ++# CONFIG_MEDIA_TUNER_XC4000 is not set ++# CONFIG_MEDIA_TUNER_MXL5005S is not set ++# CONFIG_MEDIA_TUNER_MXL5007T is not set ++# CONFIG_MEDIA_TUNER_MC44S803 is not set ++# CONFIG_MEDIA_TUNER_MAX2165 is not set ++# CONFIG_MEDIA_TUNER_TDA18218 is not set ++# CONFIG_MEDIA_TUNER_FC0011 is not set ++# CONFIG_MEDIA_TUNER_FC0012 is not set ++# CONFIG_MEDIA_TUNER_FC0013 is not set ++# CONFIG_MEDIA_TUNER_TDA18212 is not set ++# CONFIG_MEDIA_TUNER_E4000 is not set ++# CONFIG_MEDIA_TUNER_FC2580 is not set ++# CONFIG_MEDIA_TUNER_M88RS6000T is not set ++# CONFIG_MEDIA_TUNER_TUA9001 is not set ++# CONFIG_MEDIA_TUNER_SI2157 is not set ++# CONFIG_MEDIA_TUNER_IT913X is not set ++# CONFIG_MEDIA_TUNER_R820T is not set ++# CONFIG_MEDIA_TUNER_MXL301RF is not set ++# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set ++CONFIG_MEDIA_TUNER_QM1D1B0004=m ++# end of Customize TV tuners ++ ++# ++# Customise DVB Frontends ++# ++ ++# ++# Tools to develop new frontends ++# ++# end of Customise DVB Frontends ++ ++# ++# Graphics support ++# ++CONFIG_AGP=y ++# CONFIG_AGP_AMD64 is not set ++CONFIG_AGP_INTEL=y ++# CONFIG_AGP_SIS is not set ++# CONFIG_AGP_VIA is not set ++CONFIG_INTEL_GTT=y ++CONFIG_VGA_ARB=y ++CONFIG_VGA_ARB_MAX_GPUS=16 ++CONFIG_VGA_SWITCHEROO=y ++CONFIG_DRM=y ++CONFIG_DRM_MIPI_DSI=y ++# CONFIG_DRM_DP_AUX_CHARDEV is not set ++# CONFIG_DRM_DEBUG_MM is not set ++# 
CONFIG_DRM_DEBUG_SELFTEST is not set ++CONFIG_DRM_KMS_HELPER=y ++CONFIG_DRM_KMS_FB_HELPER=y ++CONFIG_DRM_FBDEV_EMULATION=y ++CONFIG_DRM_FBDEV_OVERALLOC=100 ++# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set ++# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set ++# CONFIG_DRM_DP_CEC is not set ++CONFIG_DRM_TTM=y ++CONFIG_DRM_VRAM_HELPER=y ++CONFIG_DRM_TTM_HELPER=y ++ ++# ++# I2C encoder or helper chips ++# ++# CONFIG_DRM_I2C_CH7006 is not set ++# CONFIG_DRM_I2C_SIL164 is not set ++# CONFIG_DRM_I2C_NXP_TDA998X is not set ++# CONFIG_DRM_I2C_NXP_TDA9950 is not set ++# end of I2C encoder or helper chips ++ ++# ++# ARM devices ++# ++# end of ARM devices ++ ++# CONFIG_DRM_RADEON is not set ++# CONFIG_DRM_AMDGPU is not set ++ ++# ++# ACP (Audio CoProcessor) Configuration ++# ++# end of ACP (Audio CoProcessor) Configuration ++ ++# CONFIG_DRM_NOUVEAU is not set ++CONFIG_DRM_I915=y ++# CONFIG_DRM_I915_ALPHA_SUPPORT is not set ++CONFIG_DRM_I915_FORCE_PROBE="" ++CONFIG_DRM_I915_CAPTURE_ERROR=y ++CONFIG_DRM_I915_COMPRESS_ERROR=y ++CONFIG_DRM_I915_USERPTR=y ++# CONFIG_DRM_I915_GVT is not set ++ ++# ++# drm/i915 Debugging ++# ++# CONFIG_DRM_I915_WERROR is not set ++# CONFIG_DRM_I915_DEBUG is not set ++# CONFIG_DRM_I915_DEBUG_MMIO is not set ++# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set ++# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set ++# CONFIG_DRM_I915_DEBUG_GUC is not set ++# CONFIG_DRM_I915_SELFTEST is not set ++# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set ++# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set ++# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set ++# end of drm/i915 Debugging ++ ++# ++# drm/i915 Profile Guided Optimisation ++# ++CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 ++CONFIG_DRM_I915_SPIN_REQUEST=5 ++# end of drm/i915 Profile Guided Optimisation ++ ++# CONFIG_DRM_VGEM is not set ++# CONFIG_DRM_VKMS is not set ++# CONFIG_DRM_VMWGFX is not set ++# CONFIG_DRM_GMA500 is not set ++# CONFIG_DRM_UDL is not set ++# CONFIG_DRM_AST is not set ++# CONFIG_DRM_MGAG200 is not set 
++# CONFIG_DRM_CIRRUS_QEMU is not set ++# CONFIG_DRM_QXL is not set ++CONFIG_DRM_BOCHS=y ++# CONFIG_DRM_VIRTIO_GPU is not set ++CONFIG_DRM_PANEL=y ++ ++# ++# Display Panels ++# ++# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set ++# end of Display Panels ++ ++CONFIG_DRM_BRIDGE=y ++CONFIG_DRM_PANEL_BRIDGE=y ++ ++# ++# Display Interface Bridges ++# ++# CONFIG_DRM_ANALOGIX_ANX78XX is not set ++# end of Display Interface Bridges ++ ++# CONFIG_DRM_ETNAVIV is not set ++# CONFIG_DRM_GM12U320 is not set ++# CONFIG_TINYDRM_HX8357D is not set ++# CONFIG_TINYDRM_ILI9225 is not set ++# CONFIG_TINYDRM_ILI9341 is not set ++# CONFIG_TINYDRM_MI0283QT is not set ++# CONFIG_TINYDRM_REPAPER is not set ++# CONFIG_TINYDRM_ST7586 is not set ++# CONFIG_TINYDRM_ST7735R is not set ++# CONFIG_DRM_VBOXVIDEO is not set ++# CONFIG_DRM_LEGACY is not set ++CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y ++ ++# ++# Frame buffer Devices ++# ++CONFIG_FB_CMDLINE=y ++CONFIG_FB_NOTIFY=y ++CONFIG_FB=y ++# CONFIG_FIRMWARE_EDID is not set ++CONFIG_FB_BOOT_VESA_SUPPORT=y ++CONFIG_FB_CFB_FILLRECT=y ++CONFIG_FB_CFB_COPYAREA=y ++CONFIG_FB_CFB_IMAGEBLIT=y ++CONFIG_FB_SYS_FILLRECT=y ++CONFIG_FB_SYS_COPYAREA=y ++CONFIG_FB_SYS_IMAGEBLIT=y ++# CONFIG_FB_FOREIGN_ENDIAN is not set ++CONFIG_FB_SYS_FOPS=y ++CONFIG_FB_DEFERRED_IO=y ++CONFIG_FB_MODE_HELPERS=y ++CONFIG_FB_TILEBLITTING=y ++ ++# ++# Frame buffer hardware drivers ++# ++# CONFIG_FB_CIRRUS is not set ++# CONFIG_FB_PM2 is not set ++# CONFIG_FB_CYBER2000 is not set ++# CONFIG_FB_ARC is not set ++# CONFIG_FB_ASILIANT is not set ++# CONFIG_FB_IMSTT is not set ++# CONFIG_FB_VGA16 is not set ++# CONFIG_FB_UVESA is not set ++CONFIG_FB_VESA=y ++CONFIG_FB_EFI=y ++# CONFIG_FB_N411 is not set ++# CONFIG_FB_HGA is not set ++# CONFIG_FB_OPENCORES is not set ++# CONFIG_FB_S1D13XXX is not set ++# CONFIG_FB_NVIDIA is not set ++# CONFIG_FB_RIVA is not set ++# CONFIG_FB_I740 is not set ++# CONFIG_FB_LE80578 is not set ++# CONFIG_FB_MATROX is not set ++# CONFIG_FB_RADEON is not set 
++# CONFIG_FB_ATY128 is not set ++# CONFIG_FB_ATY is not set ++# CONFIG_FB_S3 is not set ++# CONFIG_FB_SAVAGE is not set ++# CONFIG_FB_SIS is not set ++# CONFIG_FB_VIA is not set ++# CONFIG_FB_NEOMAGIC is not set ++# CONFIG_FB_KYRO is not set ++# CONFIG_FB_3DFX is not set ++# CONFIG_FB_VOODOO1 is not set ++# CONFIG_FB_VT8623 is not set ++# CONFIG_FB_TRIDENT is not set ++# CONFIG_FB_ARK is not set ++# CONFIG_FB_PM3 is not set ++# CONFIG_FB_CARMINE is not set ++# CONFIG_FB_SMSCUFX is not set ++# CONFIG_FB_UDL is not set ++# CONFIG_FB_IBM_GXT4500 is not set ++# CONFIG_FB_VIRTUAL is not set ++# CONFIG_FB_METRONOME is not set ++# CONFIG_FB_MB862XX is not set ++CONFIG_FB_SIMPLE=y ++# CONFIG_FB_SM712 is not set ++# end of Frame buffer Devices ++ ++# ++# Backlight & LCD device support ++# ++CONFIG_LCD_CLASS_DEVICE=m ++# CONFIG_LCD_L4F00242T03 is not set ++# CONFIG_LCD_LMS283GF05 is not set ++# CONFIG_LCD_LTV350QV is not set ++# CONFIG_LCD_ILI922X is not set ++# CONFIG_LCD_ILI9320 is not set ++# CONFIG_LCD_TDO24M is not set ++# CONFIG_LCD_VGG2432A4 is not set ++CONFIG_LCD_PLATFORM=m ++# CONFIG_LCD_AMS369FG06 is not set ++# CONFIG_LCD_LMS501KF03 is not set ++# CONFIG_LCD_HX8357 is not set ++# CONFIG_LCD_OTM3225A is not set ++CONFIG_BACKLIGHT_CLASS_DEVICE=y ++CONFIG_BACKLIGHT_GENERIC=m ++# CONFIG_BACKLIGHT_PWM is not set ++# CONFIG_BACKLIGHT_APPLE is not set ++# CONFIG_BACKLIGHT_PM8941_WLED is not set ++# CONFIG_BACKLIGHT_SAHARA is not set ++# CONFIG_BACKLIGHT_ADP8860 is not set ++# CONFIG_BACKLIGHT_ADP8870 is not set ++CONFIG_BACKLIGHT_LM3630A=m ++# CONFIG_BACKLIGHT_LM3639 is not set ++# CONFIG_BACKLIGHT_LP855X is not set ++CONFIG_BACKLIGHT_GPIO=m ++CONFIG_BACKLIGHT_LV5207LP=m ++CONFIG_BACKLIGHT_BD6107=m ++# CONFIG_BACKLIGHT_ARCXCNN is not set ++# end of Backlight & LCD device support ++ ++CONFIG_HDMI=y ++ ++# ++# Console display driver support ++# ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_DUMMY_CONSOLE=y ++CONFIG_DUMMY_CONSOLE_COLUMNS=80 ++CONFIG_DUMMY_CONSOLE_ROWS=25 ++# 
CONFIG_FRAMEBUFFER_CONSOLE is not set ++# end of Console display driver support ++ ++# CONFIG_LOGO is not set ++# end of Graphics support ++ ++CONFIG_SOUND=y ++CONFIG_SOUND_OSS_CORE=y ++CONFIG_SOUND_OSS_CORE_PRECLAIM=y ++CONFIG_SND=y ++CONFIG_SND_TIMER=y ++CONFIG_SND_PCM=y ++CONFIG_SND_HWDEP=m ++CONFIG_SND_SEQ_DEVICE=y ++CONFIG_SND_RAWMIDI=y ++CONFIG_SND_COMPRESS_OFFLOAD=y ++CONFIG_SND_JACK=y ++CONFIG_SND_JACK_INPUT_DEV=y ++CONFIG_SND_OSSEMUL=y ++CONFIG_SND_MIXER_OSS=m ++CONFIG_SND_PCM_OSS=m ++CONFIG_SND_PCM_OSS_PLUGINS=y ++CONFIG_SND_PCM_TIMER=y ++CONFIG_SND_HRTIMER=y ++CONFIG_SND_DYNAMIC_MINORS=y ++CONFIG_SND_MAX_CARDS=32 ++CONFIG_SND_SUPPORT_OLD_API=y ++CONFIG_SND_PROC_FS=y ++CONFIG_SND_VERBOSE_PROCFS=y ++CONFIG_SND_VERBOSE_PRINTK=y ++# CONFIG_SND_DEBUG is not set ++CONFIG_SND_DMA_SGBUF=y ++CONFIG_SND_SEQUENCER=m ++# CONFIG_SND_SEQ_DUMMY is not set ++# CONFIG_SND_SEQUENCER_OSS is not set ++CONFIG_SND_SEQ_HRTIMER_DEFAULT=y ++CONFIG_SND_SEQ_MIDI_EVENT=m ++CONFIG_SND_SEQ_MIDI=m ++CONFIG_SND_DRIVERS=y ++# CONFIG_SND_PCSP is not set ++CONFIG_SND_DUMMY=m ++# CONFIG_SND_ALOOP is not set ++# CONFIG_SND_VIRMIDI is not set ++# CONFIG_SND_MTPAV is not set ++# CONFIG_SND_SERIAL_U16550 is not set ++# CONFIG_SND_MPU401 is not set ++CONFIG_SND_PCI=y ++# CONFIG_SND_AD1889 is not set ++# CONFIG_SND_ALS300 is not set ++# CONFIG_SND_ALS4000 is not set ++# CONFIG_SND_ALI5451 is not set ++# CONFIG_SND_ASIHPI is not set ++# CONFIG_SND_ATIIXP is not set ++# CONFIG_SND_ATIIXP_MODEM is not set ++# CONFIG_SND_AU8810 is not set ++# CONFIG_SND_AU8820 is not set ++# CONFIG_SND_AU8830 is not set ++# CONFIG_SND_AW2 is not set ++# CONFIG_SND_AZT3328 is not set ++# CONFIG_SND_BT87X is not set ++# CONFIG_SND_CA0106 is not set ++# CONFIG_SND_CMIPCI is not set ++# CONFIG_SND_OXYGEN is not set ++# CONFIG_SND_CS4281 is not set ++# CONFIG_SND_CS46XX is not set ++# CONFIG_SND_CTXFI is not set ++# CONFIG_SND_DARLA20 is not set ++# CONFIG_SND_GINA20 is not set ++# CONFIG_SND_LAYLA20 is not set ++# 
CONFIG_SND_DARLA24 is not set ++# CONFIG_SND_GINA24 is not set ++# CONFIG_SND_LAYLA24 is not set ++# CONFIG_SND_MONA is not set ++# CONFIG_SND_MIA is not set ++# CONFIG_SND_ECHO3G is not set ++# CONFIG_SND_INDIGO is not set ++# CONFIG_SND_INDIGOIO is not set ++# CONFIG_SND_INDIGODJ is not set ++# CONFIG_SND_INDIGOIOX is not set ++# CONFIG_SND_INDIGODJX is not set ++# CONFIG_SND_EMU10K1 is not set ++# CONFIG_SND_EMU10K1X is not set ++# CONFIG_SND_ENS1370 is not set ++# CONFIG_SND_ENS1371 is not set ++# CONFIG_SND_ES1938 is not set ++# CONFIG_SND_ES1968 is not set ++# CONFIG_SND_FM801 is not set ++# CONFIG_SND_HDSP is not set ++# CONFIG_SND_HDSPM is not set ++# CONFIG_SND_ICE1712 is not set ++# CONFIG_SND_ICE1724 is not set ++# CONFIG_SND_INTEL8X0 is not set ++# CONFIG_SND_INTEL8X0M is not set ++# CONFIG_SND_KORG1212 is not set ++# CONFIG_SND_LOLA is not set ++# CONFIG_SND_LX6464ES is not set ++# CONFIG_SND_MAESTRO3 is not set ++# CONFIG_SND_MIXART is not set ++# CONFIG_SND_NM256 is not set ++# CONFIG_SND_PCXHR is not set ++# CONFIG_SND_RIPTIDE is not set ++# CONFIG_SND_RME32 is not set ++# CONFIG_SND_RME96 is not set ++# CONFIG_SND_RME9652 is not set ++# CONFIG_SND_SE6X is not set ++# CONFIG_SND_SONICVIBES is not set ++# CONFIG_SND_TRIDENT is not set ++# CONFIG_SND_VIA82XX is not set ++# CONFIG_SND_VIA82XX_MODEM is not set ++# CONFIG_SND_VIRTUOSO is not set ++# CONFIG_SND_VX222 is not set ++# CONFIG_SND_YMFPCI is not set ++ ++# ++# HD-Audio ++# ++# CONFIG_SND_HDA_INTEL is not set ++# end of HD-Audio ++ ++CONFIG_SND_HDA_CORE=m ++CONFIG_SND_HDA_DSP_LOADER=y ++CONFIG_SND_HDA_COMPONENT=y ++CONFIG_SND_HDA_I915=y ++CONFIG_SND_HDA_EXT_CORE=m ++CONFIG_SND_HDA_PREALLOC_SIZE=64 ++CONFIG_SND_INTEL_NHLT=m ++# CONFIG_SND_SPI is not set ++CONFIG_SND_USB=y ++CONFIG_SND_USB_AUDIO=m ++CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y ++# CONFIG_SND_USB_UA101 is not set ++# CONFIG_SND_USB_USX2Y is not set ++# CONFIG_SND_USB_CAIAQ is not set ++# CONFIG_SND_USB_US122L is not set ++# 
CONFIG_SND_USB_6FIRE is not set ++CONFIG_SND_USB_HIFACE=m ++# CONFIG_SND_BCD2000 is not set ++# CONFIG_SND_USB_POD is not set ++# CONFIG_SND_USB_PODHD is not set ++# CONFIG_SND_USB_TONEPORT is not set ++# CONFIG_SND_USB_VARIAX is not set ++CONFIG_SND_SOC=y ++CONFIG_SND_SOC_COMPRESS=y ++CONFIG_SND_SOC_TOPOLOGY=y ++CONFIG_SND_SOC_ACPI=y ++# CONFIG_SND_SOC_AMD_ACP is not set ++# CONFIG_SND_SOC_AMD_ACP3x is not set ++# CONFIG_SND_ATMEL_SOC is not set ++# CONFIG_SND_DESIGNWARE_I2S is not set ++ ++# ++# SoC Audio for Freescale CPUs ++# ++ ++# ++# Common SoC Audio options for Freescale CPUs: ++# ++# CONFIG_SND_SOC_FSL_ASRC is not set ++# CONFIG_SND_SOC_FSL_SAI is not set ++# CONFIG_SND_SOC_FSL_AUDMIX is not set ++# CONFIG_SND_SOC_FSL_SSI is not set ++# CONFIG_SND_SOC_FSL_SPDIF is not set ++# CONFIG_SND_SOC_FSL_ESAI is not set ++# CONFIG_SND_SOC_FSL_MICFIL is not set ++# CONFIG_SND_SOC_IMX_AUDMUX is not set ++# end of SoC Audio for Freescale CPUs ++ ++# CONFIG_SND_I2S_HI6210_I2S is not set ++# CONFIG_SND_SOC_IMG is not set ++CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y ++CONFIG_SND_SST_IPC=y ++CONFIG_SND_SST_IPC_ACPI=y ++CONFIG_SND_SOC_INTEL_SST=m ++# CONFIG_SND_SOC_INTEL_HASWELL is not set ++CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=y ++# CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI is not set ++CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=y ++CONFIG_SND_SOC_INTEL_SKYLAKE=m ++CONFIG_SND_SOC_INTEL_SKL=m ++CONFIG_SND_SOC_INTEL_APL=m ++CONFIG_SND_SOC_INTEL_KBL=m ++CONFIG_SND_SOC_INTEL_GLK=m ++CONFIG_SND_SOC_INTEL_CNL=m ++CONFIG_SND_SOC_INTEL_CFL=m ++# CONFIG_SND_SOC_INTEL_CML_H is not set ++# CONFIG_SND_SOC_INTEL_CML_LP is not set ++CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m ++# CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC is not set ++CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m ++CONFIG_SND_SOC_ACPI_INTEL_MATCH=y ++CONFIG_SND_SOC_INTEL_MACH=y ++# CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH is not set ++# CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH is not set ++# CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH is not set ++# 
CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH is not set ++# CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH is not set ++# CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH is not set ++# CONFIG_SND_SOC_INTEL_BYT_CHT_CX2072X_MACH is not set ++# CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH is not set ++# CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH is not set ++# CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH is not set ++# CONFIG_SND_SOC_INTEL_SKL_RT286_MACH is not set ++# CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH is not set ++# CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH is not set ++# CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBLR_RT298_MACH is not set ++# CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH is not set ++# CONFIG_SND_SOC_INTEL_CNL_RT274_MACH is not set ++# CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH is not set ++# CONFIG_SND_SOC_INTEL_SSP_TEST_MACH is not set ++# CONFIG_SND_SOC_MTK_BTCVSD is not set ++# CONFIG_SND_SOC_SOF_TOPLEVEL is not set ++ ++# ++# STMicroelectronics STM32 SOC audio support ++# ++# end of STMicroelectronics STM32 SOC audio support ++ ++# CONFIG_SND_SOC_XILINX_I2S is not set ++# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set ++# CONFIG_SND_SOC_XILINX_SPDIF is not set ++# CONFIG_SND_SOC_XTFPGA_I2S is not set ++# CONFIG_ZX_TDM is not set ++CONFIG_SND_SOC_I2C_AND_SPI=y ++ ++# ++# CODEC drivers ++# ++# CONFIG_SND_SOC_AC97_CODEC is not set ++# CONFIG_SND_SOC_ADAU1701 is not set ++# CONFIG_SND_SOC_ADAU1761_I2C is not set ++# CONFIG_SND_SOC_ADAU1761_SPI is not set ++# CONFIG_SND_SOC_ADAU7002 is not set ++# CONFIG_SND_SOC_AK4104 is not set ++# CONFIG_SND_SOC_AK4118 is not set ++# CONFIG_SND_SOC_AK4458 is not set ++# 
CONFIG_SND_SOC_AK4554 is not set ++# CONFIG_SND_SOC_AK4613 is not set ++# CONFIG_SND_SOC_AK4642 is not set ++# CONFIG_SND_SOC_AK5386 is not set ++# CONFIG_SND_SOC_AK5558 is not set ++# CONFIG_SND_SOC_ALC5623 is not set ++# CONFIG_SND_SOC_BD28623 is not set ++# CONFIG_SND_SOC_BT_SCO is not set ++# CONFIG_SND_SOC_CS35L32 is not set ++# CONFIG_SND_SOC_CS35L33 is not set ++# CONFIG_SND_SOC_CS35L34 is not set ++# CONFIG_SND_SOC_CS35L35 is not set ++# CONFIG_SND_SOC_CS35L36 is not set ++# CONFIG_SND_SOC_CS42L42 is not set ++# CONFIG_SND_SOC_CS42L51_I2C is not set ++# CONFIG_SND_SOC_CS42L52 is not set ++# CONFIG_SND_SOC_CS42L56 is not set ++# CONFIG_SND_SOC_CS42L73 is not set ++# CONFIG_SND_SOC_CS4265 is not set ++# CONFIG_SND_SOC_CS4270 is not set ++# CONFIG_SND_SOC_CS4271_I2C is not set ++# CONFIG_SND_SOC_CS4271_SPI is not set ++# CONFIG_SND_SOC_CS42XX8_I2C is not set ++# CONFIG_SND_SOC_CS43130 is not set ++# CONFIG_SND_SOC_CS4341 is not set ++# CONFIG_SND_SOC_CS4349 is not set ++# CONFIG_SND_SOC_CS53L30 is not set ++# CONFIG_SND_SOC_CX2072X is not set ++# CONFIG_SND_SOC_DMIC is not set ++# CONFIG_SND_SOC_ES7134 is not set ++# CONFIG_SND_SOC_ES7241 is not set ++# CONFIG_SND_SOC_ES8316 is not set ++# CONFIG_SND_SOC_ES8328_I2C is not set ++# CONFIG_SND_SOC_ES8328_SPI is not set ++# CONFIG_SND_SOC_GTM601 is not set ++# CONFIG_SND_SOC_INNO_RK3036 is not set ++# CONFIG_SND_SOC_MAX98088 is not set ++# CONFIG_SND_SOC_MAX98357A is not set ++# CONFIG_SND_SOC_MAX98504 is not set ++# CONFIG_SND_SOC_MAX9867 is not set ++# CONFIG_SND_SOC_MAX98927 is not set ++# CONFIG_SND_SOC_MAX98373 is not set ++# CONFIG_SND_SOC_MAX9860 is not set ++# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set ++# CONFIG_SND_SOC_PCM1681 is not set ++# CONFIG_SND_SOC_PCM1789_I2C is not set ++# CONFIG_SND_SOC_PCM179X_I2C is not set ++# CONFIG_SND_SOC_PCM179X_SPI is not set ++# CONFIG_SND_SOC_PCM186X_I2C is not set ++# CONFIG_SND_SOC_PCM186X_SPI is not set ++# CONFIG_SND_SOC_PCM3060_I2C is not set ++# 
CONFIG_SND_SOC_PCM3060_SPI is not set ++# CONFIG_SND_SOC_PCM3168A_I2C is not set ++# CONFIG_SND_SOC_PCM3168A_SPI is not set ++# CONFIG_SND_SOC_PCM512x_I2C is not set ++# CONFIG_SND_SOC_PCM512x_SPI is not set ++# CONFIG_SND_SOC_RK3328 is not set ++# CONFIG_SND_SOC_RT5616 is not set ++# CONFIG_SND_SOC_RT5631 is not set ++# CONFIG_SND_SOC_SGTL5000 is not set ++# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set ++# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set ++# CONFIG_SND_SOC_SPDIF is not set ++# CONFIG_SND_SOC_SSM2305 is not set ++# CONFIG_SND_SOC_SSM2602_SPI is not set ++# CONFIG_SND_SOC_SSM2602_I2C is not set ++# CONFIG_SND_SOC_SSM4567 is not set ++# CONFIG_SND_SOC_STA32X is not set ++# CONFIG_SND_SOC_STA350 is not set ++# CONFIG_SND_SOC_STI_SAS is not set ++# CONFIG_SND_SOC_TAS2552 is not set ++# CONFIG_SND_SOC_TAS5086 is not set ++# CONFIG_SND_SOC_TAS571X is not set ++# CONFIG_SND_SOC_TAS5720 is not set ++# CONFIG_SND_SOC_TAS6424 is not set ++# CONFIG_SND_SOC_TDA7419 is not set ++# CONFIG_SND_SOC_TFA9879 is not set ++# CONFIG_SND_SOC_TLV320AIC23_I2C is not set ++# CONFIG_SND_SOC_TLV320AIC23_SPI is not set ++# CONFIG_SND_SOC_TLV320AIC31XX is not set ++# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set ++# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set ++# CONFIG_SND_SOC_TLV320AIC3X is not set ++# CONFIG_SND_SOC_TS3A227E is not set ++# CONFIG_SND_SOC_TSCS42XX is not set ++# CONFIG_SND_SOC_TSCS454 is not set ++# CONFIG_SND_SOC_UDA1334 is not set ++# CONFIG_SND_SOC_WM8510 is not set ++# CONFIG_SND_SOC_WM8523 is not set ++# CONFIG_SND_SOC_WM8524 is not set ++# CONFIG_SND_SOC_WM8580 is not set ++# CONFIG_SND_SOC_WM8711 is not set ++# CONFIG_SND_SOC_WM8728 is not set ++# CONFIG_SND_SOC_WM8731 is not set ++# CONFIG_SND_SOC_WM8737 is not set ++# CONFIG_SND_SOC_WM8741 is not set ++# CONFIG_SND_SOC_WM8750 is not set ++# CONFIG_SND_SOC_WM8753 is not set ++# CONFIG_SND_SOC_WM8770 is not set ++# CONFIG_SND_SOC_WM8776 is not set ++# CONFIG_SND_SOC_WM8782 is not set ++# 
CONFIG_SND_SOC_WM8804_I2C is not set ++# CONFIG_SND_SOC_WM8804_SPI is not set ++# CONFIG_SND_SOC_WM8903 is not set ++# CONFIG_SND_SOC_WM8904 is not set ++# CONFIG_SND_SOC_WM8960 is not set ++# CONFIG_SND_SOC_WM8962 is not set ++# CONFIG_SND_SOC_WM8974 is not set ++# CONFIG_SND_SOC_WM8978 is not set ++# CONFIG_SND_SOC_WM8985 is not set ++# CONFIG_SND_SOC_ZX_AUD96P22 is not set ++# CONFIG_SND_SOC_MAX9759 is not set ++# CONFIG_SND_SOC_MT6351 is not set ++# CONFIG_SND_SOC_MT6358 is not set ++# CONFIG_SND_SOC_NAU8540 is not set ++# CONFIG_SND_SOC_NAU8810 is not set ++# CONFIG_SND_SOC_NAU8822 is not set ++# CONFIG_SND_SOC_NAU8824 is not set ++# CONFIG_SND_SOC_TPA6130A2 is not set ++# end of CODEC drivers ++ ++# CONFIG_SND_SIMPLE_CARD is not set ++CONFIG_SND_X86=y ++# CONFIG_HDMI_LPE_AUDIO is not set ++ ++# ++# HID support ++# ++CONFIG_HID=y ++# CONFIG_HID_BATTERY_STRENGTH is not set ++CONFIG_HIDRAW=y ++CONFIG_UHID=y ++CONFIG_HID_GENERIC=y ++ ++# ++# Special HID drivers ++# ++CONFIG_HID_A4TECH=y ++# CONFIG_HID_ACCUTOUCH is not set ++# CONFIG_HID_ACRUX is not set ++CONFIG_HID_APPLE=y ++# CONFIG_HID_APPLEIR is not set ++# CONFIG_HID_ASUS is not set ++# CONFIG_HID_AUREAL is not set ++CONFIG_HID_BELKIN=y ++# CONFIG_HID_BETOP_FF is not set ++# CONFIG_HID_BIGBEN_FF is not set ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y ++# CONFIG_HID_CORSAIR is not set ++# CONFIG_HID_COUGAR is not set ++# CONFIG_HID_MACALLY is not set ++# CONFIG_HID_PRODIKEYS is not set ++# CONFIG_HID_CMEDIA is not set ++# CONFIG_HID_CP2112 is not set ++# CONFIG_HID_CREATIVE_SB0540 is not set ++CONFIG_HID_CYPRESS=y ++# CONFIG_HID_DRAGONRISE is not set ++# CONFIG_HID_EMS_FF is not set ++# CONFIG_HID_ELAN is not set ++# CONFIG_HID_ELECOM is not set ++CONFIG_HID_ELO=m ++CONFIG_HID_EZKEY=y ++# CONFIG_HID_GEMBIRD is not set ++# CONFIG_HID_GFRM is not set ++# CONFIG_HID_HOLTEK is not set ++# CONFIG_HID_GT683R is not set ++# CONFIG_HID_KEYTOUCH is not set ++# CONFIG_HID_KYE is not set ++# CONFIG_HID_UCLOGIC is not 
set ++# CONFIG_HID_WALTOP is not set ++# CONFIG_HID_VIEWSONIC is not set ++# CONFIG_HID_GYRATION is not set ++# CONFIG_HID_ICADE is not set ++# CONFIG_HID_ITE is not set ++# CONFIG_HID_JABRA is not set ++# CONFIG_HID_TWINHAN is not set ++CONFIG_HID_KENSINGTON=y ++# CONFIG_HID_LCPOWER is not set ++# CONFIG_HID_LED is not set ++# CONFIG_HID_LENOVO is not set ++CONFIG_HID_LOGITECH=y ++# CONFIG_HID_LOGITECH_DJ is not set ++# CONFIG_HID_LOGITECH_HIDPP is not set ++CONFIG_LOGITECH_FF=y ++# CONFIG_LOGIRUMBLEPAD2_FF is not set ++# CONFIG_LOGIG940_FF is not set ++CONFIG_LOGIWHEELS_FF=y ++CONFIG_HID_MAGICMOUSE=m ++# CONFIG_HID_MALTRON is not set ++# CONFIG_HID_MAYFLASH is not set ++# CONFIG_HID_REDRAGON is not set ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=y ++CONFIG_HID_MULTITOUCH=y ++# CONFIG_HID_NTI is not set ++CONFIG_HID_NTRIG=m ++# CONFIG_HID_ORTEK is not set ++# CONFIG_HID_PANTHERLORD is not set ++# CONFIG_HID_PENMOUNT is not set ++# CONFIG_HID_PETALYNX is not set ++# CONFIG_HID_PICOLCD is not set ++# CONFIG_HID_PLANTRONICS is not set ++# CONFIG_HID_PRIMAX is not set ++# CONFIG_HID_RETRODE is not set ++# CONFIG_HID_ROCCAT is not set ++# CONFIG_HID_SAITEK is not set ++# CONFIG_HID_SAMSUNG is not set ++# CONFIG_HID_SONY is not set ++# CONFIG_HID_SPEEDLINK is not set ++# CONFIG_HID_STEAM is not set ++# CONFIG_HID_STEELSERIES is not set ++# CONFIG_HID_SUNPLUS is not set ++# CONFIG_HID_RMI is not set ++# CONFIG_HID_GREENASIA is not set ++# CONFIG_HID_SMARTJOYPLUS is not set ++# CONFIG_HID_TIVO is not set ++# CONFIG_HID_TOPSEED is not set ++# CONFIG_HID_THINGM is not set ++# CONFIG_HID_THRUSTMASTER is not set ++# CONFIG_HID_UDRAW_PS3 is not set ++# CONFIG_HID_U2FZERO is not set ++# CONFIG_HID_WACOM is not set ++# CONFIG_HID_WIIMOTE is not set ++CONFIG_HID_XINMO=m ++# CONFIG_HID_ZEROPLUS is not set ++# CONFIG_HID_ZYDACRON is not set ++CONFIG_HID_SENSOR_HUB=m ++# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set ++# CONFIG_HID_ALPS is not set ++# end of Special HID drivers ++ 
++# ++# USB HID support ++# ++CONFIG_USB_HID=y ++CONFIG_HID_PID=y ++CONFIG_USB_HIDDEV=y ++# end of USB HID support ++ ++# ++# I2C HID support ++# ++CONFIG_I2C_HID=m ++# end of I2C HID support ++ ++# ++# Intel ISH HID support ++# ++# CONFIG_INTEL_ISH_HID is not set ++# end of Intel ISH HID support ++# end of HID support ++ ++CONFIG_USB_OHCI_LITTLE_ENDIAN=y ++CONFIG_USB_SUPPORT=y ++CONFIG_USB_COMMON=y ++# CONFIG_USB_LED_TRIG is not set ++CONFIG_USB_ULPI_BUS=y ++# CONFIG_USB_CONN_GPIO is not set ++CONFIG_USB_ARCH_HAS_HCD=y ++CONFIG_USB=y ++CONFIG_USB_PCI=y ++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y ++ ++# ++# Miscellaneous USB options ++# ++CONFIG_USB_DEFAULT_PERSIST=y ++CONFIG_USB_DYNAMIC_MINORS=y ++CONFIG_USB_OTG=y ++# CONFIG_USB_OTG_WHITELIST is not set ++# CONFIG_USB_OTG_BLACKLIST_HUB is not set ++CONFIG_USB_OTG_FSM=y ++# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set ++CONFIG_USB_AUTOSUSPEND_DELAY=2 ++CONFIG_USB_MON=y ++ ++# ++# USB Host Controller Drivers ++# ++CONFIG_USB_C67X00_HCD=y ++CONFIG_USB_XHCI_HCD=m ++# CONFIG_USB_XHCI_DBGCAP is not set ++CONFIG_USB_XHCI_PCI=m ++CONFIG_USB_XHCI_PLATFORM=m ++CONFIG_USB_EHCI_HCD=y ++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set ++CONFIG_USB_EHCI_TT_NEWSCHED=y ++CONFIG_USB_EHCI_PCI=y ++# CONFIG_USB_EHCI_FSL is not set ++# CONFIG_USB_EHCI_HCD_PLATFORM is not set ++CONFIG_USB_OXU210HP_HCD=y ++CONFIG_USB_ISP116X_HCD=y ++CONFIG_USB_FOTG210_HCD=m ++# CONFIG_USB_MAX3421_HCD is not set ++CONFIG_USB_OHCI_HCD=y ++# CONFIG_USB_OHCI_HCD_PCI is not set ++# CONFIG_USB_OHCI_HCD_PLATFORM is not set ++CONFIG_USB_UHCI_HCD=y ++CONFIG_USB_SL811_HCD=y ++# CONFIG_USB_SL811_HCD_ISO is not set ++CONFIG_USB_R8A66597_HCD=y ++# CONFIG_USB_HCD_BCMA is not set ++# CONFIG_USB_HCD_TEST_MODE is not set ++ ++# ++# USB Device Class drivers ++# ++CONFIG_USB_ACM=y ++CONFIG_USB_PRINTER=m ++CONFIG_USB_WDM=m ++# CONFIG_USB_TMC is not set ++ ++# ++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may ++# ++ ++# ++# also be needed; see USB_STORAGE Help for more info ++# 
++CONFIG_USB_STORAGE=y ++# CONFIG_USB_STORAGE_DEBUG is not set ++CONFIG_USB_STORAGE_REALTEK=y ++CONFIG_REALTEK_AUTOPM=y ++# CONFIG_USB_STORAGE_DATAFAB is not set ++# CONFIG_USB_STORAGE_FREECOM is not set ++# CONFIG_USB_STORAGE_ISD200 is not set ++# CONFIG_USB_STORAGE_USBAT is not set ++# CONFIG_USB_STORAGE_SDDR09 is not set ++# CONFIG_USB_STORAGE_SDDR55 is not set ++# CONFIG_USB_STORAGE_JUMPSHOT is not set ++# CONFIG_USB_STORAGE_ALAUDA is not set ++# CONFIG_USB_STORAGE_ONETOUCH is not set ++# CONFIG_USB_STORAGE_KARMA is not set ++# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set ++# CONFIG_USB_STORAGE_ENE_UB6250 is not set ++# CONFIG_USB_UAS is not set ++ ++# ++# USB Imaging devices ++# ++# CONFIG_USB_MDC800 is not set ++# CONFIG_USB_MICROTEK is not set ++# CONFIG_USBIP_CORE is not set ++# CONFIG_USB_CDNS3 is not set ++# CONFIG_USB_MUSB_HDRC is not set ++CONFIG_USB_DWC3=m ++CONFIG_USB_DWC3_ULPI=y ++# CONFIG_USB_DWC3_HOST is not set ++CONFIG_USB_DWC3_GADGET=y ++# CONFIG_USB_DWC3_DUAL_ROLE is not set ++ ++# ++# Platform Glue Driver Support ++# ++CONFIG_USB_DWC3_PCI=m ++CONFIG_USB_DWC3_HAPS=m ++CONFIG_USB_DWC2=y ++# CONFIG_USB_DWC2_HOST is not set ++ ++# ++# Gadget/Dual-role mode requires USB Gadget support to be enabled ++# ++CONFIG_USB_DWC2_PERIPHERAL=y ++# CONFIG_USB_DWC2_DUAL_ROLE is not set ++# CONFIG_USB_DWC2_PCI is not set ++# CONFIG_USB_DWC2_DEBUG is not set ++# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set ++# CONFIG_USB_CHIPIDEA is not set ++# CONFIG_USB_ISP1760 is not set ++ ++# ++# USB port drivers ++# ++CONFIG_USB_SERIAL=y ++CONFIG_USB_SERIAL_CONSOLE=y ++CONFIG_USB_SERIAL_GENERIC=y ++# CONFIG_USB_SERIAL_SIMPLE is not set ++# CONFIG_USB_SERIAL_AIRCABLE is not set ++CONFIG_USB_SERIAL_ARK3116=y ++CONFIG_USB_SERIAL_BELKIN=y ++CONFIG_USB_SERIAL_CH341=y ++CONFIG_USB_SERIAL_WHITEHEAT=y ++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=y ++CONFIG_USB_SERIAL_CP210X=y ++# CONFIG_USB_SERIAL_CYPRESS_M8 is not set ++# CONFIG_USB_SERIAL_EMPEG is not set ++CONFIG_USB_SERIAL_FTDI_SIO=y ++# 
CONFIG_USB_SERIAL_VISOR is not set ++# CONFIG_USB_SERIAL_IPAQ is not set ++# CONFIG_USB_SERIAL_IR is not set ++# CONFIG_USB_SERIAL_EDGEPORT is not set ++# CONFIG_USB_SERIAL_EDGEPORT_TI is not set ++CONFIG_USB_SERIAL_F81232=y ++# CONFIG_USB_SERIAL_F8153X is not set ++# CONFIG_USB_SERIAL_GARMIN is not set ++# CONFIG_USB_SERIAL_IPW is not set ++# CONFIG_USB_SERIAL_IUU is not set ++# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set ++# CONFIG_USB_SERIAL_KEYSPAN is not set ++# CONFIG_USB_SERIAL_KLSI is not set ++# CONFIG_USB_SERIAL_KOBIL_SCT is not set ++CONFIG_USB_SERIAL_MCT_U232=y ++# CONFIG_USB_SERIAL_METRO is not set ++CONFIG_USB_SERIAL_MOS7720=y ++CONFIG_USB_SERIAL_MOS7840=y ++# CONFIG_USB_SERIAL_MXUPORT is not set ++# CONFIG_USB_SERIAL_NAVMAN is not set ++CONFIG_USB_SERIAL_PL2303=y ++CONFIG_USB_SERIAL_OTI6858=y ++# CONFIG_USB_SERIAL_QCAUX is not set ++# CONFIG_USB_SERIAL_QUALCOMM is not set ++CONFIG_USB_SERIAL_SPCP8X5=y ++# CONFIG_USB_SERIAL_SAFE is not set ++# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set ++# CONFIG_USB_SERIAL_SYMBOL is not set ++CONFIG_USB_SERIAL_TI=y ++# CONFIG_USB_SERIAL_CYBERJACK is not set ++CONFIG_USB_SERIAL_XIRCOM=y ++CONFIG_USB_SERIAL_WWAN=y ++CONFIG_USB_SERIAL_OPTION=y ++# CONFIG_USB_SERIAL_OMNINET is not set ++# CONFIG_USB_SERIAL_OPTICON is not set ++# CONFIG_USB_SERIAL_XSENS_MT is not set ++# CONFIG_USB_SERIAL_WISHBONE is not set ++CONFIG_USB_SERIAL_SSU100=y ++# CONFIG_USB_SERIAL_QT2 is not set ++# CONFIG_USB_SERIAL_UPD78F0730 is not set ++# CONFIG_USB_SERIAL_DEBUG is not set ++ ++# ++# USB Miscellaneous drivers ++# ++# CONFIG_USB_EMI62 is not set ++# CONFIG_USB_EMI26 is not set ++# CONFIG_USB_ADUTUX is not set ++# CONFIG_USB_SEVSEG is not set ++# CONFIG_USB_LEGOTOWER is not set ++# CONFIG_USB_LCD is not set ++# CONFIG_USB_CYPRESS_CY7C63 is not set ++# CONFIG_USB_CYTHERM is not set ++# CONFIG_USB_IDMOUSE is not set ++# CONFIG_USB_FTDI_ELAN is not set ++# CONFIG_USB_APPLEDISPLAY is not set ++# CONFIG_USB_SISUSBVGA is not set ++# CONFIG_USB_LD is 
not set ++# CONFIG_USB_TRANCEVIBRATOR is not set ++# CONFIG_USB_IOWARRIOR is not set ++# CONFIG_USB_TEST is not set ++# CONFIG_USB_EHSET_TEST_FIXTURE is not set ++# CONFIG_USB_ISIGHTFW is not set ++# CONFIG_USB_YUREX is not set ++CONFIG_USB_EZUSB_FX2=y ++# CONFIG_USB_HUB_USB251XB is not set ++# CONFIG_USB_HSIC_USB3503 is not set ++# CONFIG_USB_HSIC_USB4604 is not set ++# CONFIG_USB_LINK_LAYER_TEST is not set ++# CONFIG_USB_CHAOSKEY is not set ++ ++# ++# USB Physical Layer drivers ++# ++CONFIG_USB_PHY=y ++CONFIG_NOP_USB_XCEIV=y ++# CONFIG_USB_GPIO_VBUS is not set ++# CONFIG_USB_ISP1301 is not set ++# end of USB Physical Layer drivers ++ ++CONFIG_USB_GADGET=y ++# CONFIG_USB_GADGET_DEBUG is not set ++# CONFIG_USB_GADGET_DEBUG_FILES is not set ++# CONFIG_USB_GADGET_DEBUG_FS is not set ++CONFIG_USB_GADGET_VBUS_DRAW=2 ++CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 ++# CONFIG_U_SERIAL_CONSOLE is not set ++ ++# ++# USB Peripheral Controller ++# ++# CONFIG_USB_FOTG210_UDC is not set ++# CONFIG_USB_GR_UDC is not set ++# CONFIG_USB_R8A66597 is not set ++# CONFIG_USB_PXA27X is not set ++# CONFIG_USB_MV_UDC is not set ++# CONFIG_USB_MV_U3D is not set ++# CONFIG_USB_M66592 is not set ++# CONFIG_USB_BDC_UDC is not set ++# CONFIG_USB_AMD5536UDC is not set ++# CONFIG_USB_NET2272 is not set ++# CONFIG_USB_NET2280 is not set ++# CONFIG_USB_GOKU is not set ++# CONFIG_USB_EG20T is not set ++# CONFIG_USB_DUMMY_HCD is not set ++# end of USB Peripheral Controller ++ ++CONFIG_USB_LIBCOMPOSITE=y ++CONFIG_USB_F_ACM=y ++CONFIG_USB_U_SERIAL=y ++CONFIG_USB_U_ETHER=y ++CONFIG_USB_F_SERIAL=y ++CONFIG_USB_F_NCM=y ++CONFIG_USB_F_RNDIS=y ++CONFIG_USB_F_MASS_STORAGE=y ++CONFIG_USB_F_FS=y ++CONFIG_USB_F_MIDI=y ++CONFIG_USB_F_HID=y ++CONFIG_USB_F_ACC=y ++CONFIG_USB_F_AUDIO_SRC=y ++CONFIG_USB_CONFIGFS=y ++CONFIG_USB_CONFIGFS_UEVENT=y ++CONFIG_USB_CONFIGFS_SERIAL=y ++CONFIG_USB_CONFIGFS_ACM=y ++# CONFIG_USB_CONFIGFS_OBEX is not set ++CONFIG_USB_CONFIGFS_NCM=y ++# CONFIG_USB_CONFIGFS_ECM is not set ++# 
CONFIG_USB_CONFIGFS_ECM_SUBSET is not set ++CONFIG_USB_CONFIGFS_RNDIS=y ++# CONFIG_USB_CONFIGFS_EEM is not set ++CONFIG_USB_CONFIGFS_MASS_STORAGE=y ++# CONFIG_USB_CONFIGFS_F_LB_SS is not set ++CONFIG_USB_CONFIGFS_F_FS=y ++CONFIG_USB_CONFIGFS_F_ACC=y ++CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y ++# CONFIG_USB_CONFIGFS_F_UAC1 is not set ++# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set ++# CONFIG_USB_CONFIGFS_F_UAC2 is not set ++CONFIG_USB_CONFIGFS_F_MIDI=y ++CONFIG_USB_CONFIGFS_F_HID=y ++# CONFIG_USB_CONFIGFS_F_UVC is not set ++# CONFIG_USB_CONFIGFS_F_PRINTER is not set ++CONFIG_TYPEC=y ++CONFIG_TYPEC_TCPM=y ++CONFIG_TYPEC_TCPCI=y ++# CONFIG_TYPEC_RT1711H is not set ++# CONFIG_TYPEC_FUSB302 is not set ++# CONFIG_TYPEC_WCOVE is not set ++# CONFIG_TYPEC_UCSI is not set ++# CONFIG_TYPEC_TPS6598X is not set ++ ++# ++# USB Type-C Multiplexer/DeMultiplexer Switch support ++# ++# CONFIG_TYPEC_MUX_PI3USB30532 is not set ++# end of USB Type-C Multiplexer/DeMultiplexer Switch support ++ ++# ++# USB Type-C Alternate Mode drivers ++# ++# CONFIG_TYPEC_DP_ALTMODE is not set ++# end of USB Type-C Alternate Mode drivers ++ ++CONFIG_USB_ROLE_SWITCH=y ++CONFIG_USB_ROLES_INTEL_XHCI=y ++CONFIG_MMC=y ++CONFIG_MMC_BLOCK=y ++CONFIG_MMC_BLOCK_MINORS=16 ++# CONFIG_SDIO_UART is not set ++# CONFIG_MMC_TEST is not set ++ ++# ++# MMC/SD/SDIO Host Controller Drivers ++# ++# CONFIG_MMC_DEBUG is not set ++CONFIG_MMC_SDHCI=y ++CONFIG_MMC_SDHCI_IO_ACCESSORS=y ++CONFIG_MMC_SDHCI_PCI=y ++# CONFIG_MMC_RICOH_MMC is not set ++CONFIG_MMC_SDHCI_ACPI=y ++# CONFIG_MMC_SDHCI_PLTFM is not set ++# CONFIG_MMC_WBSD is not set ++# CONFIG_MMC_TIFM_SD is not set ++# CONFIG_MMC_SPI is not set ++# CONFIG_MMC_CB710 is not set ++# CONFIG_MMC_VIA_SDMMC is not set ++# CONFIG_MMC_VUB300 is not set ++# CONFIG_MMC_USHC is not set ++# CONFIG_MMC_USDHI6ROL0 is not set ++CONFIG_MMC_CQHCI=y ++# CONFIG_MMC_TOSHIBA_PCI is not set ++# CONFIG_MMC_MTK is not set ++# CONFIG_MEMSTICK is not set ++CONFIG_NEW_LEDS=y ++CONFIG_LEDS_CLASS=y ++# 
CONFIG_LEDS_CLASS_FLASH is not set ++# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set ++ ++# ++# LED drivers ++# ++# CONFIG_LEDS_APU is not set ++# CONFIG_LEDS_LM3530 is not set ++# CONFIG_LEDS_LM3532 is not set ++# CONFIG_LEDS_LM3642 is not set ++# CONFIG_LEDS_PCA9532 is not set ++# CONFIG_LEDS_GPIO is not set ++# CONFIG_LEDS_LP3944 is not set ++# CONFIG_LEDS_LP3952 is not set ++# CONFIG_LEDS_LP5521 is not set ++# CONFIG_LEDS_LP5523 is not set ++# CONFIG_LEDS_LP5562 is not set ++# CONFIG_LEDS_LP8501 is not set ++# CONFIG_LEDS_PCA955X is not set ++# CONFIG_LEDS_PCA963X is not set ++# CONFIG_LEDS_DAC124S085 is not set ++# CONFIG_LEDS_PWM is not set ++# CONFIG_LEDS_REGULATOR is not set ++# CONFIG_LEDS_BD2802 is not set ++# CONFIG_LEDS_INTEL_SS4200 is not set ++# CONFIG_LEDS_TCA6507 is not set ++# CONFIG_LEDS_TLC591XX is not set ++# CONFIG_LEDS_LM355x is not set ++ ++# ++# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) ++# ++# CONFIG_LEDS_BLINKM is not set ++# CONFIG_LEDS_MLXCPLD is not set ++# CONFIG_LEDS_MLXREG is not set ++# CONFIG_LEDS_USER is not set ++# CONFIG_LEDS_NIC78BX is not set ++# CONFIG_LEDS_TI_LMU_COMMON is not set ++ ++# ++# LED Triggers ++# ++CONFIG_LEDS_TRIGGERS=y ++# CONFIG_LEDS_TRIGGER_TIMER is not set ++# CONFIG_LEDS_TRIGGER_ONESHOT is not set ++# CONFIG_LEDS_TRIGGER_DISK is not set ++# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set ++# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set ++# CONFIG_LEDS_TRIGGER_CPU is not set ++# CONFIG_LEDS_TRIGGER_ACTIVITY is not set ++# CONFIG_LEDS_TRIGGER_GPIO is not set ++# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set ++ ++# ++# iptables trigger is under Netfilter config (LED target) ++# ++# CONFIG_LEDS_TRIGGER_TRANSIENT is not set ++# CONFIG_LEDS_TRIGGER_CAMERA is not set ++# CONFIG_LEDS_TRIGGER_PANIC is not set ++# CONFIG_LEDS_TRIGGER_NETDEV is not set ++# CONFIG_LEDS_TRIGGER_PATTERN is not set ++# CONFIG_LEDS_TRIGGER_AUDIO is not set ++# CONFIG_ACCESSIBILITY is not set ++# CONFIG_INFINIBAND is not 
set ++CONFIG_EDAC_ATOMIC_SCRUB=y ++CONFIG_EDAC_SUPPORT=y ++# CONFIG_EDAC is not set ++CONFIG_RTC_LIB=y ++CONFIG_RTC_MC146818_LIB=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_HCTOSYS=y ++CONFIG_RTC_HCTOSYS_DEVICE="rtc0" ++CONFIG_RTC_SYSTOHC=y ++CONFIG_RTC_SYSTOHC_DEVICE="rtc0" ++# CONFIG_RTC_DEBUG is not set ++CONFIG_RTC_NVMEM=y ++ ++# ++# RTC interfaces ++# ++CONFIG_RTC_INTF_SYSFS=y ++CONFIG_RTC_INTF_PROC=y ++CONFIG_RTC_INTF_DEV=y ++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set ++# CONFIG_RTC_DRV_TEST is not set ++ ++# ++# I2C RTC drivers ++# ++# CONFIG_RTC_DRV_ABB5ZES3 is not set ++# CONFIG_RTC_DRV_ABEOZ9 is not set ++# CONFIG_RTC_DRV_ABX80X is not set ++# CONFIG_RTC_DRV_DS1307 is not set ++# CONFIG_RTC_DRV_DS1374 is not set ++# CONFIG_RTC_DRV_DS1672 is not set ++# CONFIG_RTC_DRV_MAX6900 is not set ++# CONFIG_RTC_DRV_RS5C372 is not set ++# CONFIG_RTC_DRV_ISL1208 is not set ++# CONFIG_RTC_DRV_ISL12022 is not set ++# CONFIG_RTC_DRV_X1205 is not set ++# CONFIG_RTC_DRV_PCF8523 is not set ++# CONFIG_RTC_DRV_PCF85063 is not set ++# CONFIG_RTC_DRV_PCF85363 is not set ++# CONFIG_RTC_DRV_PCF8563 is not set ++# CONFIG_RTC_DRV_PCF8583 is not set ++# CONFIG_RTC_DRV_M41T80 is not set ++# CONFIG_RTC_DRV_BQ32K is not set ++# CONFIG_RTC_DRV_S35390A is not set ++# CONFIG_RTC_DRV_FM3130 is not set ++# CONFIG_RTC_DRV_RX8010 is not set ++# CONFIG_RTC_DRV_RX8581 is not set ++# CONFIG_RTC_DRV_RX8025 is not set ++# CONFIG_RTC_DRV_EM3027 is not set ++# CONFIG_RTC_DRV_RV3028 is not set ++# CONFIG_RTC_DRV_RV8803 is not set ++# CONFIG_RTC_DRV_SD3078 is not set ++ ++# ++# SPI RTC drivers ++# ++# CONFIG_RTC_DRV_M41T93 is not set ++# CONFIG_RTC_DRV_M41T94 is not set ++# CONFIG_RTC_DRV_DS1302 is not set ++# CONFIG_RTC_DRV_DS1305 is not set ++# CONFIG_RTC_DRV_DS1343 is not set ++# CONFIG_RTC_DRV_DS1347 is not set ++# CONFIG_RTC_DRV_DS1390 is not set ++# CONFIG_RTC_DRV_MAX6916 is not set ++# CONFIG_RTC_DRV_R9701 is not set ++# CONFIG_RTC_DRV_RX4581 is not set ++# CONFIG_RTC_DRV_RX6110 is not set ++# 
CONFIG_RTC_DRV_RS5C348 is not set ++# CONFIG_RTC_DRV_MAX6902 is not set ++# CONFIG_RTC_DRV_PCF2123 is not set ++# CONFIG_RTC_DRV_MCP795 is not set ++CONFIG_RTC_I2C_AND_SPI=y ++ ++# ++# SPI and I2C RTC drivers ++# ++# CONFIG_RTC_DRV_DS3232 is not set ++# CONFIG_RTC_DRV_PCF2127 is not set ++# CONFIG_RTC_DRV_RV3029C2 is not set ++ ++# ++# Platform RTC drivers ++# ++CONFIG_RTC_DRV_CMOS=y ++# CONFIG_RTC_DRV_DS1286 is not set ++# CONFIG_RTC_DRV_DS1511 is not set ++# CONFIG_RTC_DRV_DS1553 is not set ++# CONFIG_RTC_DRV_DS1685_FAMILY is not set ++# CONFIG_RTC_DRV_DS1742 is not set ++# CONFIG_RTC_DRV_DS2404 is not set ++# CONFIG_RTC_DRV_STK17TA8 is not set ++# CONFIG_RTC_DRV_M48T86 is not set ++# CONFIG_RTC_DRV_M48T35 is not set ++# CONFIG_RTC_DRV_M48T59 is not set ++# CONFIG_RTC_DRV_MSM6242 is not set ++# CONFIG_RTC_DRV_BQ4802 is not set ++# CONFIG_RTC_DRV_RP5C01 is not set ++# CONFIG_RTC_DRV_V3020 is not set ++ ++# ++# on-CPU RTC drivers ++# ++# CONFIG_RTC_DRV_FTRTC010 is not set ++ ++# ++# HID Sensor RTC drivers ++# ++# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set ++CONFIG_DMADEVICES=y ++# CONFIG_DMADEVICES_DEBUG is not set ++ ++# ++# DMA Devices ++# ++CONFIG_DMA_ENGINE=y ++CONFIG_DMA_VIRTUAL_CHANNELS=y ++CONFIG_DMA_ACPI=y ++# CONFIG_ALTERA_MSGDMA is not set ++CONFIG_INTEL_IDMA64=y ++# CONFIG_INTEL_IOATDMA is not set ++# CONFIG_QCOM_HIDMA_MGMT is not set ++# CONFIG_QCOM_HIDMA is not set ++CONFIG_DW_DMAC_CORE=y ++CONFIG_DW_DMAC=y ++CONFIG_DW_DMAC_PCI=y ++# CONFIG_DW_EDMA is not set ++# CONFIG_DW_EDMA_PCIE is not set ++CONFIG_HSU_DMA=y ++ ++# ++# DMA Clients ++# ++# CONFIG_ASYNC_TX_DMA is not set ++# CONFIG_DMATEST is not set ++ ++# ++# DMABUF options ++# ++CONFIG_SYNC_FILE=y ++CONFIG_SW_SYNC=y ++# CONFIG_UDMABUF is not set ++# CONFIG_DMABUF_SELFTESTS is not set ++# end of DMABUF options ++ ++# CONFIG_AUXDISPLAY is not set ++# CONFIG_UIO is not set ++# CONFIG_VFIO is not set ++CONFIG_VIRT_DRIVERS=y ++# CONFIG_VBOXGUEST is not set ++CONFIG_VIRTIO=y ++CONFIG_VIRTIO_MENU=y ++# 
CONFIG_VIRTIO_PCI is not set ++# CONFIG_VIRTIO_BALLOON is not set ++# CONFIG_VIRTIO_INPUT is not set ++# CONFIG_VIRTIO_MMIO is not set ++# CONFIG_VIRTIO_PMD is not set ++ ++# ++# Microsoft Hyper-V guest support ++# ++# CONFIG_HYPERV is not set ++# end of Microsoft Hyper-V guest support ++ ++# CONFIG_GREYBUS is not set ++CONFIG_STAGING=y ++# CONFIG_PRISM2_USB is not set ++# CONFIG_COMEDI is not set ++CONFIG_RTL8192U=m ++CONFIG_RTLLIB=m ++CONFIG_RTLLIB_CRYPTO_CCMP=m ++CONFIG_RTLLIB_CRYPTO_TKIP=m ++CONFIG_RTLLIB_CRYPTO_WEP=m ++CONFIG_RTL8192E=m ++# CONFIG_RTL8723BS is not set ++CONFIG_R8712U=m ++# CONFIG_R8188EU is not set ++# CONFIG_RTS5208 is not set ++# CONFIG_VT6655 is not set ++# CONFIG_VT6656 is not set ++ ++# ++# IIO staging drivers ++# ++ ++# ++# Accelerometers ++# ++# CONFIG_ADIS16203 is not set ++# CONFIG_ADIS16240 is not set ++# end of Accelerometers ++ ++# ++# Analog to digital converters ++# ++# CONFIG_AD7816 is not set ++# CONFIG_AD7192 is not set ++# CONFIG_AD7280 is not set ++# end of Analog to digital converters ++ ++# ++# Analog digital bi-direction converters ++# ++# CONFIG_ADT7316 is not set ++# end of Analog digital bi-direction converters ++ ++# ++# Capacitance to digital converters ++# ++# CONFIG_AD7150 is not set ++# CONFIG_AD7746 is not set ++# end of Capacitance to digital converters ++ ++# ++# Direct Digital Synthesis ++# ++# CONFIG_AD9832 is not set ++# CONFIG_AD9834 is not set ++# end of Direct Digital Synthesis ++ ++# ++# Network Analyzer, Impedance Converters ++# ++# CONFIG_AD5933 is not set ++# end of Network Analyzer, Impedance Converters ++ ++# ++# Active energy metering IC ++# ++# CONFIG_ADE7854 is not set ++# end of Active energy metering IC ++ ++# ++# Resolver to digital converters ++# ++# CONFIG_AD2S1210 is not set ++# end of Resolver to digital converters ++# end of IIO staging drivers ++ ++# CONFIG_FB_SM750 is not set ++ ++# ++# Speakup console speech ++# ++# CONFIG_SPEAKUP is not set ++# end of Speakup console speech ++ ++# 
CONFIG_STAGING_MEDIA is not set ++ ++# ++# Android ++# ++CONFIG_ASHMEM=y ++# CONFIG_ANDROID_VSOC is not set ++CONFIG_ANDROID_FWDATA=y ++CONFIG_ION=y ++CONFIG_ION_SYSTEM_HEAP=y ++# CONFIG_ION_SYSTEM_CONTIG_HEAP is not set ++CONFIG_ABL_BOOTLOADER_CONTROL=y ++# CONFIG_SEND_SLCAN_ENABLE is not set ++# CONFIG_SBL_BOOTLOADER_CONTROL is not set ++# CONFIG_VSBL_BOOTLOADER_CONTROL is not set ++# end of Android ++ ++CONFIG_LTE_GDM724X=m ++# CONFIG_GS_FPGABOOT is not set ++# CONFIG_UNISYSSPAR is not set ++# CONFIG_WILC1000_SDIO is not set ++# CONFIG_WILC1000_SPI is not set ++# CONFIG_MOST is not set ++# CONFIG_KS7010 is not set ++# CONFIG_PI433 is not set ++ ++# ++# Gasket devices ++# ++# CONFIG_STAGING_GASKET_FRAMEWORK is not set ++# end of Gasket devices ++ ++# CONFIG_FIELDBUS_DEV is not set ++# CONFIG_USB_WUSB_CBAF is not set ++# CONFIG_UWB is not set ++# CONFIG_EXFAT_FS is not set ++# CONFIG_QLGE is not set ++CONFIG_X86_PLATFORM_DEVICES=y ++# CONFIG_ACER_WIRELESS is not set ++# CONFIG_ACERHDF is not set ++# CONFIG_ASUS_LAPTOP is not set ++CONFIG_DCDBAS=y ++# CONFIG_DELL_SMBIOS is not set ++# CONFIG_DELL_SMO8800 is not set ++# CONFIG_DELL_RBTN is not set ++CONFIG_DELL_RBU=y ++# CONFIG_FUJITSU_LAPTOP is not set ++# CONFIG_FUJITSU_TABLET is not set ++# CONFIG_GPD_POCKET_FAN is not set ++# CONFIG_HP_WIRELESS is not set ++# CONFIG_PANASONIC_LAPTOP is not set ++# CONFIG_COMPAL_LAPTOP is not set ++# CONFIG_SONY_LAPTOP is not set ++# CONFIG_THINKPAD_ACPI is not set ++# CONFIG_SENSORS_HDAPS is not set ++# CONFIG_INTEL_MENLOW is not set ++# CONFIG_EEEPC_LAPTOP is not set ++# CONFIG_ASUS_WIRELESS is not set ++# CONFIG_ACPI_WMI is not set ++# CONFIG_TOPSTAR_LAPTOP is not set ++# CONFIG_TOSHIBA_BT_RFKILL is not set ++# CONFIG_TOSHIBA_HAPS is not set ++# CONFIG_ACPI_CMPC is not set ++# CONFIG_INTEL_INT0002_VGPIO is not set ++# CONFIG_INTEL_HID_EVENT is not set ++# CONFIG_INTEL_VBTN is not set ++# CONFIG_INTEL_IPS is not set ++# CONFIG_INTEL_PMC_CORE is not set ++# CONFIG_IBM_RTL is not 
set ++# CONFIG_SAMSUNG_LAPTOP is not set ++# CONFIG_INTEL_OAKTRAIL is not set ++# CONFIG_SAMSUNG_Q10 is not set ++# CONFIG_APPLE_GMUX is not set ++CONFIG_INTEL_RST=y ++# CONFIG_INTEL_SMARTCONNECT is not set ++CONFIG_INTEL_PMC_IPC=y ++# CONFIG_INTEL_BXTWC_PMIC_TMU is not set ++# CONFIG_SURFACE_PRO3_BUTTON is not set ++# CONFIG_SURFACE_3_BUTTON is not set ++CONFIG_INTEL_PUNIT_IPC=y ++CONFIG_INTEL_TELEMETRY=y ++# CONFIG_MLX_PLATFORM is not set ++# CONFIG_INTEL_TURBO_MAX_3 is not set ++# CONFIG_I2C_MULTI_INSTANTIATE is not set ++# CONFIG_INTEL_PSTORE_PRAM is not set ++# CONFIG_INTEL_ATOMISP2_PM is not set ++# CONFIG_PCENGINES_APU2 is not set ++ ++# ++# Intel Speed Select Technology interface support ++# ++# CONFIG_INTEL_SPEED_SELECT_INTERFACE is not set ++# end of Intel Speed Select Technology interface support ++ ++CONFIG_PMC_ATOM=y ++CONFIG_INTEL_SOCPERF=y ++CONFIG_SOCPERF=m ++CONFIG_INTEL_SOCWATCH=m ++CONFIG_INTEL_SEP=y ++CONFIG_SEP=m ++CONFIG_SEP_PAX=m ++# CONFIG_SEP_PER_USER_MODE is not set ++CONFIG_SEP_STANDARD_MODE=y ++# CONFIG_SEP_MINLOG_MODE is not set ++# CONFIG_SEP_MAXLOG_MODE is not set ++# CONFIG_MFD_CROS_EC is not set ++# CONFIG_CHROME_PLATFORMS is not set ++# CONFIG_MELLANOX_PLATFORM is not set ++CONFIG_CLKDEV_LOOKUP=y ++CONFIG_HAVE_CLK_PREPARE=y ++CONFIG_COMMON_CLK=y ++ ++# ++# Common Clock Framework ++# ++# CONFIG_COMMON_CLK_MAX9485 is not set ++# CONFIG_COMMON_CLK_SI5341 is not set ++# CONFIG_COMMON_CLK_SI5351 is not set ++# CONFIG_COMMON_CLK_SI544 is not set ++# CONFIG_COMMON_CLK_CDCE706 is not set ++# CONFIG_COMMON_CLK_CS2000_CP is not set ++# CONFIG_COMMON_CLK_PWM is not set ++# end of Common Clock Framework ++ ++# CONFIG_HWSPINLOCK is not set ++ ++# ++# Clock Source drivers ++# ++CONFIG_CLKEVT_I8253=y ++CONFIG_I8253_LOCK=y ++CONFIG_CLKBLD_I8253=y ++# end of Clock Source drivers ++ ++CONFIG_MAILBOX=y ++CONFIG_PCC=y ++# CONFIG_ALTERA_MBOX is not set ++CONFIG_IOMMU_IOVA=y ++CONFIG_IOMMU_API=y ++CONFIG_IOMMU_SUPPORT=y ++ ++# ++# Generic IOMMU 
Pagetable Support ++# ++# end of Generic IOMMU Pagetable Support ++ ++# CONFIG_IOMMU_DEBUGFS is not set ++# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set ++# CONFIG_AMD_IOMMU is not set ++CONFIG_DMAR_TABLE=y ++CONFIG_INTEL_IOMMU=y ++# CONFIG_INTEL_IOMMU_SVM is not set ++CONFIG_INTEL_IOMMU_DEFAULT_ON=y ++CONFIG_INTEL_IOMMU_FLOPPY_WA=y ++# CONFIG_IRQ_REMAP is not set ++ ++# ++# Remoteproc drivers ++# ++# CONFIG_REMOTEPROC is not set ++# end of Remoteproc drivers ++ ++# ++# Rpmsg drivers ++# ++# CONFIG_RPMSG_QCOM_GLINK_RPM is not set ++# CONFIG_RPMSG_VIRTIO is not set ++# end of Rpmsg drivers ++ ++# CONFIG_SOUNDWIRE is not set ++ ++# ++# SOC (System On Chip) specific Drivers ++# ++ ++# ++# Amlogic SoC drivers ++# ++# end of Amlogic SoC drivers ++ ++# ++# Aspeed SoC drivers ++# ++# end of Aspeed SoC drivers ++ ++# ++# Broadcom SoC drivers ++# ++# end of Broadcom SoC drivers ++ ++# ++# NXP/Freescale QorIQ SoC drivers ++# ++# end of NXP/Freescale QorIQ SoC drivers ++ ++# ++# i.MX SoC drivers ++# ++# end of i.MX SoC drivers ++ ++# ++# Qualcomm SoC drivers ++# ++# end of Qualcomm SoC drivers ++ ++# CONFIG_SOC_TI is not set ++ ++# ++# Xilinx SoC drivers ++# ++# CONFIG_XILINX_VCU is not set ++# end of Xilinx SoC drivers ++# end of SOC (System On Chip) specific Drivers ++ ++CONFIG_PM_DEVFREQ=y ++ ++# ++# DEVFREQ Governors ++# ++CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y ++# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set ++# CONFIG_DEVFREQ_GOV_POWERSAVE is not set ++# CONFIG_DEVFREQ_GOV_USERSPACE is not set ++# CONFIG_DEVFREQ_GOV_PASSIVE is not set ++ ++# ++# DEVFREQ Drivers ++# ++# CONFIG_PM_DEVFREQ_EVENT is not set ++CONFIG_EXTCON=y ++ ++# ++# Extcon Device Drivers ++# ++# CONFIG_EXTCON_ADC_JACK is not set ++CONFIG_EXTCON_ARIZONA=y ++# CONFIG_EXTCON_FSA9480 is not set ++CONFIG_EXTCON_GPIO=y ++# CONFIG_EXTCON_INTEL_INT3496 is not set ++# CONFIG_EXTCON_MAX3355 is not set ++# CONFIG_EXTCON_PTN5150 is not set ++# CONFIG_EXTCON_RT8973A is not set ++# CONFIG_EXTCON_SM5502 is not set ++# 
CONFIG_EXTCON_USB_GPIO is not set ++# CONFIG_MEMORY is not set ++CONFIG_IIO=y ++CONFIG_IIO_BUFFER=y ++# CONFIG_IIO_BUFFER_CB is not set ++# CONFIG_IIO_BUFFER_HW_CONSUMER is not set ++CONFIG_IIO_KFIFO_BUF=y ++CONFIG_IIO_TRIGGERED_BUFFER=y ++# CONFIG_IIO_CONFIGFS is not set ++CONFIG_IIO_TRIGGER=y ++CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 ++# CONFIG_IIO_SW_DEVICE is not set ++# CONFIG_IIO_SW_TRIGGER is not set ++ ++# ++# Accelerometers ++# ++# CONFIG_ADIS16201 is not set ++# CONFIG_ADIS16209 is not set ++# CONFIG_ADXL345_I2C is not set ++# CONFIG_ADXL345_SPI is not set ++# CONFIG_ADXL372_SPI is not set ++# CONFIG_ADXL372_I2C is not set ++CONFIG_BMA180=m ++# CONFIG_BMA220 is not set ++CONFIG_BMC150_ACCEL=y ++CONFIG_BMC150_ACCEL_I2C=y ++CONFIG_BMC150_ACCEL_SPI=y ++# CONFIG_DA280 is not set ++# CONFIG_DA311 is not set ++# CONFIG_DMARD09 is not set ++# CONFIG_DMARD10 is not set ++CONFIG_HID_SENSOR_ACCEL_3D=m ++CONFIG_IIO_ST_ACCEL_3AXIS=y ++CONFIG_IIO_ST_ACCEL_I2C_3AXIS=y ++CONFIG_IIO_ST_ACCEL_SPI_3AXIS=y ++# CONFIG_KXSD9 is not set ++CONFIG_KXCJK1013=y ++# CONFIG_MC3230 is not set ++# CONFIG_MMA7455_I2C is not set ++# CONFIG_MMA7455_SPI is not set ++# CONFIG_MMA7660 is not set ++# CONFIG_MMA8452 is not set ++# CONFIG_MMA9551 is not set ++# CONFIG_MMA9553 is not set ++# CONFIG_MXC4005 is not set ++# CONFIG_MXC6255 is not set ++# CONFIG_SCA3000 is not set ++# CONFIG_STK8312 is not set ++# CONFIG_STK8BA50 is not set ++# end of Accelerometers ++ ++# ++# Analog to digital converters ++# ++# CONFIG_AD7124 is not set ++# CONFIG_AD7266 is not set ++# CONFIG_AD7291 is not set ++# CONFIG_AD7298 is not set ++# CONFIG_AD7476 is not set ++# CONFIG_AD7606_IFACE_PARALLEL is not set ++# CONFIG_AD7606_IFACE_SPI is not set ++# CONFIG_AD7766 is not set ++# CONFIG_AD7768_1 is not set ++# CONFIG_AD7780 is not set ++# CONFIG_AD7791 is not set ++# CONFIG_AD7793 is not set ++# CONFIG_AD7887 is not set ++# CONFIG_AD7923 is not set ++# CONFIG_AD7949 is not set ++# CONFIG_AD799X is not set ++# 
CONFIG_CC10001_ADC is not set ++# CONFIG_HI8435 is not set ++# CONFIG_HX711 is not set ++# CONFIG_INA2XX_ADC is not set ++# CONFIG_INTEL_ADC is not set ++# CONFIG_LTC2471 is not set ++# CONFIG_LTC2485 is not set ++# CONFIG_LTC2497 is not set ++# CONFIG_MAX1027 is not set ++# CONFIG_MAX11100 is not set ++# CONFIG_MAX1118 is not set ++# CONFIG_MAX1363 is not set ++# CONFIG_MAX9611 is not set ++CONFIG_MCP320X=m ++# CONFIG_MCP3422 is not set ++# CONFIG_MCP3911 is not set ++CONFIG_NAU7802=m ++# CONFIG_TI_ADC081C is not set ++# CONFIG_TI_ADC0832 is not set ++# CONFIG_TI_ADC084S021 is not set ++# CONFIG_TI_ADC12138 is not set ++# CONFIG_TI_ADC108S102 is not set ++# CONFIG_TI_ADC128S052 is not set ++# CONFIG_TI_ADC161S626 is not set ++# CONFIG_TI_ADS1015 is not set ++# CONFIG_TI_ADS7950 is not set ++# CONFIG_TI_TLC4541 is not set ++# CONFIG_XILINX_XADC is not set ++# end of Analog to digital converters ++ ++# ++# Analog Front Ends ++# ++# end of Analog Front Ends ++ ++# ++# Amplifiers ++# ++# CONFIG_AD8366 is not set ++# end of Amplifiers ++ ++# ++# Chemical Sensors ++# ++# CONFIG_ATLAS_PH_SENSOR is not set ++# CONFIG_BME680 is not set ++# CONFIG_CCS811 is not set ++# CONFIG_IAQCORE is not set ++# CONFIG_SENSIRION_SGP30 is not set ++# CONFIG_SPS30 is not set ++# CONFIG_VZ89X is not set ++# end of Chemical Sensors ++ ++# ++# Hid Sensor IIO Common ++# ++CONFIG_HID_SENSOR_IIO_COMMON=m ++CONFIG_HID_SENSOR_IIO_TRIGGER=m ++# end of Hid Sensor IIO Common ++ ++# ++# SSP Sensor Common ++# ++# CONFIG_IIO_SSP_SENSORHUB is not set ++# end of SSP Sensor Common ++ ++CONFIG_IIO_ST_SENSORS_I2C=y ++CONFIG_IIO_ST_SENSORS_SPI=y ++CONFIG_IIO_ST_SENSORS_CORE=y ++ ++# ++# Digital to analog converters ++# ++# CONFIG_AD5064 is not set ++# CONFIG_AD5360 is not set ++# CONFIG_AD5380 is not set ++# CONFIG_AD5421 is not set ++# CONFIG_AD5446 is not set ++# CONFIG_AD5449 is not set ++# CONFIG_AD5592R is not set ++# CONFIG_AD5593R is not set ++# CONFIG_AD5504 is not set ++# CONFIG_AD5624R_SPI is not 
set ++# CONFIG_LTC1660 is not set ++# CONFIG_LTC2632 is not set ++# CONFIG_AD5686_SPI is not set ++# CONFIG_AD5696_I2C is not set ++# CONFIG_AD5755 is not set ++# CONFIG_AD5758 is not set ++# CONFIG_AD5761 is not set ++# CONFIG_AD5764 is not set ++# CONFIG_AD5791 is not set ++# CONFIG_AD7303 is not set ++# CONFIG_AD8801 is not set ++# CONFIG_DS4424 is not set ++# CONFIG_M62332 is not set ++# CONFIG_MAX517 is not set ++# CONFIG_MCP4725 is not set ++# CONFIG_MCP4922 is not set ++# CONFIG_TI_DAC082S085 is not set ++# CONFIG_TI_DAC5571 is not set ++# CONFIG_TI_DAC7311 is not set ++# CONFIG_TI_DAC7612 is not set ++# end of Digital to analog converters ++ ++# ++# IIO dummy driver ++# ++# end of IIO dummy driver ++ ++# ++# Frequency Synthesizers DDS/PLL ++# ++ ++# ++# Clock Generator/Distribution ++# ++# CONFIG_AD9523 is not set ++# end of Clock Generator/Distribution ++ ++# ++# Phase-Locked Loop (PLL) frequency synthesizers ++# ++# CONFIG_ADF4350 is not set ++# CONFIG_ADF4371 is not set ++# end of Phase-Locked Loop (PLL) frequency synthesizers ++# end of Frequency Synthesizers DDS/PLL ++ ++# ++# Digital gyroscope sensors ++# ++# CONFIG_ADIS16080 is not set ++# CONFIG_ADIS16130 is not set ++# CONFIG_ADIS16136 is not set ++# CONFIG_ADIS16260 is not set ++# CONFIG_ADXRS450 is not set ++CONFIG_BMG160=y ++CONFIG_BMG160_I2C=y ++CONFIG_BMG160_SPI=y ++# CONFIG_FXAS21002C is not set ++CONFIG_HID_SENSOR_GYRO_3D=m ++# CONFIG_MPU3050_I2C is not set ++CONFIG_IIO_ST_GYRO_3AXIS=y ++CONFIG_IIO_ST_GYRO_I2C_3AXIS=y ++CONFIG_IIO_ST_GYRO_SPI_3AXIS=y ++# CONFIG_ITG3200 is not set ++# end of Digital gyroscope sensors ++ ++# ++# Health Sensors ++# ++ ++# ++# Heart Rate Monitors ++# ++# CONFIG_AFE4403 is not set ++# CONFIG_AFE4404 is not set ++# CONFIG_MAX30100 is not set ++# CONFIG_MAX30102 is not set ++# end of Heart Rate Monitors ++# end of Health Sensors ++ ++# ++# Humidity sensors ++# ++# CONFIG_AM2315 is not set ++# CONFIG_DHT11 is not set ++# CONFIG_HDC100X is not set ++# 
CONFIG_HID_SENSOR_HUMIDITY is not set ++# CONFIG_HTS221 is not set ++# CONFIG_HTU21 is not set ++# CONFIG_SI7005 is not set ++# CONFIG_SI7020 is not set ++# end of Humidity sensors ++ ++# ++# Inertial measurement units ++# ++# CONFIG_ADIS16400 is not set ++# CONFIG_ADIS16460 is not set ++# CONFIG_ADIS16480 is not set ++# CONFIG_BMI160_I2C is not set ++# CONFIG_BMI160_SPI is not set ++CONFIG_KMX61=y ++# CONFIG_INV_MPU6050_I2C is not set ++# CONFIG_INV_MPU6050_SPI is not set ++# CONFIG_IIO_ST_LSM6DSX is not set ++# end of Inertial measurement units ++ ++# ++# Light sensors ++# ++CONFIG_ACPI_ALS=y ++# CONFIG_ADJD_S311 is not set ++# CONFIG_AL3320A is not set ++# CONFIG_APDS9300 is not set ++# CONFIG_APDS9960 is not set ++# CONFIG_BH1750 is not set ++# CONFIG_BH1780 is not set ++CONFIG_CM32181=y ++CONFIG_CM3232=y ++# CONFIG_CM3323 is not set ++CONFIG_CM36651=m ++# CONFIG_GP2AP020A00F is not set ++# CONFIG_SENSORS_ISL29018 is not set ++# CONFIG_SENSORS_ISL29028 is not set ++# CONFIG_ISL29125 is not set ++CONFIG_HID_SENSOR_ALS=m ++# CONFIG_HID_SENSOR_PROX is not set ++CONFIG_JSA1212=m ++# CONFIG_RPR0521 is not set ++# CONFIG_LTR501 is not set ++# CONFIG_LV0104CS is not set ++# CONFIG_MAX44000 is not set ++# CONFIG_MAX44009 is not set ++# CONFIG_NOA1305 is not set ++# CONFIG_OPT3001 is not set ++# CONFIG_PA12203001 is not set ++# CONFIG_SI1133 is not set ++# CONFIG_SI1145 is not set ++# CONFIG_STK3310 is not set ++# CONFIG_ST_UVIS25 is not set ++# CONFIG_TCS3414 is not set ++# CONFIG_TCS3472 is not set ++# CONFIG_SENSORS_TSL2563 is not set ++# CONFIG_TSL2583 is not set ++# CONFIG_TSL2772 is not set ++# CONFIG_TSL4531 is not set ++# CONFIG_US5182D is not set ++# CONFIG_VCNL4000 is not set ++# CONFIG_VCNL4035 is not set ++# CONFIG_VEML6070 is not set ++# CONFIG_VL6180 is not set ++# CONFIG_ZOPT2201 is not set ++# end of Light sensors ++ ++# ++# Magnetometer sensors ++# ++CONFIG_AK8975=m ++CONFIG_AK09911=m ++# CONFIG_BMC150_MAGN_I2C is not set ++# CONFIG_BMC150_MAGN_SPI is 
not set ++# CONFIG_MAG3110 is not set ++CONFIG_HID_SENSOR_MAGNETOMETER_3D=m ++# CONFIG_MMC35240 is not set ++CONFIG_IIO_ST_MAGN_3AXIS=y ++CONFIG_IIO_ST_MAGN_I2C_3AXIS=y ++CONFIG_IIO_ST_MAGN_SPI_3AXIS=y ++# CONFIG_SENSORS_HMC5843_I2C is not set ++# CONFIG_SENSORS_HMC5843_SPI is not set ++# CONFIG_SENSORS_RM3100_I2C is not set ++# CONFIG_SENSORS_RM3100_SPI is not set ++# end of Magnetometer sensors ++ ++# ++# Multiplexers ++# ++# end of Multiplexers ++ ++# ++# Inclinometer sensors ++# ++CONFIG_HID_SENSOR_INCLINOMETER_3D=m ++# CONFIG_HID_SENSOR_DEVICE_ROTATION is not set ++# end of Inclinometer sensors ++ ++# ++# Triggers - standalone ++# ++CONFIG_IIO_INTERRUPT_TRIGGER=y ++CONFIG_IIO_SYSFS_TRIGGER=y ++# end of Triggers - standalone ++ ++# ++# Digital potentiometers ++# ++# CONFIG_AD5272 is not set ++# CONFIG_DS1803 is not set ++# CONFIG_MAX5432 is not set ++# CONFIG_MAX5481 is not set ++# CONFIG_MAX5487 is not set ++# CONFIG_MCP4018 is not set ++# CONFIG_MCP4131 is not set ++# CONFIG_MCP4531 is not set ++# CONFIG_MCP41010 is not set ++# CONFIG_TPL0102 is not set ++# end of Digital potentiometers ++ ++# ++# Digital potentiostats ++# ++# CONFIG_LMP91000 is not set ++# end of Digital potentiostats ++ ++# ++# Pressure sensors ++# ++# CONFIG_ABP060MG is not set ++# CONFIG_BMP280 is not set ++# CONFIG_DPS310 is not set ++# CONFIG_HID_SENSOR_PRESS is not set ++# CONFIG_HP03 is not set ++# CONFIG_MPL115_I2C is not set ++# CONFIG_MPL115_SPI is not set ++# CONFIG_MPL3115 is not set ++# CONFIG_MS5611 is not set ++# CONFIG_MS5637 is not set ++CONFIG_IIO_ST_PRESS=y ++CONFIG_IIO_ST_PRESS_I2C=y ++CONFIG_IIO_ST_PRESS_SPI=y ++# CONFIG_T5403 is not set ++# CONFIG_HP206C is not set ++# CONFIG_ZPA2326 is not set ++# end of Pressure sensors ++ ++# ++# Lightning sensors ++# ++# CONFIG_AS3935 is not set ++# end of Lightning sensors ++ ++# ++# Proximity and distance sensors ++# ++# CONFIG_ISL29501 is not set ++# CONFIG_LIDAR_LITE_V2 is not set ++# CONFIG_MB1232 is not set ++# CONFIG_RFD77402 
is not set ++# CONFIG_SRF04 is not set ++# CONFIG_SX9500 is not set ++# CONFIG_SRF08 is not set ++# CONFIG_VL53L0X_I2C is not set ++# end of Proximity and distance sensors ++ ++# ++# Resolver to digital converters ++# ++# CONFIG_AD2S90 is not set ++# CONFIG_AD2S1200 is not set ++# end of Resolver to digital converters ++ ++# ++# Temperature sensors ++# ++# CONFIG_MAXIM_THERMOCOUPLE is not set ++# CONFIG_HID_SENSOR_TEMP is not set ++# CONFIG_MLX90614 is not set ++# CONFIG_MLX90632 is not set ++CONFIG_TMP006=m ++# CONFIG_TMP007 is not set ++# CONFIG_TSYS01 is not set ++# CONFIG_TSYS02D is not set ++# CONFIG_MAX31856 is not set ++# end of Temperature sensors ++ ++# CONFIG_NTB is not set ++# CONFIG_VME_BUS is not set ++CONFIG_PWM=y ++CONFIG_PWM_SYSFS=y ++# CONFIG_PWM_CRC is not set ++# CONFIG_PWM_DWC is not set ++CONFIG_PWM_LPSS=y ++CONFIG_PWM_LPSS_PCI=y ++# CONFIG_PWM_LPSS_PLATFORM is not set ++# CONFIG_PWM_PCA9685 is not set ++ ++# ++# IRQ chip support ++# ++# end of IRQ chip support ++ ++# CONFIG_IPACK_BUS is not set ++# CONFIG_RESET_CONTROLLER is not set ++ ++# ++# PHY Subsystem ++# ++CONFIG_GENERIC_PHY=y ++# CONFIG_BCM_KONA_USB2_PHY is not set ++# CONFIG_PHY_PXA_28NM_HSIC is not set ++# CONFIG_PHY_PXA_28NM_USB2 is not set ++# CONFIG_PHY_CPCAP_USB is not set ++# CONFIG_PHY_QCOM_USB_HS is not set ++# CONFIG_PHY_QCOM_USB_HSIC is not set ++# CONFIG_PHY_SAMSUNG_USB2 is not set ++# CONFIG_PHY_TUSB1210 is not set ++# end of PHY Subsystem ++ ++CONFIG_POWERCAP=y ++CONFIG_INTEL_RAPL_CORE=y ++CONFIG_INTEL_RAPL=y ++# CONFIG_IDLE_INJECT is not set ++# CONFIG_MCB is not set ++ ++# ++# Performance monitor support ++# ++# end of Performance monitor support ++ ++CONFIG_RAS=y ++# CONFIG_THUNDERBOLT is not set ++ ++# ++# Android ++# ++CONFIG_ANDROID=y ++CONFIG_ANDROID_BINDER_IPC=y ++# CONFIG_ANDROID_BINDERFS is not set ++CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder" ++# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set ++# end of Android ++ ++# CONFIG_LIBNVDIMM is not set 
++CONFIG_DAX=y ++# CONFIG_DEV_DAX is not set ++CONFIG_NVMEM=y ++CONFIG_NVMEM_SYSFS=y ++ ++# ++# HW tracing support ++# ++# CONFIG_STM is not set ++# CONFIG_INTEL_TH is not set ++# end of HW tracing support ++ ++# CONFIG_FPGA is not set ++CONFIG_PM_OPP=y ++# CONFIG_UNISYS_VISORBUS is not set ++# CONFIG_SIOX is not set ++# CONFIG_SLIMBUS is not set ++# CONFIG_INTERCONNECT is not set ++# CONFIG_COUNTER is not set ++# end of Device Drivers ++ ++# ++# File systems ++# ++CONFIG_DCACHE_WORD_ACCESS=y ++CONFIG_VALIDATE_FS_PARSER=y ++CONFIG_FS_IOMAP=y ++# CONFIG_EXT2_FS is not set ++# CONFIG_EXT3_FS is not set ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_USE_FOR_EXT2=y ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++# CONFIG_EXT4_DEBUG is not set ++CONFIG_JBD2=y ++# CONFIG_JBD2_DEBUG is not set ++CONFIG_FS_MBCACHE=y ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++# CONFIG_XFS_FS is not set ++# CONFIG_GFS2_FS is not set ++# CONFIG_OCFS2_FS is not set ++# CONFIG_BTRFS_FS is not set ++# CONFIG_NILFS2_FS is not set ++CONFIG_F2FS_FS=y ++CONFIG_F2FS_STAT_FS=y ++CONFIG_F2FS_FS_XATTR=y ++CONFIG_F2FS_FS_POSIX_ACL=y ++CONFIG_F2FS_FS_SECURITY=y ++# CONFIG_F2FS_CHECK_FS is not set ++# CONFIG_F2FS_IO_TRACE is not set ++# CONFIG_F2FS_FAULT_INJECTION is not set ++# CONFIG_FS_DAX is not set ++CONFIG_FS_POSIX_ACL=y ++CONFIG_EXPORTFS=y ++# CONFIG_EXPORTFS_BLOCK_OPS is not set ++CONFIG_FILE_LOCKING=y ++CONFIG_MANDATORY_FILE_LOCKING=y ++CONFIG_FS_ENCRYPTION=y ++# CONFIG_FS_VERITY is not set ++CONFIG_FSNOTIFY=y ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY_USER=y ++CONFIG_FANOTIFY=y ++# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set ++CONFIG_QUOTA=y ++CONFIG_QUOTA_NETLINK_INTERFACE=y ++# CONFIG_PRINT_QUOTA_WARNING is not set ++# CONFIG_QUOTA_DEBUG is not set ++CONFIG_QUOTA_TREE=y ++# CONFIG_QFMT_V1 is not set ++CONFIG_QFMT_V2=y ++CONFIG_QUOTACTL=y ++CONFIG_QUOTACTL_COMPAT=y ++# CONFIG_AUTOFS4_FS is not set ++# CONFIG_AUTOFS_FS is not set ++CONFIG_FUSE_FS=y ++# CONFIG_CUSE is not set ++# 
CONFIG_VIRTIO_FS is not set ++CONFIG_OVERLAY_FS=y ++# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set ++CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y ++# CONFIG_OVERLAY_FS_INDEX is not set ++# CONFIG_OVERLAY_FS_XINO_AUTO is not set ++# CONFIG_OVERLAY_FS_METACOPY is not set ++ ++# ++# Caches ++# ++# CONFIG_FSCACHE is not set ++# end of Caches ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++# end of CD-ROM/DVD Filesystems ++ ++# ++# DOS/FAT/NT Filesystems ++# ++CONFIG_FAT_FS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_FAT_DEFAULT_CODEPAGE=437 ++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" ++# CONFIG_FAT_DEFAULT_UTF8 is not set ++# CONFIG_NTFS_FS is not set ++# end of DOS/FAT/NT Filesystems ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++# CONFIG_PROC_KCORE is not set ++CONFIG_PROC_SYSCTL=y ++CONFIG_PROC_PAGE_MONITOR=y ++# CONFIG_PROC_CHILDREN is not set ++CONFIG_PROC_PID_ARCH_STATUS=y ++CONFIG_PROC_UID=y ++CONFIG_KERNFS=y ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_TMPFS_XATTR=y ++CONFIG_HUGETLBFS=y ++CONFIG_HUGETLB_PAGE=y ++CONFIG_MEMFD_CREATE=y ++CONFIG_ARCH_HAS_GIGANTIC_PAGE=y ++CONFIG_CONFIGFS_FS=y ++CONFIG_EFIVAR_FS=y ++# end of Pseudo filesystems ++ ++CONFIG_MISC_FILESYSTEMS=y ++# CONFIG_ORANGEFS_FS is not set ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_ECRYPT_FS is not set ++CONFIG_SDCARD_FS=y ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++# CONFIG_CRAMFS is not set ++CONFIG_SQUASHFS=y ++CONFIG_SQUASHFS_FILE_CACHE=y ++# CONFIG_SQUASHFS_FILE_DIRECT is not set ++# CONFIG_SQUASHFS_DECOMP_SINGLE is not set ++CONFIG_SQUASHFS_DECOMP_MULTI=y ++# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set ++CONFIG_SQUASHFS_XATTR=y ++CONFIG_SQUASHFS_ZLIB=y ++CONFIG_SQUASHFS_LZ4=y ++CONFIG_SQUASHFS_LZO=y ++CONFIG_SQUASHFS_XZ=y ++# CONFIG_SQUASHFS_ZSTD is not set ++# 
CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set ++CONFIG_SQUASHFS_EMBEDDED=y ++CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_QNX6FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++CONFIG_PSTORE=y ++CONFIG_PSTORE_DEFLATE_COMPRESS=y ++# CONFIG_PSTORE_LZO_COMPRESS is not set ++# CONFIG_PSTORE_LZ4_COMPRESS is not set ++# CONFIG_PSTORE_LZ4HC_COMPRESS is not set ++# CONFIG_PSTORE_842_COMPRESS is not set ++# CONFIG_PSTORE_ZSTD_COMPRESS is not set ++CONFIG_PSTORE_COMPRESS=y ++CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y ++CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" ++CONFIG_PSTORE_CONSOLE=y ++# CONFIG_PSTORE_PMSG is not set ++# CONFIG_PSTORE_FTRACE is not set ++CONFIG_PSTORE_RAM=y ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++# CONFIG_EROFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++# CONFIG_NFS_FS is not set ++# CONFIG_NFSD is not set ++# CONFIG_CEPH_FS is not set ++CONFIG_CIFS=y ++# CONFIG_CIFS_STATS2 is not set ++CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y ++CONFIG_CIFS_WEAK_PW_HASH=y ++# CONFIG_CIFS_UPCALL is not set ++# CONFIG_CIFS_XATTR is not set ++CONFIG_CIFS_DEBUG=y ++# CONFIG_CIFS_DEBUG2 is not set ++# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set ++# CONFIG_CIFS_DFS_UPCALL is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="utf8" ++CONFIG_NLS_CODEPAGE_437=y ++# CONFIG_NLS_CODEPAGE_737 is not set ++# CONFIG_NLS_CODEPAGE_775 is not set ++# CONFIG_NLS_CODEPAGE_850 is not set ++# CONFIG_NLS_CODEPAGE_852 is not set ++# CONFIG_NLS_CODEPAGE_855 is not set ++# CONFIG_NLS_CODEPAGE_857 is not set ++# CONFIG_NLS_CODEPAGE_860 is not set ++# CONFIG_NLS_CODEPAGE_861 is not set ++# CONFIG_NLS_CODEPAGE_862 is not set ++# CONFIG_NLS_CODEPAGE_863 is not set ++# CONFIG_NLS_CODEPAGE_864 is not set ++# CONFIG_NLS_CODEPAGE_865 is not set ++# CONFIG_NLS_CODEPAGE_866 is not set ++# 
CONFIG_NLS_CODEPAGE_869 is not set ++# CONFIG_NLS_CODEPAGE_936 is not set ++# CONFIG_NLS_CODEPAGE_950 is not set ++# CONFIG_NLS_CODEPAGE_932 is not set ++# CONFIG_NLS_CODEPAGE_949 is not set ++# CONFIG_NLS_CODEPAGE_874 is not set ++# CONFIG_NLS_ISO8859_8 is not set ++# CONFIG_NLS_CODEPAGE_1250 is not set ++# CONFIG_NLS_CODEPAGE_1251 is not set ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=y ++# CONFIG_NLS_ISO8859_2 is not set ++# CONFIG_NLS_ISO8859_3 is not set ++# CONFIG_NLS_ISO8859_4 is not set ++# CONFIG_NLS_ISO8859_5 is not set ++# CONFIG_NLS_ISO8859_6 is not set ++# CONFIG_NLS_ISO8859_7 is not set ++# CONFIG_NLS_ISO8859_9 is not set ++# CONFIG_NLS_ISO8859_13 is not set ++# CONFIG_NLS_ISO8859_14 is not set ++# CONFIG_NLS_ISO8859_15 is not set ++# CONFIG_NLS_KOI8_R is not set ++# CONFIG_NLS_KOI8_U is not set ++# CONFIG_NLS_MAC_ROMAN is not set ++# CONFIG_NLS_MAC_CELTIC is not set ++# CONFIG_NLS_MAC_CENTEURO is not set ++# CONFIG_NLS_MAC_CROATIAN is not set ++# CONFIG_NLS_MAC_CYRILLIC is not set ++# CONFIG_NLS_MAC_GAELIC is not set ++# CONFIG_NLS_MAC_GREEK is not set ++# CONFIG_NLS_MAC_ICELAND is not set ++# CONFIG_NLS_MAC_INUIT is not set ++# CONFIG_NLS_MAC_ROMANIAN is not set ++# CONFIG_NLS_MAC_TURKISH is not set ++CONFIG_NLS_UTF8=y ++# CONFIG_DLM is not set ++# CONFIG_UNICODE is not set ++# end of File systems ++ ++# ++# Security options ++# ++CONFIG_KEYS=y ++CONFIG_KEYS_COMPAT=y ++# CONFIG_KEYS_REQUEST_CACHE is not set ++# CONFIG_PERSISTENT_KEYRINGS is not set ++CONFIG_BIG_KEYS=y ++# CONFIG_TRUSTED_KEYS is not set ++# CONFIG_ENCRYPTED_KEYS is not set ++# CONFIG_KEY_DH_OPERATIONS is not set ++# CONFIG_SECURITY_DMESG_RESTRICT is not set ++CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y ++CONFIG_SECURITY=y ++CONFIG_SECURITYFS=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_PAGE_TABLE_ISOLATION=y ++# CONFIG_SECURITY_NETWORK_XFRM is not set ++# CONFIG_SECURITY_PATH is not set ++# CONFIG_INTEL_TXT is not set ++CONFIG_LSM_MMAP_MIN_ADDR=65536 ++CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y 
++CONFIG_HARDENED_USERCOPY=y ++# CONFIG_HARDENED_USERCOPY_FALLBACK is not set ++# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set ++CONFIG_FORTIFY_SOURCE=y ++# CONFIG_STATIC_USERMODEHELPER is not set ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SELINUX_BOOTPARAM=y ++# CONFIG_SECURITY_SELINUX_DISABLE is not set ++CONFIG_SECURITY_SELINUX_DEVELOP=y ++CONFIG_SECURITY_SELINUX_AVC_STATS=y ++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 ++# CONFIG_SECURITY_SMACK is not set ++# CONFIG_SECURITY_TOMOYO is not set ++# CONFIG_SECURITY_APPARMOR is not set ++# CONFIG_SECURITY_LOADPIN is not set ++CONFIG_SECURITY_YAMA=y ++# CONFIG_SECURITY_SAFESETID is not set ++# CONFIG_SECURITY_LOCKDOWN_LSM is not set ++# CONFIG_INTEGRITY is not set ++CONFIG_DEFAULT_SECURITY_SELINUX=y ++# CONFIG_DEFAULT_SECURITY_DAC is not set ++CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor" ++ ++# ++# Kernel hardening options ++# ++ ++# ++# Memory initialization ++# ++CONFIG_INIT_STACK_NONE=y ++# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set ++# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set ++# end of Memory initialization ++# end of Kernel hardening options ++# end of Security options ++ ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++CONFIG_CRYPTO_ALGAPI=y ++CONFIG_CRYPTO_ALGAPI2=y ++CONFIG_CRYPTO_AEAD=y ++CONFIG_CRYPTO_AEAD2=y ++CONFIG_CRYPTO_BLKCIPHER=y ++CONFIG_CRYPTO_BLKCIPHER2=y ++CONFIG_CRYPTO_HASH=y ++CONFIG_CRYPTO_HASH2=y ++CONFIG_CRYPTO_RNG=y ++CONFIG_CRYPTO_RNG2=y ++CONFIG_CRYPTO_RNG_DEFAULT=y ++CONFIG_CRYPTO_AKCIPHER2=y ++CONFIG_CRYPTO_AKCIPHER=y ++CONFIG_CRYPTO_KPP2=y ++CONFIG_CRYPTO_KPP=m ++CONFIG_CRYPTO_ACOMP2=y ++CONFIG_CRYPTO_MANAGER=y ++CONFIG_CRYPTO_MANAGER2=y ++# CONFIG_CRYPTO_USER is not set ++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y ++CONFIG_CRYPTO_GF128MUL=y ++CONFIG_CRYPTO_NULL=y ++CONFIG_CRYPTO_NULL2=y ++# CONFIG_CRYPTO_PCRYPT is not set ++CONFIG_CRYPTO_CRYPTD=y ++CONFIG_CRYPTO_AUTHENC=y ++# CONFIG_CRYPTO_TEST is not set ++CONFIG_CRYPTO_SIMD=y 
++CONFIG_CRYPTO_GLUE_HELPER_X86=y ++CONFIG_CRYPTO_ENGINE=m ++ ++# ++# Public-key cryptography ++# ++CONFIG_CRYPTO_RSA=y ++# CONFIG_CRYPTO_DH is not set ++CONFIG_CRYPTO_ECC=m ++CONFIG_CRYPTO_ECDH=m ++# CONFIG_CRYPTO_ECRDSA is not set ++ ++# ++# Authenticated Encryption with Associated Data ++# ++CONFIG_CRYPTO_CCM=y ++CONFIG_CRYPTO_GCM=y ++# CONFIG_CRYPTO_CHACHA20POLY1305 is not set ++# CONFIG_CRYPTO_AEGIS128 is not set ++# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set ++CONFIG_CRYPTO_SEQIV=y ++CONFIG_CRYPTO_ECHAINIV=y ++ ++# ++# Block modes ++# ++CONFIG_CRYPTO_CBC=y ++# CONFIG_CRYPTO_CFB is not set ++CONFIG_CRYPTO_CTR=y ++CONFIG_CRYPTO_CTS=y ++CONFIG_CRYPTO_ECB=y ++CONFIG_CRYPTO_LRW=y ++# CONFIG_CRYPTO_OFB is not set ++# CONFIG_CRYPTO_PCBC is not set ++CONFIG_CRYPTO_XTS=y ++# CONFIG_CRYPTO_KEYWRAP is not set ++# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set ++# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set ++# CONFIG_CRYPTO_ADIANTUM is not set ++CONFIG_CRYPTO_ESSIV=y ++ ++# ++# Hash modes ++# ++CONFIG_CRYPTO_CMAC=y ++CONFIG_CRYPTO_HMAC=y ++# CONFIG_CRYPTO_XCBC is not set ++# CONFIG_CRYPTO_VMAC is not set ++ ++# ++# Digest ++# ++CONFIG_CRYPTO_CRC32C=y ++CONFIG_CRYPTO_CRC32C_INTEL=y ++CONFIG_CRYPTO_CRC32=y ++# CONFIG_CRYPTO_CRC32_PCLMUL is not set ++# CONFIG_CRYPTO_XXHASH is not set ++CONFIG_CRYPTO_CRCT10DIF=y ++CONFIG_CRYPTO_CRCT10DIF_PCLMUL=y ++CONFIG_CRYPTO_GHASH=y ++# CONFIG_CRYPTO_POLY1305 is not set ++# CONFIG_CRYPTO_POLY1305_X86_64 is not set ++CONFIG_CRYPTO_MD4=y ++CONFIG_CRYPTO_MD5=y ++CONFIG_CRYPTO_MICHAEL_MIC=m ++# CONFIG_CRYPTO_RMD128 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_RMD256 is not set ++# CONFIG_CRYPTO_RMD320 is not set ++CONFIG_CRYPTO_SHA1=y ++CONFIG_CRYPTO_SHA1_SSSE3=y ++# CONFIG_CRYPTO_SHA256_SSSE3 is not set ++# CONFIG_CRYPTO_SHA512_SSSE3 is not set ++CONFIG_CRYPTO_LIB_SHA256=y ++CONFIG_CRYPTO_SHA256=y ++CONFIG_CRYPTO_SHA512=y ++# CONFIG_CRYPTO_SHA3 is not set ++# CONFIG_CRYPTO_SM3 is not set ++# CONFIG_CRYPTO_STREEBOG is not set 
++# CONFIG_CRYPTO_TGR192 is not set ++# CONFIG_CRYPTO_WP512 is not set ++# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set ++ ++# ++# Ciphers ++# ++CONFIG_CRYPTO_LIB_AES=y ++CONFIG_CRYPTO_AES=y ++# CONFIG_CRYPTO_AES_TI is not set ++CONFIG_CRYPTO_AES_NI_INTEL=y ++# CONFIG_CRYPTO_ANUBIS is not set ++CONFIG_CRYPTO_LIB_ARC4=y ++CONFIG_CRYPTO_ARC4=y ++CONFIG_CRYPTO_BLOWFISH=y ++CONFIG_CRYPTO_BLOWFISH_COMMON=y ++CONFIG_CRYPTO_BLOWFISH_X86_64=y ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set ++# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set ++# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++# CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set ++CONFIG_CRYPTO_LIB_DES=y ++CONFIG_CRYPTO_DES=y ++# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_KHAZAD is not set ++# CONFIG_CRYPTO_SALSA20 is not set ++# CONFIG_CRYPTO_CHACHA20 is not set ++# CONFIG_CRYPTO_CHACHA20_X86_64 is not set ++# CONFIG_CRYPTO_SEED is not set ++# CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set ++# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set ++# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set ++# CONFIG_CRYPTO_SM4 is not set ++# CONFIG_CRYPTO_TEA is not set ++CONFIG_CRYPTO_TWOFISH=y ++CONFIG_CRYPTO_TWOFISH_COMMON=y ++CONFIG_CRYPTO_TWOFISH_X86_64=y ++CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=y ++CONFIG_CRYPTO_TWOFISH_AVX_X86_64=y ++ ++# ++# Compression ++# ++CONFIG_CRYPTO_DEFLATE=y ++CONFIG_CRYPTO_LZO=y ++# CONFIG_CRYPTO_842 is not set ++CONFIG_CRYPTO_LZ4=y ++CONFIG_CRYPTO_LZ4HC=y ++# CONFIG_CRYPTO_ZSTD is not set ++ ++# ++# Random Number Generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRYPTO_DRBG_MENU=y ++CONFIG_CRYPTO_DRBG_HMAC=y ++# CONFIG_CRYPTO_DRBG_HASH is not set ++# CONFIG_CRYPTO_DRBG_CTR is not set ++CONFIG_CRYPTO_DRBG=y ++CONFIG_CRYPTO_JITTERENTROPY=y ++# 
CONFIG_CRYPTO_USER_API_HASH is not set ++# CONFIG_CRYPTO_USER_API_SKCIPHER is not set ++# CONFIG_CRYPTO_USER_API_RNG is not set ++# CONFIG_CRYPTO_USER_API_AEAD is not set ++CONFIG_CRYPTO_HASH_INFO=y ++CONFIG_CRYPTO_HW=y ++# CONFIG_CRYPTO_DEV_PADLOCK is not set ++# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set ++# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set ++# CONFIG_CRYPTO_DEV_CCP is not set ++# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set ++# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set ++# CONFIG_CRYPTO_DEV_QAT_C62X is not set ++# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set ++# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set ++# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set ++# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set ++CONFIG_CRYPTO_DEV_VIRTIO=m ++# CONFIG_CRYPTO_DEV_SAFEXCEL is not set ++CONFIG_ASYMMETRIC_KEY_TYPE=y ++CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y ++CONFIG_X509_CERTIFICATE_PARSER=y ++# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set ++CONFIG_PKCS7_MESSAGE_PARSER=y ++CONFIG_PKCS7_TEST_KEY=y ++# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set ++ ++# ++# Certificates for signature checking ++# ++CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" ++CONFIG_SYSTEM_TRUSTED_KEYRING=y ++CONFIG_SYSTEM_TRUSTED_KEYS="" ++# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set ++# CONFIG_SECONDARY_TRUSTED_KEYRING is not set ++# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set ++# end of Certificates for signature checking ++ ++CONFIG_BINARY_PRINTF=y ++ ++# ++# Library routines ++# ++# CONFIG_PACKING is not set ++CONFIG_BITREVERSE=y ++CONFIG_GENERIC_STRNCPY_FROM_USER=y ++CONFIG_GENERIC_STRNLEN_USER=y ++CONFIG_GENERIC_NET_UTILS=y ++CONFIG_GENERIC_FIND_FIRST_BIT=y ++CONFIG_CORDIC=m ++CONFIG_RATIONAL=y ++CONFIG_GENERIC_PCI_IOMAP=y ++CONFIG_GENERIC_IOMAP=y ++CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y ++CONFIG_ARCH_HAS_FAST_MULTIPLIER=y ++CONFIG_CRC_CCITT=y ++CONFIG_CRC16=y ++CONFIG_CRC_T10DIF=y ++CONFIG_CRC_ITU_T=y ++CONFIG_CRC32=y ++# CONFIG_CRC32_SELFTEST is not set ++CONFIG_CRC32_SLICEBY8=y ++# CONFIG_CRC32_SLICEBY4 is not set 
++# CONFIG_CRC32_SARWATE is not set ++# CONFIG_CRC32_BIT is not set ++CONFIG_CRC64=m ++# CONFIG_CRC4 is not set ++# CONFIG_CRC7 is not set ++CONFIG_LIBCRC32C=y ++CONFIG_CRC8=y ++CONFIG_XXHASH=y ++# CONFIG_RANDOM32_SELFTEST is not set ++CONFIG_ZLIB_INFLATE=y ++CONFIG_ZLIB_DEFLATE=y ++CONFIG_LZO_COMPRESS=y ++CONFIG_LZO_DECOMPRESS=y ++CONFIG_LZ4_COMPRESS=y ++CONFIG_LZ4HC_COMPRESS=y ++CONFIG_LZ4_DECOMPRESS=y ++CONFIG_XZ_DEC=y ++CONFIG_XZ_DEC_X86=y ++# CONFIG_XZ_DEC_POWERPC is not set ++# CONFIG_XZ_DEC_IA64 is not set ++# CONFIG_XZ_DEC_ARM is not set ++# CONFIG_XZ_DEC_ARMTHUMB is not set ++# CONFIG_XZ_DEC_SPARC is not set ++CONFIG_XZ_DEC_BCJ=y ++# CONFIG_XZ_DEC_TEST is not set ++CONFIG_DECOMPRESS_GZIP=y ++CONFIG_DECOMPRESS_BZIP2=y ++CONFIG_DECOMPRESS_LZMA=y ++CONFIG_DECOMPRESS_XZ=y ++CONFIG_DECOMPRESS_LZO=y ++CONFIG_DECOMPRESS_LZ4=y ++CONFIG_GENERIC_ALLOCATOR=y ++CONFIG_REED_SOLOMON=y ++CONFIG_REED_SOLOMON_ENC8=y ++CONFIG_REED_SOLOMON_DEC8=y ++CONFIG_TEXTSEARCH=y ++CONFIG_TEXTSEARCH_KMP=y ++CONFIG_TEXTSEARCH_BM=y ++CONFIG_TEXTSEARCH_FSM=y ++CONFIG_INTERVAL_TREE=y ++CONFIG_XARRAY_MULTI=y ++CONFIG_ASSOCIATIVE_ARRAY=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_IOPORT_MAP=y ++CONFIG_HAS_DMA=y ++CONFIG_NEED_SG_DMA_LENGTH=y ++CONFIG_NEED_DMA_MAP_STATE=y ++CONFIG_ARCH_DMA_ADDR_T_64BIT=y ++CONFIG_SWIOTLB=y ++# CONFIG_DMA_API_DEBUG is not set ++CONFIG_SGL_ALLOC=y ++CONFIG_IOMMU_HELPER=y ++CONFIG_CHECK_SIGNATURE=y ++CONFIG_CPU_RMAP=y ++CONFIG_DQL=y ++CONFIG_GLOB=y ++# CONFIG_GLOB_SELFTEST is not set ++CONFIG_NLATTR=y ++CONFIG_CLZ_TAB=y ++# CONFIG_IRQ_POLL is not set ++CONFIG_MPILIB=y ++CONFIG_OID_REGISTRY=y ++CONFIG_UCS2_STRING=y ++CONFIG_HAVE_GENERIC_VDSO=y ++CONFIG_GENERIC_GETTIMEOFDAY=y ++CONFIG_FONT_SUPPORT=y ++CONFIG_FONT_8x16=y ++CONFIG_FONT_AUTOSELECT=y ++CONFIG_SG_POOL=y ++CONFIG_ARCH_HAS_PMEM_API=y ++CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y ++CONFIG_ARCH_HAS_UACCESS_MCSAFE=y ++CONFIG_ARCH_STACKWALK=y ++CONFIG_SBITMAP=y ++# CONFIG_STRING_SELFTEST is not set ++# end of Library routines 
++ ++# ++# Kernel hacking ++# ++ ++# ++# printk and dmesg options ++# ++CONFIG_PRINTK_TIME=y ++# CONFIG_PRINTK_CALLER is not set ++CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 ++CONFIG_CONSOLE_LOGLEVEL_QUIET=4 ++CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 ++# CONFIG_BOOT_PRINTK_DELAY is not set ++CONFIG_DYNAMIC_DEBUG=y ++# end of printk and dmesg options ++ ++# ++# Compile-time checks and compiler options ++# ++# CONFIG_DEBUG_INFO is not set ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_WARN=2048 ++# CONFIG_STRIP_ASM_SYMS is not set ++# CONFIG_READABLE_ASM is not set ++CONFIG_DEBUG_FS=y ++# CONFIG_HEADERS_INSTALL is not set ++CONFIG_OPTIMIZE_INLINING=y ++# CONFIG_DEBUG_SECTION_MISMATCH is not set ++CONFIG_SECTION_MISMATCH_WARN_ONLY=y ++CONFIG_FRAME_POINTER=y ++CONFIG_STACK_VALIDATION=y ++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set ++# end of Compile-time checks and compiler options ++ ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 ++CONFIG_MAGIC_SYSRQ_SERIAL=y ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_MISC=y ++ ++# ++# Memory Debugging ++# ++# CONFIG_PAGE_EXTENSION is not set ++# CONFIG_DEBUG_PAGEALLOC is not set ++# CONFIG_PAGE_OWNER is not set ++CONFIG_PAGE_POISONING=y ++# CONFIG_PAGE_POISONING_NO_SANITY is not set ++# CONFIG_PAGE_POISONING_ZERO is not set ++# CONFIG_DEBUG_PAGE_REF is not set ++# CONFIG_DEBUG_RODATA_TEST is not set ++# CONFIG_DEBUG_OBJECTS is not set ++# CONFIG_DEBUG_SLAB is not set ++CONFIG_HAVE_DEBUG_KMEMLEAK=y ++# CONFIG_DEBUG_KMEMLEAK is not set ++# CONFIG_DEBUG_STACK_USAGE is not set ++# CONFIG_DEBUG_VM is not set ++CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y ++# CONFIG_DEBUG_VIRTUAL is not set ++CONFIG_DEBUG_MEMORY_INIT=y ++# CONFIG_DEBUG_PER_CPU_MAPS is not set ++CONFIG_HAVE_ARCH_KASAN=y ++CONFIG_CC_HAS_KASAN_GENERIC=y ++# CONFIG_KASAN is not set ++CONFIG_KASAN_STACK=1 ++# end of Memory Debugging ++ ++CONFIG_ARCH_HAS_KCOV=y ++CONFIG_CC_HAS_SANCOV_TRACE_PC=y ++# CONFIG_KCOV is not set ++# CONFIG_DEBUG_SHIRQ is not set ++ ++# ++# Debug Lockups and Hangs ++# ++# 
CONFIG_SOFTLOCKUP_DETECTOR is not set ++CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y ++# CONFIG_HARDLOCKUP_DETECTOR is not set ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 ++# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set ++CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 ++# CONFIG_WQ_WATCHDOG is not set ++# end of Debug Lockups and Hangs ++ ++CONFIG_PANIC_ON_OOPS=y ++CONFIG_PANIC_ON_OOPS_VALUE=1 ++CONFIG_PANIC_TIMEOUT=10 ++CONFIG_SCHED_DEBUG=y ++CONFIG_SCHED_INFO=y ++CONFIG_SCHEDSTATS=y ++CONFIG_SCHED_STACK_END_CHECK=y ++# CONFIG_DEBUG_TIMEKEEPING is not set ++ ++# ++# Lock Debugging (spinlocks, mutexes, etc...) ++# ++CONFIG_LOCK_DEBUGGING_SUPPORT=y ++# CONFIG_PROVE_LOCKING is not set ++# CONFIG_LOCK_STAT is not set ++# CONFIG_DEBUG_RT_MUTEXES is not set ++# CONFIG_DEBUG_SPINLOCK is not set ++# CONFIG_DEBUG_MUTEXES is not set ++# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set ++# CONFIG_DEBUG_RWSEMS is not set ++# CONFIG_DEBUG_LOCK_ALLOC is not set ++CONFIG_DEBUG_ATOMIC_SLEEP=y ++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set ++# CONFIG_LOCK_TORTURE_TEST is not set ++# CONFIG_WW_MUTEX_SELFTEST is not set ++# end of Lock Debugging (spinlocks, mutexes, etc...) 
++ ++CONFIG_TRACE_IRQFLAGS=y ++CONFIG_STACKTRACE=y ++# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set ++# CONFIG_DEBUG_KOBJECT is not set ++CONFIG_DEBUG_LIST=y ++# CONFIG_DEBUG_PLIST is not set ++CONFIG_DEBUG_SG=y ++CONFIG_DEBUG_NOTIFIERS=y ++CONFIG_DEBUG_CREDENTIALS=y ++ ++# ++# RCU Debugging ++# ++# CONFIG_RCU_PERF_TEST is not set ++# CONFIG_RCU_TORTURE_TEST is not set ++CONFIG_RCU_CPU_STALL_TIMEOUT=21 ++# CONFIG_RCU_TRACE is not set ++# CONFIG_RCU_EQS_DEBUG is not set ++# end of RCU Debugging ++ ++# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set ++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set ++# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set ++# CONFIG_NOTIFIER_ERROR_INJECTION is not set ++# CONFIG_FAULT_INJECTION is not set ++CONFIG_LATENCYTOP=y ++CONFIG_USER_STACKTRACE_SUPPORT=y ++CONFIG_NOP_TRACER=y ++CONFIG_HAVE_FUNCTION_TRACER=y ++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y ++CONFIG_HAVE_DYNAMIC_FTRACE=y ++CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y ++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y ++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y ++CONFIG_HAVE_FENTRY=y ++CONFIG_HAVE_C_RECORDMCOUNT=y ++CONFIG_TRACER_MAX_TRACE=y ++CONFIG_TRACE_CLOCK=y ++CONFIG_RING_BUFFER=y ++CONFIG_EVENT_TRACING=y ++CONFIG_CONTEXT_SWITCH_TRACER=y ++CONFIG_RING_BUFFER_ALLOW_SWAP=y ++CONFIG_PREEMPTIRQ_TRACEPOINTS=y ++CONFIG_TRACING=y ++CONFIG_GENERIC_TRACER=y ++CONFIG_TRACING_SUPPORT=y ++CONFIG_FTRACE=y ++CONFIG_FUNCTION_TRACER=y ++CONFIG_FUNCTION_GRAPH_TRACER=y ++# CONFIG_PREEMPTIRQ_EVENTS is not set ++CONFIG_IRQSOFF_TRACER=y ++CONFIG_SCHED_TRACER=y ++# CONFIG_HWLAT_TRACER is not set ++CONFIG_FTRACE_SYSCALLS=y ++CONFIG_TRACER_SNAPSHOT=y ++CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y ++CONFIG_BRANCH_PROFILE_NONE=y ++# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set ++# CONFIG_STACK_TRACER is not set ++CONFIG_BLK_DEV_IO_TRACE=y ++# CONFIG_UPROBE_EVENTS is not set ++CONFIG_DYNAMIC_FTRACE=y ++CONFIG_DYNAMIC_FTRACE_WITH_REGS=y ++# CONFIG_FUNCTION_PROFILER is not set ++CONFIG_FTRACE_MCOUNT_RECORD=y ++# CONFIG_FTRACE_STARTUP_TEST is not set ++# 
CONFIG_MMIOTRACE is not set ++# CONFIG_HIST_TRIGGERS is not set ++# CONFIG_TRACEPOINT_BENCHMARK is not set ++# CONFIG_RING_BUFFER_BENCHMARK is not set ++# CONFIG_RING_BUFFER_STARTUP_TEST is not set ++# CONFIG_PREEMPTIRQ_DELAY_TEST is not set ++# CONFIG_TRACE_EVAL_MAP_FILE is not set ++CONFIG_PROVIDE_OHCI1394_DMA_INIT=y ++CONFIG_RUNTIME_TESTING_MENU=y ++# CONFIG_LKDTM is not set ++# CONFIG_TEST_LIST_SORT is not set ++# CONFIG_TEST_SORT is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_RBTREE_TEST is not set ++# CONFIG_REED_SOLOMON_TEST is not set ++# CONFIG_INTERVAL_TREE_TEST is not set ++# CONFIG_PERCPU_TEST is not set ++# CONFIG_ATOMIC64_SELFTEST is not set ++# CONFIG_TEST_HEXDUMP is not set ++# CONFIG_TEST_STRING_HELPERS is not set ++# CONFIG_TEST_STRSCPY is not set ++# CONFIG_TEST_KSTRTOX is not set ++# CONFIG_TEST_PRINTF is not set ++# CONFIG_TEST_BITMAP is not set ++# CONFIG_TEST_BITFIELD is not set ++# CONFIG_TEST_UUID is not set ++# CONFIG_TEST_XARRAY is not set ++# CONFIG_TEST_OVERFLOW is not set ++# CONFIG_TEST_RHASHTABLE is not set ++# CONFIG_TEST_HASH is not set ++# CONFIG_TEST_IDA is not set ++# CONFIG_TEST_LKM is not set ++# CONFIG_TEST_VMALLOC is not set ++# CONFIG_TEST_USER_COPY is not set ++# CONFIG_TEST_BPF is not set ++# CONFIG_TEST_BLACKHOLE_DEV is not set ++# CONFIG_FIND_BIT_BENCHMARK is not set ++# CONFIG_TEST_FIRMWARE is not set ++# CONFIG_TEST_SYSCTL is not set ++# CONFIG_TEST_UDELAY is not set ++# CONFIG_TEST_STATIC_KEYS is not set ++# CONFIG_TEST_KMOD is not set ++# CONFIG_TEST_MEMCAT_P is not set ++# CONFIG_TEST_STACKINIT is not set ++# CONFIG_TEST_MEMINIT is not set ++# CONFIG_MEMTEST is not set ++CONFIG_BUG_ON_DATA_CORRUPTION=y ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_ARCH_KGDB=y ++# CONFIG_KGDB is not set ++CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y ++# CONFIG_UBSAN is not set ++CONFIG_UBSAN_ALIGNMENT=y ++CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++CONFIG_EARLY_PRINTK_USB=y 
++CONFIG_X86_VERBOSE_BOOTUP=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_EARLY_PRINTK_DBGP=y ++# CONFIG_EARLY_PRINTK_USB_XDBC is not set ++CONFIG_X86_PTDUMP_CORE=y ++# CONFIG_X86_PTDUMP is not set ++# CONFIG_EFI_PGT_DUMP is not set ++CONFIG_DEBUG_WX=y ++CONFIG_DOUBLEFAULT=y ++# CONFIG_DEBUG_TLBFLUSH is not set ++# CONFIG_IOMMU_DEBUG is not set ++CONFIG_HAVE_MMIOTRACE_SUPPORT=y ++# CONFIG_X86_DECODER_SELFTEST is not set ++CONFIG_IO_DELAY_0X80=y ++# CONFIG_IO_DELAY_0XED is not set ++# CONFIG_IO_DELAY_UDELAY is not set ++# CONFIG_IO_DELAY_NONE is not set ++CONFIG_DEBUG_BOOT_PARAMS=y ++# CONFIG_CPA_DEBUG is not set ++# CONFIG_DEBUG_ENTRY is not set ++# CONFIG_DEBUG_NMI_SELFTEST is not set ++CONFIG_X86_DEBUG_FPU=y ++# CONFIG_PUNIT_ATOM_DEBUG is not set ++# CONFIG_UNWINDER_ORC is not set ++CONFIG_UNWINDER_FRAME_POINTER=y ++# CONFIG_UNWINDER_GUESS is not set ++# end of Kernel hacking +diff --git a/arch/x86/configs/test_defconfig b/arch/x86/configs/test_defconfig +new file mode 100644 +index 00000000..c410c20 +--- /dev/null ++++ b/arch/x86/configs/test_defconfig +@@ -0,0 +1,7050 @@ ++# ++# Automatically generated file; DO NOT EDIT. 
++# Linux/x86_64 5.4.0-rc4 Kernel Configuration ++# ++ ++# ++# Compiler: x86_64-poky-linux-gcc (GCC) 7.3.0 ++# ++CONFIG_CC_IS_GCC=y ++CONFIG_GCC_VERSION=70300 ++CONFIG_CLANG_VERSION=0 ++CONFIG_CC_HAS_ASM_GOTO=y ++CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y ++CONFIG_IRQ_WORK=y ++CONFIG_BUILDTIME_EXTABLE_SORT=y ++CONFIG_THREAD_INFO_IN_TASK=y ++ ++# ++# General setup ++# ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++# CONFIG_COMPILE_TEST is not set ++# CONFIG_HEADER_TEST is not set ++CONFIG_LOCALVERSION="" ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_BUILD_SALT="" ++CONFIG_HAVE_KERNEL_GZIP=y ++CONFIG_HAVE_KERNEL_BZIP2=y ++CONFIG_HAVE_KERNEL_LZMA=y ++CONFIG_HAVE_KERNEL_XZ=y ++CONFIG_HAVE_KERNEL_LZO=y ++CONFIG_HAVE_KERNEL_LZ4=y ++# CONFIG_KERNEL_GZIP is not set ++# CONFIG_KERNEL_BZIP2 is not set ++# CONFIG_KERNEL_LZMA is not set ++CONFIG_KERNEL_XZ=y ++# CONFIG_KERNEL_LZO is not set ++# CONFIG_KERNEL_LZ4 is not set ++CONFIG_DEFAULT_HOSTNAME="CannotLeaveINTEL" ++CONFIG_SWAP=y ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_POSIX_MQUEUE_SYSCTL=y ++# CONFIG_CROSS_MEMORY_ATTACH is not set ++# CONFIG_USELIB is not set ++# CONFIG_AUDIT is not set ++CONFIG_HAVE_ARCH_AUDITSYSCALL=y ++ ++# ++# IRQ subsystem ++# ++CONFIG_GENERIC_IRQ_PROBE=y ++CONFIG_GENERIC_IRQ_SHOW=y ++CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y ++CONFIG_GENERIC_PENDING_IRQ=y ++CONFIG_GENERIC_IRQ_MIGRATION=y ++CONFIG_IRQ_DOMAIN=y ++CONFIG_IRQ_DOMAIN_HIERARCHY=y ++CONFIG_GENERIC_MSI_IRQ=y ++CONFIG_GENERIC_MSI_IRQ_DOMAIN=y ++CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y ++CONFIG_GENERIC_IRQ_RESERVATION_MODE=y ++CONFIG_IRQ_FORCED_THREADING=y ++CONFIG_SPARSE_IRQ=y ++# CONFIG_GENERIC_IRQ_DEBUGFS is not set ++# end of IRQ subsystem ++ ++CONFIG_CLOCKSOURCE_WATCHDOG=y ++CONFIG_ARCH_CLOCKSOURCE_DATA=y ++CONFIG_ARCH_CLOCKSOURCE_INIT=y ++CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y ++CONFIG_GENERIC_TIME_VSYSCALL=y ++CONFIG_GENERIC_CLOCKEVENTS=y ++CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y 
++CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y ++CONFIG_GENERIC_CMOS_UPDATE=y ++ ++# ++# Timers subsystem ++# ++CONFIG_TICK_ONESHOT=y ++CONFIG_NO_HZ_COMMON=y ++# CONFIG_HZ_PERIODIC is not set ++# CONFIG_NO_HZ_IDLE is not set ++CONFIG_NO_HZ_FULL=y ++CONFIG_CONTEXT_TRACKING=y ++# CONFIG_CONTEXT_TRACKING_FORCE is not set ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++# end of Timers subsystem ++ ++# CONFIG_PREEMPT_NONE is not set ++CONFIG_PREEMPT_VOLUNTARY=y ++# CONFIG_PREEMPT is not set ++ ++# ++# CPU/Task time and stats accounting ++# ++CONFIG_VIRT_CPU_ACCOUNTING=y ++CONFIG_VIRT_CPU_ACCOUNTING_GEN=y ++CONFIG_IRQ_TIME_ACCOUNTING=y ++CONFIG_HAVE_SCHED_AVG_IRQ=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_TASKSTATS=y ++CONFIG_TASK_DELAY_ACCT=y ++CONFIG_TASK_XACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++# CONFIG_PSI is not set ++# end of CPU/Task time and stats accounting ++ ++CONFIG_CPU_ISOLATION=y ++ ++# ++# RCU Subsystem ++# ++CONFIG_TREE_RCU=y ++CONFIG_RCU_EXPERT=y ++CONFIG_SRCU=y ++CONFIG_TREE_SRCU=y ++CONFIG_RCU_STALL_COMMON=y ++CONFIG_RCU_NEED_SEGCBLIST=y ++CONFIG_RCU_FANOUT=32 ++CONFIG_RCU_FANOUT_LEAF=16 ++CONFIG_RCU_FAST_NO_HZ=y ++CONFIG_RCU_NOCB_CPU=y ++# end of RCU Subsystem ++ ++# CONFIG_IKCONFIG is not set ++# CONFIG_IKHEADERS is not set ++CONFIG_LOG_BUF_SHIFT=17 ++CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 ++CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 ++CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y ++ ++# ++# Scheduler features ++# ++# end of Scheduler features ++ ++CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y ++CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y ++CONFIG_ARCH_SUPPORTS_INT128=y ++# CONFIG_NUMA_BALANCING is not set ++CONFIG_CGROUPS=y ++CONFIG_PAGE_COUNTER=y ++CONFIG_MEMCG=y ++CONFIG_MEMCG_SWAP=y ++CONFIG_MEMCG_SWAP_ENABLED=y ++CONFIG_MEMCG_KMEM=y ++CONFIG_BLK_CGROUP=y ++CONFIG_CGROUP_WRITEBACK=y ++CONFIG_CGROUP_SCHED=y ++CONFIG_FAIR_GROUP_SCHED=y ++CONFIG_CFS_BANDWIDTH=y ++CONFIG_RT_GROUP_SCHED=y ++CONFIG_CGROUP_PIDS=y ++# CONFIG_CGROUP_RDMA is not set 
++CONFIG_CGROUP_FREEZER=y ++CONFIG_CGROUP_HUGETLB=y ++CONFIG_CPUSETS=y ++CONFIG_PROC_PID_CPUSET=y ++CONFIG_CGROUP_DEVICE=y ++CONFIG_CGROUP_CPUACCT=y ++CONFIG_CGROUP_PERF=y ++CONFIG_CGROUP_BPF=y ++# CONFIG_CGROUP_DEBUG is not set ++CONFIG_SOCK_CGROUP_DATA=y ++CONFIG_NAMESPACES=y ++CONFIG_UTS_NS=y ++CONFIG_IPC_NS=y ++CONFIG_USER_NS=y ++CONFIG_PID_NS=y ++CONFIG_NET_NS=y ++CONFIG_CHECKPOINT_RESTORE=y ++CONFIG_SCHED_AUTOGROUP=y ++# CONFIG_SYSFS_DEPRECATED is not set ++CONFIG_RELAY=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="" ++CONFIG_RD_GZIP=y ++# CONFIG_RD_BZIP2 is not set ++# CONFIG_RD_LZMA is not set ++CONFIG_RD_XZ=y ++CONFIG_RD_LZO=y ++CONFIG_RD_LZ4=y ++CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_SYSCTL=y ++CONFIG_HAVE_UID16=y ++CONFIG_SYSCTL_EXCEPTION_TRACE=y ++CONFIG_HAVE_PCSPKR_PLATFORM=y ++CONFIG_BPF=y ++CONFIG_EXPERT=y ++# CONFIG_UID16 is not set ++CONFIG_MULTIUSER=y ++CONFIG_SGETMASK_SYSCALL=y ++# CONFIG_SYSFS_SYSCALL is not set ++CONFIG_SYSCTL_SYSCALL=y ++CONFIG_FHANDLE=y ++CONFIG_POSIX_TIMERS=y ++CONFIG_PRINTK=y ++CONFIG_PRINTK_NMI=y ++CONFIG_BUG=y ++CONFIG_ELF_CORE=y ++# CONFIG_PCSPKR_PLATFORM is not set ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_FUTEX_PI=y ++CONFIG_EPOLL=y ++CONFIG_SIGNALFD=y ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++CONFIG_IO_URING=y ++CONFIG_ADVISE_SYSCALLS=y ++CONFIG_MEMBARRIER=y ++CONFIG_KALLSYMS=y ++CONFIG_KALLSYMS_ALL=y ++CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y ++CONFIG_KALLSYMS_BASE_RELATIVE=y ++CONFIG_BPF_SYSCALL=y ++CONFIG_BPF_JIT_ALWAYS_ON=y ++# CONFIG_USERFAULTFD is not set ++CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y ++CONFIG_RSEQ=y ++# CONFIG_DEBUG_RSEQ is not set ++# CONFIG_EMBEDDED is not set ++CONFIG_HAVE_PERF_EVENTS=y ++# CONFIG_PC104 is not set ++ ++# ++# Kernel Performance Events And Counters ++# ++CONFIG_PERF_EVENTS=y ++# CONFIG_DEBUG_PERF_USE_VMALLOC is not set ++# end of Kernel Performance Events And Counters ++ 
++CONFIG_VM_EVENT_COUNTERS=y ++# CONFIG_COMPAT_BRK is not set ++CONFIG_SLAB=y ++# CONFIG_SLUB is not set ++# CONFIG_SLOB is not set ++CONFIG_SLAB_MERGE_DEFAULT=y ++CONFIG_SLAB_FREELIST_RANDOM=y ++CONFIG_SHUFFLE_PAGE_ALLOCATOR=y ++CONFIG_SYSTEM_DATA_VERIFICATION=y ++CONFIG_PROFILING=y ++CONFIG_TRACEPOINTS=y ++# end of General setup ++ ++CONFIG_64BIT=y ++CONFIG_X86_64=y ++CONFIG_X86=y ++CONFIG_INSTRUCTION_DECODER=y ++CONFIG_OUTPUT_FORMAT="elf64-x86-64" ++CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_MMU=y ++CONFIG_ARCH_MMAP_RND_BITS_MIN=28 ++CONFIG_ARCH_MMAP_RND_BITS_MAX=32 ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 ++CONFIG_GENERIC_ISA_DMA=y ++CONFIG_GENERIC_BUG=y ++CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y ++CONFIG_ARCH_MAY_HAVE_PC_FDC=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_ARCH_HAS_CPU_RELAX=y ++CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y ++CONFIG_ARCH_HAS_FILTER_PGPROT=y ++CONFIG_HAVE_SETUP_PER_CPU_AREA=y ++CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y ++CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y ++CONFIG_ARCH_HIBERNATION_POSSIBLE=y ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++CONFIG_ARCH_WANT_GENERAL_HUGETLB=y ++CONFIG_ZONE_DMA32=y ++CONFIG_AUDIT_ARCH=y ++CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y ++CONFIG_HAVE_INTEL_TXT=y ++CONFIG_X86_64_SMP=y ++CONFIG_ARCH_SUPPORTS_UPROBES=y ++CONFIG_FIX_EARLYCON_MEM=y ++CONFIG_PGTABLE_LEVELS=4 ++CONFIG_CC_HAS_SANE_STACKPROTECTOR=y ++ ++# ++# Processor type and features ++# ++CONFIG_ZONE_DMA=y ++CONFIG_SMP=y ++CONFIG_X86_FEATURE_NAMES=y ++CONFIG_X86_X2APIC=y ++CONFIG_X86_MPPARSE=y ++# CONFIG_GOLDFISH is not set ++CONFIG_RETPOLINE=y ++# CONFIG_X86_CPU_RESCTRL is not set ++# CONFIG_X86_EXTENDED_PLATFORM is not set ++CONFIG_X86_INTEL_LPSS=y ++# CONFIG_X86_AMD_PLATFORM_DEVICE is not set ++CONFIG_IOSF_MBI=y ++# CONFIG_IOSF_MBI_DEBUG is not set ++CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y ++# CONFIG_SCHED_OMIT_FRAME_POINTER is not set 
++CONFIG_HYPERVISOR_GUEST=y ++# CONFIG_PARAVIRT is not set ++# CONFIG_ARCH_CPUIDLE_HALTPOLL is not set ++# CONFIG_PVH is not set ++# CONFIG_JAILHOUSE_GUEST is not set ++# CONFIG_ACRN_GUEST is not set ++# CONFIG_MK8 is not set ++# CONFIG_MPSC is not set ++CONFIG_MCORE2=y ++# CONFIG_MATOM is not set ++# CONFIG_GENERIC_CPU is not set ++CONFIG_X86_INTERNODE_CACHE_SHIFT=6 ++CONFIG_X86_L1_CACHE_SHIFT=6 ++CONFIG_X86_INTEL_USERCOPY=y ++CONFIG_X86_USE_PPRO_CHECKSUM=y ++CONFIG_X86_P6_NOP=y ++CONFIG_X86_TSC=y ++CONFIG_X86_CMPXCHG64=y ++CONFIG_X86_CMOV=y ++CONFIG_X86_MINIMUM_CPU_FAMILY=64 ++CONFIG_X86_DEBUGCTLMSR=y ++CONFIG_PROCESSOR_SELECT=y ++CONFIG_CPU_SUP_INTEL=y ++CONFIG_CPU_SUP_AMD=y ++CONFIG_CPU_SUP_HYGON=y ++# CONFIG_CPU_SUP_CENTAUR is not set ++CONFIG_CPU_SUP_ZHAOXIN=y ++CONFIG_HPET_TIMER=y ++CONFIG_HPET_EMULATE_RTC=y ++CONFIG_DMI=y ++# CONFIG_GART_IOMMU is not set ++# CONFIG_CALGARY_IOMMU is not set ++# CONFIG_MAXSMP is not set ++CONFIG_NR_CPUS_RANGE_BEGIN=2 ++CONFIG_NR_CPUS_RANGE_END=512 ++CONFIG_NR_CPUS_DEFAULT=64 ++CONFIG_NR_CPUS=320 ++CONFIG_SCHED_SMT=y ++CONFIG_SCHED_MC=y ++CONFIG_SCHED_MC_PRIO=y ++CONFIG_X86_LOCAL_APIC=y ++CONFIG_X86_IO_APIC=y ++# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set ++CONFIG_X86_MCE=y ++CONFIG_X86_MCELOG_LEGACY=y ++CONFIG_X86_MCE_INTEL=y ++CONFIG_X86_MCE_AMD=y ++CONFIG_X86_MCE_THRESHOLD=y ++CONFIG_X86_MCE_INJECT=m ++CONFIG_X86_THERMAL_VECTOR=y ++ ++# ++# Performance monitoring ++# ++CONFIG_PERF_EVENTS_INTEL_UNCORE=y ++CONFIG_PERF_EVENTS_INTEL_RAPL=y ++CONFIG_PERF_EVENTS_INTEL_CSTATE=y ++CONFIG_PERF_EVENTS_AMD_POWER=m ++# end of Performance monitoring ++ ++# CONFIG_X86_VSYSCALL_EMULATION is not set ++# CONFIG_I8K is not set ++CONFIG_MICROCODE=y ++CONFIG_MICROCODE_INTEL=y ++# CONFIG_MICROCODE_AMD is not set ++CONFIG_MICROCODE_OLD_INTERFACE=y ++CONFIG_X86_MSR=y ++CONFIG_X86_CPUID=y ++# CONFIG_X86_5LEVEL is not set ++CONFIG_X86_DIRECT_GBPAGES=y ++# CONFIG_X86_CPA_STATISTICS is not set ++# CONFIG_AMD_MEM_ENCRYPT is not set 
++CONFIG_NUMA=y ++# CONFIG_AMD_NUMA is not set ++CONFIG_X86_64_ACPI_NUMA=y ++CONFIG_NODES_SPAN_OTHER_NODES=y ++# CONFIG_NUMA_EMU is not set ++CONFIG_NODES_SHIFT=2 ++CONFIG_ARCH_SPARSEMEM_ENABLE=y ++CONFIG_ARCH_SPARSEMEM_DEFAULT=y ++CONFIG_ARCH_SELECT_MEMORY_MODEL=y ++# CONFIG_ARCH_MEMORY_PROBE is not set ++CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 ++# CONFIG_X86_PMEM_LEGACY is not set ++CONFIG_X86_CHECK_BIOS_CORRUPTION=y ++# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set ++CONFIG_X86_RESERVE_LOW=64 ++CONFIG_MTRR=y ++CONFIG_MTRR_SANITIZER=y ++CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 ++CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=0 ++CONFIG_X86_PAT=y ++CONFIG_ARCH_USES_PG_UNCACHED=y ++CONFIG_ARCH_RANDOM=y ++CONFIG_X86_SMAP=y ++CONFIG_X86_INTEL_UMIP=y ++# CONFIG_X86_INTEL_MPX is not set ++# CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS is not set ++CONFIG_EFI=y ++CONFIG_EFI_STUB=y ++# CONFIG_EFI_MIXED is not set ++CONFIG_SECCOMP=y ++# CONFIG_HZ_100 is not set ++# CONFIG_HZ_250 is not set ++# CONFIG_HZ_300 is not set ++CONFIG_HZ_1000=y ++CONFIG_HZ=1000 ++CONFIG_SCHED_HRTICK=y ++# CONFIG_KEXEC is not set ++# CONFIG_KEXEC_FILE is not set ++# CONFIG_CRASH_DUMP is not set ++CONFIG_PHYSICAL_START=0x100000 ++CONFIG_RELOCATABLE=y ++CONFIG_RANDOMIZE_BASE=y ++CONFIG_X86_NEED_RELOCS=y ++CONFIG_PHYSICAL_ALIGN=0x1000000 ++CONFIG_DYNAMIC_MEMORY_LAYOUT=y ++CONFIG_RANDOMIZE_MEMORY=y ++CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa ++CONFIG_HOTPLUG_CPU=y ++# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set ++# CONFIG_DEBUG_HOTPLUG_CPU0 is not set ++# CONFIG_COMPAT_VDSO is not set ++# CONFIG_LEGACY_VSYSCALL_EMULATE is not set ++# CONFIG_LEGACY_VSYSCALL_XONLY is not set ++CONFIG_LEGACY_VSYSCALL_NONE=y ++# CONFIG_CMDLINE_BOOL is not set ++# CONFIG_MODIFY_LDT_SYSCALL is not set ++CONFIG_HAVE_LIVEPATCH=y ++# end of Processor type and features ++ ++CONFIG_ARCH_HAS_ADD_PAGES=y ++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y ++CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y ++CONFIG_USE_PERCPU_NUMA_NODE_ID=y 
++CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y ++CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y ++CONFIG_ARCH_ENABLE_THP_MIGRATION=y ++ ++# ++# Power management and ACPI options ++# ++CONFIG_SUSPEND=y ++CONFIG_SUSPEND_FREEZER=y ++# CONFIG_SUSPEND_SKIP_SYNC is not set ++# CONFIG_HIBERNATION is not set ++CONFIG_PM_SLEEP=y ++CONFIG_PM_SLEEP_SMP=y ++# CONFIG_PM_AUTOSLEEP is not set ++# CONFIG_PM_WAKELOCKS is not set ++CONFIG_PM=y ++CONFIG_PM_DEBUG=y ++CONFIG_PM_ADVANCED_DEBUG=y ++# CONFIG_PM_TEST_SUSPEND is not set ++CONFIG_PM_SLEEP_DEBUG=y ++# CONFIG_DPM_WATCHDOG is not set ++# CONFIG_PM_TRACE_RTC is not set ++CONFIG_PM_CLK=y ++# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set ++# CONFIG_ENERGY_MODEL is not set ++CONFIG_ARCH_SUPPORTS_ACPI=y ++CONFIG_ACPI=y ++CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y ++CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y ++CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y ++# CONFIG_ACPI_DEBUGGER is not set ++CONFIG_ACPI_SPCR_TABLE=y ++CONFIG_ACPI_LPIT=y ++CONFIG_ACPI_SLEEP=y ++# CONFIG_ACPI_PROCFS_POWER is not set ++# CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set ++CONFIG_ACPI_EC_DEBUGFS=y ++CONFIG_ACPI_AC=m ++CONFIG_ACPI_BATTERY=m ++CONFIG_ACPI_BUTTON=y ++CONFIG_ACPI_VIDEO=y ++CONFIG_ACPI_FAN=y ++CONFIG_ACPI_TAD=y ++# CONFIG_ACPI_DOCK is not set ++CONFIG_ACPI_CPU_FREQ_PSS=y ++CONFIG_ACPI_PROCESSOR_CSTATE=y ++CONFIG_ACPI_PROCESSOR_IDLE=y ++CONFIG_ACPI_CPPC_LIB=y ++CONFIG_ACPI_PROCESSOR=y ++CONFIG_ACPI_IPMI=m ++CONFIG_ACPI_HOTPLUG_CPU=y ++CONFIG_ACPI_PROCESSOR_AGGREGATOR=y ++CONFIG_ACPI_THERMAL=y ++CONFIG_ACPI_NUMA=y ++CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y ++# CONFIG_ACPI_TABLE_UPGRADE is not set ++CONFIG_ACPI_DEBUG=y ++# CONFIG_ACPI_PCI_SLOT is not set ++CONFIG_ACPI_CONTAINER=y ++CONFIG_ACPI_HOTPLUG_MEMORY=y ++CONFIG_ACPI_HOTPLUG_IOAPIC=y ++# CONFIG_ACPI_SBS is not set ++CONFIG_ACPI_HED=m ++# CONFIG_ACPI_CUSTOM_METHOD is not set ++# CONFIG_ACPI_BGRT is not set ++# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set ++CONFIG_ACPI_NFIT=m ++# CONFIG_NFIT_SECURITY_DEBUG is not set ++# 
CONFIG_ACPI_HMAT is not set ++CONFIG_HAVE_ACPI_APEI=y ++CONFIG_HAVE_ACPI_APEI_NMI=y ++CONFIG_ACPI_APEI=y ++# CONFIG_ACPI_APEI_GHES is not set ++# CONFIG_ACPI_APEI_EINJ is not set ++# CONFIG_ACPI_APEI_ERST_DEBUG is not set ++# CONFIG_DPTF_POWER is not set ++# CONFIG_PMIC_OPREGION is not set ++# CONFIG_ACPI_CONFIGFS is not set ++CONFIG_X86_PM_TIMER=y ++# CONFIG_SFI is not set ++ ++# ++# CPU Frequency scaling ++# ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_GOV_ATTR_SET=y ++CONFIG_CPU_FREQ_GOV_COMMON=y ++CONFIG_CPU_FREQ_STAT=y ++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y ++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set ++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y ++# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set ++# CONFIG_CPU_FREQ_GOV_USERSPACE is not set ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set ++# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set ++ ++# ++# CPU frequency scaling drivers ++# ++CONFIG_X86_INTEL_PSTATE=y ++# CONFIG_X86_PCC_CPUFREQ is not set ++CONFIG_X86_ACPI_CPUFREQ=y ++# CONFIG_X86_ACPI_CPUFREQ_CPB is not set ++# CONFIG_X86_POWERNOW_K8 is not set ++CONFIG_X86_AMD_FREQ_SENSITIVITY=m ++# CONFIG_X86_SPEEDSTEP_CENTRINO is not set ++# CONFIG_X86_P4_CLOCKMOD is not set ++ ++# ++# shared options ++# ++# end of CPU Frequency scaling ++ ++# ++# CPU Idle ++# ++CONFIG_CPU_IDLE=y ++CONFIG_CPU_IDLE_GOV_LADDER=y ++CONFIG_CPU_IDLE_GOV_MENU=y ++# CONFIG_CPU_IDLE_GOV_TEO is not set ++# end of CPU Idle ++ ++CONFIG_INTEL_IDLE=y ++# end of Power management and ACPI options ++ ++# ++# Bus options (PCI etc.) 
++# ++CONFIG_PCI_DIRECT=y ++CONFIG_PCI_MMCONFIG=y ++CONFIG_MMCONF_FAM10H=y ++# CONFIG_PCI_CNB20LE_QUIRK is not set ++# CONFIG_ISA_BUS is not set ++CONFIG_ISA_DMA_API=y ++CONFIG_AMD_NB=y ++# CONFIG_X86_SYSFB is not set ++# end of Bus options (PCI etc.) ++ ++# ++# Binary Emulations ++# ++CONFIG_IA32_EMULATION=y ++# CONFIG_X86_X32 is not set ++CONFIG_COMPAT_32=y ++CONFIG_COMPAT=y ++CONFIG_COMPAT_FOR_U64_ALIGNMENT=y ++CONFIG_SYSVIPC_COMPAT=y ++# end of Binary Emulations ++ ++# ++# Firmware Drivers ++# ++# CONFIG_EDD is not set ++CONFIG_FIRMWARE_MEMMAP=y ++CONFIG_DMIID=y ++CONFIG_DMI_SYSFS=y ++CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y ++# CONFIG_ISCSI_IBFT is not set ++CONFIG_FW_CFG_SYSFS=m ++CONFIG_FW_CFG_SYSFS_CMDLINE=y ++# CONFIG_GOOGLE_FIRMWARE is not set ++ ++# ++# EFI (Extensible Firmware Interface) Support ++# ++CONFIG_EFI_VARS=y ++CONFIG_EFI_ESRT=y ++CONFIG_EFI_VARS_PSTORE=y ++# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set ++# CONFIG_EFI_FAKE_MEMMAP is not set ++CONFIG_EFI_RUNTIME_WRAPPERS=y ++CONFIG_EFI_BOOTLOADER_CONTROL=y ++CONFIG_EFI_CAPSULE_LOADER=y ++# CONFIG_EFI_TEST is not set ++CONFIG_APPLE_PROPERTIES=y ++# CONFIG_RESET_ATTACK_MITIGATION is not set ++# CONFIG_EFI_RCI2_TABLE is not set ++# end of EFI (Extensible Firmware Interface) Support ++ ++CONFIG_UEFI_CPER=y ++CONFIG_UEFI_CPER_X86=y ++CONFIG_EFI_DEV_PATH_PARSER=y ++CONFIG_EFI_EARLYCON=y ++ ++# ++# Tegra firmware driver ++# ++# end of Tegra firmware driver ++# end of Firmware Drivers ++ ++CONFIG_HAVE_KVM=y ++CONFIG_HAVE_KVM_IRQCHIP=y ++CONFIG_HAVE_KVM_IRQFD=y ++CONFIG_HAVE_KVM_IRQ_ROUTING=y ++CONFIG_HAVE_KVM_EVENTFD=y ++CONFIG_KVM_MMIO=y ++CONFIG_KVM_ASYNC_PF=y ++CONFIG_HAVE_KVM_MSI=y ++CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y ++CONFIG_KVM_VFIO=y ++CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y ++CONFIG_KVM_COMPAT=y ++CONFIG_HAVE_KVM_IRQ_BYPASS=y ++CONFIG_HAVE_KVM_NO_POLL=y ++CONFIG_VIRTUALIZATION=y ++CONFIG_KVM=y ++CONFIG_KVM_INTEL=y ++CONFIG_KVM_AMD=m ++CONFIG_KVM_MMU_AUDIT=y ++CONFIG_VHOST_NET=y 
++CONFIG_VHOST_VSOCK=m ++CONFIG_VHOST=y ++# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set ++ ++# ++# General architecture-dependent options ++# ++CONFIG_HOTPLUG_SMT=y ++# CONFIG_OPROFILE is not set ++CONFIG_HAVE_OPROFILE=y ++CONFIG_OPROFILE_NMI_TIMER=y ++# CONFIG_KPROBES is not set ++CONFIG_JUMP_LABEL=y ++# CONFIG_STATIC_KEYS_SELFTEST is not set ++CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y ++CONFIG_ARCH_USE_BUILTIN_BSWAP=y ++CONFIG_USER_RETURN_NOTIFIER=y ++CONFIG_HAVE_IOREMAP_PROT=y ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++CONFIG_HAVE_OPTPROBES=y ++CONFIG_HAVE_KPROBES_ON_FTRACE=y ++CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y ++CONFIG_HAVE_NMI=y ++CONFIG_HAVE_ARCH_TRACEHOOK=y ++CONFIG_HAVE_DMA_CONTIGUOUS=y ++CONFIG_GENERIC_SMP_IDLE_THREAD=y ++CONFIG_ARCH_HAS_FORTIFY_SOURCE=y ++CONFIG_ARCH_HAS_SET_MEMORY=y ++CONFIG_ARCH_HAS_SET_DIRECT_MAP=y ++CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y ++CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y ++CONFIG_HAVE_ASM_MODVERSIONS=y ++CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y ++CONFIG_HAVE_RSEQ=y ++CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y ++CONFIG_HAVE_CLK=y ++CONFIG_HAVE_HW_BREAKPOINT=y ++CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y ++CONFIG_HAVE_USER_RETURN_NOTIFIER=y ++CONFIG_HAVE_PERF_EVENTS_NMI=y ++CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y ++CONFIG_HAVE_PERF_REGS=y ++CONFIG_HAVE_PERF_USER_STACK_DUMP=y ++CONFIG_HAVE_ARCH_JUMP_LABEL=y ++CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y ++CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y ++CONFIG_HAVE_CMPXCHG_LOCAL=y ++CONFIG_HAVE_CMPXCHG_DOUBLE=y ++CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y ++CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y ++CONFIG_HAVE_ARCH_SECCOMP_FILTER=y ++CONFIG_SECCOMP_FILTER=y ++CONFIG_HAVE_ARCH_STACKLEAK=y ++CONFIG_HAVE_STACKPROTECTOR=y ++CONFIG_CC_HAS_STACKPROTECTOR_NONE=y ++CONFIG_STACKPROTECTOR=y ++CONFIG_STACKPROTECTOR_STRONG=y ++CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y ++CONFIG_HAVE_CONTEXT_TRACKING=y ++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y ++CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y ++CONFIG_HAVE_MOVE_PMD=y 
++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y ++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y ++CONFIG_HAVE_ARCH_HUGE_VMAP=y ++CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y ++CONFIG_HAVE_ARCH_SOFT_DIRTY=y ++CONFIG_HAVE_MOD_ARCH_SPECIFIC=y ++CONFIG_MODULES_USE_ELF_RELA=y ++CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y ++CONFIG_ARCH_HAS_ELF_RANDOMIZE=y ++CONFIG_HAVE_ARCH_MMAP_RND_BITS=y ++CONFIG_HAVE_EXIT_THREAD=y ++CONFIG_ARCH_MMAP_RND_BITS=28 ++CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 ++CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y ++CONFIG_HAVE_COPY_THREAD_TLS=y ++CONFIG_HAVE_STACK_VALIDATION=y ++CONFIG_HAVE_RELIABLE_STACKTRACE=y ++CONFIG_OLD_SIGSUSPEND3=y ++CONFIG_COMPAT_OLD_SIGACTION=y ++CONFIG_64BIT_TIME=y ++CONFIG_COMPAT_32BIT_TIME=y ++CONFIG_HAVE_ARCH_VMAP_STACK=y ++CONFIG_VMAP_STACK=y ++CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y ++CONFIG_STRICT_KERNEL_RWX=y ++CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y ++CONFIG_STRICT_MODULE_RWX=y ++CONFIG_ARCH_HAS_REFCOUNT=y ++CONFIG_REFCOUNT_FULL=y ++CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y ++CONFIG_ARCH_USE_MEMREMAP_PROT=y ++# CONFIG_LOCK_EVENT_COUNTS is not set ++CONFIG_ARCH_HAS_MEM_ENCRYPT=y ++ ++# ++# GCOV-based kernel profiling ++# ++# CONFIG_GCOV_KERNEL is not set ++CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y ++# end of GCOV-based kernel profiling ++ ++CONFIG_PLUGIN_HOSTCC="" ++CONFIG_HAVE_GCC_PLUGINS=y ++# end of General architecture-dependent options ++ ++CONFIG_RT_MUTEXES=y ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULE_SIG_FORMAT=y ++CONFIG_MODULES=y ++# CONFIG_MODULE_FORCE_LOAD is not set ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_MODULE_FORCE_UNLOAD is not set ++# CONFIG_MODVERSIONS is not set ++# CONFIG_MODULE_SRCVERSION_ALL is not set ++CONFIG_MODULE_SIG=y ++CONFIG_MODULE_SIG_FORCE=y ++CONFIG_MODULE_SIG_ALL=y ++# CONFIG_MODULE_SIG_SHA1 is not set ++# CONFIG_MODULE_SIG_SHA224 is not set ++# CONFIG_MODULE_SIG_SHA256 is not set ++# CONFIG_MODULE_SIG_SHA384 is not set ++CONFIG_MODULE_SIG_SHA512=y ++CONFIG_MODULE_SIG_HASH="sha512" ++# CONFIG_MODULE_COMPRESS 
is not set ++# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set ++# CONFIG_UNUSED_SYMBOLS is not set ++# CONFIG_TRIM_UNUSED_KSYMS is not set ++CONFIG_MODULES_TREE_LOOKUP=y ++CONFIG_BLOCK=y ++CONFIG_BLK_SCSI_REQUEST=y ++CONFIG_BLK_DEV_BSG=y ++CONFIG_BLK_DEV_BSGLIB=y ++CONFIG_BLK_DEV_INTEGRITY=y ++CONFIG_BLK_DEV_ZONED=y ++CONFIG_BLK_DEV_THROTTLING=y ++# CONFIG_BLK_DEV_THROTTLING_LOW is not set ++# CONFIG_BLK_CMDLINE_PARSER is not set ++# CONFIG_BLK_WBT is not set ++# CONFIG_BLK_CGROUP_IOLATENCY is not set ++# CONFIG_BLK_CGROUP_IOCOST is not set ++# CONFIG_BLK_DEBUG_FS is not set ++CONFIG_BLK_SED_OPAL=y ++ ++# ++# Partition Types ++# ++CONFIG_PARTITION_ADVANCED=y ++# CONFIG_ACORN_PARTITION is not set ++# CONFIG_AIX_PARTITION is not set ++# CONFIG_OSF_PARTITION is not set ++# CONFIG_AMIGA_PARTITION is not set ++# CONFIG_ATARI_PARTITION is not set ++# CONFIG_MAC_PARTITION is not set ++CONFIG_MSDOS_PARTITION=y ++# CONFIG_BSD_DISKLABEL is not set ++# CONFIG_MINIX_SUBPARTITION is not set ++# CONFIG_SOLARIS_X86_PARTITION is not set ++# CONFIG_UNIXWARE_DISKLABEL is not set ++# CONFIG_LDM_PARTITION is not set ++# CONFIG_SGI_PARTITION is not set ++# CONFIG_ULTRIX_PARTITION is not set ++# CONFIG_SUN_PARTITION is not set ++# CONFIG_KARMA_PARTITION is not set ++CONFIG_EFI_PARTITION=y ++# CONFIG_SYSV68_PARTITION is not set ++# CONFIG_CMDLINE_PARTITION is not set ++# end of Partition Types ++ ++CONFIG_BLOCK_COMPAT=y ++CONFIG_BLK_MQ_PCI=y ++CONFIG_BLK_MQ_VIRTIO=y ++CONFIG_BLK_MQ_RDMA=y ++CONFIG_BLK_PM=y ++ ++# ++# IO Schedulers ++# ++CONFIG_MQ_IOSCHED_DEADLINE=y ++CONFIG_MQ_IOSCHED_KYBER=y ++CONFIG_IOSCHED_BFQ=y ++CONFIG_BFQ_GROUP_IOSCHED=y ++# CONFIG_BFQ_CGROUP_DEBUG is not set ++# end of IO Schedulers ++ ++CONFIG_PREEMPT_NOTIFIERS=y ++CONFIG_PADATA=y ++CONFIG_ASN1=y ++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y ++CONFIG_INLINE_READ_UNLOCK=y ++CONFIG_INLINE_READ_UNLOCK_IRQ=y ++CONFIG_INLINE_WRITE_UNLOCK=y ++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y ++CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y 
++CONFIG_MUTEX_SPIN_ON_OWNER=y ++CONFIG_RWSEM_SPIN_ON_OWNER=y ++CONFIG_LOCK_SPIN_ON_OWNER=y ++CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y ++CONFIG_QUEUED_SPINLOCKS=y ++CONFIG_ARCH_USE_QUEUED_RWLOCKS=y ++CONFIG_QUEUED_RWLOCKS=y ++CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y ++CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y ++CONFIG_FREEZER=y ++ ++# ++# Executable file formats ++# ++CONFIG_BINFMT_ELF=y ++CONFIG_COMPAT_BINFMT_ELF=y ++CONFIG_ELFCORE=y ++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y ++CONFIG_BINFMT_SCRIPT=y ++CONFIG_BINFMT_MISC=y ++CONFIG_COREDUMP=y ++# end of Executable file formats ++ ++# ++# Memory Management options ++# ++CONFIG_SELECT_MEMORY_MODEL=y ++CONFIG_SPARSEMEM_MANUAL=y ++CONFIG_SPARSEMEM=y ++CONFIG_NEED_MULTIPLE_NODES=y ++CONFIG_HAVE_MEMORY_PRESENT=y ++CONFIG_SPARSEMEM_EXTREME=y ++CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y ++CONFIG_SPARSEMEM_VMEMMAP=y ++CONFIG_HAVE_MEMBLOCK_NODE_MAP=y ++CONFIG_HAVE_FAST_GUP=y ++CONFIG_MEMORY_ISOLATION=y ++CONFIG_HAVE_BOOTMEM_INFO_NODE=y ++CONFIG_MEMORY_HOTPLUG=y ++CONFIG_MEMORY_HOTPLUG_SPARSE=y ++CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y ++CONFIG_MEMORY_HOTREMOVE=y ++CONFIG_SPLIT_PTLOCK_CPUS=4 ++CONFIG_MEMORY_BALLOON=y ++# CONFIG_BALLOON_COMPACTION is not set ++CONFIG_COMPACTION=y ++CONFIG_MIGRATION=y ++CONFIG_CONTIG_ALLOC=y ++CONFIG_PHYS_ADDR_T_64BIT=y ++CONFIG_BOUNCE=y ++CONFIG_VIRT_TO_BUS=y ++CONFIG_MMU_NOTIFIER=y ++CONFIG_KSM=y ++CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 ++CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y ++# CONFIG_MEMORY_FAILURE is not set ++CONFIG_TRANSPARENT_HUGEPAGE=y ++# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set ++CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y ++CONFIG_ARCH_WANTS_THP_SWAP=y ++CONFIG_THP_SWAP=y ++CONFIG_TRANSPARENT_HUGE_PAGECACHE=y ++# CONFIG_CLEANCACHE is not set ++# CONFIG_FRONTSWAP is not set ++# CONFIG_CMA is not set ++# CONFIG_MEM_SOFT_DIRTY is not set ++# CONFIG_ZPOOL is not set ++# CONFIG_ZBUD is not set ++CONFIG_ZSMALLOC=m ++# CONFIG_PGTABLE_MAPPING is not set ++# CONFIG_ZSMALLOC_STAT is not set 
++CONFIG_GENERIC_EARLY_IOREMAP=y ++# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set ++# CONFIG_IDLE_PAGE_TRACKING is not set ++CONFIG_ARCH_HAS_PTE_DEVMAP=y ++CONFIG_ZONE_DEVICE=y ++CONFIG_DEV_PAGEMAP_OPS=y ++# CONFIG_DEVICE_PRIVATE is not set ++CONFIG_FRAME_VECTOR=y ++# CONFIG_PERCPU_STATS is not set ++# CONFIG_GUP_BENCHMARK is not set ++# CONFIG_READ_ONLY_THP_FOR_FS is not set ++CONFIG_ARCH_HAS_PTE_SPECIAL=y ++# end of Memory Management options ++ ++CONFIG_NET=y ++CONFIG_COMPAT_NETLINK_MESSAGES=y ++CONFIG_NET_INGRESS=y ++CONFIG_NET_EGRESS=y ++CONFIG_SKB_EXTENSIONS=y ++ ++# ++# Networking options ++# ++CONFIG_PACKET=y ++CONFIG_PACKET_DIAG=y ++CONFIG_UNIX=y ++CONFIG_UNIX_SCM=y ++CONFIG_UNIX_DIAG=m ++CONFIG_TLS=m ++# CONFIG_TLS_DEVICE is not set ++CONFIG_XFRM=y ++CONFIG_XFRM_OFFLOAD=y ++CONFIG_XFRM_ALGO=y ++CONFIG_XFRM_USER=y ++# CONFIG_XFRM_INTERFACE is not set ++CONFIG_XFRM_SUB_POLICY=y ++CONFIG_XFRM_MIGRATE=y ++CONFIG_XFRM_STATISTICS=y ++CONFIG_XFRM_IPCOMP=m ++CONFIG_NET_KEY=m ++CONFIG_NET_KEY_MIGRATE=y ++CONFIG_SMC=m ++CONFIG_SMC_DIAG=m ++# CONFIG_XDP_SOCKETS is not set ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++# CONFIG_IP_FIB_TRIE_STATS is not set ++CONFIG_IP_MULTIPLE_TABLES=y ++# CONFIG_IP_ROUTE_MULTIPATH is not set ++# CONFIG_IP_ROUTE_VERBOSE is not set ++CONFIG_IP_ROUTE_CLASSID=y ++# CONFIG_IP_PNP is not set ++CONFIG_NET_IPIP=m ++CONFIG_NET_IPGRE_DEMUX=m ++CONFIG_NET_IP_TUNNEL=m ++CONFIG_NET_IPGRE=m ++# CONFIG_NET_IPGRE_BROADCAST is not set ++# CONFIG_IP_MROUTE is not set ++CONFIG_SYN_COOKIES=y ++# CONFIG_NET_IPVTI is not set ++CONFIG_NET_UDP_TUNNEL=m ++# CONFIG_NET_FOU is not set ++# CONFIG_NET_FOU_IP_TUNNELS is not set ++CONFIG_INET_AH=m ++CONFIG_INET_ESP=m ++CONFIG_INET_ESP_OFFLOAD=m ++CONFIG_INET_IPCOMP=m ++CONFIG_INET_XFRM_TUNNEL=m ++CONFIG_INET_TUNNEL=m ++# CONFIG_INET_DIAG is not set ++CONFIG_TCP_CONG_ADVANCED=y ++CONFIG_TCP_CONG_BIC=m ++CONFIG_TCP_CONG_CUBIC=m ++# CONFIG_TCP_CONG_WESTWOOD is not set ++# CONFIG_TCP_CONG_HTCP 
is not set ++# CONFIG_TCP_CONG_HSTCP is not set ++# CONFIG_TCP_CONG_HYBLA is not set ++# CONFIG_TCP_CONG_VEGAS is not set ++# CONFIG_TCP_CONG_NV is not set ++# CONFIG_TCP_CONG_SCALABLE is not set ++# CONFIG_TCP_CONG_LP is not set ++# CONFIG_TCP_CONG_VENO is not set ++# CONFIG_TCP_CONG_YEAH is not set ++# CONFIG_TCP_CONG_ILLINOIS is not set ++CONFIG_TCP_CONG_DCTCP=m ++# CONFIG_TCP_CONG_CDG is not set ++CONFIG_TCP_CONG_BBR=y ++CONFIG_DEFAULT_BBR=y ++# CONFIG_DEFAULT_RENO is not set ++CONFIG_DEFAULT_TCP_CONG="bbr" ++CONFIG_TCP_MD5SIG=y ++CONFIG_IPV6=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_ROUTE_INFO=y ++CONFIG_IPV6_OPTIMISTIC_DAD=y ++CONFIG_INET6_AH=m ++CONFIG_INET6_ESP=m ++CONFIG_INET6_ESP_OFFLOAD=m ++CONFIG_INET6_IPCOMP=m ++CONFIG_IPV6_MIP6=m ++# CONFIG_IPV6_ILA is not set ++CONFIG_INET6_XFRM_TUNNEL=m ++CONFIG_INET6_TUNNEL=m ++# CONFIG_IPV6_VTI is not set ++CONFIG_IPV6_SIT=m ++# CONFIG_IPV6_SIT_6RD is not set ++CONFIG_IPV6_NDISC_NODETYPE=y ++CONFIG_IPV6_TUNNEL=m ++CONFIG_IPV6_GRE=m ++# CONFIG_IPV6_MULTIPLE_TABLES is not set ++# CONFIG_IPV6_MROUTE is not set ++# CONFIG_IPV6_SEG6_LWTUNNEL is not set ++# CONFIG_IPV6_SEG6_HMAC is not set ++CONFIG_NETLABEL=y ++# CONFIG_NETWORK_SECMARK is not set ++CONFIG_NET_PTP_CLASSIFY=y ++CONFIG_NETWORK_PHY_TIMESTAMPING=y ++CONFIG_NETFILTER=y ++CONFIG_NETFILTER_ADVANCED=y ++CONFIG_BRIDGE_NETFILTER=y ++ ++# ++# Core Netfilter Configuration ++# ++# CONFIG_NETFILTER_INGRESS is not set ++CONFIG_NETFILTER_NETLINK=y ++CONFIG_NETFILTER_FAMILY_BRIDGE=y ++CONFIG_NETFILTER_FAMILY_ARP=y ++CONFIG_NETFILTER_NETLINK_ACCT=y ++CONFIG_NETFILTER_NETLINK_QUEUE=y ++CONFIG_NETFILTER_NETLINK_LOG=y ++CONFIG_NETFILTER_NETLINK_OSF=m ++CONFIG_NF_CONNTRACK=m ++CONFIG_NF_LOG_COMMON=m ++# CONFIG_NF_LOG_NETDEV is not set ++CONFIG_NETFILTER_CONNCOUNT=m ++CONFIG_NF_CONNTRACK_MARK=y ++CONFIG_NF_CONNTRACK_ZONES=y ++# CONFIG_NF_CONNTRACK_PROCFS is not set ++CONFIG_NF_CONNTRACK_EVENTS=y ++# CONFIG_NF_CONNTRACK_TIMEOUT is not set ++# CONFIG_NF_CONNTRACK_TIMESTAMP is 
not set ++CONFIG_NF_CONNTRACK_LABELS=y ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_GRE=y ++CONFIG_NF_CT_PROTO_SCTP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y ++# CONFIG_NF_CONNTRACK_AMANDA is not set ++CONFIG_NF_CONNTRACK_FTP=m ++CONFIG_NF_CONNTRACK_H323=m ++CONFIG_NF_CONNTRACK_IRC=m ++CONFIG_NF_CONNTRACK_BROADCAST=m ++CONFIG_NF_CONNTRACK_NETBIOS_NS=m ++CONFIG_NF_CONNTRACK_SNMP=m ++CONFIG_NF_CONNTRACK_PPTP=m ++CONFIG_NF_CONNTRACK_SANE=m ++CONFIG_NF_CONNTRACK_SIP=m ++CONFIG_NF_CONNTRACK_TFTP=m ++CONFIG_NF_CT_NETLINK=m ++# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set ++CONFIG_NF_NAT=m ++CONFIG_NF_NAT_FTP=m ++CONFIG_NF_NAT_IRC=m ++CONFIG_NF_NAT_SIP=m ++CONFIG_NF_NAT_TFTP=m ++CONFIG_NF_NAT_REDIRECT=y ++CONFIG_NF_NAT_MASQUERADE=y ++CONFIG_NF_TABLES=m ++# CONFIG_NF_TABLES_SET is not set ++CONFIG_NF_TABLES_INET=y ++CONFIG_NF_TABLES_NETDEV=y ++CONFIG_NFT_NUMGEN=m ++CONFIG_NFT_CT=m ++CONFIG_NFT_COUNTER=m ++# CONFIG_NFT_CONNLIMIT is not set ++CONFIG_NFT_LOG=m ++CONFIG_NFT_LIMIT=m ++# CONFIG_NFT_MASQ is not set ++CONFIG_NFT_REDIR=m ++CONFIG_NFT_NAT=m ++# CONFIG_NFT_TUNNEL is not set ++CONFIG_NFT_OBJREF=m ++CONFIG_NFT_QUEUE=m ++CONFIG_NFT_QUOTA=m ++CONFIG_NFT_REJECT=m ++CONFIG_NFT_REJECT_INET=m ++CONFIG_NFT_COMPAT=m ++CONFIG_NFT_HASH=m ++CONFIG_NFT_FIB=m ++CONFIG_NFT_FIB_INET=m ++# CONFIG_NFT_XFRM is not set ++# CONFIG_NFT_SOCKET is not set ++# CONFIG_NFT_OSF is not set ++# CONFIG_NFT_TPROXY is not set ++# CONFIG_NFT_SYNPROXY is not set ++CONFIG_NF_DUP_NETDEV=m ++CONFIG_NFT_DUP_NETDEV=m ++CONFIG_NFT_FWD_NETDEV=m ++CONFIG_NFT_FIB_NETDEV=m ++CONFIG_NETFILTER_XTABLES=y ++ ++# ++# Xtables combined modules ++# ++CONFIG_NETFILTER_XT_MARK=y ++CONFIG_NETFILTER_XT_CONNMARK=m ++CONFIG_NETFILTER_XT_SET=m ++ ++# ++# Xtables targets ++# ++CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m ++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m ++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m ++CONFIG_NETFILTER_XT_TARGET_CT=m ++CONFIG_NETFILTER_XT_TARGET_DSCP=m ++CONFIG_NETFILTER_XT_TARGET_HL=m ++CONFIG_NETFILTER_XT_TARGET_HMARK=m 
++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m ++# CONFIG_NETFILTER_XT_TARGET_LED is not set ++CONFIG_NETFILTER_XT_TARGET_LOG=m ++CONFIG_NETFILTER_XT_TARGET_MARK=m ++CONFIG_NETFILTER_XT_NAT=m ++CONFIG_NETFILTER_XT_TARGET_NETMAP=m ++CONFIG_NETFILTER_XT_TARGET_NFLOG=m ++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m ++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m ++CONFIG_NETFILTER_XT_TARGET_RATEEST=m ++CONFIG_NETFILTER_XT_TARGET_REDIRECT=m ++CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m ++CONFIG_NETFILTER_XT_TARGET_TEE=m ++CONFIG_NETFILTER_XT_TARGET_TPROXY=m ++CONFIG_NETFILTER_XT_TARGET_TRACE=m ++CONFIG_NETFILTER_XT_TARGET_TCPMSS=m ++CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m ++ ++# ++# Xtables matches ++# ++CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m ++CONFIG_NETFILTER_XT_MATCH_BPF=m ++# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set ++CONFIG_NETFILTER_XT_MATCH_CLUSTER=m ++CONFIG_NETFILTER_XT_MATCH_COMMENT=m ++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m ++CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m ++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m ++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m ++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m ++CONFIG_NETFILTER_XT_MATCH_CPU=m ++CONFIG_NETFILTER_XT_MATCH_DCCP=m ++CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m ++CONFIG_NETFILTER_XT_MATCH_DSCP=m ++CONFIG_NETFILTER_XT_MATCH_ECN=m ++CONFIG_NETFILTER_XT_MATCH_ESP=m ++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m ++CONFIG_NETFILTER_XT_MATCH_HELPER=m ++CONFIG_NETFILTER_XT_MATCH_HL=m ++# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set ++CONFIG_NETFILTER_XT_MATCH_IPRANGE=m ++CONFIG_NETFILTER_XT_MATCH_IPVS=m ++CONFIG_NETFILTER_XT_MATCH_L2TP=m ++CONFIG_NETFILTER_XT_MATCH_LENGTH=m ++CONFIG_NETFILTER_XT_MATCH_LIMIT=m ++CONFIG_NETFILTER_XT_MATCH_MAC=m ++CONFIG_NETFILTER_XT_MATCH_MARK=m ++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m ++CONFIG_NETFILTER_XT_MATCH_NFACCT=m ++CONFIG_NETFILTER_XT_MATCH_OSF=m ++CONFIG_NETFILTER_XT_MATCH_OWNER=m ++CONFIG_NETFILTER_XT_MATCH_POLICY=m ++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m ++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m ++CONFIG_NETFILTER_XT_MATCH_QUOTA=m 
++CONFIG_NETFILTER_XT_MATCH_RATEEST=m ++CONFIG_NETFILTER_XT_MATCH_REALM=m ++CONFIG_NETFILTER_XT_MATCH_RECENT=m ++CONFIG_NETFILTER_XT_MATCH_SCTP=m ++CONFIG_NETFILTER_XT_MATCH_SOCKET=m ++CONFIG_NETFILTER_XT_MATCH_STATE=m ++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m ++CONFIG_NETFILTER_XT_MATCH_STRING=m ++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m ++CONFIG_NETFILTER_XT_MATCH_TIME=m ++CONFIG_NETFILTER_XT_MATCH_U32=m ++# end of Core Netfilter Configuration ++ ++CONFIG_IP_SET=y ++CONFIG_IP_SET_MAX=256 ++CONFIG_IP_SET_BITMAP_IP=m ++CONFIG_IP_SET_BITMAP_IPMAC=m ++CONFIG_IP_SET_BITMAP_PORT=m ++CONFIG_IP_SET_HASH_IP=y ++CONFIG_IP_SET_HASH_IPMARK=m ++CONFIG_IP_SET_HASH_IPPORT=m ++CONFIG_IP_SET_HASH_IPPORTIP=m ++CONFIG_IP_SET_HASH_IPPORTNET=m ++CONFIG_IP_SET_HASH_IPMAC=m ++CONFIG_IP_SET_HASH_MAC=m ++CONFIG_IP_SET_HASH_NETPORTNET=m ++CONFIG_IP_SET_HASH_NET=m ++CONFIG_IP_SET_HASH_NETNET=m ++CONFIG_IP_SET_HASH_NETPORT=m ++CONFIG_IP_SET_HASH_NETIFACE=m ++CONFIG_IP_SET_LIST_SET=m ++CONFIG_IP_VS=m ++CONFIG_IP_VS_IPV6=y ++# CONFIG_IP_VS_DEBUG is not set ++CONFIG_IP_VS_TAB_BITS=15 ++ ++# ++# IPVS transport protocol load balancing support ++# ++CONFIG_IP_VS_PROTO_TCP=y ++CONFIG_IP_VS_PROTO_UDP=y ++CONFIG_IP_VS_PROTO_AH_ESP=y ++CONFIG_IP_VS_PROTO_ESP=y ++CONFIG_IP_VS_PROTO_AH=y ++CONFIG_IP_VS_PROTO_SCTP=y ++ ++# ++# IPVS scheduler ++# ++CONFIG_IP_VS_RR=m ++CONFIG_IP_VS_WRR=m ++CONFIG_IP_VS_LC=m ++CONFIG_IP_VS_WLC=m ++CONFIG_IP_VS_FO=m ++CONFIG_IP_VS_OVF=m ++CONFIG_IP_VS_LBLC=m ++CONFIG_IP_VS_LBLCR=m ++CONFIG_IP_VS_DH=m ++CONFIG_IP_VS_SH=m ++# CONFIG_IP_VS_MH is not set ++CONFIG_IP_VS_SED=m ++CONFIG_IP_VS_NQ=m ++ ++# ++# IPVS SH scheduler ++# ++CONFIG_IP_VS_SH_TAB_BITS=8 ++ ++# ++# IPVS MH scheduler ++# ++CONFIG_IP_VS_MH_TAB_INDEX=12 ++ ++# ++# IPVS application helper ++# ++CONFIG_IP_VS_FTP=m ++CONFIG_IP_VS_NFCT=y ++CONFIG_IP_VS_PE_SIP=m ++ ++# ++# IP: Netfilter Configuration ++# ++CONFIG_NF_DEFRAG_IPV4=m ++CONFIG_NF_SOCKET_IPV4=m ++CONFIG_NF_TPROXY_IPV4=m ++CONFIG_NF_TABLES_IPV4=y 
++CONFIG_NFT_REJECT_IPV4=m ++CONFIG_NFT_DUP_IPV4=m ++CONFIG_NFT_FIB_IPV4=m ++CONFIG_NF_TABLES_ARP=y ++CONFIG_NF_DUP_IPV4=m ++# CONFIG_NF_LOG_ARP is not set ++CONFIG_NF_LOG_IPV4=m ++CONFIG_NF_REJECT_IPV4=y ++CONFIG_NF_NAT_SNMP_BASIC=m ++CONFIG_NF_NAT_PPTP=m ++CONFIG_NF_NAT_H323=m ++CONFIG_IP_NF_IPTABLES=m ++CONFIG_IP_NF_MATCH_AH=m ++CONFIG_IP_NF_MATCH_ECN=m ++CONFIG_IP_NF_MATCH_RPFILTER=m ++CONFIG_IP_NF_MATCH_TTL=m ++CONFIG_IP_NF_FILTER=m ++CONFIG_IP_NF_TARGET_REJECT=m ++# CONFIG_IP_NF_TARGET_SYNPROXY is not set ++CONFIG_IP_NF_NAT=m ++CONFIG_IP_NF_TARGET_MASQUERADE=m ++CONFIG_IP_NF_TARGET_NETMAP=m ++CONFIG_IP_NF_TARGET_REDIRECT=m ++CONFIG_IP_NF_MANGLE=m ++CONFIG_IP_NF_TARGET_CLUSTERIP=m ++CONFIG_IP_NF_TARGET_ECN=m ++CONFIG_IP_NF_TARGET_TTL=m ++CONFIG_IP_NF_RAW=m ++CONFIG_IP_NF_SECURITY=m ++CONFIG_IP_NF_ARPTABLES=m ++CONFIG_IP_NF_ARPFILTER=m ++CONFIG_IP_NF_ARP_MANGLE=m ++# end of IP: Netfilter Configuration ++ ++# ++# IPv6: Netfilter Configuration ++# ++CONFIG_NF_SOCKET_IPV6=m ++CONFIG_NF_TPROXY_IPV6=m ++CONFIG_NF_TABLES_IPV6=y ++CONFIG_NFT_REJECT_IPV6=m ++CONFIG_NFT_DUP_IPV6=m ++CONFIG_NFT_FIB_IPV6=m ++CONFIG_NF_DUP_IPV6=m ++CONFIG_NF_REJECT_IPV6=y ++CONFIG_NF_LOG_IPV6=m ++CONFIG_IP6_NF_IPTABLES=m ++CONFIG_IP6_NF_MATCH_AH=m ++CONFIG_IP6_NF_MATCH_EUI64=m ++CONFIG_IP6_NF_MATCH_FRAG=m ++CONFIG_IP6_NF_MATCH_OPTS=m ++CONFIG_IP6_NF_MATCH_HL=m ++CONFIG_IP6_NF_MATCH_IPV6HEADER=m ++CONFIG_IP6_NF_MATCH_MH=m ++CONFIG_IP6_NF_MATCH_RPFILTER=m ++CONFIG_IP6_NF_MATCH_RT=m ++CONFIG_IP6_NF_MATCH_SRH=m ++CONFIG_IP6_NF_TARGET_HL=m ++CONFIG_IP6_NF_FILTER=m ++CONFIG_IP6_NF_TARGET_REJECT=m ++# CONFIG_IP6_NF_TARGET_SYNPROXY is not set ++CONFIG_IP6_NF_MANGLE=m ++CONFIG_IP6_NF_RAW=m ++# CONFIG_IP6_NF_SECURITY is not set ++CONFIG_IP6_NF_NAT=m ++CONFIG_IP6_NF_TARGET_MASQUERADE=m ++# CONFIG_IP6_NF_TARGET_NPT is not set ++# end of IPv6: Netfilter Configuration ++ ++CONFIG_NF_DEFRAG_IPV6=m ++# CONFIG_NF_TABLES_BRIDGE is not set ++# CONFIG_NF_CONNTRACK_BRIDGE is not set 
++CONFIG_BRIDGE_NF_EBTABLES=y ++CONFIG_BRIDGE_EBT_BROUTE=y ++CONFIG_BRIDGE_EBT_T_FILTER=y ++CONFIG_BRIDGE_EBT_T_NAT=y ++CONFIG_BRIDGE_EBT_802_3=y ++CONFIG_BRIDGE_EBT_AMONG=y ++CONFIG_BRIDGE_EBT_ARP=y ++CONFIG_BRIDGE_EBT_IP=y ++CONFIG_BRIDGE_EBT_IP6=y ++CONFIG_BRIDGE_EBT_LIMIT=y ++CONFIG_BRIDGE_EBT_MARK=y ++CONFIG_BRIDGE_EBT_PKTTYPE=y ++CONFIG_BRIDGE_EBT_STP=y ++CONFIG_BRIDGE_EBT_VLAN=y ++CONFIG_BRIDGE_EBT_ARPREPLY=y ++CONFIG_BRIDGE_EBT_DNAT=y ++CONFIG_BRIDGE_EBT_MARK_T=y ++CONFIG_BRIDGE_EBT_REDIRECT=y ++CONFIG_BRIDGE_EBT_SNAT=y ++CONFIG_BRIDGE_EBT_LOG=y ++CONFIG_BRIDGE_EBT_NFLOG=y ++# CONFIG_BPFILTER is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_RDS is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_L2TP is not set ++CONFIG_STP=y ++CONFIG_BRIDGE=y ++CONFIG_BRIDGE_IGMP_SNOOPING=y ++CONFIG_BRIDGE_VLAN_FILTERING=y ++CONFIG_HAVE_NET_DSA=y ++CONFIG_NET_DSA=m ++# CONFIG_NET_DSA_TAG_8021Q is not set ++CONFIG_NET_DSA_TAG_BRCM_COMMON=m ++CONFIG_NET_DSA_TAG_BRCM=m ++CONFIG_NET_DSA_TAG_BRCM_PREPEND=m ++# CONFIG_NET_DSA_TAG_GSWIP is not set ++CONFIG_NET_DSA_TAG_DSA=m ++CONFIG_NET_DSA_TAG_EDSA=m ++# CONFIG_NET_DSA_TAG_MTK is not set ++# CONFIG_NET_DSA_TAG_KSZ is not set ++# CONFIG_NET_DSA_TAG_QCA is not set ++# CONFIG_NET_DSA_TAG_LAN9303 is not set ++# CONFIG_NET_DSA_TAG_SJA1105 is not set ++# CONFIG_NET_DSA_TAG_TRAILER is not set ++CONFIG_VLAN_8021Q=m ++# CONFIG_VLAN_8021Q_GVRP is not set ++# CONFIG_VLAN_8021Q_MVRP is not set ++# CONFIG_DECNET is not set ++CONFIG_LLC=y ++# CONFIG_LLC2 is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_PHONET is not set ++# CONFIG_6LOWPAN is not set ++# CONFIG_IEEE802154 is not set ++CONFIG_NET_SCHED=y ++ ++# ++# Queueing/Scheduling ++# ++CONFIG_NET_SCH_CBQ=y ++CONFIG_NET_SCH_HTB=m ++CONFIG_NET_SCH_HFSC=m ++# CONFIG_NET_SCH_PRIO is not set ++CONFIG_NET_SCH_MULTIQ=y ++CONFIG_NET_SCH_RED=m ++# CONFIG_NET_SCH_SFB is not set 
++CONFIG_NET_SCH_SFQ=m ++# CONFIG_NET_SCH_TEQL is not set ++# CONFIG_NET_SCH_TBF is not set ++CONFIG_NET_SCH_CBS=m ++CONFIG_NET_SCH_ETF=y ++# CONFIG_NET_SCH_TAPRIO is not set ++# CONFIG_NET_SCH_GRED is not set ++# CONFIG_NET_SCH_DSMARK is not set ++# CONFIG_NET_SCH_NETEM is not set ++# CONFIG_NET_SCH_DRR is not set ++CONFIG_NET_SCH_MQPRIO=y ++# CONFIG_NET_SCH_SKBPRIO is not set ++# CONFIG_NET_SCH_CHOKE is not set ++# CONFIG_NET_SCH_QFQ is not set ++CONFIG_NET_SCH_CODEL=y ++CONFIG_NET_SCH_FQ_CODEL=y ++# CONFIG_NET_SCH_CAKE is not set ++CONFIG_NET_SCH_FQ=y ++# CONFIG_NET_SCH_HHF is not set ++# CONFIG_NET_SCH_PIE is not set ++CONFIG_NET_SCH_INGRESS=m ++# CONFIG_NET_SCH_PLUG is not set ++# CONFIG_NET_SCH_DEFAULT is not set ++ ++# ++# Classification ++# ++CONFIG_NET_CLS=y ++CONFIG_NET_CLS_BASIC=m ++CONFIG_NET_CLS_TCINDEX=m ++CONFIG_NET_CLS_ROUTE4=m ++CONFIG_NET_CLS_FW=m ++CONFIG_NET_CLS_U32=m ++# CONFIG_CLS_U32_PERF is not set ++# CONFIG_CLS_U32_MARK is not set ++# CONFIG_NET_CLS_RSVP is not set ++# CONFIG_NET_CLS_RSVP6 is not set ++CONFIG_NET_CLS_FLOW=m ++CONFIG_NET_CLS_CGROUP=y ++CONFIG_NET_CLS_BPF=y ++# CONFIG_NET_CLS_FLOWER is not set ++# CONFIG_NET_CLS_MATCHALL is not set ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_STACK=32 ++# CONFIG_NET_EMATCH_CMP is not set ++# CONFIG_NET_EMATCH_NBYTE is not set ++# CONFIG_NET_EMATCH_U32 is not set ++# CONFIG_NET_EMATCH_META is not set ++# CONFIG_NET_EMATCH_TEXT is not set ++CONFIG_NET_EMATCH_IPSET=m ++CONFIG_NET_EMATCH_IPT=m ++CONFIG_NET_CLS_ACT=y ++CONFIG_NET_ACT_POLICE=m ++CONFIG_NET_ACT_GACT=m ++# CONFIG_GACT_PROB is not set ++CONFIG_NET_ACT_MIRRED=m ++CONFIG_NET_ACT_SAMPLE=m ++CONFIG_NET_ACT_IPT=m ++CONFIG_NET_ACT_NAT=m ++CONFIG_NET_ACT_PEDIT=m ++CONFIG_NET_ACT_SIMP=m ++CONFIG_NET_ACT_SKBEDIT=m ++CONFIG_NET_ACT_CSUM=m ++# CONFIG_NET_ACT_MPLS is not set ++CONFIG_NET_ACT_VLAN=m ++CONFIG_NET_ACT_BPF=m ++CONFIG_NET_ACT_CONNMARK=m ++# CONFIG_NET_ACT_CTINFO is not set ++CONFIG_NET_ACT_SKBMOD=m ++CONFIG_NET_ACT_IFE=m 
++CONFIG_NET_ACT_TUNNEL_KEY=m ++# CONFIG_NET_ACT_CT is not set ++CONFIG_NET_IFE_SKBMARK=m ++CONFIG_NET_IFE_SKBPRIO=m ++CONFIG_NET_IFE_SKBTCINDEX=m ++# CONFIG_NET_TC_SKB_EXT is not set ++CONFIG_NET_SCH_FIFO=y ++CONFIG_DCB=y ++CONFIG_DNS_RESOLVER=m ++# CONFIG_BATMAN_ADV is not set ++CONFIG_OPENVSWITCH=m ++CONFIG_OPENVSWITCH_GRE=m ++CONFIG_OPENVSWITCH_VXLAN=m ++CONFIG_VSOCKETS=m ++CONFIG_VSOCKETS_DIAG=m ++CONFIG_VMWARE_VMCI_VSOCKETS=m ++CONFIG_VIRTIO_VSOCKETS=m ++CONFIG_VIRTIO_VSOCKETS_COMMON=m ++# CONFIG_NETLINK_DIAG is not set ++CONFIG_MPLS=y ++CONFIG_NET_MPLS_GSO=m ++# CONFIG_MPLS_ROUTING is not set ++CONFIG_NET_NSH=m ++# CONFIG_HSR is not set ++CONFIG_NET_SWITCHDEV=y ++CONFIG_NET_L3_MASTER_DEV=y ++# CONFIG_NET_NCSI is not set ++CONFIG_RPS=y ++CONFIG_RFS_ACCEL=y ++CONFIG_XPS=y ++CONFIG_CGROUP_NET_PRIO=y ++CONFIG_CGROUP_NET_CLASSID=y ++CONFIG_NET_RX_BUSY_POLL=y ++CONFIG_BQL=y ++CONFIG_BPF_JIT=y ++# CONFIG_BPF_STREAM_PARSER is not set ++CONFIG_NET_FLOW_LIMIT=y ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# CONFIG_NET_DROP_MONITOR is not set ++# end of Network testing ++# end of Networking options ++ ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++CONFIG_BT=m ++CONFIG_BT_BREDR=y ++CONFIG_BT_RFCOMM=m ++# CONFIG_BT_RFCOMM_TTY is not set ++CONFIG_BT_BNEP=m ++CONFIG_BT_BNEP_MC_FILTER=y ++CONFIG_BT_BNEP_PROTO_FILTER=y ++CONFIG_BT_HIDP=m ++CONFIG_BT_HS=y ++CONFIG_BT_LE=y ++# CONFIG_BT_LEDS is not set ++# CONFIG_BT_SELFTEST is not set ++# CONFIG_BT_DEBUGFS is not set ++ ++# ++# Bluetooth device drivers ++# ++CONFIG_BT_INTEL=m ++CONFIG_BT_BCM=m ++CONFIG_BT_RTL=m ++CONFIG_BT_HCIBTUSB=m ++CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y ++CONFIG_BT_HCIBTUSB_BCM=y ++# CONFIG_BT_HCIBTUSB_MTK is not set ++CONFIG_BT_HCIBTUSB_RTL=y ++# CONFIG_BT_HCIBTSDIO is not set ++# CONFIG_BT_HCIUART is not set ++# CONFIG_BT_HCIBCM203X is not set ++CONFIG_BT_HCIBPA10X=m ++# CONFIG_BT_HCIBFUSB is not set ++# CONFIG_BT_HCIVHCI is not set ++# CONFIG_BT_MRVL is not set ++# 
CONFIG_BT_ATH3K is not set ++# CONFIG_BT_MTKSDIO is not set ++# CONFIG_BT_MTKUART is not set ++# end of Bluetooth device drivers ++ ++# CONFIG_AF_RXRPC is not set ++CONFIG_AF_KCM=m ++CONFIG_STREAM_PARSER=y ++CONFIG_FIB_RULES=y ++CONFIG_WIRELESS=y ++CONFIG_WIRELESS_EXT=y ++CONFIG_WEXT_CORE=y ++CONFIG_WEXT_PROC=y ++CONFIG_WEXT_PRIV=y ++CONFIG_CFG80211=m ++# CONFIG_NL80211_TESTMODE is not set ++# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set ++# CONFIG_CFG80211_CERTIFICATION_ONUS is not set ++CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y ++CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y ++CONFIG_CFG80211_DEFAULT_PS=y ++# CONFIG_CFG80211_DEBUGFS is not set ++CONFIG_CFG80211_CRDA_SUPPORT=y ++# CONFIG_CFG80211_WEXT is not set ++CONFIG_LIB80211=m ++CONFIG_LIB80211_CRYPT_WEP=m ++CONFIG_LIB80211_CRYPT_CCMP=m ++# CONFIG_LIB80211_DEBUG is not set ++CONFIG_MAC80211=m ++CONFIG_MAC80211_HAS_RC=y ++CONFIG_MAC80211_RC_MINSTREL=y ++CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y ++CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" ++# CONFIG_MAC80211_MESH is not set ++CONFIG_MAC80211_LEDS=y ++# CONFIG_MAC80211_DEBUGFS is not set ++# CONFIG_MAC80211_MESSAGE_TRACING is not set ++# CONFIG_MAC80211_DEBUG_MENU is not set ++CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 ++# CONFIG_WIMAX is not set ++CONFIG_RFKILL=m ++CONFIG_RFKILL_LEDS=y ++CONFIG_RFKILL_INPUT=y ++# CONFIG_RFKILL_GPIO is not set ++CONFIG_NET_9P=m ++CONFIG_NET_9P_VIRTIO=m ++# CONFIG_NET_9P_RDMA is not set ++# CONFIG_NET_9P_DEBUG is not set ++# CONFIG_CAIF is not set ++CONFIG_CEPH_LIB=m ++# CONFIG_CEPH_LIB_PRETTYDEBUG is not set ++# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set ++# CONFIG_NFC is not set ++CONFIG_PSAMPLE=m ++CONFIG_NET_IFE=m ++# CONFIG_LWTUNNEL is not set ++CONFIG_DST_CACHE=y ++CONFIG_GRO_CELLS=y ++CONFIG_NET_SOCK_MSG=y ++CONFIG_NET_DEVLINK=y ++CONFIG_PAGE_POOL=y ++CONFIG_FAILOVER=m ++CONFIG_HAVE_EBPF_JIT=y ++ ++# ++# Device Drivers ++# ++CONFIG_HAVE_EISA=y ++# CONFIG_EISA is not set ++CONFIG_HAVE_PCI=y ++CONFIG_PCI=y ++CONFIG_PCI_DOMAINS=y 
++CONFIG_PCIEPORTBUS=y ++CONFIG_HOTPLUG_PCI_PCIE=y ++# CONFIG_PCIEAER is not set ++CONFIG_PCIEASPM=y ++# CONFIG_PCIEASPM_DEBUG is not set ++# CONFIG_PCIEASPM_DEFAULT is not set ++CONFIG_PCIEASPM_POWERSAVE=y ++# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set ++# CONFIG_PCIEASPM_PERFORMANCE is not set ++CONFIG_PCIE_PME=y ++# CONFIG_PCIE_PTM is not set ++# CONFIG_PCIE_BW is not set ++CONFIG_PCI_MSI=y ++CONFIG_PCI_MSI_IRQ_DOMAIN=y ++CONFIG_PCI_QUIRKS=y ++# CONFIG_PCI_DEBUG is not set ++# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set ++CONFIG_PCI_STUB=y ++# CONFIG_PCI_PF_STUB is not set ++CONFIG_PCI_ATS=y ++CONFIG_PCI_LOCKLESS_CONFIG=y ++CONFIG_PCI_IOV=y ++# CONFIG_PCI_PRI is not set ++# CONFIG_PCI_PASID is not set ++# CONFIG_PCI_P2PDMA is not set ++CONFIG_PCI_LABEL=y ++CONFIG_HOTPLUG_PCI=y ++CONFIG_HOTPLUG_PCI_ACPI=y ++CONFIG_HOTPLUG_PCI_ACPI_IBM=m ++# CONFIG_HOTPLUG_PCI_CPCI is not set ++# CONFIG_HOTPLUG_PCI_SHPC is not set ++ ++# ++# PCI controller drivers ++# ++ ++# ++# Cadence PCIe controllers support ++# ++# end of Cadence PCIe controllers support ++ ++# CONFIG_VMD is not set ++ ++# ++# DesignWare PCI Core Support ++# ++# CONFIG_PCIE_DW_PLAT_HOST is not set ++# CONFIG_PCIE_DW_PLAT_EP is not set ++# CONFIG_PCI_MESON is not set ++# end of DesignWare PCI Core Support ++# end of PCI controller drivers ++ ++# ++# PCI Endpoint ++# ++CONFIG_PCI_ENDPOINT=y ++CONFIG_PCI_ENDPOINT_CONFIGFS=y ++# CONFIG_PCI_EPF_TEST is not set ++# end of PCI Endpoint ++ ++# ++# PCI switch controller drivers ++# ++CONFIG_PCI_SW_SWITCHTEC=m ++# end of PCI switch controller drivers ++ ++# CONFIG_PCCARD is not set ++# CONFIG_RAPIDIO is not set ++ ++# ++# Generic Driver Options ++# ++# CONFIG_UEVENT_HELPER is not set ++CONFIG_DEVTMPFS=y ++CONFIG_DEVTMPFS_MOUNT=y ++CONFIG_STANDALONE=y ++CONFIG_PREVENT_FIRMWARE_BUILD=y ++ ++# ++# Firmware loader ++# ++CONFIG_FW_LOADER=y ++CONFIG_EXTRA_FIRMWARE="" ++# CONFIG_FW_LOADER_USER_HELPER is not set ++# CONFIG_FW_LOADER_COMPRESS is not set ++# end of Firmware loader 
++ ++CONFIG_WANT_DEV_COREDUMP=y ++CONFIG_ALLOW_DEV_COREDUMP=y ++CONFIG_DEV_COREDUMP=y ++# CONFIG_DEBUG_DRIVER is not set ++CONFIG_DEBUG_DEVRES=y ++# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set ++# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set ++CONFIG_GENERIC_CPU_AUTOPROBE=y ++CONFIG_GENERIC_CPU_VULNERABILITIES=y ++CONFIG_REGMAP=y ++CONFIG_REGMAP_I2C=y ++CONFIG_REGMAP_SPI=y ++CONFIG_REGMAP_IRQ=y ++CONFIG_DMA_SHARED_BUFFER=y ++# CONFIG_DMA_FENCE_TRACE is not set ++# end of Generic Driver Options ++ ++# ++# Bus devices ++# ++# end of Bus devices ++ ++CONFIG_CONNECTOR=y ++CONFIG_PROC_EVENTS=y ++# CONFIG_GNSS is not set ++CONFIG_MTD=m ++# CONFIG_MTD_TESTS is not set ++ ++# ++# Partition parsers ++# ++# CONFIG_MTD_AR7_PARTS is not set ++# CONFIG_MTD_CMDLINE_PARTS is not set ++# CONFIG_MTD_REDBOOT_PARTS is not set ++# end of Partition parsers ++ ++# ++# User Modules And Translation Layers ++# ++# CONFIG_MTD_BLOCK is not set ++# CONFIG_MTD_BLOCK_RO is not set ++# CONFIG_FTL is not set ++# CONFIG_NFTL is not set ++# CONFIG_INFTL is not set ++# CONFIG_RFD_FTL is not set ++# CONFIG_SSFDC is not set ++# CONFIG_SM_FTL is not set ++# CONFIG_MTD_OOPS is not set ++# CONFIG_MTD_SWAP is not set ++# CONFIG_MTD_PARTITIONED_MASTER is not set ++ ++# ++# RAM/ROM/Flash chip drivers ++# ++CONFIG_MTD_CFI=m ++# CONFIG_MTD_JEDECPROBE is not set ++CONFIG_MTD_GEN_PROBE=m ++CONFIG_MTD_CFI_ADV_OPTIONS=y ++CONFIG_MTD_CFI_NOSWAP=y ++# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set ++# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set ++CONFIG_MTD_CFI_GEOMETRY=y ++CONFIG_MTD_MAP_BANK_WIDTH_1=y ++CONFIG_MTD_MAP_BANK_WIDTH_2=y ++CONFIG_MTD_MAP_BANK_WIDTH_4=y ++CONFIG_MTD_MAP_BANK_WIDTH_8=y ++CONFIG_MTD_MAP_BANK_WIDTH_16=y ++CONFIG_MTD_MAP_BANK_WIDTH_32=y ++CONFIG_MTD_CFI_I1=y ++CONFIG_MTD_CFI_I2=y ++CONFIG_MTD_CFI_I4=y ++CONFIG_MTD_CFI_I8=y ++# CONFIG_MTD_OTP is not set ++CONFIG_MTD_CFI_INTELEXT=m ++# CONFIG_MTD_CFI_AMDSTD is not set ++# CONFIG_MTD_CFI_STAA is not set ++CONFIG_MTD_CFI_UTIL=m ++CONFIG_MTD_RAM=m ++# 
CONFIG_MTD_ROM is not set ++# CONFIG_MTD_ABSENT is not set ++# end of RAM/ROM/Flash chip drivers ++ ++# ++# Mapping drivers for chip access ++# ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++# CONFIG_MTD_PHYSMAP is not set ++# CONFIG_MTD_SBC_GXX is not set ++# CONFIG_MTD_PCI is not set ++CONFIG_MTD_INTEL_VR_NOR=m ++CONFIG_MTD_PLATRAM=m ++# end of Mapping drivers for chip access ++ ++# ++# Self-contained MTD device drivers ++# ++# CONFIG_MTD_PMC551 is not set ++# CONFIG_MTD_DATAFLASH is not set ++# CONFIG_MTD_MCHP23K256 is not set ++# CONFIG_MTD_SST25L is not set ++# CONFIG_MTD_SLRAM is not set ++# CONFIG_MTD_PHRAM is not set ++# CONFIG_MTD_MTDRAM is not set ++# CONFIG_MTD_BLOCK2MTD is not set ++ ++# ++# Disk-On-Chip Device Drivers ++# ++# CONFIG_MTD_DOCG3 is not set ++# end of Self-contained MTD device drivers ++ ++# CONFIG_MTD_ONENAND is not set ++# CONFIG_MTD_RAW_NAND is not set ++# CONFIG_MTD_SPI_NAND is not set ++ ++# ++# LPDDR & LPDDR2 PCM memory drivers ++# ++# CONFIG_MTD_LPDDR is not set ++# end of LPDDR & LPDDR2 PCM memory drivers ++ ++# CONFIG_MTD_SPI_NOR is not set ++# CONFIG_MTD_UBI is not set ++# CONFIG_MTD_HYPERBUS is not set ++# CONFIG_OF is not set ++CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y ++# CONFIG_PARPORT is not set ++CONFIG_PNP=y ++# CONFIG_PNP_DEBUG_MESSAGES is not set ++ ++# ++# Protocols ++# ++CONFIG_PNPACPI=y ++CONFIG_BLK_DEV=y ++# CONFIG_BLK_DEV_NULL_BLK is not set ++# CONFIG_BLK_DEV_FD is not set ++CONFIG_CDROM=m ++# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set ++CONFIG_ZRAM=m ++CONFIG_ZRAM_WRITEBACK=y ++# CONFIG_ZRAM_MEMORY_TRACKING is not set ++# CONFIG_BLK_DEV_UMEM is not set ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 ++CONFIG_BLK_DEV_CRYPTOLOOP=y ++# CONFIG_BLK_DEV_DRBD is not set ++CONFIG_BLK_DEV_NBD=m ++# CONFIG_BLK_DEV_SKD is not set ++# CONFIG_BLK_DEV_SX8 is not set ++CONFIG_BLK_DEV_RAM=m ++CONFIG_BLK_DEV_RAM_COUNT=16 ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++# CONFIG_CDROM_PKTCDVD is not set ++# CONFIG_ATA_OVER_ETH is not set 
++CONFIG_VIRTIO_BLK=y ++CONFIG_VIRTIO_BLK_SCSI=y ++CONFIG_BLK_DEV_RBD=m ++# CONFIG_BLK_DEV_RSXX is not set ++ ++# ++# NVME Support ++# ++CONFIG_NVME_CORE=y ++CONFIG_BLK_DEV_NVME=y ++CONFIG_NVME_MULTIPATH=y ++# CONFIG_NVME_RDMA is not set ++# CONFIG_NVME_FC is not set ++# CONFIG_NVME_TCP is not set ++# CONFIG_NVME_TARGET is not set ++# end of NVME Support ++ ++# ++# Misc devices ++# ++# CONFIG_AD525X_DPOT is not set ++# CONFIG_DUMMY_IRQ is not set ++# CONFIG_IBM_ASM is not set ++# CONFIG_PHANTOM is not set ++# CONFIG_TIFM_CORE is not set ++# CONFIG_ICS932S401 is not set ++# CONFIG_ENCLOSURE_SERVICES is not set ++CONFIG_HP_ILO=m ++# CONFIG_APDS9802ALS is not set ++# CONFIG_ISL29003 is not set ++# CONFIG_ISL29020 is not set ++# CONFIG_SENSORS_TSL2550 is not set ++# CONFIG_SENSORS_BH1770 is not set ++# CONFIG_SENSORS_APDS990X is not set ++# CONFIG_HMC6352 is not set ++# CONFIG_DS1682 is not set ++CONFIG_VMWARE_BALLOON=m ++# CONFIG_LATTICE_ECP3_CONFIG is not set ++# CONFIG_SRAM is not set ++# CONFIG_PCI_ENDPOINT_TEST is not set ++# CONFIG_XILINX_SDFEC is not set ++CONFIG_MISC_RTSX=m ++# CONFIG_PVPANIC is not set ++# CONFIG_C2PORT is not set ++ ++# ++# EEPROM support ++# ++# CONFIG_EEPROM_AT24 is not set ++# CONFIG_EEPROM_AT25 is not set ++# CONFIG_EEPROM_LEGACY is not set ++# CONFIG_EEPROM_MAX6875 is not set ++CONFIG_EEPROM_93CX6=m ++# CONFIG_EEPROM_93XX46 is not set ++# CONFIG_EEPROM_IDT_89HPESX is not set ++# CONFIG_EEPROM_EE1004 is not set ++# end of EEPROM support ++ ++# CONFIG_CB710_CORE is not set ++ ++# ++# Texas Instruments shared transport line discipline ++# ++# CONFIG_TI_ST is not set ++# end of Texas Instruments shared transport line discipline ++ ++# CONFIG_SENSORS_LIS3_I2C is not set ++# CONFIG_ALTERA_STAPL is not set ++# CONFIG_INTEL_MEI is not set ++# CONFIG_INTEL_MEI_ME is not set ++# CONFIG_INTEL_MEI_TXE is not set ++# CONFIG_INTEL_MEI_HDCP is not set ++CONFIG_VMWARE_VMCI=m ++ ++# ++# Intel MIC & related support ++# ++ ++# ++# Intel MIC Bus Driver ++# 
++# CONFIG_INTEL_MIC_BUS is not set ++ ++# ++# SCIF Bus Driver ++# ++# CONFIG_SCIF_BUS is not set ++ ++# ++# VOP Bus Driver ++# ++# CONFIG_VOP_BUS is not set ++ ++# ++# Intel MIC Host Driver ++# ++ ++# ++# Intel MIC Card Driver ++# ++ ++# ++# SCIF Driver ++# ++ ++# ++# Intel MIC Coprocessor State Management (COSM) Drivers ++# ++ ++# ++# VOP Driver ++# ++# end of Intel MIC & related support ++ ++# CONFIG_GENWQE is not set ++# CONFIG_ECHO is not set ++# CONFIG_MISC_ALCOR_PCI is not set ++CONFIG_MISC_RTSX_PCI=m ++CONFIG_MISC_RTSX_USB=m ++# CONFIG_HABANA_AI is not set ++# end of Misc devices ++ ++CONFIG_HAVE_IDE=y ++# CONFIG_IDE is not set ++ ++# ++# SCSI device support ++# ++CONFIG_SCSI_MOD=y ++CONFIG_RAID_ATTRS=y ++CONFIG_SCSI=y ++CONFIG_SCSI_DMA=y ++CONFIG_SCSI_NETLINK=y ++# CONFIG_SCSI_PROC_FS is not set ++ ++# ++# SCSI support type (disk, tape, CD-ROM) ++# ++CONFIG_BLK_DEV_SD=y ++# CONFIG_CHR_DEV_ST is not set ++CONFIG_BLK_DEV_SR=m ++# CONFIG_BLK_DEV_SR_VENDOR is not set ++CONFIG_CHR_DEV_SG=y ++# CONFIG_CHR_DEV_SCH is not set ++CONFIG_SCSI_CONSTANTS=y ++CONFIG_SCSI_LOGGING=y ++CONFIG_SCSI_SCAN_ASYNC=y ++ ++# ++# SCSI Transports ++# ++CONFIG_SCSI_SPI_ATTRS=y ++CONFIG_SCSI_FC_ATTRS=y ++CONFIG_SCSI_ISCSI_ATTRS=m ++CONFIG_SCSI_SAS_ATTRS=y ++CONFIG_SCSI_SAS_LIBSAS=y ++CONFIG_SCSI_SAS_ATA=y ++CONFIG_SCSI_SAS_HOST_SMP=y ++CONFIG_SCSI_SRP_ATTRS=m ++# end of SCSI Transports ++ ++CONFIG_SCSI_LOWLEVEL=y ++CONFIG_ISCSI_TCP=m ++# CONFIG_ISCSI_BOOT_SYSFS is not set ++# CONFIG_SCSI_CXGB3_ISCSI is not set ++# CONFIG_SCSI_CXGB4_ISCSI is not set ++# CONFIG_SCSI_BNX2_ISCSI is not set ++# CONFIG_SCSI_BNX2X_FCOE is not set ++# CONFIG_BE2ISCSI is not set ++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set ++CONFIG_SCSI_HPSA=y ++# CONFIG_SCSI_3W_9XXX is not set ++# CONFIG_SCSI_3W_SAS is not set ++# CONFIG_SCSI_ACARD is not set ++# CONFIG_SCSI_AACRAID is not set ++# CONFIG_SCSI_AIC7XXX is not set ++# CONFIG_SCSI_AIC79XX is not set ++# CONFIG_SCSI_AIC94XX is not set ++# CONFIG_SCSI_MVSAS is not 
set ++# CONFIG_SCSI_MVUMI is not set ++# CONFIG_SCSI_DPT_I2O is not set ++# CONFIG_SCSI_ADVANSYS is not set ++# CONFIG_SCSI_ARCMSR is not set ++# CONFIG_SCSI_ESAS2R is not set ++# CONFIG_MEGARAID_NEWGEN is not set ++# CONFIG_MEGARAID_LEGACY is not set ++CONFIG_MEGARAID_SAS=y ++CONFIG_SCSI_MPT3SAS=y ++CONFIG_SCSI_MPT2SAS_MAX_SGE=128 ++CONFIG_SCSI_MPT3SAS_MAX_SGE=128 ++CONFIG_SCSI_MPT2SAS=y ++CONFIG_SCSI_SMARTPQI=y ++CONFIG_SCSI_UFSHCD=m ++# CONFIG_SCSI_UFSHCD_PCI is not set ++# CONFIG_SCSI_UFSHCD_PLATFORM is not set ++# CONFIG_SCSI_UFS_BSG is not set ++# CONFIG_SCSI_HPTIOP is not set ++CONFIG_SCSI_BUSLOGIC=y ++# CONFIG_SCSI_FLASHPOINT is not set ++# CONFIG_SCSI_MYRB is not set ++# CONFIG_SCSI_MYRS is not set ++CONFIG_VMWARE_PVSCSI=y ++CONFIG_LIBFC=y ++CONFIG_LIBFCOE=m ++CONFIG_FCOE=m ++CONFIG_FCOE_FNIC=m ++# CONFIG_SCSI_SNIC is not set ++# CONFIG_SCSI_DMX3191D is not set ++# CONFIG_SCSI_FDOMAIN_PCI is not set ++# CONFIG_SCSI_GDTH is not set ++CONFIG_SCSI_ISCI=y ++# CONFIG_SCSI_IPS is not set ++# CONFIG_SCSI_INITIO is not set ++# CONFIG_SCSI_INIA100 is not set ++# CONFIG_SCSI_STEX is not set ++# CONFIG_SCSI_SYM53C8XX_2 is not set ++# CONFIG_SCSI_IPR is not set ++# CONFIG_SCSI_QLOGIC_1280 is not set ++# CONFIG_SCSI_QLA_FC is not set ++# CONFIG_SCSI_QLA_ISCSI is not set ++# CONFIG_SCSI_LPFC is not set ++# CONFIG_SCSI_DC395x is not set ++# CONFIG_SCSI_AM53C974 is not set ++# CONFIG_SCSI_WD719X is not set ++# CONFIG_SCSI_DEBUG is not set ++# CONFIG_SCSI_PMCRAID is not set ++# CONFIG_SCSI_PM8001 is not set ++# CONFIG_SCSI_BFA_FC is not set ++CONFIG_SCSI_VIRTIO=y ++# CONFIG_SCSI_CHELSIO_FCOE is not set ++# CONFIG_SCSI_DH is not set ++# end of SCSI device support ++ ++CONFIG_ATA=y ++# CONFIG_ATA_VERBOSE_ERROR is not set ++CONFIG_ATA_ACPI=y ++# CONFIG_SATA_ZPODD is not set ++CONFIG_SATA_PMP=y ++ ++# ++# Controllers with non-SFF native interface ++# ++CONFIG_SATA_AHCI=y ++CONFIG_SATA_MOBILE_LPM_POLICY=3 ++CONFIG_SATA_AHCI_PLATFORM=y ++# CONFIG_SATA_INIC162X is not set ++# 
CONFIG_SATA_ACARD_AHCI is not set ++# CONFIG_SATA_SIL24 is not set ++CONFIG_ATA_SFF=y ++ ++# ++# SFF controllers with custom DMA interface ++# ++# CONFIG_PDC_ADMA is not set ++# CONFIG_SATA_QSTOR is not set ++# CONFIG_SATA_SX4 is not set ++CONFIG_ATA_BMDMA=y ++ ++# ++# SATA SFF controllers with BMDMA ++# ++CONFIG_ATA_PIIX=y ++# CONFIG_SATA_DWC is not set ++# CONFIG_SATA_MV is not set ++# CONFIG_SATA_NV is not set ++# CONFIG_SATA_PROMISE is not set ++# CONFIG_SATA_SIL is not set ++# CONFIG_SATA_SIS is not set ++# CONFIG_SATA_SVW is not set ++# CONFIG_SATA_ULI is not set ++# CONFIG_SATA_VIA is not set ++# CONFIG_SATA_VITESSE is not set ++ ++# ++# PATA SFF controllers with BMDMA ++# ++# CONFIG_PATA_ALI is not set ++# CONFIG_PATA_AMD is not set ++# CONFIG_PATA_ARTOP is not set ++# CONFIG_PATA_ATIIXP is not set ++# CONFIG_PATA_ATP867X is not set ++# CONFIG_PATA_CMD64X is not set ++# CONFIG_PATA_CYPRESS is not set ++# CONFIG_PATA_EFAR is not set ++# CONFIG_PATA_HPT366 is not set ++# CONFIG_PATA_HPT37X is not set ++# CONFIG_PATA_HPT3X2N is not set ++# CONFIG_PATA_HPT3X3 is not set ++# CONFIG_PATA_IT8213 is not set ++# CONFIG_PATA_IT821X is not set ++# CONFIG_PATA_JMICRON is not set ++# CONFIG_PATA_MARVELL is not set ++# CONFIG_PATA_NETCELL is not set ++# CONFIG_PATA_NINJA32 is not set ++# CONFIG_PATA_NS87415 is not set ++# CONFIG_PATA_OLDPIIX is not set ++# CONFIG_PATA_OPTIDMA is not set ++# CONFIG_PATA_PDC2027X is not set ++# CONFIG_PATA_PDC_OLD is not set ++# CONFIG_PATA_RADISYS is not set ++# CONFIG_PATA_RDC is not set ++CONFIG_PATA_SCH=y ++# CONFIG_PATA_SERVERWORKS is not set ++# CONFIG_PATA_SIL680 is not set ++# CONFIG_PATA_SIS is not set ++# CONFIG_PATA_TOSHIBA is not set ++# CONFIG_PATA_TRIFLEX is not set ++# CONFIG_PATA_VIA is not set ++# CONFIG_PATA_WINBOND is not set ++ ++# ++# PIO-only SFF controllers ++# ++# CONFIG_PATA_CMD640_PCI is not set ++CONFIG_PATA_MPIIX=y ++# CONFIG_PATA_NS87410 is not set ++# CONFIG_PATA_OPTI is not set ++# CONFIG_PATA_PLATFORM is not 
set ++# CONFIG_PATA_RZ1000 is not set ++ ++# ++# Generic fallback / legacy drivers ++# ++CONFIG_PATA_ACPI=m ++CONFIG_ATA_GENERIC=y ++# CONFIG_PATA_LEGACY is not set ++CONFIG_MD=y ++CONFIG_BLK_DEV_MD=y ++# CONFIG_MD_AUTODETECT is not set ++# CONFIG_MD_LINEAR is not set ++CONFIG_MD_RAID0=y ++CONFIG_MD_RAID1=y ++CONFIG_MD_RAID10=y ++CONFIG_MD_RAID456=m ++CONFIG_MD_MULTIPATH=m ++CONFIG_MD_FAULTY=m ++CONFIG_BCACHE=m ++# CONFIG_BCACHE_DEBUG is not set ++# CONFIG_BCACHE_CLOSURES_DEBUG is not set ++CONFIG_BLK_DEV_DM_BUILTIN=y ++CONFIG_BLK_DEV_DM=y ++CONFIG_DM_DEBUG=y ++CONFIG_DM_BUFIO=m ++# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set ++CONFIG_DM_BIO_PRISON=m ++CONFIG_DM_PERSISTENT_DATA=m ++CONFIG_DM_UNSTRIPED=m ++CONFIG_DM_CRYPT=y ++CONFIG_DM_SNAPSHOT=m ++CONFIG_DM_THIN_PROVISIONING=m ++# CONFIG_DM_CACHE is not set ++# CONFIG_DM_WRITECACHE is not set ++# CONFIG_DM_ERA is not set ++# CONFIG_DM_CLONE is not set ++CONFIG_DM_MIRROR=m ++# CONFIG_DM_LOG_USERSPACE is not set ++CONFIG_DM_RAID=m ++CONFIG_DM_ZERO=m ++CONFIG_DM_MULTIPATH=m ++# CONFIG_DM_MULTIPATH_QL is not set ++# CONFIG_DM_MULTIPATH_ST is not set ++CONFIG_DM_DELAY=m ++# CONFIG_DM_DUST is not set ++# CONFIG_DM_INIT is not set ++# CONFIG_DM_UEVENT is not set ++# CONFIG_DM_FLAKEY is not set ++# CONFIG_DM_VERITY is not set ++# CONFIG_DM_SWITCH is not set ++# CONFIG_DM_LOG_WRITES is not set ++# CONFIG_DM_INTEGRITY is not set ++CONFIG_DM_ZONED=m ++# CONFIG_TARGET_CORE is not set ++CONFIG_FUSION=y ++CONFIG_FUSION_SPI=y ++# CONFIG_FUSION_FC is not set ++CONFIG_FUSION_SAS=y ++CONFIG_FUSION_MAX_SGE=128 ++# CONFIG_FUSION_CTL is not set ++# CONFIG_FUSION_LOGGING is not set ++ ++# ++# IEEE 1394 (FireWire) support ++# ++# CONFIG_FIREWIRE is not set ++# CONFIG_FIREWIRE_NOSY is not set ++# end of IEEE 1394 (FireWire) support ++ ++# CONFIG_MACINTOSH_DRIVERS is not set ++CONFIG_NETDEVICES=y ++CONFIG_MII=y ++CONFIG_NET_CORE=y ++CONFIG_BONDING=m ++CONFIG_DUMMY=m ++# CONFIG_EQUALIZER is not set ++# CONFIG_NET_FC is not set 
++CONFIG_IFB=m ++CONFIG_NET_TEAM=m ++CONFIG_NET_TEAM_MODE_BROADCAST=m ++CONFIG_NET_TEAM_MODE_ROUNDROBIN=m ++CONFIG_NET_TEAM_MODE_RANDOM=m ++CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m ++CONFIG_NET_TEAM_MODE_LOADBALANCE=m ++CONFIG_MACVLAN=y ++CONFIG_MACVTAP=y ++CONFIG_IPVLAN_L3S=y ++CONFIG_IPVLAN=m ++CONFIG_IPVTAP=m ++CONFIG_VXLAN=m ++# CONFIG_GENEVE is not set ++# CONFIG_GTP is not set ++CONFIG_MACSEC=m ++CONFIG_NETCONSOLE=m ++CONFIG_NETCONSOLE_DYNAMIC=y ++CONFIG_NETPOLL=y ++CONFIG_NET_POLL_CONTROLLER=y ++CONFIG_TUN=y ++CONFIG_TAP=y ++# CONFIG_TUN_VNET_CROSS_LE is not set ++CONFIG_VETH=m ++CONFIG_VIRTIO_NET=m ++# CONFIG_NLMON is not set ++CONFIG_VSOCKMON=m ++# CONFIG_ARCNET is not set ++ ++# ++# CAIF transport drivers ++# ++ ++# ++# Distributed Switch Architecture drivers ++# ++CONFIG_B53=m ++# CONFIG_B53_SPI_DRIVER is not set ++# CONFIG_B53_MDIO_DRIVER is not set ++# CONFIG_B53_MMAP_DRIVER is not set ++# CONFIG_B53_SRAB_DRIVER is not set ++# CONFIG_B53_SERDES is not set ++# CONFIG_NET_DSA_BCM_SF2 is not set ++# CONFIG_NET_DSA_LOOP is not set ++# CONFIG_NET_DSA_LANTIQ_GSWIP is not set ++# CONFIG_NET_DSA_MT7530 is not set ++# CONFIG_NET_DSA_MV88E6060 is not set ++# CONFIG_NET_DSA_MICROCHIP_KSZ9477 is not set ++# CONFIG_NET_DSA_MICROCHIP_KSZ8795 is not set ++CONFIG_NET_DSA_MV88E6XXX=m ++CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y ++# CONFIG_NET_DSA_MV88E6XXX_PTP is not set ++# CONFIG_NET_DSA_SJA1105 is not set ++# CONFIG_NET_DSA_QCA8K is not set ++# CONFIG_NET_DSA_REALTEK_SMI is not set ++# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set ++# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set ++# end of Distributed Switch Architecture drivers ++ ++CONFIG_ETHERNET=y ++CONFIG_MDIO=m ++# CONFIG_NET_VENDOR_3COM is not set ++# CONFIG_NET_VENDOR_ADAPTEC is not set ++CONFIG_NET_VENDOR_AGERE=y ++CONFIG_ET131X=m ++# CONFIG_NET_VENDOR_ALACRITECH is not set ++# CONFIG_NET_VENDOR_ALTEON is not set ++# CONFIG_ALTERA_TSE is not set ++CONFIG_NET_VENDOR_AMAZON=y ++CONFIG_ENA_ETHERNET=m ++# CONFIG_NET_VENDOR_AMD 
is not set ++# CONFIG_NET_VENDOR_AQUANTIA is not set ++# CONFIG_NET_VENDOR_ARC is not set ++CONFIG_NET_VENDOR_ATHEROS=y ++CONFIG_ATL2=m ++CONFIG_ATL1=m ++CONFIG_ATL1E=m ++CONFIG_ATL1C=m ++CONFIG_ALX=m ++# CONFIG_NET_VENDOR_AURORA is not set ++CONFIG_NET_VENDOR_BROADCOM=y ++CONFIG_B44=m ++CONFIG_B44_PCI_AUTOSELECT=y ++CONFIG_B44_PCICORE_AUTOSELECT=y ++CONFIG_B44_PCI=y ++# CONFIG_BCMGENET is not set ++CONFIG_BNX2=m ++CONFIG_CNIC=m ++CONFIG_TIGON3=m ++# CONFIG_TIGON3_HWMON is not set ++CONFIG_BNX2X=m ++CONFIG_BNX2X_SRIOV=y ++# CONFIG_SYSTEMPORT is not set ++CONFIG_BNXT=m ++CONFIG_BNXT_SRIOV=y ++CONFIG_BNXT_FLOWER_OFFLOAD=y ++# CONFIG_BNXT_DCB is not set ++CONFIG_BNXT_HWMON=y ++# CONFIG_NET_VENDOR_BROCADE is not set ++CONFIG_NET_VENDOR_CADENCE=y ++# CONFIG_MACB is not set ++# CONFIG_NET_VENDOR_CAVIUM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++CONFIG_NET_VENDOR_CISCO=y ++CONFIG_ENIC=m ++CONFIG_NET_VENDOR_CORTINA=y ++# CONFIG_CX_ECAT is not set ++# CONFIG_DNET is not set ++# CONFIG_NET_VENDOR_DEC is not set ++CONFIG_NET_VENDOR_DLINK=y ++CONFIG_DL2K=m ++CONFIG_SUNDANCE=m ++CONFIG_SUNDANCE_MMIO=y ++CONFIG_NET_VENDOR_EMULEX=y ++CONFIG_BE2NET=m ++CONFIG_BE2NET_HWMON=y ++CONFIG_BE2NET_BE2=y ++CONFIG_BE2NET_BE3=y ++CONFIG_BE2NET_LANCER=y ++CONFIG_BE2NET_SKYHAWK=y ++# CONFIG_NET_VENDOR_EZCHIP is not set ++CONFIG_NET_VENDOR_GOOGLE=y ++# CONFIG_GVE is not set ++# CONFIG_NET_VENDOR_HP is not set ++CONFIG_NET_VENDOR_HUAWEI=y ++CONFIG_HINIC=m ++CONFIG_NET_VENDOR_I825XX=y ++CONFIG_NET_VENDOR_INTEL=y ++CONFIG_E100=y ++CONFIG_E1000=m ++CONFIG_E1000E=m ++CONFIG_E1000E_HWTS=y ++CONFIG_IGB=m ++# CONFIG_IGB_HWMON is not set ++CONFIG_IGB_DCA=y ++CONFIG_IGBVF=m ++CONFIG_IXGB=m ++CONFIG_IXGBE=m ++# CONFIG_IXGBE_HWMON is not set ++CONFIG_IXGBE_DCA=y ++# CONFIG_IXGBE_DCB is not set ++CONFIG_IXGBE_IPSEC=y ++CONFIG_IXGBEVF=m ++CONFIG_IXGBEVF_IPSEC=y ++CONFIG_I40E=m ++CONFIG_I40E_DCB=y ++CONFIG_IAVF=m ++CONFIG_I40EVF=m ++CONFIG_ICE=m ++CONFIG_FM10K=m ++# CONFIG_IGC is not set ++# 
CONFIG_JME is not set ++CONFIG_NET_VENDOR_MARVELL=y ++CONFIG_MVMDIO=m ++CONFIG_SKGE=m ++CONFIG_SKGE_DEBUG=y ++CONFIG_SKGE_GENESIS=y ++CONFIG_SKY2=m ++# CONFIG_SKY2_DEBUG is not set ++CONFIG_NET_VENDOR_MELLANOX=y ++CONFIG_MLX4_EN=m ++CONFIG_MLX4_EN_DCB=y ++CONFIG_MLX4_CORE=m ++CONFIG_MLX4_DEBUG=y ++# CONFIG_MLX4_CORE_GEN2 is not set ++CONFIG_MLX5_CORE=m ++CONFIG_MLX5_ACCEL=y ++CONFIG_MLX5_FPGA=y ++CONFIG_MLX5_CORE_EN=y ++CONFIG_MLX5_EN_ARFS=y ++CONFIG_MLX5_EN_RXNFC=y ++CONFIG_MLX5_MPFS=y ++CONFIG_MLX5_ESWITCH=y ++CONFIG_MLX5_CORE_EN_DCB=y ++CONFIG_MLX5_CORE_IPOIB=y ++# CONFIG_MLX5_FPGA_IPSEC is not set ++CONFIG_MLX5_SW_STEERING=y ++# CONFIG_MLXSW_CORE is not set ++CONFIG_MLXFW=m ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++CONFIG_NET_VENDOR_MICROSEMI=y ++# CONFIG_MSCC_OCELOT_SWITCH is not set ++# CONFIG_NET_VENDOR_MYRI is not set ++# CONFIG_FEALNX is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++CONFIG_NET_VENDOR_NETERION=y ++# CONFIG_S2IO is not set ++# CONFIG_VXGE is not set ++CONFIG_NET_VENDOR_NETRONOME=y ++# CONFIG_NFP is not set ++CONFIG_NET_VENDOR_NI=y ++# CONFIG_NI_XGE_MANAGEMENT_ENET is not set ++# CONFIG_NET_VENDOR_NVIDIA is not set ++# CONFIG_NET_VENDOR_OKI is not set ++# CONFIG_ETHOC is not set ++CONFIG_NET_VENDOR_PACKET_ENGINES=y ++# CONFIG_HAMACHI is not set ++# CONFIG_YELLOWFIN is not set ++CONFIG_NET_VENDOR_PENSANDO=y ++# CONFIG_IONIC is not set ++# CONFIG_NET_VENDOR_QLOGIC is not set ++# CONFIG_NET_VENDOR_QUALCOMM is not set ++# CONFIG_NET_VENDOR_RDC is not set ++CONFIG_NET_VENDOR_REALTEK=y ++CONFIG_8139CP=m ++CONFIG_8139TOO=m ++CONFIG_8139TOO_PIO=y ++CONFIG_8139TOO_TUNE_TWISTER=y ++CONFIG_8139TOO_8129=y ++CONFIG_8139_OLD_RX_RESET=y ++CONFIG_R8169=m ++# CONFIG_NET_VENDOR_RENESAS is not set ++# CONFIG_NET_VENDOR_ROCKER is not set ++# CONFIG_NET_VENDOR_SAMSUNG is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_NET_VENDOR_SOLARFLARE=y ++CONFIG_SFC=m ++CONFIG_SFC_MTD=y ++CONFIG_SFC_MCDI_MON=y 
++CONFIG_SFC_SRIOV=y ++CONFIG_SFC_MCDI_LOGGING=y ++CONFIG_SFC_FALCON=m ++CONFIG_SFC_FALCON_MTD=y ++# CONFIG_NET_VENDOR_SILAN is not set ++# CONFIG_NET_VENDOR_SIS is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++CONFIG_NET_VENDOR_SOCIONEXT=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_NET_VENDOR_SUN is not set ++# CONFIG_NET_VENDOR_SYNOPSYS is not set ++# CONFIG_NET_VENDOR_TEHUTI is not set ++# CONFIG_NET_VENDOR_TI is not set ++# CONFIG_NET_VENDOR_VIA is not set ++# CONFIG_NET_VENDOR_WIZNET is not set ++CONFIG_NET_VENDOR_XILINX=y ++# CONFIG_XILINX_AXI_EMAC is not set ++# CONFIG_XILINX_LL_TEMAC is not set ++# CONFIG_FDDI is not set ++# CONFIG_HIPPI is not set ++# CONFIG_NET_SB1000 is not set ++CONFIG_MDIO_DEVICE=m ++CONFIG_MDIO_BUS=m ++# CONFIG_MDIO_BCM_UNIMAC is not set ++# CONFIG_MDIO_BITBANG is not set ++# CONFIG_MDIO_MSCC_MIIM is not set ++# CONFIG_MDIO_THUNDER is not set ++CONFIG_PHYLINK=m ++CONFIG_PHYLIB=m ++CONFIG_SWPHY=y ++# CONFIG_LED_TRIGGER_PHY is not set ++ ++# ++# MII PHY device drivers ++# ++# CONFIG_SFP is not set ++# CONFIG_ADIN_PHY is not set ++# CONFIG_AMD_PHY is not set ++# CONFIG_AQUANTIA_PHY is not set ++# CONFIG_AX88796B_PHY is not set ++# CONFIG_AT803X_PHY is not set ++CONFIG_BCM7XXX_PHY=m ++# CONFIG_BCM87XX_PHY is not set ++CONFIG_BCM_NET_PHYLIB=m ++# CONFIG_BROADCOM_PHY is not set ++# CONFIG_CICADA_PHY is not set ++CONFIG_CORTINA_PHY=m ++# CONFIG_DAVICOM_PHY is not set ++# CONFIG_DP83822_PHY is not set ++# CONFIG_DP83TC811_PHY is not set ++# CONFIG_DP83848_PHY is not set ++# CONFIG_DP83867_PHY is not set ++CONFIG_FIXED_PHY=m ++# CONFIG_ICPLUS_PHY is not set ++# CONFIG_INTEL_XWAY_PHY is not set ++# CONFIG_LSI_ET1011C_PHY is not set ++CONFIG_LXT_PHY=m ++CONFIG_MARVELL_PHY=m ++CONFIG_MARVELL_10G_PHY=m ++# CONFIG_MICREL_PHY is not set ++# CONFIG_MICROCHIP_PHY is not set ++# CONFIG_MICROCHIP_T1_PHY is not set ++# CONFIG_MICROSEMI_PHY is not set ++# CONFIG_NATIONAL_PHY is not set ++# CONFIG_NXP_TJA11XX_PHY is not set ++# CONFIG_QSEMI_PHY 
is not set ++CONFIG_REALTEK_PHY=m ++# CONFIG_RENESAS_PHY is not set ++CONFIG_ROCKCHIP_PHY=m ++# CONFIG_SMSC_PHY is not set ++# CONFIG_STE10XP is not set ++# CONFIG_TERANETICS_PHY is not set ++# CONFIG_VITESSE_PHY is not set ++# CONFIG_XILINX_GMII2RGMII is not set ++# CONFIG_MICREL_KS8995MA is not set ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++CONFIG_USB_NET_DRIVERS=m ++CONFIG_USB_CATC=m ++CONFIG_USB_KAWETH=m ++CONFIG_USB_PEGASUS=m ++CONFIG_USB_RTL8150=m ++CONFIG_USB_RTL8152=m ++# CONFIG_USB_LAN78XX is not set ++CONFIG_USB_USBNET=m ++CONFIG_USB_NET_AX8817X=m ++CONFIG_USB_NET_AX88179_178A=m ++CONFIG_USB_NET_CDCETHER=m ++CONFIG_USB_NET_CDC_EEM=m ++CONFIG_USB_NET_CDC_NCM=m ++CONFIG_USB_NET_HUAWEI_CDC_NCM=m ++CONFIG_USB_NET_CDC_MBIM=m ++CONFIG_USB_NET_DM9601=m ++# CONFIG_USB_NET_SR9700 is not set ++# CONFIG_USB_NET_SR9800 is not set ++# CONFIG_USB_NET_SMSC75XX is not set ++# CONFIG_USB_NET_SMSC95XX is not set ++# CONFIG_USB_NET_GL620A is not set ++# CONFIG_USB_NET_NET1080 is not set ++# CONFIG_USB_NET_PLUSB is not set ++# CONFIG_USB_NET_MCS7830 is not set ++CONFIG_USB_NET_RNDIS_HOST=m ++CONFIG_USB_NET_CDC_SUBSET_ENABLE=m ++CONFIG_USB_NET_CDC_SUBSET=m ++CONFIG_USB_ALI_M5632=y ++CONFIG_USB_AN2720=y ++CONFIG_USB_BELKIN=y ++CONFIG_USB_ARMLINUX=y ++# CONFIG_USB_EPSON2888 is not set ++# CONFIG_USB_KC2190 is not set ++# CONFIG_USB_NET_ZAURUS is not set ++# CONFIG_USB_NET_CX82310_ETH is not set ++# CONFIG_USB_NET_KALMIA is not set ++CONFIG_USB_NET_QMI_WWAN=m ++# CONFIG_USB_HSO is not set ++# CONFIG_USB_NET_INT51X1 is not set ++# CONFIG_USB_IPHETH is not set ++CONFIG_USB_SIERRA_NET=m ++# CONFIG_USB_VL600 is not set ++# CONFIG_USB_NET_CH9200 is not set ++# CONFIG_USB_NET_AQC111 is not set ++CONFIG_WLAN=y ++# CONFIG_WIRELESS_WDS is not set ++CONFIG_WLAN_VENDOR_ADMTEK=y ++# CONFIG_ADM8211 is not set ++CONFIG_ATH_COMMON=m ++CONFIG_WLAN_VENDOR_ATH=y ++# CONFIG_ATH_DEBUG is not set ++CONFIG_ATH5K=m ++# CONFIG_ATH5K_DEBUG is not set ++# CONFIG_ATH5K_TRACER is not set 
++CONFIG_ATH5K_PCI=y ++CONFIG_ATH9K_HW=m ++CONFIG_ATH9K_COMMON=m ++CONFIG_ATH9K_BTCOEX_SUPPORT=y ++CONFIG_ATH9K=m ++CONFIG_ATH9K_PCI=y ++# CONFIG_ATH9K_AHB is not set ++# CONFIG_ATH9K_DEBUGFS is not set ++# CONFIG_ATH9K_DYNACK is not set ++# CONFIG_ATH9K_WOW is not set ++CONFIG_ATH9K_RFKILL=y ++# CONFIG_ATH9K_CHANNEL_CONTEXT is not set ++CONFIG_ATH9K_PCOEM=y ++# CONFIG_ATH9K_PCI_NO_EEPROM is not set ++CONFIG_ATH9K_HTC=m ++# CONFIG_ATH9K_HTC_DEBUGFS is not set ++# CONFIG_ATH9K_HWRNG is not set ++CONFIG_CARL9170=m ++CONFIG_CARL9170_LEDS=y ++CONFIG_CARL9170_WPC=y ++# CONFIG_CARL9170_HWRNG is not set ++# CONFIG_ATH6KL is not set ++CONFIG_AR5523=m ++CONFIG_WIL6210=m ++CONFIG_WIL6210_ISR_COR=y ++CONFIG_WIL6210_TRACING=y ++CONFIG_WIL6210_DEBUGFS=y ++CONFIG_ATH10K=m ++CONFIG_ATH10K_CE=y ++CONFIG_ATH10K_PCI=m ++CONFIG_ATH10K_SDIO=m ++CONFIG_ATH10K_USB=m ++# CONFIG_ATH10K_DEBUG is not set ++# CONFIG_ATH10K_DEBUGFS is not set ++# CONFIG_ATH10K_TRACING is not set ++# CONFIG_WCN36XX is not set ++CONFIG_WLAN_VENDOR_ATMEL=y ++# CONFIG_ATMEL is not set ++# CONFIG_AT76C50X_USB is not set ++CONFIG_WLAN_VENDOR_BROADCOM=y ++CONFIG_B43=m ++CONFIG_B43_BCMA=y ++CONFIG_B43_SSB=y ++CONFIG_B43_BUSES_BCMA_AND_SSB=y ++# CONFIG_B43_BUSES_BCMA is not set ++# CONFIG_B43_BUSES_SSB is not set ++CONFIG_B43_PCI_AUTOSELECT=y ++CONFIG_B43_PCICORE_AUTOSELECT=y ++# CONFIG_B43_SDIO is not set ++CONFIG_B43_BCMA_PIO=y ++CONFIG_B43_PIO=y ++CONFIG_B43_PHY_G=y ++CONFIG_B43_PHY_N=y ++CONFIG_B43_PHY_LP=y ++CONFIG_B43_PHY_HT=y ++CONFIG_B43_LEDS=y ++CONFIG_B43_HWRNG=y ++# CONFIG_B43_DEBUG is not set ++# CONFIG_B43LEGACY is not set ++CONFIG_BRCMUTIL=m ++CONFIG_BRCMSMAC=m ++CONFIG_BRCMFMAC=m ++CONFIG_BRCMFMAC_PROTO_BCDC=y ++CONFIG_BRCMFMAC_PROTO_MSGBUF=y ++CONFIG_BRCMFMAC_SDIO=y ++CONFIG_BRCMFMAC_USB=y ++CONFIG_BRCMFMAC_PCIE=y ++# CONFIG_BRCM_TRACING is not set ++# CONFIG_BRCMDBG is not set ++CONFIG_WLAN_VENDOR_CISCO=y ++# CONFIG_AIRO is not set ++CONFIG_WLAN_VENDOR_INTEL=y ++# CONFIG_IPW2100 is not set ++# 
CONFIG_IPW2200 is not set ++# CONFIG_IWL4965 is not set ++# CONFIG_IWL3945 is not set ++CONFIG_IWLWIFI=m ++CONFIG_IWLWIFI_LEDS=y ++CONFIG_IWLDVM=m ++CONFIG_IWLMVM=m ++CONFIG_IWLWIFI_OPMODE_MODULAR=y ++# CONFIG_IWLWIFI_BCAST_FILTERING is not set ++ ++# ++# Debugging Options ++# ++# CONFIG_IWLWIFI_DEBUG is not set ++# CONFIG_IWLWIFI_DEVICE_TRACING is not set ++# end of Debugging Options ++ ++CONFIG_WLAN_VENDOR_INTERSIL=y ++# CONFIG_HOSTAP is not set ++# CONFIG_HERMES is not set ++# CONFIG_P54_COMMON is not set ++# CONFIG_PRISM54 is not set ++CONFIG_WLAN_VENDOR_MARVELL=y ++# CONFIG_LIBERTAS is not set ++# CONFIG_LIBERTAS_THINFIRM is not set ++# CONFIG_MWIFIEX is not set ++# CONFIG_MWL8K is not set ++CONFIG_WLAN_VENDOR_MEDIATEK=y ++# CONFIG_MT7601U is not set ++CONFIG_MT76_CORE=m ++CONFIG_MT76_LEDS=y ++CONFIG_MT76x02_LIB=m ++# CONFIG_MT76x0U is not set ++# CONFIG_MT76x0E is not set ++CONFIG_MT76x2_COMMON=m ++CONFIG_MT76x2E=m ++# CONFIG_MT76x2U is not set ++# CONFIG_MT7603E is not set ++# CONFIG_MT7615E is not set ++CONFIG_WLAN_VENDOR_RALINK=y ++# CONFIG_RT2X00 is not set ++CONFIG_WLAN_VENDOR_REALTEK=y ++# CONFIG_RTL8180 is not set ++# CONFIG_RTL8187 is not set ++# CONFIG_RTL_CARDS is not set ++# CONFIG_RTL8XXXU is not set ++# CONFIG_RTW88 is not set ++CONFIG_WLAN_VENDOR_RSI=y ++# CONFIG_RSI_91X is not set ++CONFIG_WLAN_VENDOR_ST=y ++# CONFIG_CW1200 is not set ++CONFIG_WLAN_VENDOR_TI=y ++# CONFIG_WL1251 is not set ++# CONFIG_WL12XX is not set ++# CONFIG_WL18XX is not set ++# CONFIG_WLCORE is not set ++CONFIG_WLAN_VENDOR_ZYDAS=y ++# CONFIG_USB_ZD1201 is not set ++# CONFIG_ZD1211RW is not set ++CONFIG_WLAN_VENDOR_QUANTENNA=y ++# CONFIG_QTNFMAC_PCIE is not set ++# CONFIG_MAC80211_HWSIM is not set ++CONFIG_USB_NET_RNDIS_WLAN=m ++# CONFIG_VIRT_WIFI is not set ++ ++# ++# Enable WiMAX (Networking options) to see the WiMAX drivers ++# ++# CONFIG_WAN is not set ++CONFIG_VMXNET3=m ++# CONFIG_FUJITSU_ES is not set ++# CONFIG_THUNDERBOLT_NET is not set ++# CONFIG_NETDEVSIM is not 
set ++CONFIG_NET_FAILOVER=m ++# CONFIG_ISDN is not set ++# CONFIG_NVM is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_LEDS is not set ++CONFIG_INPUT_FF_MEMLESS=y ++CONFIG_INPUT_POLLDEV=m ++CONFIG_INPUT_SPARSEKMAP=m ++CONFIG_INPUT_MATRIXKMAP=m ++ ++# ++# Userland interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++CONFIG_INPUT_KEYBOARD=y ++# CONFIG_KEYBOARD_ADC is not set ++# CONFIG_KEYBOARD_ADP5588 is not set ++# CONFIG_KEYBOARD_ADP5589 is not set ++# CONFIG_KEYBOARD_APPLESPI is not set ++CONFIG_KEYBOARD_ATKBD=m ++# CONFIG_KEYBOARD_QT1050 is not set ++# CONFIG_KEYBOARD_QT1070 is not set ++# CONFIG_KEYBOARD_QT2160 is not set ++CONFIG_KEYBOARD_DLINK_DIR685=m ++# CONFIG_KEYBOARD_LKKBD is not set ++# CONFIG_KEYBOARD_GPIO is not set ++# CONFIG_KEYBOARD_GPIO_POLLED is not set ++# CONFIG_KEYBOARD_TCA6416 is not set ++# CONFIG_KEYBOARD_TCA8418 is not set ++# CONFIG_KEYBOARD_MATRIX is not set ++CONFIG_KEYBOARD_LM8323=m ++# CONFIG_KEYBOARD_LM8333 is not set ++# CONFIG_KEYBOARD_MAX7359 is not set ++# CONFIG_KEYBOARD_MCS is not set ++# CONFIG_KEYBOARD_MPR121 is not set ++# CONFIG_KEYBOARD_NEWTON is not set ++# CONFIG_KEYBOARD_OPENCORES is not set ++# CONFIG_KEYBOARD_SAMSUNG is not set ++# CONFIG_KEYBOARD_STOWAWAY is not set ++# CONFIG_KEYBOARD_SUNKBD is not set ++# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set ++# CONFIG_KEYBOARD_XTKBD is not set ++CONFIG_INPUT_MOUSE=y ++CONFIG_MOUSE_PS2=m ++CONFIG_MOUSE_PS2_ALPS=y ++# CONFIG_MOUSE_PS2_BYD is not set ++CONFIG_MOUSE_PS2_LOGIPS2PP=y ++CONFIG_MOUSE_PS2_SYNAPTICS=y ++CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y ++CONFIG_MOUSE_PS2_CYPRESS=y ++CONFIG_MOUSE_PS2_LIFEBOOK=y ++CONFIG_MOUSE_PS2_TRACKPOINT=y ++# CONFIG_MOUSE_PS2_ELANTECH is not set ++# CONFIG_MOUSE_PS2_SENTELIC is not set ++# CONFIG_MOUSE_PS2_TOUCHKIT is not set ++CONFIG_MOUSE_PS2_FOCALTECH=y ++# CONFIG_MOUSE_PS2_VMMOUSE is not 
set ++CONFIG_MOUSE_PS2_SMBUS=y ++# CONFIG_MOUSE_SERIAL is not set ++CONFIG_MOUSE_APPLETOUCH=m ++CONFIG_MOUSE_BCM5974=m ++# CONFIG_MOUSE_CYAPA is not set ++CONFIG_MOUSE_ELAN_I2C=m ++CONFIG_MOUSE_ELAN_I2C_I2C=y ++# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set ++# CONFIG_MOUSE_VSXXXAA is not set ++# CONFIG_MOUSE_GPIO is not set ++CONFIG_MOUSE_SYNAPTICS_I2C=m ++CONFIG_MOUSE_SYNAPTICS_USB=m ++# CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_PROPERTIES=y ++CONFIG_TOUCHSCREEN_ADS7846=m ++CONFIG_TOUCHSCREEN_AD7877=m ++CONFIG_TOUCHSCREEN_AD7879=m ++CONFIG_TOUCHSCREEN_AD7879_I2C=m ++CONFIG_TOUCHSCREEN_AD7879_SPI=m ++# CONFIG_TOUCHSCREEN_ADC is not set ++CONFIG_TOUCHSCREEN_ATMEL_MXT=m ++CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y ++CONFIG_TOUCHSCREEN_AUO_PIXCIR=m ++CONFIG_TOUCHSCREEN_BU21013=m ++# CONFIG_TOUCHSCREEN_BU21029 is not set ++# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set ++CONFIG_TOUCHSCREEN_CY8CTMG110=m ++CONFIG_TOUCHSCREEN_CYTTSP_CORE=m ++CONFIG_TOUCHSCREEN_CYTTSP_I2C=m ++CONFIG_TOUCHSCREEN_CYTTSP_SPI=m ++CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m ++CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m ++CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m ++CONFIG_TOUCHSCREEN_DYNAPRO=m ++CONFIG_TOUCHSCREEN_HAMPSHIRE=m ++CONFIG_TOUCHSCREEN_EETI=m ++CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m ++CONFIG_TOUCHSCREEN_EXC3000=m ++CONFIG_TOUCHSCREEN_FUJITSU=m ++CONFIG_TOUCHSCREEN_GOODIX=m ++CONFIG_TOUCHSCREEN_HIDEEP=m ++CONFIG_TOUCHSCREEN_ILI210X=m ++CONFIG_TOUCHSCREEN_S6SY761=m ++CONFIG_TOUCHSCREEN_GUNZE=m ++CONFIG_TOUCHSCREEN_EKTF2127=m ++CONFIG_TOUCHSCREEN_ELAN=m ++CONFIG_TOUCHSCREEN_ELO=m ++CONFIG_TOUCHSCREEN_WACOM_W8001=m ++CONFIG_TOUCHSCREEN_WACOM_I2C=m ++CONFIG_TOUCHSCREEN_MAX11801=m ++CONFIG_TOUCHSCREEN_MCS5000=m ++CONFIG_TOUCHSCREEN_MMS114=m ++CONFIG_TOUCHSCREEN_MELFAS_MIP4=m ++CONFIG_TOUCHSCREEN_MTOUCH=m ++CONFIG_TOUCHSCREEN_INEXIO=m ++CONFIG_TOUCHSCREEN_MK712=m ++CONFIG_TOUCHSCREEN_PENMOUNT=m ++CONFIG_TOUCHSCREEN_EDT_FT5X06=m ++CONFIG_TOUCHSCREEN_TOUCHRIGHT=m 
++CONFIG_TOUCHSCREEN_TOUCHWIN=m ++CONFIG_TOUCHSCREEN_PIXCIR=m ++CONFIG_TOUCHSCREEN_WDT87XX_I2C=m ++CONFIG_TOUCHSCREEN_WM97XX=m ++CONFIG_TOUCHSCREEN_WM9705=y ++CONFIG_TOUCHSCREEN_WM9712=y ++CONFIG_TOUCHSCREEN_WM9713=y ++CONFIG_TOUCHSCREEN_USB_COMPOSITE=m ++CONFIG_TOUCHSCREEN_USB_EGALAX=y ++CONFIG_TOUCHSCREEN_USB_PANJIT=y ++CONFIG_TOUCHSCREEN_USB_3M=y ++CONFIG_TOUCHSCREEN_USB_ITM=y ++CONFIG_TOUCHSCREEN_USB_ETURBO=y ++CONFIG_TOUCHSCREEN_USB_GUNZE=y ++CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y ++CONFIG_TOUCHSCREEN_USB_IRTOUCH=y ++CONFIG_TOUCHSCREEN_USB_IDEALTEK=y ++CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y ++CONFIG_TOUCHSCREEN_USB_GOTOP=y ++CONFIG_TOUCHSCREEN_USB_JASTEC=y ++CONFIG_TOUCHSCREEN_USB_ELO=y ++CONFIG_TOUCHSCREEN_USB_E2I=y ++CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y ++CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y ++CONFIG_TOUCHSCREEN_USB_NEXIO=y ++CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y ++CONFIG_TOUCHSCREEN_TOUCHIT213=m ++CONFIG_TOUCHSCREEN_TSC_SERIO=m ++CONFIG_TOUCHSCREEN_TSC200X_CORE=m ++CONFIG_TOUCHSCREEN_TSC2004=m ++CONFIG_TOUCHSCREEN_TSC2005=m ++CONFIG_TOUCHSCREEN_TSC2007=m ++# CONFIG_TOUCHSCREEN_TSC2007_IIO is not set ++CONFIG_TOUCHSCREEN_RM_TS=m ++CONFIG_TOUCHSCREEN_SILEAD=m ++CONFIG_TOUCHSCREEN_SIS_I2C=m ++CONFIG_TOUCHSCREEN_ST1232=m ++CONFIG_TOUCHSCREEN_STMFTS=m ++CONFIG_TOUCHSCREEN_SUR40=m ++CONFIG_TOUCHSCREEN_SURFACE3_SPI=m ++CONFIG_TOUCHSCREEN_SX8654=m ++CONFIG_TOUCHSCREEN_TPS6507X=m ++CONFIG_TOUCHSCREEN_ZET6223=m ++CONFIG_TOUCHSCREEN_ZFORCE=m ++CONFIG_TOUCHSCREEN_ROHM_BU21023=m ++# CONFIG_TOUCHSCREEN_IQS5XX is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_INPUT_AD714X is not set ++# CONFIG_INPUT_BMA150 is not set ++# CONFIG_INPUT_E3X0_BUTTON is not set ++# CONFIG_INPUT_MSM_VIBRATOR is not set ++# CONFIG_INPUT_MMA8450 is not set ++# CONFIG_INPUT_APANEL is not set ++# CONFIG_INPUT_GP2A is not set ++# CONFIG_INPUT_GPIO_BEEPER is not set ++# CONFIG_INPUT_GPIO_DECODER is not set ++# CONFIG_INPUT_GPIO_VIBRA is not set ++# CONFIG_INPUT_ATLAS_BTNS is not set ++# CONFIG_INPUT_ATI_REMOTE2 is 
not set ++# CONFIG_INPUT_KEYSPAN_REMOTE is not set ++# CONFIG_INPUT_KXTJ9 is not set ++# CONFIG_INPUT_POWERMATE is not set ++# CONFIG_INPUT_YEALINK is not set ++# CONFIG_INPUT_CM109 is not set ++CONFIG_INPUT_UINPUT=m ++# CONFIG_INPUT_PCF8574 is not set ++# CONFIG_INPUT_PWM_BEEPER is not set ++CONFIG_INPUT_PWM_VIBRA=m ++# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set ++# CONFIG_INPUT_ADXL34X is not set ++# CONFIG_INPUT_IMS_PCU is not set ++# CONFIG_INPUT_CMA3000 is not set ++# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set ++# CONFIG_INPUT_DRV260X_HAPTICS is not set ++# CONFIG_INPUT_DRV2665_HAPTICS is not set ++# CONFIG_INPUT_DRV2667_HAPTICS is not set ++CONFIG_RMI4_CORE=y ++CONFIG_RMI4_I2C=m ++CONFIG_RMI4_SPI=m ++CONFIG_RMI4_SMB=m ++CONFIG_RMI4_F03=y ++CONFIG_RMI4_F03_SERIO=y ++CONFIG_RMI4_2D_SENSOR=y ++CONFIG_RMI4_F11=y ++CONFIG_RMI4_F12=y ++CONFIG_RMI4_F30=y ++# CONFIG_RMI4_F34 is not set ++CONFIG_RMI4_F55=y ++ ++# ++# Hardware I/O ports ++# ++CONFIG_SERIO=y ++CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y ++CONFIG_SERIO_I8042=m ++CONFIG_SERIO_SERPORT=m ++# CONFIG_SERIO_CT82C710 is not set ++# CONFIG_SERIO_PCIPS2 is not set ++CONFIG_SERIO_LIBPS2=m ++CONFIG_SERIO_RAW=m ++# CONFIG_SERIO_ALTERA_PS2 is not set ++# CONFIG_SERIO_PS2MULT is not set ++# CONFIG_SERIO_ARC_PS2 is not set ++CONFIG_SERIO_GPIO_PS2=m ++# CONFIG_USERIO is not set ++# CONFIG_GAMEPORT is not set ++# end of Hardware I/O ports ++# end of Input device support ++ ++# ++# Character devices ++# ++CONFIG_TTY=y ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_VT_CONSOLE_SLEEP=y ++CONFIG_HW_CONSOLE=y ++CONFIG_VT_HW_CONSOLE_BINDING=y ++CONFIG_UNIX98_PTYS=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_SERIAL_NONSTANDARD is not set ++# CONFIG_NOZOMI is not set ++# CONFIG_N_GSM is not set ++# CONFIG_TRACE_SINK is not set ++# CONFIG_NULL_TTY is not set ++CONFIG_LDISC_AUTOLOAD=y ++# CONFIG_DEVMEM is not set ++# CONFIG_DEVKMEM is not set ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_EARLYCON=y 
++CONFIG_SERIAL_8250=y ++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set ++CONFIG_SERIAL_8250_PNP=y ++CONFIG_SERIAL_8250_FINTEK=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_DMA=y ++CONFIG_SERIAL_8250_PCI=y ++CONFIG_SERIAL_8250_EXAR=m ++CONFIG_SERIAL_8250_NR_UARTS=4 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=3 ++CONFIG_SERIAL_8250_EXTENDED=y ++CONFIG_SERIAL_8250_MANY_PORTS=y ++CONFIG_SERIAL_8250_SHARE_IRQ=y ++CONFIG_SERIAL_8250_DETECT_IRQ=y ++CONFIG_SERIAL_8250_RSA=y ++CONFIG_SERIAL_8250_DWLIB=y ++CONFIG_SERIAL_8250_DW=y ++# CONFIG_SERIAL_8250_RT288X is not set ++CONFIG_SERIAL_8250_LPSS=y ++CONFIG_SERIAL_8250_MID=y ++ ++# ++# Non-8250 serial port support ++# ++# CONFIG_SERIAL_MAX3100 is not set ++# CONFIG_SERIAL_MAX310X is not set ++# CONFIG_SERIAL_UARTLITE is not set ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++# CONFIG_SERIAL_JSM is not set ++# CONFIG_SERIAL_SCCNXP is not set ++# CONFIG_SERIAL_SC16IS7XX is not set ++# CONFIG_SERIAL_ALTERA_JTAGUART is not set ++# CONFIG_SERIAL_ALTERA_UART is not set ++# CONFIG_SERIAL_IFX6X60 is not set ++# CONFIG_SERIAL_ARC is not set ++# CONFIG_SERIAL_RP2 is not set ++# CONFIG_SERIAL_FSL_LPUART is not set ++# CONFIG_SERIAL_FSL_LINFLEXUART is not set ++# end of Serial drivers ++ ++CONFIG_SERIAL_MCTRL_GPIO=y ++CONFIG_SERIAL_DEV_BUS=m ++CONFIG_TTY_PRINTK=y ++CONFIG_TTY_PRINTK_LEVEL=6 ++CONFIG_HVC_DRIVER=y ++CONFIG_VIRTIO_CONSOLE=y ++CONFIG_IPMI_HANDLER=m ++CONFIG_IPMI_DMI_DECODE=y ++CONFIG_IPMI_PLAT_DATA=y ++# CONFIG_IPMI_PANIC_EVENT is not set ++CONFIG_IPMI_DEVICE_INTERFACE=m ++CONFIG_IPMI_SI=m ++CONFIG_IPMI_SSIF=m ++CONFIG_IPMI_WATCHDOG=m ++CONFIG_IPMI_POWEROFF=m ++# CONFIG_IPMB_DEVICE_INTERFACE is not set ++CONFIG_HW_RANDOM=y ++# CONFIG_HW_RANDOM_TIMERIOMEM is not set ++CONFIG_HW_RANDOM_INTEL=y ++CONFIG_HW_RANDOM_AMD=y ++# CONFIG_HW_RANDOM_VIA is not set ++CONFIG_HW_RANDOM_VIRTIO=y ++CONFIG_NVRAM=m ++# CONFIG_APPLICOM is not set ++# CONFIG_MWAVE is not set ++# CONFIG_RAW_DRIVER is not set ++CONFIG_HPET=y 
++CONFIG_HPET_MMAP=y ++CONFIG_HPET_MMAP_DEFAULT=y ++# CONFIG_HANGCHECK_TIMER is not set ++# CONFIG_TCG_TPM is not set ++# CONFIG_TELCLOCK is not set ++CONFIG_DEVPORT=y ++# CONFIG_XILLYBUS is not set ++# end of Character devices ++ ++# CONFIG_RANDOM_TRUST_CPU is not set ++# CONFIG_RANDOM_TRUST_BOOTLOADER is not set ++ ++# ++# I2C support ++# ++CONFIG_I2C=y ++CONFIG_ACPI_I2C_OPREGION=y ++CONFIG_I2C_BOARDINFO=y ++# CONFIG_I2C_COMPAT is not set ++# CONFIG_I2C_CHARDEV is not set ++CONFIG_I2C_MUX=m ++ ++# ++# Multiplexer I2C Chip support ++# ++CONFIG_I2C_MUX_GPIO=m ++CONFIG_I2C_MUX_LTC4306=m ++CONFIG_I2C_MUX_PCA9541=m ++CONFIG_I2C_MUX_PCA954x=m ++CONFIG_I2C_MUX_REG=m ++CONFIG_I2C_MUX_MLXCPLD=m ++# end of Multiplexer I2C Chip support ++ ++CONFIG_I2C_HELPER_AUTO=y ++CONFIG_I2C_SMBUS=m ++CONFIG_I2C_ALGOBIT=y ++ ++# ++# I2C Hardware Bus support ++# ++ ++# ++# PC SMBus host controller drivers ++# ++# CONFIG_I2C_ALI1535 is not set ++# CONFIG_I2C_ALI1563 is not set ++# CONFIG_I2C_ALI15X3 is not set ++CONFIG_I2C_AMD756=m ++CONFIG_I2C_AMD756_S4882=m ++CONFIG_I2C_AMD8111=m ++# CONFIG_I2C_AMD_MP2 is not set ++CONFIG_I2C_I801=m ++CONFIG_I2C_ISCH=m ++CONFIG_I2C_ISMT=m ++CONFIG_I2C_PIIX4=m ++# CONFIG_I2C_NFORCE2 is not set ++# CONFIG_I2C_NVIDIA_GPU is not set ++# CONFIG_I2C_SIS5595 is not set ++# CONFIG_I2C_SIS630 is not set ++# CONFIG_I2C_SIS96X is not set ++# CONFIG_I2C_VIA is not set ++# CONFIG_I2C_VIAPRO is not set ++ ++# ++# ACPI drivers ++# ++CONFIG_I2C_SCMI=y ++ ++# ++# I2C system bus drivers (mostly embedded / system-on-chip) ++# ++# CONFIG_I2C_CBUS_GPIO is not set ++CONFIG_I2C_DESIGNWARE_CORE=y ++CONFIG_I2C_DESIGNWARE_PLATFORM=y ++CONFIG_I2C_DESIGNWARE_SLAVE=y ++CONFIG_I2C_DESIGNWARE_PCI=y ++CONFIG_I2C_DESIGNWARE_BAYTRAIL=y ++# CONFIG_I2C_EMEV2 is not set ++# CONFIG_I2C_GPIO is not set ++# CONFIG_I2C_OCORES is not set ++# CONFIG_I2C_PCA_PLATFORM is not set ++# CONFIG_I2C_SIMTEC is not set ++# CONFIG_I2C_XILINX is not set ++ ++# ++# External I2C/SMBus adapter drivers ++# ++# 
CONFIG_I2C_DIOLAN_U2C is not set ++# CONFIG_I2C_PARPORT_LIGHT is not set ++# CONFIG_I2C_ROBOTFUZZ_OSIF is not set ++# CONFIG_I2C_TAOS_EVM is not set ++# CONFIG_I2C_TINY_USB is not set ++ ++# ++# Other I2C/SMBus bus drivers ++# ++# CONFIG_I2C_MLXCPLD is not set ++# end of I2C Hardware Bus support ++ ++# CONFIG_I2C_STUB is not set ++CONFIG_I2C_SLAVE=y ++CONFIG_I2C_SLAVE_EEPROM=m ++# CONFIG_I2C_DEBUG_CORE is not set ++# CONFIG_I2C_DEBUG_ALGO is not set ++# CONFIG_I2C_DEBUG_BUS is not set ++# end of I2C support ++ ++# CONFIG_I3C is not set ++CONFIG_SPI=y ++# CONFIG_SPI_DEBUG is not set ++CONFIG_SPI_MASTER=y ++# CONFIG_SPI_MEM is not set ++ ++# ++# SPI Master Controller Drivers ++# ++CONFIG_SPI_ALTERA=m ++# CONFIG_SPI_AXI_SPI_ENGINE is not set ++CONFIG_SPI_BITBANG=m ++CONFIG_SPI_CADENCE=m ++CONFIG_SPI_DESIGNWARE=m ++CONFIG_SPI_DW_PCI=m ++CONFIG_SPI_DW_MID_DMA=y ++CONFIG_SPI_DW_MMIO=m ++# CONFIG_SPI_NXP_FLEXSPI is not set ++# CONFIG_SPI_GPIO is not set ++# CONFIG_SPI_OC_TINY is not set ++CONFIG_SPI_PXA2XX=m ++CONFIG_SPI_PXA2XX_PCI=m ++# CONFIG_SPI_ROCKCHIP is not set ++CONFIG_SPI_SC18IS602=m ++# CONFIG_SPI_SIFIVE is not set ++# CONFIG_SPI_MXIC is not set ++CONFIG_SPI_XCOMM=m ++CONFIG_SPI_XILINX=m ++# CONFIG_SPI_ZYNQMP_GQSPI is not set ++ ++# ++# SPI Protocol Masters ++# ++# CONFIG_SPI_SPIDEV is not set ++# CONFIG_SPI_LOOPBACK_TEST is not set ++# CONFIG_SPI_TLE62X0 is not set ++# CONFIG_SPI_SLAVE is not set ++# CONFIG_SPMI is not set ++# CONFIG_HSI is not set ++CONFIG_PPS=y ++# CONFIG_PPS_DEBUG is not set ++ ++# ++# PPS clients support ++# ++# CONFIG_PPS_CLIENT_KTIMER is not set ++# CONFIG_PPS_CLIENT_LDISC is not set ++# CONFIG_PPS_CLIENT_GPIO is not set ++ ++# ++# PPS generators support ++# ++ ++# ++# PTP clock support ++# ++CONFIG_PTP_1588_CLOCK=y ++# CONFIG_DP83640_PHY is not set ++# end of PTP clock support ++ ++CONFIG_PINCTRL=y ++CONFIG_PINMUX=y ++CONFIG_PINCONF=y ++CONFIG_GENERIC_PINCONF=y ++# CONFIG_DEBUG_PINCTRL is not set ++CONFIG_PINCTRL_AMD=m ++# 
CONFIG_PINCTRL_MCP23S08 is not set ++# CONFIG_PINCTRL_SX150X is not set ++CONFIG_PINCTRL_BAYTRAIL=y ++CONFIG_PINCTRL_CHERRYVIEW=m ++CONFIG_PINCTRL_INTEL=m ++CONFIG_PINCTRL_BROXTON=m ++CONFIG_PINCTRL_CANNONLAKE=m ++CONFIG_PINCTRL_CEDARFORK=m ++CONFIG_PINCTRL_DENVERTON=m ++CONFIG_PINCTRL_GEMINILAKE=m ++# CONFIG_PINCTRL_ICELAKE is not set ++CONFIG_PINCTRL_LEWISBURG=m ++CONFIG_PINCTRL_SUNRISEPOINT=m ++CONFIG_GPIOLIB=y ++CONFIG_GPIOLIB_FASTPATH_LIMIT=512 ++CONFIG_GPIO_ACPI=y ++CONFIG_GPIOLIB_IRQCHIP=y ++# CONFIG_DEBUG_GPIO is not set ++# CONFIG_GPIO_SYSFS is not set ++ ++# ++# Memory mapped GPIO drivers ++# ++# CONFIG_GPIO_AMDPT is not set ++# CONFIG_GPIO_DWAPB is not set ++# CONFIG_GPIO_EXAR is not set ++# CONFIG_GPIO_GENERIC_PLATFORM is not set ++CONFIG_GPIO_ICH=m ++CONFIG_GPIO_LYNXPOINT=m ++# CONFIG_GPIO_MB86S7X is not set ++# CONFIG_GPIO_VX855 is not set ++# CONFIG_GPIO_XILINX is not set ++# CONFIG_GPIO_AMD_FCH is not set ++# end of Memory mapped GPIO drivers ++ ++# ++# Port-mapped I/O GPIO drivers ++# ++# CONFIG_GPIO_F7188X is not set ++# CONFIG_GPIO_IT87 is not set ++# CONFIG_GPIO_SCH is not set ++# CONFIG_GPIO_SCH311X is not set ++# CONFIG_GPIO_WINBOND is not set ++# CONFIG_GPIO_WS16C48 is not set ++# end of Port-mapped I/O GPIO drivers ++ ++# ++# I2C GPIO expanders ++# ++# CONFIG_GPIO_ADP5588 is not set ++# CONFIG_GPIO_MAX7300 is not set ++# CONFIG_GPIO_MAX732X is not set ++# CONFIG_GPIO_PCA953X is not set ++# CONFIG_GPIO_PCF857X is not set ++# CONFIG_GPIO_TPIC2810 is not set ++# end of I2C GPIO expanders ++ ++# ++# MFD GPIO expanders ++# ++# CONFIG_GPIO_BD9571MWV is not set ++CONFIG_GPIO_CRYSTAL_COVE=m ++CONFIG_GPIO_WHISKEY_COVE=m ++# end of MFD GPIO expanders ++ ++# ++# PCI GPIO expanders ++# ++# CONFIG_GPIO_AMD8111 is not set ++# CONFIG_GPIO_BT8XX is not set ++# CONFIG_GPIO_ML_IOH is not set ++# CONFIG_GPIO_PCI_IDIO_16 is not set ++# CONFIG_GPIO_PCIE_IDIO_24 is not set ++# CONFIG_GPIO_RDC321X is not set ++# end of PCI GPIO expanders ++ ++# ++# SPI GPIO 
expanders ++# ++# CONFIG_GPIO_MAX3191X is not set ++# CONFIG_GPIO_MAX7301 is not set ++# CONFIG_GPIO_MC33880 is not set ++# CONFIG_GPIO_PISOSR is not set ++# CONFIG_GPIO_XRA1403 is not set ++# end of SPI GPIO expanders ++ ++# ++# USB GPIO expanders ++# ++# end of USB GPIO expanders ++ ++# CONFIG_GPIO_MOCKUP is not set ++# CONFIG_W1 is not set ++# CONFIG_POWER_AVS is not set ++# CONFIG_POWER_RESET is not set ++CONFIG_POWER_SUPPLY=y ++# CONFIG_POWER_SUPPLY_DEBUG is not set ++CONFIG_POWER_SUPPLY_HWMON=y ++# CONFIG_PDA_POWER is not set ++# CONFIG_GENERIC_ADC_BATTERY is not set ++# CONFIG_TEST_POWER is not set ++# CONFIG_CHARGER_ADP5061 is not set ++# CONFIG_BATTERY_DS2780 is not set ++# CONFIG_BATTERY_DS2781 is not set ++# CONFIG_BATTERY_DS2782 is not set ++# CONFIG_BATTERY_SBS is not set ++# CONFIG_CHARGER_SBS is not set ++CONFIG_MANAGER_SBS=m ++# CONFIG_BATTERY_BQ27XXX is not set ++# CONFIG_BATTERY_MAX17040 is not set ++# CONFIG_BATTERY_MAX17042 is not set ++# CONFIG_CHARGER_ISP1704 is not set ++# CONFIG_CHARGER_MAX8903 is not set ++# CONFIG_CHARGER_LP8727 is not set ++# CONFIG_CHARGER_GPIO is not set ++# CONFIG_CHARGER_LT3651 is not set ++# CONFIG_CHARGER_BQ2415X is not set ++# CONFIG_CHARGER_BQ24190 is not set ++# CONFIG_CHARGER_BQ24257 is not set ++# CONFIG_CHARGER_BQ24735 is not set ++# CONFIG_CHARGER_BQ25890 is not set ++# CONFIG_CHARGER_SMB347 is not set ++# CONFIG_BATTERY_GAUGE_LTC2941 is not set ++# CONFIG_CHARGER_RT9455 is not set ++CONFIG_HWMON=y ++# CONFIG_HWMON_DEBUG_CHIP is not set ++ ++# ++# Native drivers ++# ++# CONFIG_SENSORS_ABITUGURU is not set ++# CONFIG_SENSORS_ABITUGURU3 is not set ++CONFIG_SENSORS_AD7314=m ++# CONFIG_SENSORS_AD7414 is not set ++# CONFIG_SENSORS_AD7418 is not set ++# CONFIG_SENSORS_ADM1021 is not set ++# CONFIG_SENSORS_ADM1025 is not set ++# CONFIG_SENSORS_ADM1026 is not set ++# CONFIG_SENSORS_ADM1029 is not set ++# CONFIG_SENSORS_ADM1031 is not set ++# CONFIG_SENSORS_ADM9240 is not set ++# CONFIG_SENSORS_ADT7310 is not set ++# 
CONFIG_SENSORS_ADT7410 is not set ++# CONFIG_SENSORS_ADT7411 is not set ++# CONFIG_SENSORS_ADT7462 is not set ++# CONFIG_SENSORS_ADT7470 is not set ++# CONFIG_SENSORS_ADT7475 is not set ++# CONFIG_SENSORS_AS370 is not set ++# CONFIG_SENSORS_ASC7621 is not set ++# CONFIG_SENSORS_K8TEMP is not set ++# CONFIG_SENSORS_K10TEMP is not set ++CONFIG_SENSORS_FAM15H_POWER=m ++CONFIG_SENSORS_APPLESMC=m ++# CONFIG_SENSORS_ASB100 is not set ++# CONFIG_SENSORS_ASPEED is not set ++# CONFIG_SENSORS_ATXP1 is not set ++# CONFIG_SENSORS_DS620 is not set ++# CONFIG_SENSORS_DS1621 is not set ++# CONFIG_SENSORS_DELL_SMM is not set ++# CONFIG_SENSORS_I5K_AMB is not set ++# CONFIG_SENSORS_F71805F is not set ++# CONFIG_SENSORS_F71882FG is not set ++# CONFIG_SENSORS_F75375S is not set ++# CONFIG_SENSORS_FSCHMD is not set ++# CONFIG_SENSORS_GL518SM is not set ++# CONFIG_SENSORS_GL520SM is not set ++# CONFIG_SENSORS_G760A is not set ++# CONFIG_SENSORS_G762 is not set ++# CONFIG_SENSORS_HIH6130 is not set ++# CONFIG_SENSORS_IBMAEM is not set ++# CONFIG_SENSORS_IBMPEX is not set ++# CONFIG_SENSORS_IIO_HWMON is not set ++CONFIG_SENSORS_I5500=m ++CONFIG_SENSORS_CORETEMP=y ++# CONFIG_SENSORS_IT87 is not set ++# CONFIG_SENSORS_JC42 is not set ++# CONFIG_SENSORS_POWR1220 is not set ++# CONFIG_SENSORS_LINEAGE is not set ++# CONFIG_SENSORS_LTC2945 is not set ++# CONFIG_SENSORS_LTC2990 is not set ++# CONFIG_SENSORS_LTC4151 is not set ++# CONFIG_SENSORS_LTC4215 is not set ++# CONFIG_SENSORS_LTC4222 is not set ++# CONFIG_SENSORS_LTC4245 is not set ++# CONFIG_SENSORS_LTC4260 is not set ++# CONFIG_SENSORS_LTC4261 is not set ++# CONFIG_SENSORS_MAX1111 is not set ++# CONFIG_SENSORS_MAX16065 is not set ++# CONFIG_SENSORS_MAX1619 is not set ++# CONFIG_SENSORS_MAX1668 is not set ++# CONFIG_SENSORS_MAX197 is not set ++# CONFIG_SENSORS_MAX31722 is not set ++# CONFIG_SENSORS_MAX6621 is not set ++# CONFIG_SENSORS_MAX6639 is not set ++# CONFIG_SENSORS_MAX6642 is not set ++# CONFIG_SENSORS_MAX6650 is not set ++# 
CONFIG_SENSORS_MAX6697 is not set ++# CONFIG_SENSORS_MAX31790 is not set ++# CONFIG_SENSORS_MCP3021 is not set ++# CONFIG_SENSORS_MLXREG_FAN is not set ++# CONFIG_SENSORS_TC654 is not set ++# CONFIG_SENSORS_ADCXX is not set ++# CONFIG_SENSORS_LM63 is not set ++# CONFIG_SENSORS_LM70 is not set ++# CONFIG_SENSORS_LM73 is not set ++# CONFIG_SENSORS_LM75 is not set ++# CONFIG_SENSORS_LM77 is not set ++# CONFIG_SENSORS_LM78 is not set ++# CONFIG_SENSORS_LM80 is not set ++# CONFIG_SENSORS_LM83 is not set ++# CONFIG_SENSORS_LM85 is not set ++# CONFIG_SENSORS_LM87 is not set ++# CONFIG_SENSORS_LM90 is not set ++# CONFIG_SENSORS_LM92 is not set ++# CONFIG_SENSORS_LM93 is not set ++# CONFIG_SENSORS_LM95234 is not set ++# CONFIG_SENSORS_LM95241 is not set ++# CONFIG_SENSORS_LM95245 is not set ++# CONFIG_SENSORS_PC87360 is not set ++# CONFIG_SENSORS_PC87427 is not set ++# CONFIG_SENSORS_NTC_THERMISTOR is not set ++# CONFIG_SENSORS_NCT6683 is not set ++# CONFIG_SENSORS_NCT6775 is not set ++# CONFIG_SENSORS_NCT7802 is not set ++# CONFIG_SENSORS_NCT7904 is not set ++# CONFIG_SENSORS_NPCM7XX is not set ++# CONFIG_SENSORS_PCF8591 is not set ++# CONFIG_PMBUS is not set ++# CONFIG_SENSORS_SHT15 is not set ++# CONFIG_SENSORS_SHT21 is not set ++# CONFIG_SENSORS_SHT3x is not set ++# CONFIG_SENSORS_SHTC1 is not set ++# CONFIG_SENSORS_SIS5595 is not set ++# CONFIG_SENSORS_DME1737 is not set ++# CONFIG_SENSORS_EMC1403 is not set ++# CONFIG_SENSORS_EMC2103 is not set ++# CONFIG_SENSORS_EMC6W201 is not set ++# CONFIG_SENSORS_SMSC47M1 is not set ++# CONFIG_SENSORS_SMSC47M192 is not set ++# CONFIG_SENSORS_SMSC47B397 is not set ++# CONFIG_SENSORS_STTS751 is not set ++# CONFIG_SENSORS_SMM665 is not set ++# CONFIG_SENSORS_ADC128D818 is not set ++# CONFIG_SENSORS_ADS7828 is not set ++# CONFIG_SENSORS_ADS7871 is not set ++# CONFIG_SENSORS_AMC6821 is not set ++# CONFIG_SENSORS_INA209 is not set ++# CONFIG_SENSORS_INA2XX is not set ++# CONFIG_SENSORS_INA3221 is not set ++# CONFIG_SENSORS_TC74 is not 
set ++# CONFIG_SENSORS_THMC50 is not set ++# CONFIG_SENSORS_TMP102 is not set ++# CONFIG_SENSORS_TMP103 is not set ++# CONFIG_SENSORS_TMP108 is not set ++# CONFIG_SENSORS_TMP401 is not set ++# CONFIG_SENSORS_TMP421 is not set ++# CONFIG_SENSORS_VIA_CPUTEMP is not set ++# CONFIG_SENSORS_VIA686A is not set ++# CONFIG_SENSORS_VT1211 is not set ++# CONFIG_SENSORS_VT8231 is not set ++# CONFIG_SENSORS_W83773G is not set ++# CONFIG_SENSORS_W83781D is not set ++# CONFIG_SENSORS_W83791D is not set ++# CONFIG_SENSORS_W83792D is not set ++# CONFIG_SENSORS_W83793 is not set ++# CONFIG_SENSORS_W83795 is not set ++# CONFIG_SENSORS_W83L785TS is not set ++# CONFIG_SENSORS_W83L786NG is not set ++# CONFIG_SENSORS_W83627HF is not set ++# CONFIG_SENSORS_W83627EHF is not set ++# CONFIG_SENSORS_XGENE is not set ++ ++# ++# ACPI drivers ++# ++CONFIG_SENSORS_ACPI_POWER=y ++# CONFIG_SENSORS_ATK0110 is not set ++CONFIG_THERMAL=y ++CONFIG_THERMAL_STATISTICS=y ++CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100 ++CONFIG_THERMAL_HWMON=y ++CONFIG_THERMAL_WRITABLE_TRIPS=y ++CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y ++# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set ++# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set ++# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set ++CONFIG_THERMAL_GOV_FAIR_SHARE=y ++CONFIG_THERMAL_GOV_STEP_WISE=y ++CONFIG_THERMAL_GOV_BANG_BANG=y ++CONFIG_THERMAL_GOV_USER_SPACE=y ++# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set ++# CONFIG_CLOCK_THERMAL is not set ++# CONFIG_DEVFREQ_THERMAL is not set ++CONFIG_THERMAL_EMULATION=y ++ ++# ++# Intel thermal drivers ++# ++CONFIG_INTEL_POWERCLAMP=y ++CONFIG_X86_PKG_TEMP_THERMAL=y ++CONFIG_INTEL_SOC_DTS_IOSF_CORE=m ++CONFIG_INTEL_SOC_DTS_THERMAL=m ++ ++# ++# ACPI INT340X thermal drivers ++# ++CONFIG_INT340X_THERMAL=m ++CONFIG_ACPI_THERMAL_REL=m ++CONFIG_INT3406_THERMAL=m ++CONFIG_PROC_THERMAL_MMIO_RAPL=y ++# end of ACPI INT340X thermal drivers ++ ++CONFIG_INTEL_BXT_PMIC_THERMAL=m ++CONFIG_INTEL_PCH_THERMAL=m ++# end of Intel 
thermal drivers ++ ++# CONFIG_GENERIC_ADC_THERMAL is not set ++# CONFIG_WATCHDOG is not set ++CONFIG_SSB_POSSIBLE=y ++CONFIG_SSB=m ++CONFIG_SSB_SPROM=y ++CONFIG_SSB_BLOCKIO=y ++CONFIG_SSB_PCIHOST_POSSIBLE=y ++CONFIG_SSB_PCIHOST=y ++CONFIG_SSB_B43_PCI_BRIDGE=y ++CONFIG_SSB_SDIOHOST_POSSIBLE=y ++# CONFIG_SSB_SDIOHOST is not set ++CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y ++CONFIG_SSB_DRIVER_PCICORE=y ++# CONFIG_SSB_DRIVER_GPIO is not set ++CONFIG_BCMA_POSSIBLE=y ++CONFIG_BCMA=m ++CONFIG_BCMA_BLOCKIO=y ++CONFIG_BCMA_HOST_PCI_POSSIBLE=y ++CONFIG_BCMA_HOST_PCI=y ++# CONFIG_BCMA_HOST_SOC is not set ++CONFIG_BCMA_DRIVER_PCI=y ++# CONFIG_BCMA_DRIVER_GMAC_CMN is not set ++# CONFIG_BCMA_DRIVER_GPIO is not set ++# CONFIG_BCMA_DEBUG is not set ++ ++# ++# Multifunction device drivers ++# ++CONFIG_MFD_CORE=y ++# CONFIG_MFD_AS3711 is not set ++# CONFIG_PMIC_ADP5520 is not set ++# CONFIG_MFD_AAT2870_CORE is not set ++CONFIG_MFD_BCM590XX=m ++CONFIG_MFD_BD9571MWV=m ++# CONFIG_MFD_AXP20X_I2C is not set ++# CONFIG_MFD_MADERA is not set ++# CONFIG_PMIC_DA903X is not set ++# CONFIG_MFD_DA9052_SPI is not set ++# CONFIG_MFD_DA9052_I2C is not set ++# CONFIG_MFD_DA9055 is not set ++# CONFIG_MFD_DA9062 is not set ++# CONFIG_MFD_DA9063 is not set ++# CONFIG_MFD_DA9150 is not set ++# CONFIG_MFD_DLN2 is not set ++# CONFIG_MFD_MC13XXX_SPI is not set ++# CONFIG_MFD_MC13XXX_I2C is not set ++# CONFIG_HTC_PASIC3 is not set ++# CONFIG_HTC_I2CPLD is not set ++# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set ++CONFIG_LPC_ICH=m ++CONFIG_LPC_SCH=m ++CONFIG_INTEL_SOC_PMIC=y ++CONFIG_INTEL_SOC_PMIC_BXTWC=m ++# CONFIG_INTEL_SOC_PMIC_CHTWC is not set ++CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m ++CONFIG_MFD_INTEL_LPSS=y ++CONFIG_MFD_INTEL_LPSS_ACPI=y ++CONFIG_MFD_INTEL_LPSS_PCI=y ++# CONFIG_MFD_JANZ_CMODIO is not set ++# CONFIG_MFD_KEMPLD is not set ++# CONFIG_MFD_88PM800 is not set ++# CONFIG_MFD_88PM805 is not set ++# CONFIG_MFD_88PM860X is not set ++# CONFIG_MFD_MAX14577 is not set ++# CONFIG_MFD_MAX77693 is not set ++# 
CONFIG_MFD_MAX77843 is not set ++# CONFIG_MFD_MAX8907 is not set ++# CONFIG_MFD_MAX8925 is not set ++# CONFIG_MFD_MAX8997 is not set ++# CONFIG_MFD_MAX8998 is not set ++# CONFIG_MFD_MT6397 is not set ++# CONFIG_MFD_MENF21BMC is not set ++# CONFIG_EZX_PCAP is not set ++# CONFIG_MFD_VIPERBOARD is not set ++# CONFIG_MFD_RETU is not set ++# CONFIG_MFD_PCF50633 is not set ++# CONFIG_UCB1400_CORE is not set ++# CONFIG_MFD_RDC321X is not set ++# CONFIG_MFD_RT5033 is not set ++# CONFIG_MFD_RC5T583 is not set ++# CONFIG_MFD_SEC_CORE is not set ++# CONFIG_MFD_SI476X_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_MFD_SKY81452 is not set ++# CONFIG_MFD_SMSC is not set ++# CONFIG_ABX500_CORE is not set ++# CONFIG_MFD_SYSCON is not set ++# CONFIG_MFD_TI_AM335X_TSCADC is not set ++# CONFIG_MFD_LP3943 is not set ++# CONFIG_MFD_LP8788 is not set ++# CONFIG_MFD_TI_LMU is not set ++# CONFIG_MFD_PALMAS is not set ++# CONFIG_TPS6105X is not set ++# CONFIG_TPS65010 is not set ++# CONFIG_TPS6507X is not set ++# CONFIG_MFD_TPS65086 is not set ++# CONFIG_MFD_TPS65090 is not set ++# CONFIG_MFD_TPS68470 is not set ++# CONFIG_MFD_TI_LP873X is not set ++# CONFIG_MFD_TPS6586X is not set ++# CONFIG_MFD_TPS65910 is not set ++# CONFIG_MFD_TPS65912_I2C is not set ++# CONFIG_MFD_TPS65912_SPI is not set ++# CONFIG_MFD_TPS80031 is not set ++# CONFIG_TWL4030_CORE is not set ++# CONFIG_TWL6040_CORE is not set ++# CONFIG_MFD_WL1273_CORE is not set ++# CONFIG_MFD_LM3533 is not set ++# CONFIG_MFD_TQMX86 is not set ++# CONFIG_MFD_VX855 is not set ++# CONFIG_MFD_ARIZONA_I2C is not set ++# CONFIG_MFD_ARIZONA_SPI is not set ++# CONFIG_MFD_WM8400 is not set ++# CONFIG_MFD_WM831X_I2C is not set ++# CONFIG_MFD_WM831X_SPI is not set ++# CONFIG_MFD_WM8350_I2C is not set ++# CONFIG_MFD_WM8994 is not set ++# CONFIG_RAVE_SP_CORE is not set ++# end of Multifunction device drivers ++ ++# CONFIG_REGULATOR is not set ++# CONFIG_RC_CORE is not set ++CONFIG_MEDIA_SUPPORT=m ++ ++# ++# Multimedia core support ++# 
++CONFIG_MEDIA_CAMERA_SUPPORT=y ++# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set ++# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set ++# CONFIG_MEDIA_RADIO_SUPPORT is not set ++# CONFIG_MEDIA_SDR_SUPPORT is not set ++# CONFIG_MEDIA_CEC_SUPPORT is not set ++# CONFIG_MEDIA_CONTROLLER is not set ++CONFIG_VIDEO_DEV=m ++CONFIG_VIDEO_V4L2=m ++CONFIG_VIDEO_V4L2_I2C=y ++# CONFIG_VIDEO_ADV_DEBUG is not set ++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set ++ ++# ++# Media drivers ++# ++CONFIG_MEDIA_USB_SUPPORT=y ++ ++# ++# Webcam devices ++# ++CONFIG_USB_VIDEO_CLASS=m ++CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y ++# CONFIG_USB_GSPCA is not set ++# CONFIG_USB_PWC is not set ++# CONFIG_VIDEO_CPIA2 is not set ++# CONFIG_USB_ZR364XX is not set ++# CONFIG_USB_STKWEBCAM is not set ++# CONFIG_USB_S2255 is not set ++# CONFIG_VIDEO_USBTV is not set ++ ++# ++# Webcam, TV (analog/digital) USB devices ++# ++# CONFIG_VIDEO_EM28XX is not set ++# CONFIG_MEDIA_PCI_SUPPORT is not set ++# CONFIG_V4L_PLATFORM_DRIVERS is not set ++# CONFIG_V4L_MEM2MEM_DRIVERS is not set ++# CONFIG_V4L_TEST_DRIVERS is not set ++ ++# ++# Supported MMC/SDIO adapters ++# ++# CONFIG_CYPRESS_FIRMWARE is not set ++CONFIG_VIDEOBUF2_CORE=m ++CONFIG_VIDEOBUF2_V4L2=m ++CONFIG_VIDEOBUF2_MEMOPS=m ++CONFIG_VIDEOBUF2_VMALLOC=m ++CONFIG_VIDEOBUF2_DMA_SG=m ++ ++# ++# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) ++# ++CONFIG_MEDIA_SUBDRV_AUTOSELECT=y ++ ++# ++# I2C Encoders, decoders, sensors and other helper chips ++# ++ ++# ++# Audio decoders, processors and mixers ++# ++# CONFIG_VIDEO_TVAUDIO is not set ++# CONFIG_VIDEO_TDA7432 is not set ++# CONFIG_VIDEO_TDA9840 is not set ++# CONFIG_VIDEO_TEA6415C is not set ++# CONFIG_VIDEO_TEA6420 is not set ++# CONFIG_VIDEO_MSP3400 is not set ++# CONFIG_VIDEO_CS3308 is not set ++# CONFIG_VIDEO_CS5345 is not set ++# CONFIG_VIDEO_CS53L32A is not set ++# CONFIG_VIDEO_TLV320AIC23B is not set ++# CONFIG_VIDEO_UDA1342 is not set ++# CONFIG_VIDEO_WM8775 is not set ++# CONFIG_VIDEO_WM8739 is 
not set ++# CONFIG_VIDEO_VP27SMPX is not set ++# CONFIG_VIDEO_SONY_BTF_MPX is not set ++ ++# ++# RDS decoders ++# ++# CONFIG_VIDEO_SAA6588 is not set ++ ++# ++# Video decoders ++# ++# CONFIG_VIDEO_ADV7183 is not set ++# CONFIG_VIDEO_BT819 is not set ++# CONFIG_VIDEO_BT856 is not set ++# CONFIG_VIDEO_BT866 is not set ++# CONFIG_VIDEO_KS0127 is not set ++# CONFIG_VIDEO_ML86V7667 is not set ++# CONFIG_VIDEO_SAA7110 is not set ++# CONFIG_VIDEO_SAA711X is not set ++# CONFIG_VIDEO_TVP514X is not set ++# CONFIG_VIDEO_TVP5150 is not set ++# CONFIG_VIDEO_TVP7002 is not set ++# CONFIG_VIDEO_TW2804 is not set ++# CONFIG_VIDEO_TW9903 is not set ++# CONFIG_VIDEO_TW9906 is not set ++# CONFIG_VIDEO_TW9910 is not set ++# CONFIG_VIDEO_VPX3220 is not set ++ ++# ++# Video and audio decoders ++# ++# CONFIG_VIDEO_SAA717X is not set ++# CONFIG_VIDEO_CX25840 is not set ++ ++# ++# Video encoders ++# ++# CONFIG_VIDEO_SAA7127 is not set ++# CONFIG_VIDEO_SAA7185 is not set ++# CONFIG_VIDEO_ADV7170 is not set ++# CONFIG_VIDEO_ADV7175 is not set ++# CONFIG_VIDEO_ADV7343 is not set ++# CONFIG_VIDEO_ADV7393 is not set ++# CONFIG_VIDEO_AK881X is not set ++# CONFIG_VIDEO_THS8200 is not set ++ ++# ++# Camera sensor devices ++# ++# CONFIG_VIDEO_OV2640 is not set ++# CONFIG_VIDEO_OV2659 is not set ++# CONFIG_VIDEO_OV6650 is not set ++# CONFIG_VIDEO_OV5695 is not set ++# CONFIG_VIDEO_OV772X is not set ++# CONFIG_VIDEO_OV7640 is not set ++# CONFIG_VIDEO_OV7670 is not set ++# CONFIG_VIDEO_OV7740 is not set ++# CONFIG_VIDEO_OV9640 is not set ++# CONFIG_VIDEO_VS6624 is not set ++# CONFIG_VIDEO_MT9M111 is not set ++# CONFIG_VIDEO_MT9T112 is not set ++# CONFIG_VIDEO_MT9V011 is not set ++# CONFIG_VIDEO_MT9V111 is not set ++# CONFIG_VIDEO_SR030PC30 is not set ++# CONFIG_VIDEO_RJ54N1 is not set ++ ++# ++# Lens drivers ++# ++ ++# ++# Flash devices ++# ++ ++# ++# Video improvement chips ++# ++# CONFIG_VIDEO_UPD64031A is not set ++# CONFIG_VIDEO_UPD64083 is not set ++ ++# ++# Audio/Video compression chips ++# ++# 
CONFIG_VIDEO_SAA6752HS is not set ++ ++# ++# SDR tuner chips ++# ++ ++# ++# Miscellaneous helper chips ++# ++# CONFIG_VIDEO_THS7303 is not set ++# CONFIG_VIDEO_M52790 is not set ++# CONFIG_VIDEO_I2C is not set ++# end of I2C Encoders, decoders, sensors and other helper chips ++ ++# ++# SPI helper chips ++# ++# end of SPI helper chips ++ ++# ++# Media SPI Adapters ++# ++# end of Media SPI Adapters ++ ++# ++# Customise DVB Frontends ++# ++ ++# ++# Tools to develop new frontends ++# ++# end of Customise DVB Frontends ++ ++# ++# Graphics support ++# ++# CONFIG_AGP is not set ++CONFIG_INTEL_GTT=y ++CONFIG_VGA_ARB=y ++CONFIG_VGA_ARB_MAX_GPUS=10 ++# CONFIG_VGA_SWITCHEROO is not set ++CONFIG_DRM=y ++CONFIG_DRM_MIPI_DSI=y ++# CONFIG_DRM_DP_AUX_CHARDEV is not set ++# CONFIG_DRM_DEBUG_MM is not set ++# CONFIG_DRM_DEBUG_SELFTEST is not set ++CONFIG_DRM_KMS_HELPER=y ++CONFIG_DRM_KMS_FB_HELPER=y ++CONFIG_DRM_FBDEV_EMULATION=y ++CONFIG_DRM_FBDEV_OVERALLOC=100 ++# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set ++# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set ++# CONFIG_DRM_DP_CEC is not set ++CONFIG_DRM_TTM=y ++CONFIG_DRM_VRAM_HELPER=y ++CONFIG_DRM_GEM_SHMEM_HELPER=y ++CONFIG_DRM_VM=y ++CONFIG_DRM_SCHED=m ++ ++# ++# I2C encoder or helper chips ++# ++# CONFIG_DRM_I2C_CH7006 is not set ++# CONFIG_DRM_I2C_SIL164 is not set ++# CONFIG_DRM_I2C_NXP_TDA998X is not set ++# CONFIG_DRM_I2C_NXP_TDA9950 is not set ++# end of I2C encoder or helper chips ++ ++# ++# ARM devices ++# ++# end of ARM devices ++ ++CONFIG_DRM_RADEON=m ++# CONFIG_DRM_RADEON_USERPTR is not set ++CONFIG_DRM_AMDGPU=m ++# CONFIG_DRM_AMDGPU_SI is not set ++# CONFIG_DRM_AMDGPU_CIK is not set ++# CONFIG_DRM_AMDGPU_USERPTR is not set ++# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set ++ ++# ++# ACP (Audio CoProcessor) Configuration ++# ++# CONFIG_DRM_AMD_ACP is not set ++# end of ACP (Audio CoProcessor) Configuration ++ ++# ++# Display Engine Configuration ++# ++CONFIG_DRM_AMD_DC=y ++CONFIG_DRM_AMD_DC_DCN1_0=y ++CONFIG_DRM_AMD_DC_DCN2_0=y 
++# CONFIG_DRM_AMD_DC_DCN2_1 is not set ++CONFIG_DRM_AMD_DC_DSC_SUPPORT=y ++# CONFIG_DEBUG_KERNEL_DC is not set ++# end of Display Engine Configuration ++ ++# CONFIG_HSA_AMD is not set ++CONFIG_DRM_NOUVEAU=m ++CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y ++CONFIG_NOUVEAU_DEBUG=5 ++CONFIG_NOUVEAU_DEBUG_DEFAULT=3 ++# CONFIG_NOUVEAU_DEBUG_MMU is not set ++CONFIG_DRM_NOUVEAU_BACKLIGHT=y ++CONFIG_DRM_I915=y ++# CONFIG_DRM_I915_ALPHA_SUPPORT is not set ++CONFIG_DRM_I915_FORCE_PROBE="" ++CONFIG_DRM_I915_CAPTURE_ERROR=y ++CONFIG_DRM_I915_COMPRESS_ERROR=y ++CONFIG_DRM_I915_USERPTR=y ++CONFIG_DRM_I915_GVT=y ++ ++# ++# drm/i915 Debugging ++# ++# CONFIG_DRM_I915_WERROR is not set ++# CONFIG_DRM_I915_DEBUG is not set ++# CONFIG_DRM_I915_DEBUG_MMIO is not set ++# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set ++# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set ++# CONFIG_DRM_I915_DEBUG_GUC is not set ++# CONFIG_DRM_I915_SELFTEST is not set ++# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set ++# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set ++# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set ++# end of drm/i915 Debugging ++ ++# ++# drm/i915 Profile Guided Optimisation ++# ++CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 ++CONFIG_DRM_I915_SPIN_REQUEST=5 ++# end of drm/i915 Profile Guided Optimisation ++ ++# CONFIG_DRM_VGEM is not set ++# CONFIG_DRM_VKMS is not set ++CONFIG_DRM_ATI_PCIGART=y ++CONFIG_DRM_VMWGFX=m ++CONFIG_DRM_VMWGFX_FBCON=y ++# CONFIG_DRM_GMA500 is not set ++# CONFIG_DRM_UDL is not set ++# CONFIG_DRM_AST is not set ++# CONFIG_DRM_MGAG200 is not set ++CONFIG_DRM_CIRRUS_QEMU=y ++CONFIG_DRM_QXL=m ++CONFIG_DRM_BOCHS=y ++CONFIG_DRM_VIRTIO_GPU=y ++CONFIG_DRM_PANEL=y ++ ++# ++# Display Panels ++# ++# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set ++# end of Display Panels ++ ++CONFIG_DRM_BRIDGE=y ++CONFIG_DRM_PANEL_BRIDGE=y ++ ++# ++# Display Interface Bridges ++# ++# CONFIG_DRM_ANALOGIX_ANX78XX is not set ++# end of Display Interface Bridges ++ ++# CONFIG_DRM_ETNAVIV is not set ++# 
CONFIG_DRM_GM12U320 is not set ++# CONFIG_TINYDRM_HX8357D is not set ++# CONFIG_TINYDRM_ILI9225 is not set ++# CONFIG_TINYDRM_ILI9341 is not set ++# CONFIG_TINYDRM_MI0283QT is not set ++# CONFIG_TINYDRM_REPAPER is not set ++# CONFIG_TINYDRM_ST7586 is not set ++# CONFIG_TINYDRM_ST7735R is not set ++CONFIG_DRM_VBOXVIDEO=m ++CONFIG_DRM_LEGACY=y ++# CONFIG_DRM_TDFX is not set ++# CONFIG_DRM_R128 is not set ++# CONFIG_DRM_MGA is not set ++# CONFIG_DRM_VIA is not set ++# CONFIG_DRM_SAVAGE is not set ++CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y ++ ++# ++# Frame buffer Devices ++# ++CONFIG_FB_CMDLINE=y ++CONFIG_FB_NOTIFY=y ++CONFIG_FB=y ++CONFIG_FIRMWARE_EDID=y ++CONFIG_FB_DDC=m ++CONFIG_FB_CFB_FILLRECT=y ++CONFIG_FB_CFB_COPYAREA=y ++CONFIG_FB_CFB_IMAGEBLIT=y ++CONFIG_FB_SYS_FILLRECT=y ++CONFIG_FB_SYS_COPYAREA=y ++CONFIG_FB_SYS_IMAGEBLIT=y ++# CONFIG_FB_FOREIGN_ENDIAN is not set ++CONFIG_FB_SYS_FOPS=y ++CONFIG_FB_DEFERRED_IO=y ++CONFIG_FB_BACKLIGHT=m ++CONFIG_FB_MODE_HELPERS=y ++CONFIG_FB_TILEBLITTING=y ++ ++# ++# Frame buffer hardware drivers ++# ++# CONFIG_FB_CIRRUS is not set ++# CONFIG_FB_PM2 is not set ++# CONFIG_FB_CYBER2000 is not set ++# CONFIG_FB_ARC is not set ++# CONFIG_FB_ASILIANT is not set ++# CONFIG_FB_IMSTT is not set ++# CONFIG_FB_VGA16 is not set ++# CONFIG_FB_UVESA is not set ++# CONFIG_FB_VESA is not set ++CONFIG_FB_EFI=y ++# CONFIG_FB_N411 is not set ++# CONFIG_FB_HGA is not set ++# CONFIG_FB_OPENCORES is not set ++# CONFIG_FB_S1D13XXX is not set ++# CONFIG_FB_NVIDIA is not set ++# CONFIG_FB_RIVA is not set ++# CONFIG_FB_I740 is not set ++# CONFIG_FB_LE80578 is not set ++# CONFIG_FB_MATROX is not set ++CONFIG_FB_RADEON=m ++CONFIG_FB_RADEON_I2C=y ++CONFIG_FB_RADEON_BACKLIGHT=y ++# CONFIG_FB_RADEON_DEBUG is not set ++# CONFIG_FB_ATY128 is not set ++# CONFIG_FB_ATY is not set ++# CONFIG_FB_S3 is not set ++# CONFIG_FB_SAVAGE is not set ++# CONFIG_FB_SIS is not set ++# CONFIG_FB_VIA is not set ++# CONFIG_FB_NEOMAGIC is not set ++# CONFIG_FB_KYRO is not set ++# 
CONFIG_FB_3DFX is not set ++# CONFIG_FB_VOODOO1 is not set ++# CONFIG_FB_VT8623 is not set ++# CONFIG_FB_TRIDENT is not set ++# CONFIG_FB_ARK is not set ++# CONFIG_FB_PM3 is not set ++# CONFIG_FB_CARMINE is not set ++# CONFIG_FB_SMSCUFX is not set ++# CONFIG_FB_UDL is not set ++# CONFIG_FB_IBM_GXT4500 is not set ++# CONFIG_FB_VIRTUAL is not set ++# CONFIG_FB_METRONOME is not set ++# CONFIG_FB_MB862XX is not set ++# CONFIG_FB_SIMPLE is not set ++# CONFIG_FB_SM712 is not set ++# end of Frame buffer Devices ++ ++# ++# Backlight & LCD device support ++# ++# CONFIG_LCD_CLASS_DEVICE is not set ++CONFIG_BACKLIGHT_CLASS_DEVICE=y ++# CONFIG_BACKLIGHT_GENERIC is not set ++# CONFIG_BACKLIGHT_PWM is not set ++CONFIG_BACKLIGHT_APPLE=m ++# CONFIG_BACKLIGHT_PM8941_WLED is not set ++# CONFIG_BACKLIGHT_SAHARA is not set ++# CONFIG_BACKLIGHT_ADP8860 is not set ++# CONFIG_BACKLIGHT_ADP8870 is not set ++# CONFIG_BACKLIGHT_LM3630A is not set ++# CONFIG_BACKLIGHT_LM3639 is not set ++# CONFIG_BACKLIGHT_LP855X is not set ++# CONFIG_BACKLIGHT_GPIO is not set ++# CONFIG_BACKLIGHT_LV5207LP is not set ++# CONFIG_BACKLIGHT_BD6107 is not set ++# CONFIG_BACKLIGHT_ARCXCNN is not set ++# end of Backlight & LCD device support ++ ++CONFIG_HDMI=y ++ ++# ++# Console display driver support ++# ++CONFIG_VGA_CONSOLE=y ++CONFIG_VGACON_SOFT_SCROLLBACK=y ++CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 ++# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set ++CONFIG_DUMMY_CONSOLE=y ++CONFIG_DUMMY_CONSOLE_COLUMNS=80 ++CONFIG_DUMMY_CONSOLE_ROWS=25 ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y ++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set ++# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set ++# end of Console display driver support ++ ++# CONFIG_LOGO is not set ++# end of Graphics support ++ ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_TIMER=m ++CONFIG_SND_PCM=y ++CONFIG_SND_HWDEP=m ++CONFIG_SND_SEQ_DEVICE=m ++CONFIG_SND_RAWMIDI=m 
++CONFIG_SND_COMPRESS_OFFLOAD=y ++CONFIG_SND_JACK=y ++CONFIG_SND_JACK_INPUT_DEV=y ++# CONFIG_SND_OSSEMUL is not set ++# CONFIG_SND_PCM_TIMER is not set ++CONFIG_SND_HRTIMER=m ++CONFIG_SND_DYNAMIC_MINORS=y ++CONFIG_SND_MAX_CARDS=32 ++# CONFIG_SND_SUPPORT_OLD_API is not set ++CONFIG_SND_PROC_FS=y ++CONFIG_SND_VERBOSE_PROCFS=y ++CONFIG_SND_VERBOSE_PRINTK=y ++CONFIG_SND_DEBUG=y ++CONFIG_SND_DEBUG_VERBOSE=y ++# CONFIG_SND_PCM_XRUN_DEBUG is not set ++CONFIG_SND_VMASTER=y ++CONFIG_SND_DMA_SGBUF=y ++CONFIG_SND_SEQUENCER=m ++# CONFIG_SND_SEQ_DUMMY is not set ++CONFIG_SND_SEQ_HRTIMER_DEFAULT=y ++CONFIG_SND_SEQ_MIDI_EVENT=m ++CONFIG_SND_SEQ_MIDI=m ++CONFIG_SND_AC97_CODEC=m ++# CONFIG_SND_DRIVERS is not set ++CONFIG_SND_PCI=y ++# CONFIG_SND_AD1889 is not set ++# CONFIG_SND_ALS300 is not set ++# CONFIG_SND_ALS4000 is not set ++# CONFIG_SND_ALI5451 is not set ++# CONFIG_SND_ASIHPI is not set ++# CONFIG_SND_ATIIXP is not set ++# CONFIG_SND_ATIIXP_MODEM is not set ++# CONFIG_SND_AU8810 is not set ++# CONFIG_SND_AU8820 is not set ++# CONFIG_SND_AU8830 is not set ++# CONFIG_SND_AW2 is not set ++# CONFIG_SND_AZT3328 is not set ++# CONFIG_SND_BT87X is not set ++# CONFIG_SND_CA0106 is not set ++# CONFIG_SND_CMIPCI is not set ++# CONFIG_SND_OXYGEN is not set ++# CONFIG_SND_CS4281 is not set ++# CONFIG_SND_CS46XX is not set ++# CONFIG_SND_CTXFI is not set ++# CONFIG_SND_DARLA20 is not set ++# CONFIG_SND_GINA20 is not set ++# CONFIG_SND_LAYLA20 is not set ++# CONFIG_SND_DARLA24 is not set ++# CONFIG_SND_GINA24 is not set ++# CONFIG_SND_LAYLA24 is not set ++# CONFIG_SND_MONA is not set ++# CONFIG_SND_MIA is not set ++# CONFIG_SND_ECHO3G is not set ++# CONFIG_SND_INDIGO is not set ++# CONFIG_SND_INDIGOIO is not set ++# CONFIG_SND_INDIGODJ is not set ++# CONFIG_SND_INDIGOIOX is not set ++# CONFIG_SND_INDIGODJX is not set ++# CONFIG_SND_EMU10K1 is not set ++# CONFIG_SND_EMU10K1X is not set ++# CONFIG_SND_ENS1370 is not set ++# CONFIG_SND_ENS1371 is not set ++# CONFIG_SND_ES1938 is not set ++# 
CONFIG_SND_ES1968 is not set ++# CONFIG_SND_FM801 is not set ++# CONFIG_SND_HDSP is not set ++# CONFIG_SND_HDSPM is not set ++# CONFIG_SND_ICE1712 is not set ++# CONFIG_SND_ICE1724 is not set ++CONFIG_SND_INTEL8X0=m ++# CONFIG_SND_INTEL8X0M is not set ++# CONFIG_SND_KORG1212 is not set ++# CONFIG_SND_LOLA is not set ++# CONFIG_SND_LX6464ES is not set ++# CONFIG_SND_MAESTRO3 is not set ++# CONFIG_SND_MIXART is not set ++# CONFIG_SND_NM256 is not set ++# CONFIG_SND_PCXHR is not set ++# CONFIG_SND_RIPTIDE is not set ++# CONFIG_SND_RME32 is not set ++# CONFIG_SND_RME96 is not set ++# CONFIG_SND_RME9652 is not set ++# CONFIG_SND_SE6X is not set ++# CONFIG_SND_SONICVIBES is not set ++# CONFIG_SND_TRIDENT is not set ++# CONFIG_SND_VIA82XX is not set ++# CONFIG_SND_VIA82XX_MODEM is not set ++# CONFIG_SND_VIRTUOSO is not set ++# CONFIG_SND_VX222 is not set ++# CONFIG_SND_YMFPCI is not set ++ ++# ++# HD-Audio ++# ++CONFIG_SND_HDA=m ++CONFIG_SND_HDA_INTEL=m ++# CONFIG_SND_HDA_INTEL_DETECT_DMIC is not set ++CONFIG_SND_HDA_HWDEP=y ++CONFIG_SND_HDA_RECONFIG=y ++# CONFIG_SND_HDA_INPUT_BEEP is not set ++# CONFIG_SND_HDA_PATCH_LOADER is not set ++CONFIG_SND_HDA_CODEC_REALTEK=m ++CONFIG_SND_HDA_CODEC_ANALOG=m ++# CONFIG_SND_HDA_CODEC_SIGMATEL is not set ++# CONFIG_SND_HDA_CODEC_VIA is not set ++CONFIG_SND_HDA_CODEC_HDMI=m ++# CONFIG_SND_HDA_CODEC_CIRRUS is not set ++# CONFIG_SND_HDA_CODEC_CONEXANT is not set ++# CONFIG_SND_HDA_CODEC_CA0110 is not set ++# CONFIG_SND_HDA_CODEC_CA0132 is not set ++# CONFIG_SND_HDA_CODEC_CMEDIA is not set ++# CONFIG_SND_HDA_CODEC_SI3054 is not set ++CONFIG_SND_HDA_GENERIC=m ++CONFIG_SND_HDA_POWER_SAVE_DEFAULT=10 ++# end of HD-Audio ++ ++CONFIG_SND_HDA_CORE=m ++CONFIG_SND_HDA_DSP_LOADER=y ++CONFIG_SND_HDA_COMPONENT=y ++CONFIG_SND_HDA_I915=y ++CONFIG_SND_HDA_EXT_CORE=m ++CONFIG_SND_HDA_PREALLOC_SIZE=64 ++CONFIG_SND_INTEL_NHLT=m ++# CONFIG_SND_SPI is not set ++CONFIG_SND_USB=y ++CONFIG_SND_USB_AUDIO=m ++# CONFIG_SND_USB_UA101 is not set ++# 
CONFIG_SND_USB_USX2Y is not set ++# CONFIG_SND_USB_CAIAQ is not set ++# CONFIG_SND_USB_US122L is not set ++# CONFIG_SND_USB_6FIRE is not set ++# CONFIG_SND_USB_HIFACE is not set ++# CONFIG_SND_BCD2000 is not set ++# CONFIG_SND_USB_POD is not set ++# CONFIG_SND_USB_PODHD is not set ++# CONFIG_SND_USB_TONEPORT is not set ++# CONFIG_SND_USB_VARIAX is not set ++CONFIG_SND_SOC=y ++CONFIG_SND_SOC_COMPRESS=y ++CONFIG_SND_SOC_TOPOLOGY=y ++CONFIG_SND_SOC_ACPI=y ++# CONFIG_SND_SOC_AMD_ACP is not set ++# CONFIG_SND_SOC_AMD_ACP3x is not set ++# CONFIG_SND_ATMEL_SOC is not set ++# CONFIG_SND_DESIGNWARE_I2S is not set ++ ++# ++# SoC Audio for Freescale CPUs ++# ++ ++# ++# Common SoC Audio options for Freescale CPUs: ++# ++# CONFIG_SND_SOC_FSL_ASRC is not set ++# CONFIG_SND_SOC_FSL_SAI is not set ++# CONFIG_SND_SOC_FSL_AUDMIX is not set ++# CONFIG_SND_SOC_FSL_SSI is not set ++# CONFIG_SND_SOC_FSL_SPDIF is not set ++# CONFIG_SND_SOC_FSL_ESAI is not set ++# CONFIG_SND_SOC_FSL_MICFIL is not set ++# CONFIG_SND_SOC_IMX_AUDMUX is not set ++# end of SoC Audio for Freescale CPUs ++ ++# CONFIG_SND_I2S_HI6210_I2S is not set ++# CONFIG_SND_SOC_IMG is not set ++CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y ++CONFIG_SND_SST_IPC=y ++CONFIG_SND_SST_IPC_ACPI=y ++CONFIG_SND_SOC_INTEL_SST=m ++# CONFIG_SND_SOC_INTEL_HASWELL is not set ++CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=y ++# CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI is not set ++CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=y ++CONFIG_SND_SOC_INTEL_SKYLAKE=m ++CONFIG_SND_SOC_INTEL_SKL=m ++CONFIG_SND_SOC_INTEL_APL=m ++CONFIG_SND_SOC_INTEL_KBL=m ++CONFIG_SND_SOC_INTEL_GLK=m ++CONFIG_SND_SOC_INTEL_CNL=m ++CONFIG_SND_SOC_INTEL_CFL=m ++# CONFIG_SND_SOC_INTEL_CML_H is not set ++# CONFIG_SND_SOC_INTEL_CML_LP is not set ++CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m ++# CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC is not set ++CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m ++CONFIG_SND_SOC_ACPI_INTEL_MATCH=y ++CONFIG_SND_SOC_INTEL_MACH=y ++# CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH is not set 
++# CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH is not set ++# CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH is not set ++# CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH is not set ++# CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH is not set ++# CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH is not set ++# CONFIG_SND_SOC_INTEL_BYT_CHT_CX2072X_MACH is not set ++# CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH is not set ++# CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH is not set ++# CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH is not set ++# CONFIG_SND_SOC_INTEL_SKL_RT286_MACH is not set ++# CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH is not set ++# CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH is not set ++# CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH is not set ++# CONFIG_SND_SOC_INTEL_BXT_RT298_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH is not set ++# CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH is not set ++# CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH is not set ++# CONFIG_SND_SOC_MTK_BTCVSD is not set ++# CONFIG_SND_SOC_SOF_TOPLEVEL is not set ++ ++# ++# STMicroelectronics STM32 SOC audio support ++# ++# end of STMicroelectronics STM32 SOC audio support ++ ++# CONFIG_SND_SOC_XILINX_I2S is not set ++# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set ++# CONFIG_SND_SOC_XILINX_SPDIF is not set ++# CONFIG_SND_SOC_XTFPGA_I2S is not set ++# CONFIG_ZX_TDM is not set ++CONFIG_SND_SOC_I2C_AND_SPI=y ++ ++# ++# CODEC drivers ++# ++# CONFIG_SND_SOC_AC97_CODEC is not set ++# CONFIG_SND_SOC_ADAU1701 is not set ++# CONFIG_SND_SOC_ADAU1761_I2C is not set ++# CONFIG_SND_SOC_ADAU1761_SPI is not set ++# CONFIG_SND_SOC_ADAU7002 is not set ++# CONFIG_SND_SOC_AK4104 is not set ++# CONFIG_SND_SOC_AK4118 is not set ++# CONFIG_SND_SOC_AK4458 is not set ++# CONFIG_SND_SOC_AK4554 is not set ++# 
CONFIG_SND_SOC_AK4613 is not set ++# CONFIG_SND_SOC_AK4642 is not set ++# CONFIG_SND_SOC_AK5386 is not set ++# CONFIG_SND_SOC_AK5558 is not set ++# CONFIG_SND_SOC_ALC5623 is not set ++# CONFIG_SND_SOC_BD28623 is not set ++# CONFIG_SND_SOC_BT_SCO is not set ++# CONFIG_SND_SOC_CS35L32 is not set ++# CONFIG_SND_SOC_CS35L33 is not set ++# CONFIG_SND_SOC_CS35L34 is not set ++# CONFIG_SND_SOC_CS35L35 is not set ++# CONFIG_SND_SOC_CS35L36 is not set ++# CONFIG_SND_SOC_CS42L42 is not set ++# CONFIG_SND_SOC_CS42L51_I2C is not set ++# CONFIG_SND_SOC_CS42L52 is not set ++# CONFIG_SND_SOC_CS42L56 is not set ++# CONFIG_SND_SOC_CS42L73 is not set ++# CONFIG_SND_SOC_CS4265 is not set ++# CONFIG_SND_SOC_CS4270 is not set ++# CONFIG_SND_SOC_CS4271_I2C is not set ++# CONFIG_SND_SOC_CS4271_SPI is not set ++# CONFIG_SND_SOC_CS42XX8_I2C is not set ++# CONFIG_SND_SOC_CS43130 is not set ++# CONFIG_SND_SOC_CS4341 is not set ++# CONFIG_SND_SOC_CS4349 is not set ++# CONFIG_SND_SOC_CS53L30 is not set ++# CONFIG_SND_SOC_CX2072X is not set ++# CONFIG_SND_SOC_DMIC is not set ++# CONFIG_SND_SOC_ES7134 is not set ++# CONFIG_SND_SOC_ES7241 is not set ++# CONFIG_SND_SOC_ES8316 is not set ++# CONFIG_SND_SOC_ES8328_I2C is not set ++# CONFIG_SND_SOC_ES8328_SPI is not set ++# CONFIG_SND_SOC_GTM601 is not set ++# CONFIG_SND_SOC_INNO_RK3036 is not set ++# CONFIG_SND_SOC_MAX98088 is not set ++# CONFIG_SND_SOC_MAX98357A is not set ++# CONFIG_SND_SOC_MAX98504 is not set ++# CONFIG_SND_SOC_MAX9867 is not set ++# CONFIG_SND_SOC_MAX98927 is not set ++# CONFIG_SND_SOC_MAX98373 is not set ++# CONFIG_SND_SOC_MAX9860 is not set ++# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set ++# CONFIG_SND_SOC_PCM1681 is not set ++# CONFIG_SND_SOC_PCM1789_I2C is not set ++# CONFIG_SND_SOC_PCM179X_I2C is not set ++# CONFIG_SND_SOC_PCM179X_SPI is not set ++# CONFIG_SND_SOC_PCM186X_I2C is not set ++# CONFIG_SND_SOC_PCM186X_SPI is not set ++# CONFIG_SND_SOC_PCM3060_I2C is not set ++# CONFIG_SND_SOC_PCM3060_SPI is not set ++# 
CONFIG_SND_SOC_PCM3168A_I2C is not set ++# CONFIG_SND_SOC_PCM3168A_SPI is not set ++# CONFIG_SND_SOC_PCM512x_I2C is not set ++# CONFIG_SND_SOC_PCM512x_SPI is not set ++# CONFIG_SND_SOC_RK3328 is not set ++# CONFIG_SND_SOC_RT5616 is not set ++# CONFIG_SND_SOC_RT5631 is not set ++# CONFIG_SND_SOC_SGTL5000 is not set ++# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set ++# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set ++# CONFIG_SND_SOC_SPDIF is not set ++# CONFIG_SND_SOC_SSM2305 is not set ++# CONFIG_SND_SOC_SSM2602_SPI is not set ++# CONFIG_SND_SOC_SSM2602_I2C is not set ++# CONFIG_SND_SOC_SSM4567 is not set ++# CONFIG_SND_SOC_STA32X is not set ++# CONFIG_SND_SOC_STA350 is not set ++# CONFIG_SND_SOC_STI_SAS is not set ++# CONFIG_SND_SOC_TAS2552 is not set ++# CONFIG_SND_SOC_TAS5086 is not set ++# CONFIG_SND_SOC_TAS571X is not set ++# CONFIG_SND_SOC_TAS5720 is not set ++# CONFIG_SND_SOC_TAS6424 is not set ++# CONFIG_SND_SOC_TDA7419 is not set ++# CONFIG_SND_SOC_TFA9879 is not set ++# CONFIG_SND_SOC_TLV320AIC23_I2C is not set ++# CONFIG_SND_SOC_TLV320AIC23_SPI is not set ++# CONFIG_SND_SOC_TLV320AIC31XX is not set ++# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set ++# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set ++# CONFIG_SND_SOC_TLV320AIC3X is not set ++# CONFIG_SND_SOC_TS3A227E is not set ++# CONFIG_SND_SOC_TSCS42XX is not set ++# CONFIG_SND_SOC_TSCS454 is not set ++# CONFIG_SND_SOC_UDA1334 is not set ++# CONFIG_SND_SOC_WM8510 is not set ++# CONFIG_SND_SOC_WM8523 is not set ++# CONFIG_SND_SOC_WM8524 is not set ++# CONFIG_SND_SOC_WM8580 is not set ++# CONFIG_SND_SOC_WM8711 is not set ++# CONFIG_SND_SOC_WM8728 is not set ++# CONFIG_SND_SOC_WM8731 is not set ++# CONFIG_SND_SOC_WM8737 is not set ++# CONFIG_SND_SOC_WM8741 is not set ++# CONFIG_SND_SOC_WM8750 is not set ++# CONFIG_SND_SOC_WM8753 is not set ++# CONFIG_SND_SOC_WM8770 is not set ++# CONFIG_SND_SOC_WM8776 is not set ++# CONFIG_SND_SOC_WM8782 is not set ++# CONFIG_SND_SOC_WM8804_I2C is not set ++# 
CONFIG_SND_SOC_WM8804_SPI is not set ++# CONFIG_SND_SOC_WM8903 is not set ++# CONFIG_SND_SOC_WM8904 is not set ++# CONFIG_SND_SOC_WM8960 is not set ++# CONFIG_SND_SOC_WM8962 is not set ++# CONFIG_SND_SOC_WM8974 is not set ++# CONFIG_SND_SOC_WM8978 is not set ++# CONFIG_SND_SOC_WM8985 is not set ++# CONFIG_SND_SOC_ZX_AUD96P22 is not set ++# CONFIG_SND_SOC_MAX9759 is not set ++# CONFIG_SND_SOC_MT6351 is not set ++# CONFIG_SND_SOC_MT6358 is not set ++# CONFIG_SND_SOC_NAU8540 is not set ++# CONFIG_SND_SOC_NAU8810 is not set ++# CONFIG_SND_SOC_NAU8822 is not set ++# CONFIG_SND_SOC_NAU8824 is not set ++# CONFIG_SND_SOC_TPA6130A2 is not set ++# end of CODEC drivers ++ ++# CONFIG_SND_SIMPLE_CARD is not set ++CONFIG_SND_X86=y ++CONFIG_HDMI_LPE_AUDIO=m ++CONFIG_AC97_BUS=m ++ ++# ++# HID support ++# ++CONFIG_HID=y ++# CONFIG_HID_BATTERY_STRENGTH is not set ++CONFIG_HIDRAW=y ++# CONFIG_UHID is not set ++CONFIG_HID_GENERIC=y ++ ++# ++# Special HID drivers ++# ++# CONFIG_HID_A4TECH is not set ++# CONFIG_HID_ACCUTOUCH is not set ++# CONFIG_HID_ACRUX is not set ++CONFIG_HID_APPLE=m ++CONFIG_HID_APPLEIR=m ++CONFIG_HID_ASUS=m ++CONFIG_HID_AUREAL=m ++CONFIG_HID_BELKIN=y ++CONFIG_HID_BETOP_FF=m ++# CONFIG_HID_BIGBEN_FF is not set ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y ++CONFIG_HID_CORSAIR=m ++# CONFIG_HID_COUGAR is not set ++# CONFIG_HID_MACALLY is not set ++CONFIG_HID_PRODIKEYS=m ++CONFIG_HID_CMEDIA=m ++CONFIG_HID_CP2112=m ++# CONFIG_HID_CREATIVE_SB0540 is not set ++CONFIG_HID_CYPRESS=m ++CONFIG_HID_DRAGONRISE=m ++# CONFIG_DRAGONRISE_FF is not set ++CONFIG_HID_EMS_FF=m ++CONFIG_HID_ELAN=m ++CONFIG_HID_ELECOM=m ++CONFIG_HID_ELO=m ++CONFIG_HID_EZKEY=m ++CONFIG_HID_GEMBIRD=m ++CONFIG_HID_GFRM=m ++CONFIG_HID_HOLTEK=y ++# CONFIG_HOLTEK_FF is not set ++CONFIG_HID_GT683R=m ++CONFIG_HID_KEYTOUCH=m ++CONFIG_HID_KYE=m ++CONFIG_HID_UCLOGIC=m ++CONFIG_HID_WALTOP=m ++# CONFIG_HID_VIEWSONIC is not set ++CONFIG_HID_GYRATION=m ++CONFIG_HID_ICADE=m ++CONFIG_HID_ITE=m ++CONFIG_HID_JABRA=m 
++CONFIG_HID_TWINHAN=m ++CONFIG_HID_KENSINGTON=y ++CONFIG_HID_LCPOWER=m ++CONFIG_HID_LED=m ++CONFIG_HID_LENOVO=y ++CONFIG_HID_LOGITECH=y ++CONFIG_HID_LOGITECH_DJ=m ++CONFIG_HID_LOGITECH_HIDPP=m ++CONFIG_LOGITECH_FF=y ++# CONFIG_LOGIRUMBLEPAD2_FF is not set ++# CONFIG_LOGIG940_FF is not set ++CONFIG_LOGIWHEELS_FF=y ++CONFIG_HID_MAGICMOUSE=m ++# CONFIG_HID_MALTRON is not set ++CONFIG_HID_MAYFLASH=m ++# CONFIG_HID_REDRAGON is not set ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=m ++CONFIG_HID_MULTITOUCH=m ++CONFIG_HID_NTI=m ++CONFIG_HID_NTRIG=m ++CONFIG_HID_ORTEK=m ++CONFIG_HID_PANTHERLORD=m ++# CONFIG_PANTHERLORD_FF is not set ++CONFIG_HID_PENMOUNT=m ++CONFIG_HID_PETALYNX=m ++CONFIG_HID_PICOLCD=m ++# CONFIG_HID_PICOLCD_FB is not set ++# CONFIG_HID_PICOLCD_BACKLIGHT is not set ++# CONFIG_HID_PICOLCD_LEDS is not set ++CONFIG_HID_PLANTRONICS=m ++CONFIG_HID_PRIMAX=m ++CONFIG_HID_RETRODE=m ++CONFIG_HID_ROCCAT=m ++CONFIG_HID_SAITEK=m ++CONFIG_HID_SAMSUNG=y ++CONFIG_HID_SONY=m ++# CONFIG_SONY_FF is not set ++CONFIG_HID_SPEEDLINK=m ++# CONFIG_HID_STEAM is not set ++CONFIG_HID_STEELSERIES=m ++CONFIG_HID_SUNPLUS=m ++CONFIG_HID_RMI=m ++CONFIG_HID_GREENASIA=m ++# CONFIG_GREENASIA_FF is not set ++CONFIG_HID_SMARTJOYPLUS=m ++# CONFIG_SMARTJOYPLUS_FF is not set ++CONFIG_HID_TIVO=m ++CONFIG_HID_TOPSEED=m ++CONFIG_HID_THINGM=m ++CONFIG_HID_THRUSTMASTER=m ++# CONFIG_THRUSTMASTER_FF is not set ++CONFIG_HID_UDRAW_PS3=m ++# CONFIG_HID_U2FZERO is not set ++CONFIG_HID_WACOM=m ++CONFIG_HID_WIIMOTE=m ++CONFIG_HID_XINMO=m ++CONFIG_HID_ZEROPLUS=m ++# CONFIG_ZEROPLUS_FF is not set ++CONFIG_HID_ZYDACRON=m ++CONFIG_HID_SENSOR_HUB=m ++CONFIG_HID_SENSOR_CUSTOM_SENSOR=m ++CONFIG_HID_ALPS=m ++# end of Special HID drivers ++ ++# ++# USB HID support ++# ++CONFIG_USB_HID=y ++CONFIG_HID_PID=y ++CONFIG_USB_HIDDEV=y ++# end of USB HID support ++ ++# ++# I2C HID support ++# ++CONFIG_I2C_HID=m ++# end of I2C HID support ++ ++# ++# Intel ISH HID support ++# ++CONFIG_INTEL_ISH_HID=m ++# 
CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set ++# end of Intel ISH HID support ++# end of HID support ++ ++CONFIG_USB_OHCI_LITTLE_ENDIAN=y ++CONFIG_USB_SUPPORT=y ++CONFIG_USB_COMMON=y ++# CONFIG_USB_LED_TRIG is not set ++# CONFIG_USB_ULPI_BUS is not set ++# CONFIG_USB_CONN_GPIO is not set ++CONFIG_USB_ARCH_HAS_HCD=y ++CONFIG_USB=y ++CONFIG_USB_PCI=y ++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y ++ ++# ++# Miscellaneous USB options ++# ++CONFIG_USB_DEFAULT_PERSIST=y ++# CONFIG_USB_DYNAMIC_MINORS is not set ++# CONFIG_USB_OTG is not set ++# CONFIG_USB_OTG_WHITELIST is not set ++# CONFIG_USB_OTG_BLACKLIST_HUB is not set ++# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set ++CONFIG_USB_AUTOSUSPEND_DELAY=2 ++CONFIG_USB_MON=m ++ ++# ++# USB Host Controller Drivers ++# ++# CONFIG_USB_C67X00_HCD is not set ++CONFIG_USB_XHCI_HCD=y ++# CONFIG_USB_XHCI_DBGCAP is not set ++CONFIG_USB_XHCI_PCI=y ++# CONFIG_USB_XHCI_PLATFORM is not set ++CONFIG_USB_EHCI_HCD=y ++CONFIG_USB_EHCI_ROOT_HUB_TT=y ++CONFIG_USB_EHCI_TT_NEWSCHED=y ++CONFIG_USB_EHCI_PCI=y ++# CONFIG_USB_EHCI_FSL is not set ++CONFIG_USB_EHCI_HCD_PLATFORM=m ++# CONFIG_USB_OXU210HP_HCD is not set ++# CONFIG_USB_ISP116X_HCD is not set ++# CONFIG_USB_FOTG210_HCD is not set ++CONFIG_USB_MAX3421_HCD=m ++# CONFIG_USB_OHCI_HCD is not set ++CONFIG_USB_UHCI_HCD=y ++# CONFIG_USB_SL811_HCD is not set ++# CONFIG_USB_R8A66597_HCD is not set ++CONFIG_USB_HCD_BCMA=m ++# CONFIG_USB_HCD_SSB is not set ++# CONFIG_USB_HCD_TEST_MODE is not set ++ ++# ++# USB Device Class drivers ++# ++CONFIG_USB_ACM=m ++# CONFIG_USB_PRINTER is not set ++CONFIG_USB_WDM=m ++# CONFIG_USB_TMC is not set ++ ++# ++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may ++# ++ ++# ++# also be needed; see USB_STORAGE Help for more info ++# ++CONFIG_USB_STORAGE=y ++# CONFIG_USB_STORAGE_DEBUG is not set ++# CONFIG_USB_STORAGE_REALTEK is not set ++# CONFIG_USB_STORAGE_DATAFAB is not set ++# CONFIG_USB_STORAGE_FREECOM is not set ++# CONFIG_USB_STORAGE_ISD200 is not set ++# 
CONFIG_USB_STORAGE_USBAT is not set ++# CONFIG_USB_STORAGE_SDDR09 is not set ++# CONFIG_USB_STORAGE_SDDR55 is not set ++# CONFIG_USB_STORAGE_JUMPSHOT is not set ++# CONFIG_USB_STORAGE_ALAUDA is not set ++# CONFIG_USB_STORAGE_ONETOUCH is not set ++# CONFIG_USB_STORAGE_KARMA is not set ++# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set ++# CONFIG_USB_STORAGE_ENE_UB6250 is not set ++# CONFIG_USB_UAS is not set ++ ++# ++# USB Imaging devices ++# ++# CONFIG_USB_MDC800 is not set ++# CONFIG_USB_MICROTEK is not set ++# CONFIG_USBIP_CORE is not set ++# CONFIG_USB_CDNS3 is not set ++# CONFIG_USB_MUSB_HDRC is not set ++# CONFIG_USB_DWC3 is not set ++# CONFIG_USB_DWC2 is not set ++# CONFIG_USB_CHIPIDEA is not set ++# CONFIG_USB_ISP1760 is not set ++ ++# ++# USB port drivers ++# ++CONFIG_USB_SERIAL=m ++CONFIG_USB_SERIAL_GENERIC=y ++CONFIG_USB_SERIAL_SIMPLE=m ++CONFIG_USB_SERIAL_AIRCABLE=m ++CONFIG_USB_SERIAL_ARK3116=m ++CONFIG_USB_SERIAL_BELKIN=m ++CONFIG_USB_SERIAL_CH341=m ++CONFIG_USB_SERIAL_WHITEHEAT=m ++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m ++CONFIG_USB_SERIAL_CP210X=m ++CONFIG_USB_SERIAL_CYPRESS_M8=m ++CONFIG_USB_SERIAL_EMPEG=m ++CONFIG_USB_SERIAL_FTDI_SIO=m ++CONFIG_USB_SERIAL_VISOR=m ++CONFIG_USB_SERIAL_IPAQ=m ++CONFIG_USB_SERIAL_IR=m ++CONFIG_USB_SERIAL_EDGEPORT=m ++CONFIG_USB_SERIAL_EDGEPORT_TI=m ++CONFIG_USB_SERIAL_F81232=m ++CONFIG_USB_SERIAL_F8153X=m ++CONFIG_USB_SERIAL_GARMIN=m ++CONFIG_USB_SERIAL_IPW=m ++CONFIG_USB_SERIAL_IUU=m ++CONFIG_USB_SERIAL_KEYSPAN_PDA=m ++CONFIG_USB_SERIAL_KEYSPAN=m ++CONFIG_USB_SERIAL_KLSI=m ++CONFIG_USB_SERIAL_KOBIL_SCT=m ++CONFIG_USB_SERIAL_MCT_U232=m ++CONFIG_USB_SERIAL_METRO=m ++CONFIG_USB_SERIAL_MOS7720=m ++CONFIG_USB_SERIAL_MOS7840=m ++CONFIG_USB_SERIAL_MXUPORT=m ++CONFIG_USB_SERIAL_NAVMAN=m ++CONFIG_USB_SERIAL_PL2303=m ++CONFIG_USB_SERIAL_OTI6858=m ++CONFIG_USB_SERIAL_QCAUX=m ++CONFIG_USB_SERIAL_QUALCOMM=m ++CONFIG_USB_SERIAL_SPCP8X5=m ++CONFIG_USB_SERIAL_SAFE=m ++CONFIG_USB_SERIAL_SAFE_PADDED=y ++CONFIG_USB_SERIAL_SIERRAWIRELESS=m 
++CONFIG_USB_SERIAL_SYMBOL=m ++CONFIG_USB_SERIAL_TI=m ++CONFIG_USB_SERIAL_CYBERJACK=m ++CONFIG_USB_SERIAL_XIRCOM=m ++CONFIG_USB_SERIAL_WWAN=m ++CONFIG_USB_SERIAL_OPTION=m ++CONFIG_USB_SERIAL_OMNINET=m ++CONFIG_USB_SERIAL_OPTICON=m ++CONFIG_USB_SERIAL_XSENS_MT=m ++CONFIG_USB_SERIAL_WISHBONE=m ++CONFIG_USB_SERIAL_SSU100=m ++CONFIG_USB_SERIAL_QT2=m ++CONFIG_USB_SERIAL_UPD78F0730=m ++CONFIG_USB_SERIAL_DEBUG=m ++ ++# ++# USB Miscellaneous drivers ++# ++# CONFIG_USB_EMI62 is not set ++# CONFIG_USB_EMI26 is not set ++# CONFIG_USB_ADUTUX is not set ++# CONFIG_USB_SEVSEG is not set ++# CONFIG_USB_LEGOTOWER is not set ++# CONFIG_USB_LCD is not set ++# CONFIG_USB_CYPRESS_CY7C63 is not set ++# CONFIG_USB_CYTHERM is not set ++# CONFIG_USB_IDMOUSE is not set ++# CONFIG_USB_FTDI_ELAN is not set ++CONFIG_USB_APPLEDISPLAY=m ++# CONFIG_USB_SISUSBVGA is not set ++# CONFIG_USB_LD is not set ++# CONFIG_USB_TRANCEVIBRATOR is not set ++# CONFIG_USB_IOWARRIOR is not set ++# CONFIG_USB_TEST is not set ++# CONFIG_USB_EHSET_TEST_FIXTURE is not set ++# CONFIG_USB_ISIGHTFW is not set ++# CONFIG_USB_YUREX is not set ++CONFIG_USB_EZUSB_FX2=m ++# CONFIG_USB_HUB_USB251XB is not set ++# CONFIG_USB_HSIC_USB3503 is not set ++# CONFIG_USB_HSIC_USB4604 is not set ++# CONFIG_USB_LINK_LAYER_TEST is not set ++# CONFIG_USB_CHAOSKEY is not set ++ ++# ++# USB Physical Layer drivers ++# ++CONFIG_USB_PHY=y ++# CONFIG_NOP_USB_XCEIV is not set ++# CONFIG_USB_GPIO_VBUS is not set ++# CONFIG_USB_ISP1301 is not set ++# end of USB Physical Layer drivers ++ ++CONFIG_USB_GADGET=m ++# CONFIG_USB_GADGET_DEBUG is not set ++# CONFIG_USB_GADGET_DEBUG_FILES is not set ++# CONFIG_USB_GADGET_DEBUG_FS is not set ++CONFIG_USB_GADGET_VBUS_DRAW=2 ++CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 ++ ++# ++# USB Peripheral Controller ++# ++# CONFIG_USB_FOTG210_UDC is not set ++# CONFIG_USB_GR_UDC is not set ++# CONFIG_USB_R8A66597 is not set ++# CONFIG_USB_PXA27X is not set ++# CONFIG_USB_MV_UDC is not set ++# CONFIG_USB_MV_U3D is not set 
++# CONFIG_USB_M66592 is not set ++# CONFIG_USB_BDC_UDC is not set ++# CONFIG_USB_AMD5536UDC is not set ++# CONFIG_USB_NET2272 is not set ++# CONFIG_USB_NET2280 is not set ++# CONFIG_USB_GOKU is not set ++# CONFIG_USB_EG20T is not set ++# CONFIG_USB_DUMMY_HCD is not set ++# end of USB Peripheral Controller ++ ++CONFIG_USB_LIBCOMPOSITE=m ++CONFIG_USB_U_AUDIO=m ++CONFIG_USB_F_UAC2=m ++CONFIG_USB_F_HID=m ++# CONFIG_USB_CONFIGFS is not set ++# CONFIG_USB_ZERO is not set ++CONFIG_USB_AUDIO=m ++# CONFIG_GADGET_UAC1 is not set ++# CONFIG_USB_ETH is not set ++# CONFIG_USB_G_NCM is not set ++# CONFIG_USB_GADGETFS is not set ++# CONFIG_USB_FUNCTIONFS is not set ++# CONFIG_USB_MASS_STORAGE is not set ++# CONFIG_USB_G_SERIAL is not set ++# CONFIG_USB_MIDI_GADGET is not set ++# CONFIG_USB_G_PRINTER is not set ++# CONFIG_USB_CDC_COMPOSITE is not set ++# CONFIG_USB_G_ACM_MS is not set ++# CONFIG_USB_G_MULTI is not set ++CONFIG_USB_G_HID=m ++# CONFIG_USB_G_DBGP is not set ++# CONFIG_USB_G_WEBCAM is not set ++CONFIG_TYPEC=m ++CONFIG_TYPEC_TCPM=m ++CONFIG_TYPEC_TCPCI=m ++# CONFIG_TYPEC_RT1711H is not set ++CONFIG_TYPEC_FUSB302=m ++CONFIG_TYPEC_UCSI=m ++# CONFIG_UCSI_CCG is not set ++CONFIG_UCSI_ACPI=m ++CONFIG_TYPEC_TPS6598X=m ++ ++# ++# USB Type-C Multiplexer/DeMultiplexer Switch support ++# ++CONFIG_TYPEC_MUX_PI3USB30532=m ++# end of USB Type-C Multiplexer/DeMultiplexer Switch support ++ ++# ++# USB Type-C Alternate Mode drivers ++# ++# CONFIG_TYPEC_DP_ALTMODE is not set ++# end of USB Type-C Alternate Mode drivers ++ ++CONFIG_USB_ROLE_SWITCH=m ++CONFIG_USB_ROLES_INTEL_XHCI=m ++CONFIG_MMC=y ++CONFIG_MMC_BLOCK=y ++CONFIG_MMC_BLOCK_MINORS=8 ++# CONFIG_SDIO_UART is not set ++# CONFIG_MMC_TEST is not set ++ ++# ++# MMC/SD/SDIO Host Controller Drivers ++# ++# CONFIG_MMC_DEBUG is not set ++CONFIG_MMC_SDHCI=y ++CONFIG_MMC_SDHCI_IO_ACCESSORS=y ++CONFIG_MMC_SDHCI_PCI=y ++CONFIG_MMC_RICOH_MMC=y ++CONFIG_MMC_SDHCI_ACPI=y ++CONFIG_MMC_SDHCI_PLTFM=m ++CONFIG_MMC_SDHCI_F_SDH30=m ++# 
CONFIG_MMC_WBSD is not set ++# CONFIG_MMC_TIFM_SD is not set ++# CONFIG_MMC_SPI is not set ++# CONFIG_MMC_CB710 is not set ++# CONFIG_MMC_VIA_SDMMC is not set ++# CONFIG_MMC_VUB300 is not set ++# CONFIG_MMC_USHC is not set ++# CONFIG_MMC_USDHI6ROL0 is not set ++CONFIG_MMC_REALTEK_PCI=m ++CONFIG_MMC_REALTEK_USB=m ++CONFIG_MMC_CQHCI=y ++# CONFIG_MMC_TOSHIBA_PCI is not set ++# CONFIG_MMC_MTK is not set ++CONFIG_MMC_SDHCI_XENON=m ++# CONFIG_MEMSTICK is not set ++CONFIG_NEW_LEDS=y ++CONFIG_LEDS_CLASS=y ++# CONFIG_LEDS_CLASS_FLASH is not set ++# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set ++ ++# ++# LED drivers ++# ++# CONFIG_LEDS_APU is not set ++# CONFIG_LEDS_LM3530 is not set ++# CONFIG_LEDS_LM3532 is not set ++# CONFIG_LEDS_LM3642 is not set ++# CONFIG_LEDS_PCA9532 is not set ++# CONFIG_LEDS_GPIO is not set ++# CONFIG_LEDS_LP3944 is not set ++# CONFIG_LEDS_LP3952 is not set ++# CONFIG_LEDS_LP5521 is not set ++# CONFIG_LEDS_LP5523 is not set ++# CONFIG_LEDS_LP5562 is not set ++# CONFIG_LEDS_LP8501 is not set ++# CONFIG_LEDS_CLEVO_MAIL is not set ++# CONFIG_LEDS_PCA955X is not set ++# CONFIG_LEDS_PCA963X is not set ++# CONFIG_LEDS_DAC124S085 is not set ++# CONFIG_LEDS_PWM is not set ++# CONFIG_LEDS_BD2802 is not set ++# CONFIG_LEDS_INTEL_SS4200 is not set ++# CONFIG_LEDS_TCA6507 is not set ++# CONFIG_LEDS_TLC591XX is not set ++# CONFIG_LEDS_LM355x is not set ++ ++# ++# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) ++# ++# CONFIG_LEDS_BLINKM is not set ++# CONFIG_LEDS_MLXCPLD is not set ++CONFIG_LEDS_MLXREG=m ++# CONFIG_LEDS_USER is not set ++# CONFIG_LEDS_NIC78BX is not set ++# CONFIG_LEDS_TI_LMU_COMMON is not set ++ ++# ++# LED Triggers ++# ++CONFIG_LEDS_TRIGGERS=y ++# CONFIG_LEDS_TRIGGER_TIMER is not set ++# CONFIG_LEDS_TRIGGER_ONESHOT is not set ++# CONFIG_LEDS_TRIGGER_DISK is not set ++# CONFIG_LEDS_TRIGGER_MTD is not set ++# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set ++# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set ++# 
CONFIG_LEDS_TRIGGER_CPU is not set ++# CONFIG_LEDS_TRIGGER_ACTIVITY is not set ++# CONFIG_LEDS_TRIGGER_GPIO is not set ++# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set ++ ++# ++# iptables trigger is under Netfilter config (LED target) ++# ++# CONFIG_LEDS_TRIGGER_TRANSIENT is not set ++# CONFIG_LEDS_TRIGGER_CAMERA is not set ++# CONFIG_LEDS_TRIGGER_PANIC is not set ++CONFIG_LEDS_TRIGGER_NETDEV=m ++# CONFIG_LEDS_TRIGGER_PATTERN is not set ++CONFIG_LEDS_TRIGGER_AUDIO=m ++# CONFIG_ACCESSIBILITY is not set ++CONFIG_INFINIBAND=m ++CONFIG_INFINIBAND_USER_MAD=m ++CONFIG_INFINIBAND_USER_ACCESS=m ++# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set ++CONFIG_INFINIBAND_USER_MEM=y ++CONFIG_INFINIBAND_ON_DEMAND_PAGING=y ++CONFIG_INFINIBAND_ADDR_TRANS=y ++CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y ++CONFIG_INFINIBAND_MTHCA=m ++# CONFIG_INFINIBAND_MTHCA_DEBUG is not set ++CONFIG_INFINIBAND_QIB=m ++CONFIG_INFINIBAND_QIB_DCA=y ++# CONFIG_INFINIBAND_EFA is not set ++CONFIG_INFINIBAND_I40IW=m ++CONFIG_MLX4_INFINIBAND=m ++CONFIG_MLX5_INFINIBAND=m ++CONFIG_INFINIBAND_OCRDMA=m ++CONFIG_INFINIBAND_VMWARE_PVRDMA=m ++CONFIG_INFINIBAND_USNIC=m ++# CONFIG_INFINIBAND_BNXT_RE is not set ++CONFIG_INFINIBAND_HFI1=m ++# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set ++# CONFIG_SDMA_VERBOSITY is not set ++CONFIG_INFINIBAND_RDMAVT=m ++CONFIG_RDMA_RXE=m ++# CONFIG_RDMA_SIW is not set ++CONFIG_INFINIBAND_IPOIB=m ++# CONFIG_INFINIBAND_IPOIB_CM is not set ++# CONFIG_INFINIBAND_IPOIB_DEBUG is not set ++CONFIG_INFINIBAND_SRP=m ++CONFIG_INFINIBAND_ISER=m ++CONFIG_INFINIBAND_OPA_VNIC=m ++CONFIG_EDAC_ATOMIC_SCRUB=y ++CONFIG_EDAC_SUPPORT=y ++CONFIG_RTC_LIB=y ++CONFIG_RTC_MC146818_LIB=y ++CONFIG_RTC_CLASS=y ++# CONFIG_RTC_HCTOSYS is not set ++# CONFIG_RTC_SYSTOHC is not set ++# CONFIG_RTC_DEBUG is not set ++# CONFIG_RTC_NVMEM is not set ++ ++# ++# RTC interfaces ++# ++CONFIG_RTC_INTF_SYSFS=y ++CONFIG_RTC_INTF_PROC=y ++CONFIG_RTC_INTF_DEV=y ++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set ++# CONFIG_RTC_DRV_TEST is not 
set ++ ++# ++# I2C RTC drivers ++# ++# CONFIG_RTC_DRV_ABB5ZES3 is not set ++# CONFIG_RTC_DRV_ABEOZ9 is not set ++# CONFIG_RTC_DRV_ABX80X is not set ++# CONFIG_RTC_DRV_DS1307 is not set ++# CONFIG_RTC_DRV_DS1374 is not set ++# CONFIG_RTC_DRV_DS1672 is not set ++# CONFIG_RTC_DRV_MAX6900 is not set ++# CONFIG_RTC_DRV_RS5C372 is not set ++# CONFIG_RTC_DRV_ISL1208 is not set ++# CONFIG_RTC_DRV_ISL12022 is not set ++# CONFIG_RTC_DRV_X1205 is not set ++# CONFIG_RTC_DRV_PCF8523 is not set ++# CONFIG_RTC_DRV_PCF85063 is not set ++# CONFIG_RTC_DRV_PCF85363 is not set ++# CONFIG_RTC_DRV_PCF8563 is not set ++# CONFIG_RTC_DRV_PCF8583 is not set ++# CONFIG_RTC_DRV_M41T80 is not set ++# CONFIG_RTC_DRV_BQ32K is not set ++# CONFIG_RTC_DRV_S35390A is not set ++# CONFIG_RTC_DRV_FM3130 is not set ++# CONFIG_RTC_DRV_RX8010 is not set ++# CONFIG_RTC_DRV_RX8581 is not set ++# CONFIG_RTC_DRV_RX8025 is not set ++# CONFIG_RTC_DRV_EM3027 is not set ++# CONFIG_RTC_DRV_RV3028 is not set ++# CONFIG_RTC_DRV_RV8803 is not set ++# CONFIG_RTC_DRV_SD3078 is not set ++ ++# ++# SPI RTC drivers ++# ++# CONFIG_RTC_DRV_M41T93 is not set ++# CONFIG_RTC_DRV_M41T94 is not set ++# CONFIG_RTC_DRV_DS1302 is not set ++# CONFIG_RTC_DRV_DS1305 is not set ++# CONFIG_RTC_DRV_DS1343 is not set ++# CONFIG_RTC_DRV_DS1347 is not set ++# CONFIG_RTC_DRV_DS1390 is not set ++# CONFIG_RTC_DRV_MAX6916 is not set ++# CONFIG_RTC_DRV_R9701 is not set ++# CONFIG_RTC_DRV_RX4581 is not set ++# CONFIG_RTC_DRV_RX6110 is not set ++# CONFIG_RTC_DRV_RS5C348 is not set ++# CONFIG_RTC_DRV_MAX6902 is not set ++# CONFIG_RTC_DRV_PCF2123 is not set ++# CONFIG_RTC_DRV_MCP795 is not set ++CONFIG_RTC_I2C_AND_SPI=y ++ ++# ++# SPI and I2C RTC drivers ++# ++# CONFIG_RTC_DRV_DS3232 is not set ++# CONFIG_RTC_DRV_PCF2127 is not set ++# CONFIG_RTC_DRV_RV3029C2 is not set ++ ++# ++# Platform RTC drivers ++# ++CONFIG_RTC_DRV_CMOS=y ++# CONFIG_RTC_DRV_DS1286 is not set ++# CONFIG_RTC_DRV_DS1511 is not set ++# CONFIG_RTC_DRV_DS1553 is not set ++# 
CONFIG_RTC_DRV_DS1685_FAMILY is not set ++# CONFIG_RTC_DRV_DS1742 is not set ++# CONFIG_RTC_DRV_DS2404 is not set ++# CONFIG_RTC_DRV_STK17TA8 is not set ++# CONFIG_RTC_DRV_M48T86 is not set ++# CONFIG_RTC_DRV_M48T35 is not set ++# CONFIG_RTC_DRV_M48T59 is not set ++# CONFIG_RTC_DRV_MSM6242 is not set ++# CONFIG_RTC_DRV_BQ4802 is not set ++# CONFIG_RTC_DRV_RP5C01 is not set ++# CONFIG_RTC_DRV_V3020 is not set ++ ++# ++# on-CPU RTC drivers ++# ++# CONFIG_RTC_DRV_FTRTC010 is not set ++ ++# ++# HID Sensor RTC drivers ++# ++CONFIG_RTC_DRV_HID_SENSOR_TIME=m ++CONFIG_DMADEVICES=y ++# CONFIG_DMADEVICES_DEBUG is not set ++ ++# ++# DMA Devices ++# ++CONFIG_DMA_ENGINE=y ++CONFIG_DMA_VIRTUAL_CHANNELS=y ++CONFIG_DMA_ACPI=y ++CONFIG_ALTERA_MSGDMA=m ++# CONFIG_INTEL_IDMA64 is not set ++CONFIG_INTEL_IOATDMA=y ++# CONFIG_QCOM_HIDMA_MGMT is not set ++# CONFIG_QCOM_HIDMA is not set ++CONFIG_DW_DMAC_CORE=y ++# CONFIG_DW_DMAC is not set ++CONFIG_DW_DMAC_PCI=y ++# CONFIG_DW_EDMA is not set ++# CONFIG_DW_EDMA_PCIE is not set ++CONFIG_HSU_DMA=y ++ ++# ++# DMA Clients ++# ++# CONFIG_ASYNC_TX_DMA is not set ++# CONFIG_DMATEST is not set ++CONFIG_DMA_ENGINE_RAID=y ++ ++# ++# DMABUF options ++# ++CONFIG_SYNC_FILE=y ++# CONFIG_SW_SYNC is not set ++# CONFIG_UDMABUF is not set ++# CONFIG_DMABUF_SELFTESTS is not set ++# end of DMABUF options ++ ++CONFIG_DCA=y ++# CONFIG_AUXDISPLAY is not set ++CONFIG_UIO=m ++# CONFIG_UIO_CIF is not set ++# CONFIG_UIO_PDRV_GENIRQ is not set ++# CONFIG_UIO_DMEM_GENIRQ is not set ++# CONFIG_UIO_AEC is not set ++# CONFIG_UIO_SERCOS3 is not set ++CONFIG_UIO_PCI_GENERIC=m ++# CONFIG_UIO_NETX is not set ++# CONFIG_UIO_PRUSS is not set ++# CONFIG_UIO_MF624 is not set ++CONFIG_VFIO_IOMMU_TYPE1=m ++CONFIG_VFIO_VIRQFD=m ++CONFIG_VFIO=m ++# CONFIG_VFIO_NOIOMMU is not set ++CONFIG_VFIO_PCI=m ++# CONFIG_VFIO_PCI_VGA is not set ++CONFIG_VFIO_PCI_MMAP=y ++CONFIG_VFIO_PCI_INTX=y ++CONFIG_VFIO_PCI_IGD=y ++# CONFIG_VFIO_MDEV is not set ++CONFIG_IRQ_BYPASS_MANAGER=y 
++CONFIG_VIRT_DRIVERS=y ++CONFIG_VBOXGUEST=y ++CONFIG_VIRTIO=y ++CONFIG_VIRTIO_MENU=y ++CONFIG_VIRTIO_PCI=y ++CONFIG_VIRTIO_PCI_LEGACY=y ++# CONFIG_VIRTIO_PMEM is not set ++CONFIG_VIRTIO_BALLOON=m ++# CONFIG_VIRTIO_INPUT is not set ++CONFIG_VIRTIO_MMIO=m ++# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set ++ ++# ++# Microsoft Hyper-V guest support ++# ++# CONFIG_HYPERV is not set ++# end of Microsoft Hyper-V guest support ++ ++# CONFIG_GREYBUS is not set ++CONFIG_STAGING=y ++CONFIG_PRISM2_USB=m ++# CONFIG_COMEDI is not set ++CONFIG_RTL8192U=m ++CONFIG_RTLLIB=m ++CONFIG_RTLLIB_CRYPTO_CCMP=m ++CONFIG_RTLLIB_CRYPTO_TKIP=m ++CONFIG_RTLLIB_CRYPTO_WEP=m ++CONFIG_RTL8192E=m ++CONFIG_RTL8723BS=m ++CONFIG_R8712U=m ++CONFIG_R8188EU=m ++# CONFIG_88EU_AP_MODE is not set ++CONFIG_RTS5208=m ++CONFIG_VT6655=m ++CONFIG_VT6656=m ++ ++# ++# IIO staging drivers ++# ++ ++# ++# Accelerometers ++# ++# CONFIG_ADIS16203 is not set ++# CONFIG_ADIS16240 is not set ++# end of Accelerometers ++ ++# ++# Analog to digital converters ++# ++# CONFIG_AD7816 is not set ++# CONFIG_AD7192 is not set ++# CONFIG_AD7280 is not set ++# end of Analog to digital converters ++ ++# ++# Analog digital bi-direction converters ++# ++# CONFIG_ADT7316 is not set ++# end of Analog digital bi-direction converters ++ ++# ++# Capacitance to digital converters ++# ++# CONFIG_AD7150 is not set ++# CONFIG_AD7746 is not set ++# end of Capacitance to digital converters ++ ++# ++# Direct Digital Synthesis ++# ++# CONFIG_AD9832 is not set ++# CONFIG_AD9834 is not set ++# end of Direct Digital Synthesis ++ ++# ++# Network Analyzer, Impedance Converters ++# ++# CONFIG_AD5933 is not set ++# end of Network Analyzer, Impedance Converters ++ ++# ++# Active energy metering IC ++# ++# CONFIG_ADE7854 is not set ++# end of Active energy metering IC ++ ++# ++# Resolver to digital converters ++# ++# CONFIG_AD2S1210 is not set ++# end of Resolver to digital converters ++# end of IIO staging drivers ++ ++# CONFIG_FB_SM750 is not set ++ ++# 
++# Speakup console speech ++# ++# CONFIG_SPEAKUP is not set ++# end of Speakup console speech ++ ++# CONFIG_STAGING_MEDIA is not set ++ ++# ++# Android ++# ++# end of Android ++ ++# CONFIG_LTE_GDM724X is not set ++# CONFIG_GS_FPGABOOT is not set ++# CONFIG_UNISYSSPAR is not set ++# CONFIG_WILC1000_SDIO is not set ++# CONFIG_WILC1000_SPI is not set ++# CONFIG_MOST is not set ++# CONFIG_KS7010 is not set ++# CONFIG_PI433 is not set ++ ++# ++# Gasket devices ++# ++# CONFIG_STAGING_GASKET_FRAMEWORK is not set ++# end of Gasket devices ++ ++# CONFIG_FIELDBUS_DEV is not set ++# CONFIG_KPC2000 is not set ++# CONFIG_USB_WUSB_CBAF is not set ++# CONFIG_UWB is not set ++# CONFIG_EXFAT_FS is not set ++# CONFIG_QLGE is not set ++CONFIG_X86_PLATFORM_DEVICES=y ++CONFIG_ACER_WMI=m ++CONFIG_ACER_WIRELESS=m ++CONFIG_ACERHDF=m ++CONFIG_ALIENWARE_WMI=m ++CONFIG_ASUS_LAPTOP=m ++CONFIG_DCDBAS=m ++CONFIG_DELL_SMBIOS=m ++# CONFIG_DELL_SMBIOS_WMI is not set ++# CONFIG_DELL_SMBIOS_SMM is not set ++CONFIG_DELL_LAPTOP=m ++CONFIG_DELL_WMI=m ++CONFIG_DELL_WMI_DESCRIPTOR=m ++CONFIG_DELL_WMI_AIO=m ++CONFIG_DELL_WMI_LED=m ++CONFIG_DELL_SMO8800=m ++CONFIG_DELL_RBTN=m ++# CONFIG_DELL_RBU is not set ++CONFIG_FUJITSU_LAPTOP=m ++# CONFIG_FUJITSU_TABLET is not set ++# CONFIG_AMILO_RFKILL is not set ++CONFIG_GPD_POCKET_FAN=m ++# CONFIG_HP_ACCEL is not set ++# CONFIG_HP_WIRELESS is not set ++CONFIG_HP_WMI=m ++# CONFIG_LG_LAPTOP is not set ++CONFIG_MSI_LAPTOP=m ++# CONFIG_PANASONIC_LAPTOP is not set ++# CONFIG_COMPAL_LAPTOP is not set ++# CONFIG_SONY_LAPTOP is not set ++# CONFIG_IDEAPAD_LAPTOP is not set ++CONFIG_SURFACE3_WMI=m ++CONFIG_THINKPAD_ACPI=m ++CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y ++# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set ++# CONFIG_THINKPAD_ACPI_DEBUG is not set ++# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set ++CONFIG_THINKPAD_ACPI_VIDEO=y ++CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y ++# CONFIG_SENSORS_HDAPS is not set ++# CONFIG_INTEL_MENLOW is not set ++# CONFIG_EEEPC_LAPTOP is not set 
++CONFIG_ASUS_WMI=m ++CONFIG_ASUS_NB_WMI=m ++# CONFIG_EEEPC_WMI is not set ++# CONFIG_ASUS_WIRELESS is not set ++CONFIG_ACPI_WMI=m ++CONFIG_WMI_BMOF=m ++CONFIG_INTEL_WMI_THUNDERBOLT=m ++# CONFIG_XIAOMI_WMI is not set ++CONFIG_MSI_WMI=m ++CONFIG_PEAQ_WMI=m ++# CONFIG_TOPSTAR_LAPTOP is not set ++CONFIG_ACPI_TOSHIBA=m ++# CONFIG_TOSHIBA_BT_RFKILL is not set ++# CONFIG_TOSHIBA_HAPS is not set ++CONFIG_TOSHIBA_WMI=m ++# CONFIG_ACPI_CMPC is not set ++CONFIG_INTEL_INT0002_VGPIO=m ++CONFIG_INTEL_HID_EVENT=m ++# CONFIG_INTEL_VBTN is not set ++CONFIG_INTEL_IPS=y ++# CONFIG_INTEL_PMC_CORE is not set ++# CONFIG_IBM_RTL is not set ++# CONFIG_SAMSUNG_LAPTOP is not set ++CONFIG_MXM_WMI=m ++# CONFIG_INTEL_OAKTRAIL is not set ++# CONFIG_SAMSUNG_Q10 is not set ++CONFIG_APPLE_GMUX=m ++CONFIG_INTEL_RST=m ++CONFIG_INTEL_SMARTCONNECT=m ++CONFIG_INTEL_PMC_IPC=m ++CONFIG_INTEL_BXTWC_PMIC_TMU=m ++# CONFIG_SURFACE_PRO3_BUTTON is not set ++# CONFIG_INTEL_PUNIT_IPC is not set ++# CONFIG_MLX_PLATFORM is not set ++CONFIG_INTEL_TURBO_MAX_3=y ++# CONFIG_TOUCHSCREEN_DMI is not set ++CONFIG_INTEL_CHTDC_TI_PWRBTN=m ++# CONFIG_I2C_MULTI_INSTANTIATE is not set ++# CONFIG_INTEL_ATOMISP2_PM is not set ++# CONFIG_HUAWEI_WMI is not set ++# CONFIG_PCENGINES_APU2 is not set ++ ++# ++# Intel Speed Select Technology interface support ++# ++# CONFIG_INTEL_SPEED_SELECT_INTERFACE is not set ++# end of Intel Speed Select Technology interface support ++ ++CONFIG_PMC_ATOM=y ++# CONFIG_MFD_CROS_EC is not set ++# CONFIG_CHROME_PLATFORMS is not set ++CONFIG_MELLANOX_PLATFORM=y ++CONFIG_MLXREG_HOTPLUG=m ++# CONFIG_MLXREG_IO is not set ++CONFIG_CLKDEV_LOOKUP=y ++CONFIG_HAVE_CLK_PREPARE=y ++CONFIG_COMMON_CLK=y ++ ++# ++# Common Clock Framework ++# ++# CONFIG_COMMON_CLK_MAX9485 is not set ++# CONFIG_COMMON_CLK_SI5341 is not set ++# CONFIG_COMMON_CLK_SI5351 is not set ++# CONFIG_COMMON_CLK_SI544 is not set ++# CONFIG_COMMON_CLK_CDCE706 is not set ++# CONFIG_COMMON_CLK_CS2000_CP is not set ++# CONFIG_COMMON_CLK_PWM is not 
set ++# end of Common Clock Framework ++ ++# CONFIG_HWSPINLOCK is not set ++ ++# ++# Clock Source drivers ++# ++CONFIG_CLKEVT_I8253=y ++CONFIG_CLKBLD_I8253=y ++# end of Clock Source drivers ++ ++CONFIG_MAILBOX=y ++CONFIG_PCC=y ++# CONFIG_ALTERA_MBOX is not set ++CONFIG_IOMMU_IOVA=y ++CONFIG_IOMMU_API=y ++CONFIG_IOMMU_SUPPORT=y ++ ++# ++# Generic IOMMU Pagetable Support ++# ++# end of Generic IOMMU Pagetable Support ++ ++# CONFIG_IOMMU_DEBUGFS is not set ++# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set ++# CONFIG_AMD_IOMMU is not set ++CONFIG_DMAR_TABLE=y ++CONFIG_INTEL_IOMMU=y ++# CONFIG_INTEL_IOMMU_SVM is not set ++CONFIG_INTEL_IOMMU_DEFAULT_ON=y ++CONFIG_INTEL_IOMMU_FLOPPY_WA=y ++CONFIG_IRQ_REMAP=y ++ ++# ++# Remoteproc drivers ++# ++# CONFIG_REMOTEPROC is not set ++# end of Remoteproc drivers ++ ++# ++# Rpmsg drivers ++# ++# CONFIG_RPMSG_QCOM_GLINK_RPM is not set ++# CONFIG_RPMSG_VIRTIO is not set ++# end of Rpmsg drivers ++ ++# CONFIG_SOUNDWIRE is not set ++ ++# ++# SOC (System On Chip) specific Drivers ++# ++ ++# ++# Amlogic SoC drivers ++# ++# end of Amlogic SoC drivers ++ ++# ++# Aspeed SoC drivers ++# ++# end of Aspeed SoC drivers ++ ++# ++# Broadcom SoC drivers ++# ++# end of Broadcom SoC drivers ++ ++# ++# NXP/Freescale QorIQ SoC drivers ++# ++# end of NXP/Freescale QorIQ SoC drivers ++ ++# ++# i.MX SoC drivers ++# ++# end of i.MX SoC drivers ++ ++# ++# Qualcomm SoC drivers ++# ++# end of Qualcomm SoC drivers ++ ++# CONFIG_SOC_TI is not set ++ ++# ++# Xilinx SoC drivers ++# ++CONFIG_XILINX_VCU=m ++# end of Xilinx SoC drivers ++# end of SOC (System On Chip) specific Drivers ++ ++CONFIG_PM_DEVFREQ=y ++ ++# ++# DEVFREQ Governors ++# ++CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m ++CONFIG_DEVFREQ_GOV_PERFORMANCE=y ++# CONFIG_DEVFREQ_GOV_POWERSAVE is not set ++# CONFIG_DEVFREQ_GOV_USERSPACE is not set ++# CONFIG_DEVFREQ_GOV_PASSIVE is not set ++ ++# ++# DEVFREQ Drivers ++# ++# CONFIG_PM_DEVFREQ_EVENT is not set ++CONFIG_EXTCON=y ++ ++# ++# Extcon Device Drivers ++# ++# 
CONFIG_EXTCON_ADC_JACK is not set ++# CONFIG_EXTCON_FSA9480 is not set ++# CONFIG_EXTCON_GPIO is not set ++CONFIG_EXTCON_INTEL_INT3496=m ++# CONFIG_EXTCON_MAX3355 is not set ++# CONFIG_EXTCON_PTN5150 is not set ++# CONFIG_EXTCON_RT8973A is not set ++# CONFIG_EXTCON_SM5502 is not set ++# CONFIG_EXTCON_USB_GPIO is not set ++# CONFIG_MEMORY is not set ++CONFIG_IIO=m ++CONFIG_IIO_BUFFER=y ++# CONFIG_IIO_BUFFER_CB is not set ++CONFIG_IIO_BUFFER_HW_CONSUMER=m ++CONFIG_IIO_KFIFO_BUF=m ++CONFIG_IIO_TRIGGERED_BUFFER=m ++# CONFIG_IIO_CONFIGFS is not set ++CONFIG_IIO_TRIGGER=y ++CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 ++# CONFIG_IIO_SW_DEVICE is not set ++# CONFIG_IIO_SW_TRIGGER is not set ++ ++# ++# Accelerometers ++# ++# CONFIG_ADIS16201 is not set ++# CONFIG_ADIS16209 is not set ++# CONFIG_ADXL345_I2C is not set ++# CONFIG_ADXL345_SPI is not set ++# CONFIG_ADXL372_SPI is not set ++# CONFIG_ADXL372_I2C is not set ++# CONFIG_BMA180 is not set ++# CONFIG_BMA220 is not set ++# CONFIG_BMC150_ACCEL is not set ++# CONFIG_DA280 is not set ++# CONFIG_DA311 is not set ++# CONFIG_DMARD09 is not set ++# CONFIG_DMARD10 is not set ++CONFIG_HID_SENSOR_ACCEL_3D=m ++# CONFIG_IIO_ST_ACCEL_3AXIS is not set ++# CONFIG_KXSD9 is not set ++# CONFIG_KXCJK1013 is not set ++# CONFIG_MC3230 is not set ++# CONFIG_MMA7455_I2C is not set ++# CONFIG_MMA7455_SPI is not set ++# CONFIG_MMA7660 is not set ++# CONFIG_MMA8452 is not set ++# CONFIG_MMA9551 is not set ++# CONFIG_MMA9553 is not set ++# CONFIG_MXC4005 is not set ++# CONFIG_MXC6255 is not set ++# CONFIG_SCA3000 is not set ++# CONFIG_STK8312 is not set ++# CONFIG_STK8BA50 is not set ++# end of Accelerometers ++ ++# ++# Analog to digital converters ++# ++# CONFIG_AD7124 is not set ++# CONFIG_AD7266 is not set ++# CONFIG_AD7291 is not set ++# CONFIG_AD7298 is not set ++# CONFIG_AD7476 is not set ++# CONFIG_AD7606_IFACE_PARALLEL is not set ++# CONFIG_AD7606_IFACE_SPI is not set ++# CONFIG_AD7766 is not set ++# CONFIG_AD7768_1 is not set ++# CONFIG_AD7780 
is not set ++# CONFIG_AD7791 is not set ++# CONFIG_AD7793 is not set ++# CONFIG_AD7887 is not set ++# CONFIG_AD7923 is not set ++# CONFIG_AD7949 is not set ++# CONFIG_AD799X is not set ++# CONFIG_HI8435 is not set ++# CONFIG_HX711 is not set ++# CONFIG_INA2XX_ADC is not set ++# CONFIG_LTC2471 is not set ++# CONFIG_LTC2485 is not set ++# CONFIG_LTC2497 is not set ++# CONFIG_MAX1027 is not set ++# CONFIG_MAX11100 is not set ++# CONFIG_MAX1118 is not set ++# CONFIG_MAX1363 is not set ++# CONFIG_MAX9611 is not set ++# CONFIG_MCP320X is not set ++# CONFIG_MCP3422 is not set ++# CONFIG_MCP3911 is not set ++# CONFIG_NAU7802 is not set ++# CONFIG_TI_ADC081C is not set ++# CONFIG_TI_ADC0832 is not set ++# CONFIG_TI_ADC084S021 is not set ++# CONFIG_TI_ADC12138 is not set ++# CONFIG_TI_ADC108S102 is not set ++# CONFIG_TI_ADC128S052 is not set ++# CONFIG_TI_ADC161S626 is not set ++# CONFIG_TI_ADS1015 is not set ++# CONFIG_TI_ADS7950 is not set ++# CONFIG_TI_TLC4541 is not set ++# CONFIG_XILINX_XADC is not set ++# end of Analog to digital converters ++ ++# ++# Analog Front Ends ++# ++# end of Analog Front Ends ++ ++# ++# Amplifiers ++# ++# CONFIG_AD8366 is not set ++# end of Amplifiers ++ ++# ++# Chemical Sensors ++# ++# CONFIG_ATLAS_PH_SENSOR is not set ++# CONFIG_BME680 is not set ++# CONFIG_CCS811 is not set ++# CONFIG_IAQCORE is not set ++# CONFIG_PMS7003 is not set ++# CONFIG_SENSIRION_SGP30 is not set ++# CONFIG_SPS30 is not set ++# CONFIG_VZ89X is not set ++# end of Chemical Sensors ++ ++# ++# Hid Sensor IIO Common ++# ++CONFIG_HID_SENSOR_IIO_COMMON=m ++CONFIG_HID_SENSOR_IIO_TRIGGER=m ++# end of Hid Sensor IIO Common ++ ++# ++# SSP Sensor Common ++# ++CONFIG_IIO_SSP_SENSORS_COMMONS=m ++CONFIG_IIO_SSP_SENSORHUB=m ++# end of SSP Sensor Common ++ ++CONFIG_IIO_ST_SENSORS_I2C=m ++CONFIG_IIO_ST_SENSORS_SPI=m ++CONFIG_IIO_ST_SENSORS_CORE=m ++ ++# ++# Digital to analog converters ++# ++# CONFIG_AD5064 is not set ++# CONFIG_AD5360 is not set ++# CONFIG_AD5380 is not set ++# 
CONFIG_AD5421 is not set ++# CONFIG_AD5446 is not set ++# CONFIG_AD5449 is not set ++# CONFIG_AD5592R is not set ++# CONFIG_AD5593R is not set ++# CONFIG_AD5504 is not set ++# CONFIG_AD5624R_SPI is not set ++# CONFIG_LTC1660 is not set ++# CONFIG_LTC2632 is not set ++# CONFIG_AD5686_SPI is not set ++# CONFIG_AD5696_I2C is not set ++# CONFIG_AD5755 is not set ++# CONFIG_AD5758 is not set ++# CONFIG_AD5761 is not set ++# CONFIG_AD5764 is not set ++# CONFIG_AD5791 is not set ++# CONFIG_AD7303 is not set ++# CONFIG_AD8801 is not set ++# CONFIG_DS4424 is not set ++# CONFIG_M62332 is not set ++# CONFIG_MAX517 is not set ++# CONFIG_MCP4725 is not set ++# CONFIG_MCP4922 is not set ++# CONFIG_TI_DAC082S085 is not set ++# CONFIG_TI_DAC5571 is not set ++# CONFIG_TI_DAC7311 is not set ++# CONFIG_TI_DAC7612 is not set ++# end of Digital to analog converters ++ ++# ++# IIO dummy driver ++# ++# end of IIO dummy driver ++ ++# ++# Frequency Synthesizers DDS/PLL ++# ++ ++# ++# Clock Generator/Distribution ++# ++# CONFIG_AD9523 is not set ++# end of Clock Generator/Distribution ++ ++# ++# Phase-Locked Loop (PLL) frequency synthesizers ++# ++# CONFIG_ADF4350 is not set ++# CONFIG_ADF4371 is not set ++# end of Phase-Locked Loop (PLL) frequency synthesizers ++# end of Frequency Synthesizers DDS/PLL ++ ++# ++# Digital gyroscope sensors ++# ++# CONFIG_ADIS16080 is not set ++# CONFIG_ADIS16130 is not set ++# CONFIG_ADIS16136 is not set ++# CONFIG_ADIS16260 is not set ++# CONFIG_ADXRS450 is not set ++# CONFIG_BMG160 is not set ++# CONFIG_FXAS21002C is not set ++CONFIG_HID_SENSOR_GYRO_3D=m ++# CONFIG_MPU3050_I2C is not set ++CONFIG_IIO_ST_GYRO_3AXIS=m ++CONFIG_IIO_ST_GYRO_I2C_3AXIS=m ++CONFIG_IIO_ST_GYRO_SPI_3AXIS=m ++# CONFIG_ITG3200 is not set ++# end of Digital gyroscope sensors ++ ++# ++# Health Sensors ++# ++ ++# ++# Heart Rate Monitors ++# ++# CONFIG_AFE4403 is not set ++# CONFIG_AFE4404 is not set ++# CONFIG_MAX30100 is not set ++# CONFIG_MAX30102 is not set ++# end of Heart Rate 
Monitors ++# end of Health Sensors ++ ++# ++# Humidity sensors ++# ++# CONFIG_AM2315 is not set ++# CONFIG_DHT11 is not set ++# CONFIG_HDC100X is not set ++# CONFIG_HID_SENSOR_HUMIDITY is not set ++# CONFIG_HTS221 is not set ++# CONFIG_HTU21 is not set ++# CONFIG_SI7005 is not set ++# CONFIG_SI7020 is not set ++# end of Humidity sensors ++ ++# ++# Inertial measurement units ++# ++# CONFIG_ADIS16400 is not set ++# CONFIG_ADIS16460 is not set ++# CONFIG_ADIS16480 is not set ++# CONFIG_BMI160_I2C is not set ++# CONFIG_BMI160_SPI is not set ++# CONFIG_KMX61 is not set ++# CONFIG_INV_MPU6050_I2C is not set ++# CONFIG_INV_MPU6050_SPI is not set ++# CONFIG_IIO_ST_LSM6DSX is not set ++# end of Inertial measurement units ++ ++# ++# Light sensors ++# ++CONFIG_ACPI_ALS=m ++# CONFIG_ADJD_S311 is not set ++# CONFIG_AL3320A is not set ++# CONFIG_APDS9300 is not set ++# CONFIG_APDS9960 is not set ++# CONFIG_BH1750 is not set ++# CONFIG_BH1780 is not set ++# CONFIG_CM32181 is not set ++# CONFIG_CM3232 is not set ++# CONFIG_CM3323 is not set ++# CONFIG_CM36651 is not set ++# CONFIG_GP2AP020A00F is not set ++# CONFIG_SENSORS_ISL29018 is not set ++CONFIG_SENSORS_ISL29028=m ++# CONFIG_ISL29125 is not set ++CONFIG_HID_SENSOR_ALS=m ++CONFIG_HID_SENSOR_PROX=m ++# CONFIG_JSA1212 is not set ++# CONFIG_RPR0521 is not set ++# CONFIG_LTR501 is not set ++CONFIG_LV0104CS=m ++# CONFIG_MAX44000 is not set ++# CONFIG_MAX44009 is not set ++# CONFIG_NOA1305 is not set ++# CONFIG_OPT3001 is not set ++# CONFIG_PA12203001 is not set ++# CONFIG_SI1133 is not set ++# CONFIG_SI1145 is not set ++# CONFIG_STK3310 is not set ++# CONFIG_ST_UVIS25 is not set ++# CONFIG_TCS3414 is not set ++# CONFIG_TCS3472 is not set ++# CONFIG_SENSORS_TSL2563 is not set ++# CONFIG_TSL2583 is not set ++# CONFIG_TSL2772 is not set ++# CONFIG_TSL4531 is not set ++# CONFIG_US5182D is not set ++# CONFIG_VCNL4000 is not set ++# CONFIG_VCNL4035 is not set ++# CONFIG_VEML6070 is not set ++# CONFIG_VL6180 is not set ++# 
CONFIG_ZOPT2201 is not set ++# end of Light sensors ++ ++# ++# Magnetometer sensors ++# ++CONFIG_AK8975=m ++CONFIG_AK09911=m ++# CONFIG_BMC150_MAGN_I2C is not set ++# CONFIG_BMC150_MAGN_SPI is not set ++# CONFIG_MAG3110 is not set ++CONFIG_HID_SENSOR_MAGNETOMETER_3D=m ++# CONFIG_MMC35240 is not set ++# CONFIG_IIO_ST_MAGN_3AXIS is not set ++# CONFIG_SENSORS_HMC5843_I2C is not set ++# CONFIG_SENSORS_HMC5843_SPI is not set ++# CONFIG_SENSORS_RM3100_I2C is not set ++# CONFIG_SENSORS_RM3100_SPI is not set ++# end of Magnetometer sensors ++ ++# ++# Multiplexers ++# ++# end of Multiplexers ++ ++# ++# Inclinometer sensors ++# ++CONFIG_HID_SENSOR_INCLINOMETER_3D=m ++CONFIG_HID_SENSOR_DEVICE_ROTATION=m ++# end of Inclinometer sensors ++ ++# ++# Triggers - standalone ++# ++# CONFIG_IIO_INTERRUPT_TRIGGER is not set ++CONFIG_IIO_SYSFS_TRIGGER=m ++# end of Triggers - standalone ++ ++# ++# Digital potentiometers ++# ++# CONFIG_AD5272 is not set ++# CONFIG_DS1803 is not set ++# CONFIG_MAX5432 is not set ++# CONFIG_MAX5481 is not set ++# CONFIG_MAX5487 is not set ++# CONFIG_MCP4018 is not set ++# CONFIG_MCP4131 is not set ++# CONFIG_MCP4531 is not set ++# CONFIG_MCP41010 is not set ++# CONFIG_TPL0102 is not set ++# end of Digital potentiometers ++ ++# ++# Digital potentiostats ++# ++# CONFIG_LMP91000 is not set ++# end of Digital potentiostats ++ ++# ++# Pressure sensors ++# ++# CONFIG_ABP060MG is not set ++# CONFIG_BMP280 is not set ++# CONFIG_DPS310 is not set ++CONFIG_HID_SENSOR_PRESS=m ++# CONFIG_HP03 is not set ++# CONFIG_MPL115_I2C is not set ++# CONFIG_MPL115_SPI is not set ++# CONFIG_MPL3115 is not set ++# CONFIG_MS5611 is not set ++# CONFIG_MS5637 is not set ++# CONFIG_IIO_ST_PRESS is not set ++# CONFIG_T5403 is not set ++# CONFIG_HP206C is not set ++# CONFIG_ZPA2326 is not set ++# end of Pressure sensors ++ ++# ++# Lightning sensors ++# ++# CONFIG_AS3935 is not set ++# end of Lightning sensors ++ ++# ++# Proximity and distance sensors ++# ++# CONFIG_ISL29501 is not set 
++# CONFIG_LIDAR_LITE_V2 is not set ++# CONFIG_MB1232 is not set ++# CONFIG_RFD77402 is not set ++CONFIG_SRF04=m ++# CONFIG_SX9500 is not set ++# CONFIG_SRF08 is not set ++# CONFIG_VL53L0X_I2C is not set ++# end of Proximity and distance sensors ++ ++# ++# Resolver to digital converters ++# ++# CONFIG_AD2S90 is not set ++# CONFIG_AD2S1200 is not set ++# end of Resolver to digital converters ++ ++# ++# Temperature sensors ++# ++# CONFIG_MAXIM_THERMOCOUPLE is not set ++CONFIG_HID_SENSOR_TEMP=m ++# CONFIG_MLX90614 is not set ++# CONFIG_MLX90632 is not set ++# CONFIG_TMP006 is not set ++# CONFIG_TMP007 is not set ++# CONFIG_TSYS01 is not set ++# CONFIG_TSYS02D is not set ++# CONFIG_MAX31856 is not set ++# end of Temperature sensors ++ ++# CONFIG_NTB is not set ++# CONFIG_VME_BUS is not set ++CONFIG_PWM=y ++CONFIG_PWM_SYSFS=y ++# CONFIG_PWM_CRC is not set ++CONFIG_PWM_LPSS=m ++CONFIG_PWM_LPSS_PCI=m ++CONFIG_PWM_LPSS_PLATFORM=m ++# CONFIG_PWM_PCA9685 is not set ++ ++# ++# IRQ chip support ++# ++# end of IRQ chip support ++ ++# CONFIG_IPACK_BUS is not set ++# CONFIG_RESET_CONTROLLER is not set ++ ++# ++# PHY Subsystem ++# ++CONFIG_GENERIC_PHY=y ++# CONFIG_BCM_KONA_USB2_PHY is not set ++# CONFIG_PHY_PXA_28NM_HSIC is not set ++# CONFIG_PHY_PXA_28NM_USB2 is not set ++CONFIG_PHY_CPCAP_USB=m ++# end of PHY Subsystem ++ ++CONFIG_POWERCAP=y ++CONFIG_INTEL_RAPL_CORE=y ++CONFIG_INTEL_RAPL=y ++# CONFIG_IDLE_INJECT is not set ++# CONFIG_MCB is not set ++ ++# ++# Performance monitor support ++# ++# end of Performance monitor support ++ ++# CONFIG_RAS is not set ++CONFIG_THUNDERBOLT=m ++ ++# ++# Android ++# ++# CONFIG_ANDROID is not set ++# end of Android ++ ++CONFIG_LIBNVDIMM=m ++CONFIG_BLK_DEV_PMEM=m ++CONFIG_ND_BLK=m ++CONFIG_ND_CLAIM=y ++CONFIG_ND_BTT=m ++CONFIG_BTT=y ++CONFIG_ND_PFN=m ++CONFIG_NVDIMM_PFN=y ++CONFIG_NVDIMM_DAX=y ++CONFIG_NVDIMM_KEYS=y ++CONFIG_DAX_DRIVER=y ++CONFIG_DAX=y ++CONFIG_DEV_DAX=y ++CONFIG_DEV_DAX_PMEM=m ++CONFIG_DEV_DAX_KMEM=y 
++CONFIG_DEV_DAX_PMEM_COMPAT=m ++CONFIG_NVMEM=y ++CONFIG_NVMEM_SYSFS=y ++ ++# ++# HW tracing support ++# ++# CONFIG_STM is not set ++# CONFIG_INTEL_TH is not set ++# end of HW tracing support ++ ++CONFIG_FPGA=y ++CONFIG_ALTERA_PR_IP_CORE=m ++CONFIG_FPGA_MGR_ALTERA_PS_SPI=m ++CONFIG_FPGA_MGR_ALTERA_CVP=m ++CONFIG_FPGA_MGR_XILINX_SPI=m ++# CONFIG_FPGA_MGR_MACHXO2_SPI is not set ++CONFIG_FPGA_BRIDGE=m ++# CONFIG_ALTERA_FREEZE_BRIDGE is not set ++CONFIG_XILINX_PR_DECOUPLER=m ++CONFIG_FPGA_REGION=m ++# CONFIG_FPGA_DFL is not set ++CONFIG_PM_OPP=y ++# CONFIG_UNISYS_VISORBUS is not set ++# CONFIG_SIOX is not set ++# CONFIG_SLIMBUS is not set ++# CONFIG_INTERCONNECT is not set ++# CONFIG_COUNTER is not set ++# end of Device Drivers ++ ++# ++# File systems ++# ++CONFIG_DCACHE_WORD_ACCESS=y ++CONFIG_VALIDATE_FS_PARSER=y ++CONFIG_FS_IOMAP=y ++# CONFIG_EXT2_FS is not set ++# CONFIG_EXT3_FS is not set ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_USE_FOR_EXT2=y ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++# CONFIG_EXT4_DEBUG is not set ++CONFIG_JBD2=y ++# CONFIG_JBD2_DEBUG is not set ++CONFIG_FS_MBCACHE=y ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++CONFIG_XFS_FS=y ++# CONFIG_XFS_QUOTA is not set ++CONFIG_XFS_POSIX_ACL=y ++CONFIG_XFS_RT=y ++# CONFIG_XFS_ONLINE_SCRUB is not set ++# CONFIG_XFS_WARN is not set ++# CONFIG_XFS_DEBUG is not set ++# CONFIG_GFS2_FS is not set ++# CONFIG_OCFS2_FS is not set ++CONFIG_BTRFS_FS=y ++CONFIG_BTRFS_FS_POSIX_ACL=y ++# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set ++# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set ++# CONFIG_BTRFS_DEBUG is not set ++# CONFIG_BTRFS_ASSERT is not set ++# CONFIG_BTRFS_FS_REF_VERIFY is not set ++# CONFIG_NILFS2_FS is not set ++# CONFIG_F2FS_FS is not set ++CONFIG_FS_DAX=y ++CONFIG_FS_DAX_PMD=y ++CONFIG_FS_POSIX_ACL=y ++CONFIG_EXPORTFS=y ++CONFIG_EXPORTFS_BLOCK_OPS=y ++CONFIG_FILE_LOCKING=y ++CONFIG_MANDATORY_FILE_LOCKING=y ++# CONFIG_FS_ENCRYPTION is not set ++# CONFIG_FS_VERITY is not set 
++CONFIG_FSNOTIFY=y ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY_USER=y ++CONFIG_FANOTIFY=y ++# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set ++# CONFIG_QUOTA is not set ++CONFIG_AUTOFS4_FS=y ++CONFIG_AUTOFS_FS=y ++CONFIG_FUSE_FS=m ++# CONFIG_CUSE is not set ++# CONFIG_VIRTIO_FS is not set ++CONFIG_OVERLAY_FS=y ++CONFIG_OVERLAY_FS_REDIRECT_DIR=y ++CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y ++CONFIG_OVERLAY_FS_INDEX=y ++# CONFIG_OVERLAY_FS_NFS_EXPORT is not set ++# CONFIG_OVERLAY_FS_XINO_AUTO is not set ++# CONFIG_OVERLAY_FS_METACOPY is not set ++ ++# ++# Caches ++# ++CONFIG_FSCACHE=m ++# CONFIG_FSCACHE_STATS is not set ++# CONFIG_FSCACHE_HISTOGRAM is not set ++# CONFIG_FSCACHE_DEBUG is not set ++# CONFIG_FSCACHE_OBJECT_LIST is not set ++CONFIG_CACHEFILES=m ++# CONFIG_CACHEFILES_DEBUG is not set ++# CONFIG_CACHEFILES_HISTOGRAM is not set ++# end of Caches ++ ++# ++# CD-ROM/DVD Filesystems ++# ++CONFIG_ISO9660_FS=m ++# CONFIG_JOLIET is not set ++# CONFIG_ZISOFS is not set ++# CONFIG_UDF_FS is not set ++# end of CD-ROM/DVD Filesystems ++ ++# ++# DOS/FAT/NT Filesystems ++# ++CONFIG_FAT_FS=y ++# CONFIG_MSDOS_FS is not set ++CONFIG_VFAT_FS=y ++CONFIG_FAT_DEFAULT_CODEPAGE=437 ++CONFIG_FAT_DEFAULT_IOCHARSET="ascii" ++# CONFIG_FAT_DEFAULT_UTF8 is not set ++# CONFIG_NTFS_FS is not set ++# end of DOS/FAT/NT Filesystems ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++# CONFIG_PROC_KCORE is not set ++CONFIG_PROC_SYSCTL=y ++CONFIG_PROC_PAGE_MONITOR=y ++CONFIG_PROC_CHILDREN=y ++CONFIG_PROC_PID_ARCH_STATUS=y ++CONFIG_KERNFS=y ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_TMPFS_XATTR=y ++CONFIG_HUGETLBFS=y ++CONFIG_HUGETLB_PAGE=y ++CONFIG_MEMFD_CREATE=y ++CONFIG_ARCH_HAS_GIGANTIC_PAGE=y ++CONFIG_CONFIGFS_FS=y ++CONFIG_EFIVAR_FS=y ++# end of Pseudo filesystems ++ ++CONFIG_MISC_FILESYSTEMS=y ++# CONFIG_ORANGEFS_FS is not set ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_ECRYPT_FS is not set ++# CONFIG_HFS_FS is not set ++# 
CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++# CONFIG_JFFS2_FS is not set ++# CONFIG_CRAMFS is not set ++CONFIG_SQUASHFS=y ++CONFIG_SQUASHFS_FILE_CACHE=y ++# CONFIG_SQUASHFS_FILE_DIRECT is not set ++CONFIG_SQUASHFS_DECOMP_SINGLE=y ++# CONFIG_SQUASHFS_DECOMP_MULTI is not set ++# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set ++# CONFIG_SQUASHFS_XATTR is not set ++CONFIG_SQUASHFS_ZLIB=y ++# CONFIG_SQUASHFS_LZ4 is not set ++# CONFIG_SQUASHFS_LZO is not set ++# CONFIG_SQUASHFS_XZ is not set ++# CONFIG_SQUASHFS_ZSTD is not set ++# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set ++# CONFIG_SQUASHFS_EMBEDDED is not set ++CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_QNX6FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++CONFIG_PSTORE=y ++CONFIG_PSTORE_DEFLATE_COMPRESS=m ++# CONFIG_PSTORE_LZO_COMPRESS is not set ++# CONFIG_PSTORE_LZ4_COMPRESS is not set ++CONFIG_PSTORE_LZ4HC_COMPRESS=m ++# CONFIG_PSTORE_842_COMPRESS is not set ++# CONFIG_PSTORE_ZSTD_COMPRESS is not set ++CONFIG_PSTORE_COMPRESS=y ++CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y ++# CONFIG_PSTORE_LZ4HC_COMPRESS_DEFAULT is not set ++CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" ++# CONFIG_PSTORE_CONSOLE is not set ++# CONFIG_PSTORE_PMSG is not set ++CONFIG_PSTORE_RAM=m ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++# CONFIG_EROFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++CONFIG_NFS_FS=m ++CONFIG_NFS_V2=m ++CONFIG_NFS_V3=m ++# CONFIG_NFS_V3_ACL is not set ++CONFIG_NFS_V4=m ++# CONFIG_NFS_SWAP is not set ++CONFIG_NFS_V4_1=y ++CONFIG_NFS_V4_2=y ++CONFIG_PNFS_FILE_LAYOUT=m ++CONFIG_PNFS_BLOCK=m ++CONFIG_PNFS_FLEXFILE_LAYOUT=m ++CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="clearlinux.org" ++# CONFIG_NFS_V4_1_MIGRATION is not set ++CONFIG_NFS_V4_SECURITY_LABEL=y ++# CONFIG_NFS_FSCACHE is not 
set ++# CONFIG_NFS_USE_LEGACY_DNS is not set ++CONFIG_NFS_USE_KERNEL_DNS=y ++CONFIG_NFSD=m ++CONFIG_NFSD_V3=y ++# CONFIG_NFSD_V3_ACL is not set ++CONFIG_NFSD_V4=y ++CONFIG_NFSD_PNFS=y ++CONFIG_NFSD_BLOCKLAYOUT=y ++CONFIG_NFSD_SCSILAYOUT=y ++# CONFIG_NFSD_FLEXFILELAYOUT is not set ++# CONFIG_NFSD_V4_SECURITY_LABEL is not set ++CONFIG_GRACE_PERIOD=m ++CONFIG_LOCKD=m ++CONFIG_LOCKD_V4=y ++CONFIG_NFS_COMMON=y ++CONFIG_SUNRPC=m ++CONFIG_SUNRPC_GSS=m ++CONFIG_SUNRPC_BACKCHANNEL=y ++# CONFIG_RPCSEC_GSS_KRB5 is not set ++# CONFIG_SUNRPC_DEBUG is not set ++CONFIG_SUNRPC_XPRT_RDMA=m ++CONFIG_CEPH_FS=m ++CONFIG_CEPH_FSCACHE=y ++CONFIG_CEPH_FS_POSIX_ACL=y ++# CONFIG_CEPH_FS_SECURITY_LABEL is not set ++CONFIG_CIFS=m ++# CONFIG_CIFS_STATS2 is not set ++CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y ++CONFIG_CIFS_WEAK_PW_HASH=y ++# CONFIG_CIFS_UPCALL is not set ++# CONFIG_CIFS_XATTR is not set ++# CONFIG_CIFS_DEBUG is not set ++CONFIG_CIFS_DFS_UPCALL=y ++# CONFIG_CIFS_SMB_DIRECT is not set ++# CONFIG_CIFS_FSCACHE is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++CONFIG_9P_FS=m ++# CONFIG_9P_FSCACHE is not set ++CONFIG_9P_FS_POSIX_ACL=y ++# CONFIG_9P_FS_SECURITY is not set ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="utf8" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m 
++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++CONFIG_NLS_MAC_ROMAN=m ++CONFIG_NLS_MAC_CELTIC=m ++CONFIG_NLS_MAC_CENTEURO=m ++CONFIG_NLS_MAC_CROATIAN=m ++CONFIG_NLS_MAC_CYRILLIC=m ++CONFIG_NLS_MAC_GAELIC=m ++CONFIG_NLS_MAC_GREEK=m ++CONFIG_NLS_MAC_ICELAND=m ++CONFIG_NLS_MAC_INUIT=m ++CONFIG_NLS_MAC_ROMANIAN=m ++CONFIG_NLS_MAC_TURKISH=m ++CONFIG_NLS_UTF8=y ++# CONFIG_DLM is not set ++# CONFIG_UNICODE is not set ++# end of File systems ++ ++# ++# Security options ++# ++CONFIG_KEYS=y ++CONFIG_KEYS_COMPAT=y ++# CONFIG_KEYS_REQUEST_CACHE is not set ++# CONFIG_PERSISTENT_KEYRINGS is not set ++# CONFIG_BIG_KEYS is not set ++CONFIG_ENCRYPTED_KEYS=m ++# CONFIG_KEY_DH_OPERATIONS is not set ++# CONFIG_SECURITY_DMESG_RESTRICT is not set ++CONFIG_SECURITY=y ++CONFIG_SECURITYFS=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_PAGE_TABLE_ISOLATION=y ++# CONFIG_SECURITY_INFINIBAND is not set ++# CONFIG_SECURITY_NETWORK_XFRM is not set ++CONFIG_SECURITY_PATH=y ++# CONFIG_INTEL_TXT is not set ++CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y ++CONFIG_HARDENED_USERCOPY=y ++# CONFIG_HARDENED_USERCOPY_FALLBACK is not set ++# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set ++CONFIG_FORTIFY_SOURCE=y ++# CONFIG_STATIC_USERMODEHELPER is not set ++# CONFIG_SECURITY_SMACK is not set ++# CONFIG_SECURITY_TOMOYO is not set ++# CONFIG_SECURITY_APPARMOR is not set ++# CONFIG_SECURITY_LOADPIN is not set ++# CONFIG_SECURITY_YAMA is not set ++# CONFIG_SECURITY_SAFESETID is not set ++# CONFIG_SECURITY_LOCKDOWN_LSM is not set ++# CONFIG_INTEGRITY is not set ++CONFIG_DEFAULT_SECURITY_DAC=y ++CONFIG_LSM="yama,loadpin,safesetid,integrity" ++ ++# ++# Kernel hardening options ++# ++ ++# ++# Memory initialization ++# ++CONFIG_INIT_STACK_NONE=y ++# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set ++# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set ++# end of Memory 
initialization ++# end of Kernel hardening options ++# end of Security options ++ ++CONFIG_XOR_BLOCKS=y ++CONFIG_ASYNC_CORE=m ++CONFIG_ASYNC_MEMCPY=m ++CONFIG_ASYNC_XOR=m ++CONFIG_ASYNC_PQ=m ++CONFIG_ASYNC_RAID6_RECOV=m ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++# CONFIG_CRYPTO_FIPS is not set ++CONFIG_CRYPTO_ALGAPI=y ++CONFIG_CRYPTO_ALGAPI2=y ++CONFIG_CRYPTO_AEAD=y ++CONFIG_CRYPTO_AEAD2=y ++CONFIG_CRYPTO_BLKCIPHER=y ++CONFIG_CRYPTO_BLKCIPHER2=y ++CONFIG_CRYPTO_HASH=y ++CONFIG_CRYPTO_HASH2=y ++CONFIG_CRYPTO_RNG=y ++CONFIG_CRYPTO_RNG2=y ++CONFIG_CRYPTO_RNG_DEFAULT=y ++CONFIG_CRYPTO_AKCIPHER2=y ++CONFIG_CRYPTO_AKCIPHER=y ++CONFIG_CRYPTO_KPP2=y ++CONFIG_CRYPTO_KPP=m ++CONFIG_CRYPTO_ACOMP2=y ++CONFIG_CRYPTO_MANAGER=y ++CONFIG_CRYPTO_MANAGER2=y ++# CONFIG_CRYPTO_USER is not set ++# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set ++# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set ++CONFIG_CRYPTO_GF128MUL=y ++CONFIG_CRYPTO_NULL=y ++CONFIG_CRYPTO_NULL2=y ++CONFIG_CRYPTO_PCRYPT=m ++CONFIG_CRYPTO_CRYPTD=y ++CONFIG_CRYPTO_AUTHENC=y ++CONFIG_CRYPTO_TEST=m ++CONFIG_CRYPTO_SIMD=y ++CONFIG_CRYPTO_GLUE_HELPER_X86=y ++CONFIG_CRYPTO_ENGINE=m ++ ++# ++# Public-key cryptography ++# ++CONFIG_CRYPTO_RSA=y ++CONFIG_CRYPTO_DH=m ++CONFIG_CRYPTO_ECC=m ++CONFIG_CRYPTO_ECDH=m ++# CONFIG_CRYPTO_ECRDSA is not set ++ ++# ++# Authenticated Encryption with Associated Data ++# ++CONFIG_CRYPTO_CCM=y ++CONFIG_CRYPTO_GCM=m ++# CONFIG_CRYPTO_CHACHA20POLY1305 is not set ++# CONFIG_CRYPTO_AEGIS128 is not set ++# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set ++CONFIG_CRYPTO_SEQIV=y ++CONFIG_CRYPTO_ECHAINIV=y ++ ++# ++# Block modes ++# ++CONFIG_CRYPTO_CBC=y ++CONFIG_CRYPTO_CFB=m ++CONFIG_CRYPTO_CTR=y ++CONFIG_CRYPTO_CTS=m ++CONFIG_CRYPTO_ECB=y ++CONFIG_CRYPTO_LRW=y ++# CONFIG_CRYPTO_OFB is not set ++CONFIG_CRYPTO_PCBC=m ++CONFIG_CRYPTO_XTS=y ++CONFIG_CRYPTO_KEYWRAP=m ++# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set ++# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set ++# CONFIG_CRYPTO_ADIANTUM is not set 
++CONFIG_CRYPTO_ESSIV=y ++ ++# ++# Hash modes ++# ++CONFIG_CRYPTO_CMAC=m ++CONFIG_CRYPTO_HMAC=y ++CONFIG_CRYPTO_XCBC=m ++CONFIG_CRYPTO_VMAC=m ++ ++# ++# Digest ++# ++CONFIG_CRYPTO_CRC32C=y ++CONFIG_CRYPTO_CRC32C_INTEL=y ++CONFIG_CRYPTO_CRC32=m ++# CONFIG_CRYPTO_CRC32_PCLMUL is not set ++# CONFIG_CRYPTO_XXHASH is not set ++CONFIG_CRYPTO_CRCT10DIF=y ++# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set ++CONFIG_CRYPTO_GHASH=m ++# CONFIG_CRYPTO_POLY1305 is not set ++# CONFIG_CRYPTO_POLY1305_X86_64 is not set ++CONFIG_CRYPTO_MD4=m ++CONFIG_CRYPTO_MD5=y ++CONFIG_CRYPTO_MICHAEL_MIC=m ++# CONFIG_CRYPTO_RMD128 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_RMD256 is not set ++# CONFIG_CRYPTO_RMD320 is not set ++CONFIG_CRYPTO_SHA1=y ++# CONFIG_CRYPTO_SHA1_SSSE3 is not set ++CONFIG_CRYPTO_SHA256_SSSE3=y ++CONFIG_CRYPTO_SHA512_SSSE3=y ++CONFIG_CRYPTO_LIB_SHA256=y ++CONFIG_CRYPTO_SHA256=y ++CONFIG_CRYPTO_SHA512=y ++# CONFIG_CRYPTO_SHA3 is not set ++# CONFIG_CRYPTO_SM3 is not set ++# CONFIG_CRYPTO_STREEBOG is not set ++CONFIG_CRYPTO_TGR192=m ++CONFIG_CRYPTO_WP512=m ++# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set ++ ++# ++# Ciphers ++# ++CONFIG_CRYPTO_LIB_AES=y ++CONFIG_CRYPTO_AES=y ++CONFIG_CRYPTO_AES_TI=y ++CONFIG_CRYPTO_AES_NI_INTEL=y ++CONFIG_CRYPTO_ANUBIS=m ++CONFIG_CRYPTO_LIB_ARC4=y ++CONFIG_CRYPTO_ARC4=y ++CONFIG_CRYPTO_BLOWFISH=m ++CONFIG_CRYPTO_BLOWFISH_COMMON=m ++CONFIG_CRYPTO_BLOWFISH_X86_64=m ++CONFIG_CRYPTO_CAMELLIA=m ++CONFIG_CRYPTO_CAMELLIA_X86_64=y ++CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=y ++CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=y ++CONFIG_CRYPTO_CAST_COMMON=m ++CONFIG_CRYPTO_CAST5=m ++CONFIG_CRYPTO_CAST5_AVX_X86_64=m ++CONFIG_CRYPTO_CAST6=m ++CONFIG_CRYPTO_CAST6_AVX_X86_64=m ++CONFIG_CRYPTO_LIB_DES=m ++CONFIG_CRYPTO_DES=m ++CONFIG_CRYPTO_DES3_EDE_X86_64=m ++CONFIG_CRYPTO_FCRYPT=m ++CONFIG_CRYPTO_KHAZAD=m ++CONFIG_CRYPTO_SALSA20=m ++CONFIG_CRYPTO_CHACHA20=m ++# CONFIG_CRYPTO_CHACHA20_X86_64 is not set ++CONFIG_CRYPTO_SEED=m 
++CONFIG_CRYPTO_SERPENT=y ++# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set ++CONFIG_CRYPTO_SERPENT_AVX_X86_64=y ++CONFIG_CRYPTO_SERPENT_AVX2_X86_64=y ++# CONFIG_CRYPTO_SM4 is not set ++CONFIG_CRYPTO_TEA=m ++CONFIG_CRYPTO_TWOFISH=m ++CONFIG_CRYPTO_TWOFISH_COMMON=m ++# CONFIG_CRYPTO_TWOFISH_X86_64 is not set ++# CONFIG_CRYPTO_TWOFISH_X86_64_3WAY is not set ++# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set ++ ++# ++# Compression ++# ++CONFIG_CRYPTO_DEFLATE=y ++CONFIG_CRYPTO_LZO=m ++# CONFIG_CRYPTO_842 is not set ++# CONFIG_CRYPTO_LZ4 is not set ++CONFIG_CRYPTO_LZ4HC=m ++# CONFIG_CRYPTO_ZSTD is not set ++ ++# ++# Random Number Generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRYPTO_DRBG_MENU=y ++CONFIG_CRYPTO_DRBG_HMAC=y ++# CONFIG_CRYPTO_DRBG_HASH is not set ++# CONFIG_CRYPTO_DRBG_CTR is not set ++CONFIG_CRYPTO_DRBG=y ++CONFIG_CRYPTO_JITTERENTROPY=y ++CONFIG_CRYPTO_USER_API=y ++CONFIG_CRYPTO_USER_API_HASH=y ++CONFIG_CRYPTO_USER_API_SKCIPHER=y ++# CONFIG_CRYPTO_USER_API_RNG is not set ++# CONFIG_CRYPTO_USER_API_AEAD is not set ++CONFIG_CRYPTO_HASH_INFO=y ++CONFIG_CRYPTO_HW=y ++# CONFIG_CRYPTO_DEV_PADLOCK is not set ++# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set ++# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set ++# CONFIG_CRYPTO_DEV_CCP is not set ++CONFIG_CRYPTO_DEV_QAT=m ++CONFIG_CRYPTO_DEV_QAT_DH895xCC=m ++# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set ++# CONFIG_CRYPTO_DEV_QAT_C62X is not set ++# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set ++# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set ++# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set ++# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set ++CONFIG_CRYPTO_DEV_VIRTIO=m ++# CONFIG_CRYPTO_DEV_SAFEXCEL is not set ++CONFIG_ASYMMETRIC_KEY_TYPE=y ++CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y ++CONFIG_X509_CERTIFICATE_PARSER=y ++# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set ++CONFIG_PKCS7_MESSAGE_PARSER=y ++# CONFIG_PKCS7_TEST_KEY is not set ++# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set ++ ++# ++# Certificates for signature checking ++# 
++CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" ++CONFIG_SYSTEM_TRUSTED_KEYRING=y ++CONFIG_SYSTEM_TRUSTED_KEYS="" ++# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set ++# CONFIG_SECONDARY_TRUSTED_KEYRING is not set ++# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set ++# end of Certificates for signature checking ++ ++CONFIG_BINARY_PRINTF=y ++ ++# ++# Library routines ++# ++CONFIG_RAID6_PQ=y ++CONFIG_RAID6_PQ_BENCHMARK=y ++# CONFIG_PACKING is not set ++CONFIG_BITREVERSE=y ++CONFIG_GENERIC_STRNCPY_FROM_USER=y ++CONFIG_GENERIC_STRNLEN_USER=y ++CONFIG_GENERIC_NET_UTILS=y ++CONFIG_GENERIC_FIND_FIRST_BIT=y ++CONFIG_CORDIC=y ++CONFIG_RATIONAL=y ++CONFIG_GENERIC_PCI_IOMAP=y ++CONFIG_GENERIC_IOMAP=y ++CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y ++CONFIG_ARCH_HAS_FAST_MULTIPLIER=y ++CONFIG_CRC_CCITT=y ++CONFIG_CRC16=y ++CONFIG_CRC_T10DIF=y ++CONFIG_CRC_ITU_T=m ++CONFIG_CRC32=y ++# CONFIG_CRC32_SELFTEST is not set ++CONFIG_CRC32_SLICEBY8=y ++# CONFIG_CRC32_SLICEBY4 is not set ++# CONFIG_CRC32_SARWATE is not set ++# CONFIG_CRC32_BIT is not set ++CONFIG_CRC64=m ++CONFIG_CRC4=m ++# CONFIG_CRC7 is not set ++CONFIG_LIBCRC32C=y ++CONFIG_CRC8=y ++CONFIG_XXHASH=y ++# CONFIG_RANDOM32_SELFTEST is not set ++CONFIG_ZLIB_INFLATE=y ++CONFIG_ZLIB_DEFLATE=y ++CONFIG_LZO_COMPRESS=y ++CONFIG_LZO_DECOMPRESS=y ++CONFIG_LZ4HC_COMPRESS=m ++CONFIG_LZ4_DECOMPRESS=y ++CONFIG_ZSTD_COMPRESS=y ++CONFIG_ZSTD_DECOMPRESS=y ++CONFIG_XZ_DEC=y ++CONFIG_XZ_DEC_X86=y ++# CONFIG_XZ_DEC_POWERPC is not set ++# CONFIG_XZ_DEC_IA64 is not set ++# CONFIG_XZ_DEC_ARM is not set ++# CONFIG_XZ_DEC_ARMTHUMB is not set ++# CONFIG_XZ_DEC_SPARC is not set ++CONFIG_XZ_DEC_BCJ=y ++# CONFIG_XZ_DEC_TEST is not set ++CONFIG_DECOMPRESS_GZIP=y ++CONFIG_DECOMPRESS_XZ=y ++CONFIG_DECOMPRESS_LZO=y ++CONFIG_DECOMPRESS_LZ4=y ++CONFIG_GENERIC_ALLOCATOR=y ++CONFIG_REED_SOLOMON=m ++CONFIG_REED_SOLOMON_ENC8=y ++CONFIG_REED_SOLOMON_DEC8=y ++CONFIG_TEXTSEARCH=y ++CONFIG_TEXTSEARCH_KMP=m ++CONFIG_TEXTSEARCH_BM=m ++CONFIG_TEXTSEARCH_FSM=m ++CONFIG_INTERVAL_TREE=y 
++CONFIG_XARRAY_MULTI=y ++CONFIG_ASSOCIATIVE_ARRAY=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_IOPORT_MAP=y ++CONFIG_HAS_DMA=y ++CONFIG_NEED_SG_DMA_LENGTH=y ++CONFIG_NEED_DMA_MAP_STATE=y ++CONFIG_ARCH_DMA_ADDR_T_64BIT=y ++CONFIG_DMA_VIRT_OPS=y ++CONFIG_SWIOTLB=y ++# CONFIG_DMA_API_DEBUG is not set ++CONFIG_SGL_ALLOC=y ++CONFIG_CHECK_SIGNATURE=y ++CONFIG_CPU_RMAP=y ++CONFIG_DQL=y ++CONFIG_GLOB=y ++# CONFIG_GLOB_SELFTEST is not set ++CONFIG_NLATTR=y ++CONFIG_CLZ_TAB=y ++CONFIG_IRQ_POLL=y ++CONFIG_MPILIB=y ++CONFIG_DIMLIB=y ++CONFIG_OID_REGISTRY=y ++CONFIG_UCS2_STRING=y ++CONFIG_HAVE_GENERIC_VDSO=y ++CONFIG_GENERIC_GETTIMEOFDAY=y ++CONFIG_FONT_SUPPORT=y ++CONFIG_FONTS=y ++CONFIG_FONT_8x8=y ++CONFIG_FONT_8x16=y ++# CONFIG_FONT_6x11 is not set ++CONFIG_FONT_7x14=y ++# CONFIG_FONT_PEARL_8x8 is not set ++# CONFIG_FONT_ACORN_8x8 is not set ++# CONFIG_FONT_MINI_4x6 is not set ++# CONFIG_FONT_6x10 is not set ++CONFIG_FONT_10x18=y ++# CONFIG_FONT_SUN8x16 is not set ++# CONFIG_FONT_SUN12x22 is not set ++# CONFIG_FONT_TER16x32 is not set ++CONFIG_SG_POOL=y ++CONFIG_ARCH_HAS_PMEM_API=y ++CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y ++CONFIG_ARCH_HAS_UACCESS_MCSAFE=y ++CONFIG_ARCH_STACKWALK=y ++CONFIG_SBITMAP=y ++# CONFIG_STRING_SELFTEST is not set ++# end of Library routines ++ ++# ++# Kernel hacking ++# ++ ++# ++# printk and dmesg options ++# ++CONFIG_PRINTK_TIME=y ++# CONFIG_PRINTK_CALLER is not set ++CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 ++CONFIG_CONSOLE_LOGLEVEL_QUIET=4 ++CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 ++CONFIG_BOOT_PRINTK_DELAY=y ++CONFIG_DYNAMIC_DEBUG=y ++# end of printk and dmesg options ++ ++# ++# Compile-time checks and compiler options ++# ++# CONFIG_DEBUG_INFO is not set ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_WARN=1024 ++# CONFIG_STRIP_ASM_SYMS is not set ++# CONFIG_READABLE_ASM is not set ++CONFIG_DEBUG_FS=y ++# CONFIG_HEADERS_INSTALL is not set ++CONFIG_OPTIMIZE_INLINING=y ++CONFIG_DEBUG_SECTION_MISMATCH=y ++# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 
++CONFIG_STACK_VALIDATION=y ++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set ++# end of Compile-time checks and compiler options ++ ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 ++# CONFIG_MAGIC_SYSRQ_SERIAL is not set ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_MISC=y ++ ++# ++# Memory Debugging ++# ++# CONFIG_PAGE_EXTENSION is not set ++# CONFIG_DEBUG_PAGEALLOC is not set ++# CONFIG_PAGE_OWNER is not set ++# CONFIG_PAGE_POISONING is not set ++# CONFIG_DEBUG_PAGE_REF is not set ++# CONFIG_DEBUG_RODATA_TEST is not set ++# CONFIG_DEBUG_OBJECTS is not set ++# CONFIG_DEBUG_SLAB is not set ++CONFIG_HAVE_DEBUG_KMEMLEAK=y ++# CONFIG_DEBUG_KMEMLEAK is not set ++# CONFIG_DEBUG_STACK_USAGE is not set ++# CONFIG_DEBUG_VM is not set ++CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y ++# CONFIG_DEBUG_VIRTUAL is not set ++# CONFIG_DEBUG_MEMORY_INIT is not set ++# CONFIG_DEBUG_PER_CPU_MAPS is not set ++CONFIG_HAVE_ARCH_KASAN=y ++CONFIG_CC_HAS_KASAN_GENERIC=y ++# CONFIG_KASAN is not set ++CONFIG_KASAN_STACK=1 ++# end of Memory Debugging ++ ++CONFIG_ARCH_HAS_KCOV=y ++CONFIG_CC_HAS_SANCOV_TRACE_PC=y ++# CONFIG_KCOV is not set ++CONFIG_DEBUG_SHIRQ=y ++ ++# ++# Debug Lockups and Hangs ++# ++CONFIG_LOCKUP_DETECTOR=y ++CONFIG_SOFTLOCKUP_DETECTOR=y ++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set ++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 ++CONFIG_HARDLOCKUP_DETECTOR_PERF=y ++CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y ++CONFIG_HARDLOCKUP_DETECTOR=y ++# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set ++CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 ++# CONFIG_DETECT_HUNG_TASK is not set ++# CONFIG_WQ_WATCHDOG is not set ++# end of Debug Lockups and Hangs ++ ++# CONFIG_PANIC_ON_OOPS is not set ++CONFIG_PANIC_ON_OOPS_VALUE=0 ++CONFIG_PANIC_TIMEOUT=0 ++CONFIG_SCHED_DEBUG=y ++CONFIG_SCHED_INFO=y ++CONFIG_SCHEDSTATS=y ++CONFIG_SCHED_STACK_END_CHECK=y ++# CONFIG_DEBUG_TIMEKEEPING is not set ++ ++# ++# Lock Debugging (spinlocks, mutexes, etc...) 
++# ++CONFIG_LOCK_DEBUGGING_SUPPORT=y ++# CONFIG_PROVE_LOCKING is not set ++# CONFIG_LOCK_STAT is not set ++# CONFIG_DEBUG_RT_MUTEXES is not set ++# CONFIG_DEBUG_SPINLOCK is not set ++# CONFIG_DEBUG_MUTEXES is not set ++# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set ++# CONFIG_DEBUG_RWSEMS is not set ++# CONFIG_DEBUG_LOCK_ALLOC is not set ++# CONFIG_DEBUG_ATOMIC_SLEEP is not set ++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set ++# CONFIG_LOCK_TORTURE_TEST is not set ++# CONFIG_WW_MUTEX_SELFTEST is not set ++# end of Lock Debugging (spinlocks, mutexes, etc...) ++ ++CONFIG_STACKTRACE=y ++# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set ++# CONFIG_DEBUG_KOBJECT is not set ++CONFIG_DEBUG_BUGVERBOSE=y ++CONFIG_DEBUG_LIST=y ++# CONFIG_DEBUG_PLIST is not set ++CONFIG_DEBUG_SG=y ++CONFIG_DEBUG_NOTIFIERS=y ++CONFIG_DEBUG_CREDENTIALS=y ++ ++# ++# RCU Debugging ++# ++# CONFIG_RCU_PERF_TEST is not set ++# CONFIG_RCU_TORTURE_TEST is not set ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++# CONFIG_RCU_TRACE is not set ++# CONFIG_RCU_EQS_DEBUG is not set ++# end of RCU Debugging ++ ++# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set ++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set ++# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set ++# CONFIG_NOTIFIER_ERROR_INJECTION is not set ++# CONFIG_FAULT_INJECTION is not set ++CONFIG_LATENCYTOP=y ++CONFIG_USER_STACKTRACE_SUPPORT=y ++CONFIG_NOP_TRACER=y ++CONFIG_HAVE_FUNCTION_TRACER=y ++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y ++CONFIG_HAVE_DYNAMIC_FTRACE=y ++CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y ++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y ++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y ++CONFIG_HAVE_FENTRY=y ++CONFIG_HAVE_C_RECORDMCOUNT=y ++CONFIG_TRACER_MAX_TRACE=y ++CONFIG_TRACE_CLOCK=y ++CONFIG_RING_BUFFER=y ++CONFIG_EVENT_TRACING=y ++CONFIG_CONTEXT_SWITCH_TRACER=y ++CONFIG_TRACING=y ++CONFIG_GENERIC_TRACER=y ++CONFIG_TRACING_SUPPORT=y ++CONFIG_FTRACE=y ++# CONFIG_FUNCTION_TRACER is not set ++# CONFIG_PREEMPTIRQ_EVENTS is not set ++# CONFIG_IRQSOFF_TRACER is not set ++CONFIG_SCHED_TRACER=y ++# 
CONFIG_HWLAT_TRACER is not set ++CONFIG_FTRACE_SYSCALLS=y ++CONFIG_TRACER_SNAPSHOT=y ++# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set ++CONFIG_BRANCH_PROFILE_NONE=y ++# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set ++# CONFIG_STACK_TRACER is not set ++CONFIG_BLK_DEV_IO_TRACE=y ++# CONFIG_UPROBE_EVENTS is not set ++# CONFIG_FTRACE_STARTUP_TEST is not set ++# CONFIG_MMIOTRACE is not set ++# CONFIG_HIST_TRIGGERS is not set ++# CONFIG_TRACEPOINT_BENCHMARK is not set ++# CONFIG_RING_BUFFER_BENCHMARK is not set ++# CONFIG_RING_BUFFER_STARTUP_TEST is not set ++# CONFIG_PREEMPTIRQ_DELAY_TEST is not set ++# CONFIG_TRACE_EVAL_MAP_FILE is not set ++# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set ++CONFIG_RUNTIME_TESTING_MENU=y ++# CONFIG_LKDTM is not set ++# CONFIG_TEST_LIST_SORT is not set ++# CONFIG_TEST_SORT is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_RBTREE_TEST is not set ++# CONFIG_REED_SOLOMON_TEST is not set ++# CONFIG_INTERVAL_TREE_TEST is not set ++# CONFIG_PERCPU_TEST is not set ++# CONFIG_ATOMIC64_SELFTEST is not set ++# CONFIG_ASYNC_RAID6_TEST is not set ++# CONFIG_TEST_HEXDUMP is not set ++# CONFIG_TEST_STRING_HELPERS is not set ++# CONFIG_TEST_STRSCPY is not set ++# CONFIG_TEST_KSTRTOX is not set ++# CONFIG_TEST_PRINTF is not set ++# CONFIG_TEST_BITMAP is not set ++# CONFIG_TEST_BITFIELD is not set ++# CONFIG_TEST_UUID is not set ++# CONFIG_TEST_XARRAY is not set ++# CONFIG_TEST_OVERFLOW is not set ++# CONFIG_TEST_RHASHTABLE is not set ++# CONFIG_TEST_HASH is not set ++# CONFIG_TEST_IDA is not set ++# CONFIG_TEST_LKM is not set ++# CONFIG_TEST_VMALLOC is not set ++# CONFIG_TEST_USER_COPY is not set ++# CONFIG_TEST_BPF is not set ++# CONFIG_TEST_BLACKHOLE_DEV is not set ++# CONFIG_FIND_BIT_BENCHMARK is not set ++# CONFIG_TEST_FIRMWARE is not set ++# CONFIG_TEST_SYSCTL is not set ++# CONFIG_TEST_UDELAY is not set ++# CONFIG_TEST_STATIC_KEYS is not set ++# CONFIG_TEST_KMOD is not set ++# CONFIG_TEST_MEMCAT_P is not set ++# CONFIG_TEST_STACKINIT 
is not set ++# CONFIG_TEST_MEMINIT is not set ++# CONFIG_MEMTEST is not set ++# CONFIG_BUG_ON_DATA_CORRUPTION is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_ARCH_KGDB=y ++# CONFIG_KGDB is not set ++CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y ++# CONFIG_UBSAN is not set ++CONFIG_UBSAN_ALIGNMENT=y ++CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++CONFIG_X86_VERBOSE_BOOTUP=y ++CONFIG_EARLY_PRINTK=y ++# CONFIG_EARLY_PRINTK_DBGP is not set ++# CONFIG_EARLY_PRINTK_USB_XDBC is not set ++# CONFIG_X86_PTDUMP is not set ++# CONFIG_EFI_PGT_DUMP is not set ++# CONFIG_DEBUG_WX is not set ++CONFIG_DOUBLEFAULT=y ++# CONFIG_DEBUG_TLBFLUSH is not set ++CONFIG_HAVE_MMIOTRACE_SUPPORT=y ++# CONFIG_X86_DECODER_SELFTEST is not set ++CONFIG_IO_DELAY_0X80=y ++# CONFIG_IO_DELAY_0XED is not set ++# CONFIG_IO_DELAY_UDELAY is not set ++# CONFIG_IO_DELAY_NONE is not set ++CONFIG_DEBUG_BOOT_PARAMS=y ++# CONFIG_CPA_DEBUG is not set ++# CONFIG_DEBUG_ENTRY is not set ++# CONFIG_DEBUG_NMI_SELFTEST is not set ++# CONFIG_X86_DEBUG_FPU is not set ++# CONFIG_PUNIT_ATOM_DEBUG is not set ++CONFIG_UNWINDER_ORC=y ++# CONFIG_UNWINDER_FRAME_POINTER is not set ++# CONFIG_UNWINDER_GUESS is not set ++# end of Kernel hacking +-- +2.7.4 + diff --git a/patches/0001-trusty-Add-trusty-driver.trusty b/patches/0001-trusty-Add-trusty-driver.trusty new file mode 100644 index 0000000000..2527ba1972 --- /dev/null +++ b/patches/0001-trusty-Add-trusty-driver.trusty @@ -0,0 +1,559 @@ +From c0a23e2abbd606220d57c5e9e82030858a45657f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Mon, 18 Nov 2013 20:46:48 -0800 +Subject: [PATCH 01/63] trusty: Add trusty driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +includes: Add arm64 support + +includes: Add trusty_fast_call64 api on 64 bit systems. 
+ +includes: move probe to subsys_initcall + +Child devices of trusty like FIQ-based debuggers and watchdogs may +want to probe early, move trusty from module init to subsys init +to allow it and its children to probe earlier. + +includes: Retry std_calls on SM_ERR_BUSY + +If the trusty spinlock is held, or if the strex fails for another +reason, trusty returns SM_ERR_BUSY. Add retry code to handle this. + +Without this retry code, std_calls can fail. If the previous smc +call had returned SM_ERR_INTERRUPTED, this failure would cause +the driver to get out of sync with trusty. All later calls would +then fail with SM_ERR_INTERLEAVED_SMC. + +Change-Id: Idc0bbe78b557bc5d95dbec448e4085e3ab9111b4 +Signed-off-by: Arve Hjønnevåg +--- + .../devicetree/bindings/trusty/trusty-smc.txt | 6 + + drivers/Kconfig | 2 + + drivers/Makefile | 1 + + drivers/trusty/Kconfig | 11 + + drivers/trusty/Makefile | 5 + + drivers/trusty/trusty.c | 258 ++++++++++++++++++ + include/linux/trusty/sm_err.h | 39 +++ + include/linux/trusty/smcall.h | 75 +++++ + include/linux/trusty/trusty.h | 46 ++++ + 9 files changed, 443 insertions(+) + create mode 100644 Documentation/devicetree/bindings/trusty/trusty-smc.txt + create mode 100644 drivers/trusty/Kconfig + create mode 100644 drivers/trusty/Makefile + create mode 100644 drivers/trusty/trusty.c + create mode 100644 include/linux/trusty/sm_err.h + create mode 100644 include/linux/trusty/smcall.h + create mode 100644 include/linux/trusty/trusty.h + +diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt +new file mode 100644 +index 000000000000..1b39ad317c67 +--- /dev/null ++++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt +@@ -0,0 +1,6 @@ ++Trusty smc interface ++ ++Trusty is running in secure mode on the same (arm) cpu(s) as the current os. 
++ ++Required properties: ++- compatible: "android,trusty-smc-v1" +diff --git a/drivers/Kconfig b/drivers/Kconfig +index 8befa53f43be..0f8c38ce4a16 100644 +--- a/drivers/Kconfig ++++ b/drivers/Kconfig +@@ -86,6 +86,8 @@ source "drivers/hwmon/Kconfig" + + source "drivers/thermal/Kconfig" + ++source "drivers/trusty/Kconfig" ++ + source "drivers/watchdog/Kconfig" + + source "drivers/ssb/Kconfig" +diff --git a/drivers/Makefile b/drivers/Makefile +index aaef17cc6512..327caa3a106f 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -117,6 +117,7 @@ obj-$(CONFIG_W1) += w1/ + obj-y += power/ + obj-$(CONFIG_HWMON) += hwmon/ + obj-$(CONFIG_THERMAL) += thermal/ ++obj-$(CONFIG_TRUSTY) += trusty/ + obj-$(CONFIG_WATCHDOG) += watchdog/ + obj-$(CONFIG_MD) += md/ + obj-$(CONFIG_BT) += bluetooth/ +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +new file mode 100644 +index 000000000000..f577ae8acad3 +--- /dev/null ++++ b/drivers/trusty/Kconfig +@@ -0,0 +1,11 @@ ++# ++# Trusty ++# ++ ++menu "Trusty" ++ ++config TRUSTY ++ tristate "Trusty" ++ default n ++ ++endmenu +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +new file mode 100644 +index 000000000000..1d77805d7dd6 +--- /dev/null ++++ b/drivers/trusty/Makefile +@@ -0,0 +1,5 @@ ++# ++# Makefile for trusty components ++# ++ ++obj-$(CONFIG_TRUSTY) += trusty.o +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +new file mode 100644 +index 000000000000..59ecf60fc050 +--- /dev/null ++++ b/drivers/trusty/trusty.c +@@ -0,0 +1,258 @@ ++/* ++ * Copyright (C) 2013 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct trusty_state { ++ struct mutex smc_lock; ++}; ++ ++#ifdef CONFIG_ARM64 ++#define SMC_ARG0 "x0" ++#define SMC_ARG1 "x1" ++#define SMC_ARG2 "x2" ++#define SMC_ARG3 "x3" ++#define SMC_ARCH_EXTENSION "" ++#define SMC_REGISTERS_TRASHED "x4","x5","x6","x7","x8","x9","x10","x11", \ ++ "x12","x13","x14","x15","x16","x17" ++#else ++#define SMC_ARG0 "r0" ++#define SMC_ARG1 "r1" ++#define SMC_ARG2 "r2" ++#define SMC_ARG3 "r3" ++#define SMC_ARCH_EXTENSION ".arch_extension sec\n" ++#define SMC_REGISTERS_TRASHED "ip" ++#endif ++ ++static inline ulong smc(ulong r0, ulong r1, ulong r2, ulong r3) ++{ ++ register ulong _r0 asm(SMC_ARG0) = r0; ++ register ulong _r1 asm(SMC_ARG1) = r1; ++ register ulong _r2 asm(SMC_ARG2) = r2; ++ register ulong _r3 asm(SMC_ARG3) = r3; ++ ++ asm volatile( ++ __asmeq("%0", SMC_ARG0) ++ __asmeq("%1", SMC_ARG1) ++ __asmeq("%2", SMC_ARG2) ++ __asmeq("%3", SMC_ARG3) ++ __asmeq("%4", SMC_ARG0) ++ __asmeq("%5", SMC_ARG1) ++ __asmeq("%6", SMC_ARG2) ++ __asmeq("%7", SMC_ARG3) ++ SMC_ARCH_EXTENSION ++ "smc #0" /* switch to secure world */ ++ : "=r" (_r0), "=r" (_r1), "=r" (_r2), "=r" (_r3) ++ : "r" (_r0), "r" (_r1), "r" (_r2), "r" (_r3) ++ : SMC_REGISTERS_TRASHED); ++ return _r0; ++} ++ ++s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ BUG_ON(!s); ++ BUG_ON(!SMC_IS_FASTCALL(smcnr)); ++ BUG_ON(SMC_IS_SMC64(smcnr)); ++ ++ return smc(smcnr, a0, a1, a2); ++} ++EXPORT_SYMBOL(trusty_fast_call32); ++ ++#ifdef CONFIG_64BIT ++s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ BUG_ON(!s); ++ BUG_ON(!SMC_IS_FASTCALL(smcnr)); ++ 
BUG_ON(!SMC_IS_SMC64(smcnr)); ++ ++ return smc(smcnr, a0, a1, a2); ++} ++#endif ++ ++static ulong trusty_std_call_inner(struct device *dev, ulong smcnr, ++ ulong a0, ulong a1, ulong a2) ++{ ++ ulong ret; ++ int retry = 5; ++ ++ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n", ++ __func__, smcnr, a0, a1, a2); ++ while (true) { ++ ret = smc(smcnr, a0, a1, a2); ++ if ((int)ret != SM_ERR_BUSY || !retry) ++ break; ++ ++ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n", ++ __func__, smcnr, a0, a1, a2); ++ retry--; ++ } ++ ++ return ret; ++} ++ ++static ulong trusty_std_call_helper(struct device *dev, ulong smcnr, ++ ulong a0, ulong a1, ulong a2) ++{ ++ ulong ret; ++ int sleep_time = 1; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ while (true) { ++ ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2); ++ if ((int)ret != SM_ERR_BUSY) ++ break; ++ ++ if (sleep_time == 256) ++ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n", ++ __func__, smcnr, a0, a1, a2); ++ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n", ++ __func__, smcnr, a0, a1, a2, sleep_time); ++ ++ msleep(sleep_time); ++ if (sleep_time < 1000) ++ sleep_time <<= 1; ++ ++ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n", ++ __func__, smcnr, a0, a1, a2); ++ } ++ ++ if (sleep_time > 256) ++ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n", ++ __func__, smcnr, a0, a1, a2); ++ ++ return ret; ++} ++ ++s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) ++{ ++ int ret; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ BUG_ON(SMC_IS_FASTCALL(smcnr)); ++ BUG_ON(SMC_IS_SMC64(smcnr)); ++ ++ mutex_lock(&s->smc_lock); ++ ++ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n", ++ __func__, smcnr, a0, a1, a2); ++ ++ ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2); ++ while (ret == SM_ERR_INTERRUPTED) { ++ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n", ++ __func__, smcnr, 
a0, a1, a2); ++ ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0); ++ } ++ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n", ++ __func__, smcnr, a0, a1, a2, ret); ++ ++ WARN_ONCE(ret == SM_ERR_PANIC, "trusty crashed"); ++ ++ mutex_unlock(&s->smc_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL(trusty_std_call32); ++ ++static int trusty_remove_child(struct device *dev, void *data) ++{ ++ platform_device_unregister(to_platform_device(dev)); ++ return 0; ++} ++ ++static int trusty_probe(struct platform_device *pdev) ++{ ++ int ret; ++ struct trusty_state *s; ++ struct device_node *node = pdev->dev.of_node; ++ ++ if (!node) { ++ dev_err(&pdev->dev, "of_node required\n"); ++ return -EINVAL; ++ } ++ ++ s = kzalloc(sizeof(*s), GFP_KERNEL); ++ if (!s) { ++ ret = -ENOMEM; ++ goto err_allocate_state; ++ } ++ mutex_init(&s->smc_lock); ++ platform_set_drvdata(pdev, s); ++ ++ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Failed to add children: %d\n", ret); ++ goto err_add_children; ++ } ++ ++ return 0; ++ ++err_add_children: ++ device_for_each_child(&pdev->dev, NULL, trusty_remove_child); ++ mutex_destroy(&s->smc_lock); ++ kfree(s); ++err_allocate_state: ++ return ret; ++} ++ ++static int trusty_remove(struct platform_device *pdev) ++{ ++ struct trusty_state *s = platform_get_drvdata(pdev); ++ ++ device_for_each_child(&pdev->dev, NULL, trusty_remove_child); ++ mutex_destroy(&s->smc_lock); ++ kfree(s); ++ return 0; ++} ++ ++static const struct of_device_id trusty_of_match[] = { ++ { .compatible = "android,trusty-smc-v1", }, ++ {}, ++}; ++ ++static struct platform_driver trusty_driver = { ++ .probe = trusty_probe, ++ .remove = trusty_remove, ++ .driver = { ++ .name = "trusty", ++ .owner = THIS_MODULE, ++ .of_match_table = trusty_of_match, ++ }, ++}; ++ ++static int __init trusty_driver_init(void) ++{ ++ return platform_driver_register(&trusty_driver); ++} ++ ++static void __exit 
trusty_driver_exit(void) ++{ ++ platform_driver_unregister(&trusty_driver); ++} ++ ++subsys_initcall(trusty_driver_init); ++module_exit(trusty_driver_exit); +diff --git a/include/linux/trusty/sm_err.h b/include/linux/trusty/sm_err.h +new file mode 100644 +index 000000000000..4ee67589ce63 +--- /dev/null ++++ b/include/linux/trusty/sm_err.h +@@ -0,0 +1,39 @@ ++/* ++ * Copyright (c) 2013 Google Inc. All rights reserved ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files ++ * (the "Software"), to deal in the Software without restriction, ++ * including without limitation the rights to use, copy, modify, merge, ++ * publish, distribute, sublicense, and/or sell copies of the Software, ++ * and to permit persons to whom the Software is furnished to do so, ++ * subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ++ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++#ifndef __LINUX_TRUSTY_SM_ERR_H ++#define __LINUX_TRUSTY_SM_ERR_H ++ ++/* Errors from the secure monitor */ ++#define SM_ERR_UNDEFINED_SMC 0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A(0.9.0) */ ++#define SM_ERR_INVALID_PARAMETERS -2 ++#define SM_ERR_INTERRUPTED -3 /* Got interrupted. 
Call back with restart SMC */ ++#define SM_ERR_UNEXPECTED_RESTART -4 /* Got an restart SMC when we didn't expect it */ ++#define SM_ERR_BUSY -5 /* Temporarily busy. Call back with original args */ ++#define SM_ERR_INTERLEAVED_SMC -6 /* Got a trusted_service SMC when a restart SMC is required */ ++#define SM_ERR_INTERNAL_FAILURE -7 /* Unknown error */ ++#define SM_ERR_NOT_SUPPORTED -8 ++#define SM_ERR_NOT_ALLOWED -9 /* SMC call not allowed */ ++#define SM_ERR_END_OF_INPUT -10 ++#define SM_ERR_PANIC -11 /* Secure OS crashed */ ++ ++#endif +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +new file mode 100644 +index 000000000000..278a4b256fbc +--- /dev/null ++++ b/include/linux/trusty/smcall.h +@@ -0,0 +1,75 @@ ++/* ++ * Copyright (c) 2013-2014 Google Inc. All rights reserved ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files ++ * (the "Software"), to deal in the Software without restriction, ++ * including without limitation the rights to use, copy, modify, merge, ++ * publish, distribute, sublicense, and/or sell copies of the Software, ++ * and to permit persons to whom the Software is furnished to do so, ++ * subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ++ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++#ifndef __LINUX_TRUSTY_SMCALL_H ++#define __LINUX_TRUSTY_SMCALL_H ++ ++#define SMC_NUM_ENTITIES 64 ++#define SMC_NUM_ARGS 4 ++#define SMC_NUM_PARAMS (SMC_NUM_ARGS - 1) ++ ++#define SMC_IS_FASTCALL(smc_nr) ((smc_nr) & 0x80000000) ++#define SMC_IS_SMC64(smc_nr) ((smc_nr) & 0x40000000) ++#define SMC_ENTITY(smc_nr) (((smc_nr) & 0x3F000000) >> 24) ++#define SMC_FUNCTION(smc_nr) ((smc_nr) & 0x0000FFFF) ++ ++#define SMC_NR(entity, fn, fastcall, smc64) ((((fastcall) & 0x1) << 31) | \ ++ (((smc64) & 0x1) << 30) | \ ++ (((entity) & 0x3F) << 24) | \ ++ ((fn) & 0xFFFF) \ ++ ) ++ ++#define SMC_FASTCALL_NR(entity, fn) SMC_NR((entity), (fn), 1, 0) ++#define SMC_STDCALL_NR(entity, fn) SMC_NR((entity), (fn), 0, 0) ++#define SMC_FASTCALL64_NR(entity, fn) SMC_NR((entity), (fn), 1, 1) ++#define SMC_STDCALL64_NR(entity, fn) SMC_NR((entity), (fn), 0, 1) ++ ++#define SMC_ENTITY_ARCH 0 /* ARM Architecture calls */ ++#define SMC_ENTITY_CPU 1 /* CPU Service calls */ ++#define SMC_ENTITY_SIP 2 /* SIP Service calls */ ++#define SMC_ENTITY_OEM 3 /* OEM Service calls */ ++#define SMC_ENTITY_STD 4 /* Standard Service calls */ ++#define SMC_ENTITY_RESERVED 5 /* Reserved for future use */ ++#define SMC_ENTITY_TRUSTED_APP 48 /* Trusted Application calls */ ++#define SMC_ENTITY_TRUSTED_OS 50 /* Trusted OS calls */ ++#define SMC_ENTITY_SECURE_MONITOR 60 /* Trusted OS calls internal to secure monitor */ ++ ++/* FC = Fast call, SC = Standard call */ ++#define SMC_SC_RESTART_LAST SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0) ++#define SMC_SC_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1) ++ ++/* ++ * Return from secure os to non-secure os with return value in r1 ++ */ ++#define SMC_SC_NS_RETURN SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0) ++ ++#define SMC_FC_RESERVED SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0) ++#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1) ++#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2) ++#define SMC_FC_GET_NEXT_IRQ 
SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3) ++ ++#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7) ++#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8) ++ ++#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9) ++ ++#endif /* __LINUX_TRUSTY_SMCALL_H */ +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +new file mode 100644 +index 000000000000..30d4300ba301 +--- /dev/null ++++ b/include/linux/trusty/trusty.h +@@ -0,0 +1,46 @@ ++/* ++ * Copyright (C) 2013 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++#ifndef __LINUX_TRUSTY_TRUSTY_H ++#define __LINUX_TRUSTY_TRUSTY_H ++ ++#include ++#include ++ ++#ifdef CONFIG_TRUSTY ++s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2); ++s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2); ++#ifdef CONFIG_64BIT ++s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2); ++#endif ++#else ++static inline s32 trusty_std_call32(struct device *dev, u32 smcnr, ++ u32 a0, u32 a1, u32 a2) ++{ ++ return SM_ERR_UNDEFINED_SMC; ++} ++static inline s32 trusty_fast_call32(struct device *dev, u32 smcnr, ++ u32 a0, u32 a1, u32 a2) ++{ ++ return SM_ERR_UNDEFINED_SMC; ++} ++#ifdef CONFIG_64BIT ++static inline s64 trusty_fast_call64(struct device *dev, ++ u64 smcnr, u64 a0, u64 a1, u64 a2) ++{ ++ return SM_ERR_UNDEFINED_SMC; ++} ++#endif ++#endif ++ ++#endif +-- +2.17.1 + diff --git a/patches/0001-usb-remove-Link-Powermanagement-LPM-disable-befor.usb-xhci b/patches/0001-usb-remove-Link-Powermanagement-LPM-disable-befor.usb-xhci new file mode 100644 index 0000000000..68f43d5ad3 --- /dev/null +++ b/patches/0001-usb-remove-Link-Powermanagement-LPM-disable-befor.usb-xhci @@ -0,0 +1,78 @@ +From 33be5f69151bacd8c571ea9254a563f2789e966d Mon Sep 17 00:00:00 2001 +From: Mathias Nyman +Date: Fri, 9 Feb 2018 17:28:36 +0200 +Subject: [PATCH 1/4] usb: remove Link Powermanagement (LPM) disable before + port reset. + +Trying to disable Link Powermanagement (LPM) before port reset is +unnecessary and causes issues if host can't communicate with the device, +which is often the reason why device is reset in the first place. + +This was seen when xHC host controller was reset at resume from suspend +or hibernate, causing all devices to be reset before the hub driver +resets the ports at resume. As disabling LPM failed the device was +logically disconnected and re-enumerated. 
+ +usb_disable_lpm() will +- zero usb U1/U2 timeouts for the hub downstream port +- send ENABLE U1/U2 clear feature requests to the connected device. +- increase internal reference count for udev->lpm_disable_count + +There is no need to zero U1/U2 hub port timeouts, or clearing the +U1/U2 enable for the connected device before reset. These are the +default valus after reset. + +USB 3.1 section 10.2.2 "HUB Downstream port U1/U2 timers" states that: +"the U1 and U2 timeout values for a downstream port reset to the default +values when the port receives a SetPortFeature request for a port reset" + +Set the udev->lpm_disable_count to "1" after port reset, which is the +default lpm_disable_count value when allocating udev, representing +disabled LPM. + +Signed-off-by: Mathias Nyman +--- + drivers/usb/core/hub.c | 13 +------------ + 1 file changed, 1 insertion(+), 12 deletions(-) + +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 236313f41f4a..493d1c344a5f 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -4837,6 +4837,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, + retval = usb_get_bos_descriptor(udev); + if (!retval) { + udev->lpm_capable = usb_device_supports_lpm(udev); ++ udev->lpm_disable_count = 1; + usb_set_lpm_parameters(udev); + } + } +@@ -5695,16 +5696,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) + */ + usb_disable_usb2_hardware_lpm(udev); + +- /* Disable LPM while we reset the device and reinstall the alt settings. +- * Device-initiated LPM, and system exit latency settings are cleared +- * when the device is reset, so we have to set them up again. 
+- */ +- ret = usb_unlocked_disable_lpm(udev); +- if (ret) { +- dev_err(&udev->dev, "%s Failed to disable LPM\n", __func__); +- goto re_enumerate_no_bos; +- } +- + bos = udev->bos; + udev->bos = NULL; + +@@ -5806,8 +5797,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) + re_enumerate: + usb_release_bos_descriptor(udev); + udev->bos = bos; +-re_enumerate_no_bos: +- /* LPM state doesn't matter when we're about to destroy the device. */ + hub_port_logical_disconnect(parent_hub, port1); + return -ENODEV; + } +-- +2.17.1 + diff --git a/patches/0002-ASoC-Intel-Skylake-Update-firmware-manifest-parsing-.audio b/patches/0002-ASoC-Intel-Skylake-Update-firmware-manifest-parsing-.audio new file mode 100644 index 0000000000..22fa43c958 --- /dev/null +++ b/patches/0002-ASoC-Intel-Skylake-Update-firmware-manifest-parsing-.audio @@ -0,0 +1,330 @@ +From 255279ca3315546dfb9bb0e8da6916a88bc64c69 Mon Sep 17 00:00:00 2001 +From: Piotr Maziarz +Date: Fri, 10 May 2019 15:33:35 +0200 +Subject: [PATCH 002/193] ASoC: Intel: Skylake: Update firmware manifest + parsing mechanism + +Since not only "UUIDs" are taken from FW manifest, current parsing +member names are misleading, so rename them. + +This patch also fixes bug in counting FW size. 
+ +Change-Id: I1918be28332589a23754922f26b662a96c4f53d4 +Signed-off-by: Piotr Maziarz +--- + sound/soc/intel/skylake/bxt-sst.c | 6 +-- + sound/soc/intel/skylake/cnl-sst.c | 7 ++-- + sound/soc/intel/skylake/skl-pcm.c | 6 +-- + sound/soc/intel/skylake/skl-sst-dsp.h | 3 +- + sound/soc/intel/skylake/skl-sst-utils.c | 54 +++++++++---------------- + sound/soc/intel/skylake/skl-sst.c | 8 ++-- + sound/soc/intel/skylake/skl-topology.c | 2 +- + sound/soc/intel/skylake/skl.h | 2 +- + 8 files changed, 34 insertions(+), 54 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 92a82e6b5fe6..dd0eaee0a77a 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -195,9 +195,9 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx) + } + } + +- /* prase uuids on first boot */ + if (skl->is_first_boot) { +- ret = snd_skl_parse_uuids(ctx, ctx->fw, BXT_ADSP_FW_BIN_HDR_OFFSET, 0); ++ ret = snd_skl_parse_manifest(ctx, ctx->fw, ++ BXT_ADSP_FW_BIN_HDR_OFFSET, 0); + if (ret < 0) + goto sst_load_base_firmware_failed; + } +@@ -624,7 +624,7 @@ void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) + skl_release_library(skl->lib_info, skl->lib_count); + if (skl->dsp->fw) + release_firmware(skl->dsp->fw); +- skl_freeup_uuid_list(skl); ++ list_del_init(&skl->module_list); + skl_ipc_free(&skl->ipc); + skl->dsp->ops->free(skl->dsp); + } +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 4f64f097e9ae..1b4379fb3ffb 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -119,10 +119,9 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx) + } + } + +- /* parse uuids if first boot */ + if (cnl->is_first_boot) { +- ret = snd_skl_parse_uuids(ctx, ctx->fw, +- CNL_ADSP_FW_HDR_OFFSET, 0); ++ ret = snd_skl_parse_manifest(ctx, ctx->fw, ++ CNL_ADSP_FW_HDR_OFFSET, 0); + if (ret < 0) + goto cnl_load_base_firmware_failed; + } +@@ 
-477,7 +476,7 @@ void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) + if (skl->dsp->fw) + release_firmware(skl->dsp->fw); + +- skl_freeup_uuid_list(skl); ++ list_del_init(&skl->module_list); + cnl_ipc_free(&skl->ipc); + + skl->dsp->ops->free(skl->dsp); +diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c +index 7f287424af9b..eea38868b700 100644 +--- a/sound/soc/intel/skylake/skl-pcm.c ++++ b/sound/soc/intel/skylake/skl-pcm.c +@@ -1328,12 +1328,12 @@ static int skl_get_module_info(struct skl_dev *skl, + + uuid_mod = (guid_t *)mconfig->guid; + +- if (list_empty(&skl->uuid_list)) { ++ if (list_empty(&skl->module_list)) { + dev_err(skl->dev, "Module list is empty\n"); + return -EIO; + } + +- list_for_each_entry(module, &skl->uuid_list, list) { ++ list_for_each_entry(module, &skl->module_list, list) { + if (guid_equal(uuid_mod, &module->uuid)) { + mconfig->id.module_id = module->id; + if (mconfig->module) +@@ -1360,7 +1360,7 @@ static int skl_get_module_info(struct skl_dev *skl, + if (skl->nr_modules && ret) + return ret; + +- list_for_each_entry(module, &skl->uuid_list, list) { ++ list_for_each_entry(module, &skl->module_list, list) { + for (i = 0; i < MAX_IN_QUEUE; i++) { + pin_id = &mconfig->m_in_pin[i].id; + if (guid_equal(&pin_id->mod_uuid, &module->uuid)) +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index cdfec0fca577..ecd33f1d0064 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -231,13 +231,12 @@ int bxt_sst_init_fw(struct device *dev, struct skl_dev *skl); + void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + +-int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, ++int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, + unsigned int offset, int index); + int skl_get_pvt_id(struct skl_dev *skl, 
guid_t *uuid_mod, int instance_id); + int skl_put_pvt_id(struct skl_dev *skl, guid_t *uuid_mod, int *pvt_id); + int skl_get_pvt_instance_id_map(struct skl_dev *skl, + int module_id, int instance_id); +-void skl_freeup_uuid_list(struct skl_dev *skl); + + int skl_dsp_strip_extended_manifest(struct firmware *fw); + +diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index d43cbf4a71ef..ff24d57ebbda 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -103,7 +103,7 @@ int skl_get_pvt_instance_id_map(struct skl_dev *skl, + { + struct uuid_module *module; + +- list_for_each_entry(module, &skl->uuid_list, list) { ++ list_for_each_entry(module, &skl->module_list, list) { + if (module->id == module_id) + return skl_get_pvtid_map(module, instance_id); + } +@@ -174,9 +174,8 @@ int skl_get_pvt_id(struct skl_dev *skl, guid_t *uuid_mod, int instance_id) + struct uuid_module *module; + int pvt_id; + +- list_for_each_entry(module, &skl->uuid_list, list) { ++ list_for_each_entry(module, &skl->module_list, list) { + if (guid_equal(uuid_mod, &module->uuid)) { +- + pvt_id = skl_pvtid_128(module); + if (pvt_id >= 0) { + module->instance_id[pvt_id] = instance_id; +@@ -204,9 +203,8 @@ int skl_put_pvt_id(struct skl_dev *skl, guid_t *uuid_mod, int *pvt_id) + int i; + struct uuid_module *module; + +- list_for_each_entry(module, &skl->uuid_list, list) { ++ list_for_each_entry(module, &skl->module_list, list) { + if (guid_equal(uuid_mod, &module->uuid)) { +- + if (*pvt_id != 0) + i = (*pvt_id) / 64; + else +@@ -226,7 +224,7 @@ EXPORT_SYMBOL_GPL(skl_put_pvt_id); + * Parse the firmware binary to get the UUID, module id + * and loadable flags + */ +-int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, ++int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, + unsigned int offset, int index) + { + struct adsp_fw_hdr *adsp_hdr; +@@ -237,7 +235,6 @@ int 
snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, + struct uuid_module *module; + struct firmware stripped_fw; + unsigned int safe_file; +- int ret = 0; + + /* Get the FW pointer to derive ADSP header */ + stripped_fw.data = fw->data; +@@ -255,16 +252,11 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, + } + + adsp_hdr = (struct adsp_fw_hdr *)(buf + offset); +- +- /* check 1st module entry is in file */ +- safe_file += adsp_hdr->len + sizeof(*mod_entry); +- if (stripped_fw.size <= safe_file) { +- dev_err(ctx->dev, "Small fw file size, No module entry\n"); ++ if (adsp_hdr->len != sizeof(*adsp_hdr)) { ++ dev_err(ctx->dev, "Header corrupted or unsupported FW version\n"); + return -EINVAL; + } + +- mod_entry = (struct adsp_module_entry *)(buf + offset + adsp_hdr->len); +- + num_entry = adsp_hdr->num_modules; + + /* check all entries are in file */ +@@ -274,6 +266,8 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, + return -EINVAL; + } + ++ mod_entry = (struct adsp_module_entry *) ++ (buf + offset + adsp_hdr->len); + + /* + * Read the UUID(GUID) from FW Manifest. 
+@@ -284,10 +278,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, + */ + + for (i = 0; i < num_entry; i++, mod_entry++) { +- module = kzalloc(sizeof(*module), GFP_KERNEL); ++ module = devm_kzalloc(ctx->dev, sizeof(*module), GFP_KERNEL); + if (!module) { +- ret = -ENOMEM; +- goto free_uuid_list; ++ list_del_init(&skl->module_list); ++ return -ENOMEM; + } + + guid_copy(&module->uuid, (guid_t *)&mod_entry->uuid); +@@ -298,11 +292,11 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, + size = sizeof(int) * mod_entry->instance_max_count; + module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL); + if (!module->instance_id) { +- ret = -ENOMEM; +- goto free_uuid_list; ++ list_del_init(&skl->module_list); ++ return -ENOMEM; + } + +- list_add_tail(&module->list, &skl->uuid_list); ++ list_add_tail(&module->list, &skl->module_list); + + dev_dbg(ctx->dev, + "Adding uuid :%pUL mod id: %d Loadable: %d\n", +@@ -310,21 +304,8 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, + } + + return 0; +- +-free_uuid_list: +- skl_freeup_uuid_list(skl); +- return ret; +-} +- +-void skl_freeup_uuid_list(struct skl_dev *skl) +-{ +- struct uuid_module *uuid, *_uuid; +- +- list_for_each_entry_safe(uuid, _uuid, &skl->uuid_list, list) { +- list_del(&uuid->list); +- kfree(uuid); +- } + } ++EXPORT_SYMBOL(snd_skl_parse_manifest); + + /* + * some firmware binary contains some extended manifest. 
This needs +@@ -362,7 +343,7 @@ int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, + + skl->dev = dev; + skl_dev->thread_context = skl; +- INIT_LIST_HEAD(&skl->uuid_list); ++ INIT_LIST_HEAD(&skl->module_list); + skl->dsp = skl_dsp_ctx_init(dev, skl_dev, irq); + if (!skl->dsp) { + dev_err(skl->dev, "%s: no device\n", __func__); +@@ -398,7 +379,8 @@ int skl_prepare_lib_load(struct skl_dev *skl, struct skl_lib_info *linfo, + } + + if (skl->is_first_boot) { +- ret = snd_skl_parse_uuids(dsp, linfo->fw, hdr_offset, index); ++ ret = snd_skl_parse_manifest(dsp, linfo->fw, hdr_offset, ++ index); + if (ret < 0) + return ret; + } +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 61a8e4756a2b..c3796ac1ae78 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -81,11 +81,11 @@ static int skl_load_base_firmware(struct sst_dsp *ctx) + } + } + +- /* prase uuids on first boot */ + if (skl->is_first_boot) { +- ret = snd_skl_parse_uuids(ctx, ctx->fw, SKL_ADSP_FW_BIN_HDR_OFFSET, 0); ++ ret = snd_skl_parse_manifest(ctx, ctx->fw, ++ SKL_ADSP_FW_BIN_HDR_OFFSET, 0); + if (ret < 0) { +- dev_err(ctx->dev, "UUID parsing err: %d\n", ret); ++ dev_err(ctx->dev, "Manifest parsing err: %d\n", ret); + release_firmware(ctx->fw); + skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK); + return ret; +@@ -587,7 +587,7 @@ void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) + if (skl->dsp->fw) + release_firmware(skl->dsp->fw); + skl_clear_module_table(skl->dsp); +- skl_freeup_uuid_list(skl); ++ list_del_init(&skl->module_list); + skl_ipc_free(&skl->ipc); + skl->dsp->ops->free(skl->dsp); + if (skl->boot_complete) { +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index 69cd7a81bf2a..b1f7cd5c5291 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ b/sound/soc/intel/skylake/skl-topology.c +@@ -801,7 +801,7 @@ static int skl_get_module_id(struct 
skl_dev *skl, guid_t *uuid) + { + struct uuid_module *module; + +- list_for_each_entry(module, &skl->uuid_list, list) { ++ list_for_each_entry(module, &skl->module_list, list) { + if (guid_equal(uuid, &module->uuid)) + return module->id; + } +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index 2bfbf59277c4..6379ac571fb0 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -108,7 +108,7 @@ struct skl_dev { + bool miscbdcg_disabled; + + /* Populate module information */ +- struct list_head uuid_list; ++ struct list_head module_list; + + /* Is firmware loaded */ + bool fw_loaded; +-- +2.17.1 + diff --git a/patches/0002-EDAC-igen6-Add-EDAC-driver-for-Intel-client-SoC-platf.edac b/patches/0002-EDAC-igen6-Add-EDAC-driver-for-Intel-client-SoC-platf.edac new file mode 100644 index 0000000000..ea1b63eab0 --- /dev/null +++ b/patches/0002-EDAC-igen6-Add-EDAC-driver-for-Intel-client-SoC-platf.edac @@ -0,0 +1,877 @@ +From 19e255d65c4741fc1a5ff2660de21f1428f22587 Mon Sep 17 00:00:00 2001 +From: Qiuxu Zhuo +Date: Sat, 27 Oct 2018 23:47:36 +0800 +Subject: [PATCH 2/5] EDAC, igen6: Add EDAC driver for Intel client SoC + platforms + +This driver is meant to support Intel client SoCs with integrated +memory controller using In-Band ECC (IBECC) IP. The memory correctable +and uncorrectable errors are reported via NMIs. The driver handles +the NMIs and decodes the error address to platform specific address. +The first supported platform is Ice Lake Neural Network Processor +for Inference (ICL-NNPI). 
+ +Signed-off-by: Qiuxu Zhuo +--- + drivers/edac/Kconfig | 9 + + drivers/edac/Makefile | 1 + + drivers/edac/igen6_edac.c | 815 ++++++++++++++++++++++++++++++++++++++ + 3 files changed, 825 insertions(+) + create mode 100644 drivers/edac/igen6_edac.c + +diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig +index 417dad635526..f75c8595e12d 100644 +--- a/drivers/edac/Kconfig ++++ b/drivers/edac/Kconfig +@@ -262,6 +262,15 @@ config EDAC_PND2 + first used on the Apollo Lake platform and Denverton + micro-server but may appear on others in the future. + ++config EDAC_IGEN6 ++ tristate "Intel client SoC Integrated MC" ++ depends on PCI && X86_64 && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG ++ help ++ Support for error detection and correction on the Intel ++ client SoC Integrated Memory Controller using In-Band ECC ++ IP. This SoC IP is first used on the Ice Lake-NNPI platform ++ but may appear on others in the future. ++ + config EDAC_MPC85XX + bool "Freescale MPC83xx / MPC85xx" + depends on FSL_SOC && EDAC=y +diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile +index d77200c9680b..b71644e1c7d4 100644 +--- a/drivers/edac/Makefile ++++ b/drivers/edac/Makefile +@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I7300) += i7300_edac.o + obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o + obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o + obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o ++obj-$(CONFIG_EDAC_IGEN6) += igen6_edac.o + obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o + obj-$(CONFIG_EDAC_E752X) += e752x_edac.o + obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o +diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c +new file mode 100644 +index 000000000000..fb020b8fa989 +--- /dev/null ++++ b/drivers/edac/igen6_edac.c +@@ -0,0 +1,815 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Driver for Intel client SoC with integrated memory controller using IBECC ++ * ++ * Copyright (C) 2019 Intel Corporation ++ * ++ * The In-Band ECC (IBECC) IP provides ECC protection to all or specific ++ * 
regions of the physical memory space. It's used for memory controllers ++ * that don't support the out-of-band ECC which often needs an additional ++ * storage device to each channel for storing ECC data. The first supported ++ * platform is Ice Lake Neural Network Processor for Inference (ICL-NNPI). ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "edac_mc.h" ++#include "edac_module.h" ++ ++#define IGEN6_REVISION "v1.1.5" ++ ++#define EDAC_MOD_STR "igen6_edac" ++#define IGEN6_NMI_NAME "igen6_ibecc" ++ ++/* Debug macros */ ++#define igen6_printk(level, fmt, arg...) \ ++ edac_printk(level, "igen6", fmt, ##arg) ++ ++#define igen6_mc_printk(mci, level, fmt, arg...) \ ++ edac_mc_chipset_printk(mci, level, "igen6", fmt, ##arg) ++ ++#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo)) ++ ++#define NUM_CHANNELS 2 /* Max channels */ ++#define NUM_DIMMS 2 /* Max DIMMs per channel */ ++ ++#define IGEN6_TOM_OFF 0xa0 ++#define IGEN6_TOUUD_OFF 0xa8 ++#define IGEN6_TOLUD_OFF 0xbc ++#define IGEN6_CAPID_C_OFF 0xec ++#define IGEN6_CAPID_C_IBECC BIT(15) ++#define _4GB BIT_ULL(32) ++ ++#define IGEN6_ERRSTS_OFF 0xc8 ++#define IGEN6_ERRSTS_CE BIT_ULL(6) ++#define IGEN6_ERRSTS_UE BIT_ULL(7) ++ ++#define IGEN6_ECC_BASE (ibecc_cfg->ibecc_offset) ++#define IGEN6_ECCACTIVATE_OFF IGEN6_ECC_BASE ++#define IGEN6_ECCACTIVATE_EN BIT(0) ++ ++#define IGEN6_ECCERRLOG_OFF (IGEN6_ECC_BASE + 0x170) ++#define IGEN6_ECCERRLOG_CE BIT_ULL(62) ++#define IGEN6_ECCERRLOG_UE BIT_ULL(63) ++#define IGEN6_ECCERRLOG_ADDR_SHIFT 5 ++#define IGEN6_ECCERRLOG_ADDR(v) GET_BITFIELD(v, 5, 38) ++#define IGEN6_ECCERRLOG_SYND(v) GET_BITFIELD(v, 46, 61) ++ ++#define IGEN6_MCHBAR_HI_OFF 0x4c ++#define IGEN6_MCHBAR_LO_OFF 0x48 ++#define IGEN6_MCHBAR_EN BIT_ULL(0) ++#define IGEN6_MCHBAR_BASE(v) (GET_BITFIELD(v, 16, 38) << 16) ++#define IGEN6_MCHBAR_SIZE 0x10000 ++ 
++#define IGEN6_MAD_INTER_OFF 0x5000 ++#define IGEN6_MAD_INTRA_OFF 0x5004 ++#define IGEN6_MAD_DIMM_OFF 0x500c ++#define IGEN6_HASH_OFF 0X5024 ++#define IGEN6_EHASH_OFF 0X5028 ++ ++#define IGEN6_MAD_INTER_DDR_TYPE(v) GET_BITFIELD(v, 0, 2) ++#define IGEN6_MAD_INTER_ECHM(v) GET_BITFIELD(v, 3, 3) ++#define IGEN6_MAD_INTER_CH_L_MAP(v) GET_BITFIELD(v, 4, 4) ++#define IGEN6_MAD_INTER_CH_S_SIZE(v) ((u64)GET_BITFIELD(v, 12, 19) << 29) ++#define IGEN6_MAD_INTRA_DIMM_L_MAP(v) GET_BITFIELD(v, 0, 0) ++#define IGEN6_MAD_INTRA_RI(v) GET_BITFIELD(v, 4, 4) ++#define IGEN6_MAD_INTRA_EIM(v) GET_BITFIELD(v, 8, 8) ++#define IGEN6_MAD_INTRA_ECC(v) (GET_BITFIELD(v, 12, 13) == 0x3) ++ ++#define IGEN6_DIMM_CH_DIMM_L_SIZE(v) ((u64)GET_BITFIELD(v, 0, 6) << 29) ++#define IGEN6_DIMM_CH_DLW(v) GET_BITFIELD(v, 7, 8) ++#define IGEN6_DIMM_CH_DLNOR(v) (GET_BITFIELD(v, 9, 10) + 1) ++#define IGEN6_DIMM_CH_DIMM_S_SIZE(v) ((u64)GET_BITFIELD(v, 16, 22) << 29) ++#define IGEN6_DIMM_CH_DSW(v) GET_BITFIELD(v, 24, 25) ++#define IGEN6_DIMM_CH_DSNOR(v) (GET_BITFIELD(v, 26, 27) + 1) ++#define IGEN6_DIMM_CH_DLS_BG0(v) GET_BITFIELD(v, 29, 29) ++ ++#define IGEN6_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6) ++#define IGEN6_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26) ++#define IGEN6_HASH_MODE(v) GET_BITFIELD(v, 28, 28) ++ ++#define igen6_getreg(type, offset) \ ++ (*(type *)(igen6_pvt->mchbar + (offset))) ++#define igen6_setreg(type, offset, val) \ ++ (*(type *)(igen6_pvt->mchbar + (offset)) = (val)) ++ ++static struct igen6_pvt { ++ struct mem_ctl_info *mci; ++ struct pci_dev *pdev; ++ void __iomem *mchbar; ++ u64 ch_s_size; ++ int ch_l_map; ++ u64 dimm_s_size[NUM_CHANNELS]; ++ u64 dimm_l_size[NUM_CHANNELS]; ++ int dimm_l_map[NUM_CHANNELS]; ++} *igen6_pvt; ++ ++/* The top of upper usable DRAM */ ++static u64 igen6_touud; ++/* The top of low usable DRAM */ ++static u32 igen6_tolud; ++/* The size of physical memory */ ++static u64 igen6_tom; ++ ++struct decoded_addr { ++ u64 mem_addr; ++ u64 sys_addr; ++ u64 chan_addr; 
++ int chan; ++ u64 sub_chan_addr; ++ int sub_chan; ++}; ++ ++struct ecclog_node { ++ struct llist_node llnode; ++ u64 ecclog; ++}; ++ ++static struct ibecc_config { ++ u32 ibecc_offset; ++ bool (*ibecc_available)(u32 capid); ++} *ibecc_cfg; ++ ++/* ++ * An NMI is broadcast to all CPU cores on a CE/UE error on the ICL-NNPI ++ * platform. Make sure only one concurrent NMI handler for it. ++ */ ++static DEFINE_RAW_SPINLOCK(ecclog_lock); ++static u64 last_handle_jiffies; ++#define MAX_NMI_GAP_JIFFIES msecs_to_jiffies(8) ++ ++/* ++ * printk() is not safe in NMI context. So in NMI handler, the driver uses ++ * the lock-less memory alocator to allocate memory for ECC error log and ++ * saves it to a lock-less list. Delay the printk() and the work of error ++ * reporting to EDAC core in a worker. ++ */ ++#define ECCLOG_POOLSZ PAGE_SIZE ++LLIST_HEAD(ecclog_llist); ++static struct gen_pool *ecclog_pool; ++static char ecclog_buf[ECCLOG_POOLSZ]; ++static struct irq_work ecclog_irq_work; ++static struct work_struct ecclog_work; ++ ++/* Compute die IDs for ICL-NNPI with IBECC */ ++#define DID_ICL_SKU8 0x4581 ++#define DID_ICL_SKU10 0x4585 ++#define DID_ICL_SKU11 0x4589 ++#define DID_ICL_SKU12 0x458d ++ ++static bool icl_ibecc_available(u32 capid) ++{ ++ /* Capid IBECC bit for ICL: 0 - available, 1 - unavailable */ ++ return !(IGEN6_CAPID_C_IBECC & capid) && ++ (boot_cpu_data.x86_stepping >= 1); ++} ++ ++static struct ibecc_config icl_cfg = { ++ .ibecc_offset = 0xd800, ++ .ibecc_available = icl_ibecc_available, ++}; ++ ++static const struct pci_device_id igen6_pci_tbl[] = { ++ { PCI_VDEVICE(INTEL, DID_ICL_SKU8), (kernel_ulong_t)&icl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_ICL_SKU10), (kernel_ulong_t)&icl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_ICL_SKU11), (kernel_ulong_t)&icl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_ICL_SKU12), (kernel_ulong_t)&icl_cfg }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(pci, igen6_pci_tbl); ++ ++static enum dev_type get_width(int dimm_l, u32 mad_dimm) ++{ ++ u32 w = dimm_l ? 
IGEN6_DIMM_CH_DLW(mad_dimm) : ++ IGEN6_DIMM_CH_DSW(mad_dimm); ++ ++ switch (w) { ++ case 0: ++ return DEV_X8; ++ case 1: ++ return DEV_X16; ++ case 2: ++ return DEV_X32; ++ default: ++ return DEV_UNKNOWN; ++ } ++} ++ ++static enum mem_type get_memory_type(u32 mad_inter) ++{ ++ u32 t = IGEN6_MAD_INTER_DDR_TYPE(mad_inter); ++ ++ switch (t) { ++ case 0: ++ return MEM_DDR4; ++ case 1: ++ return MEM_DDR3; ++ case 2: ++ return MEM_LPDDR3; ++ case 3: ++ return MEM_LPDDR4; ++ case 4: ++ return MEM_WIO2; ++ default: ++ return MEM_UNKNOWN; ++ } ++} ++ ++static u64 convert_saddr_to_maddr(u64 addr) ++{ ++ if (addr < igen6_tolud) ++ return addr; ++ ++ if (igen6_tom <= _4GB) ++ return addr + igen6_tolud - _4GB; ++ ++ if (addr < _4GB) ++ return addr + igen6_tolud - igen6_tom; ++ ++ return addr; ++} ++ ++static int decode_chan_idx(u64 addr, u64 mask, int intlv_bit) ++{ ++ u64 hash_addr = addr & mask, hash = 0; ++ u64 intlv = (addr >> intlv_bit) & 1; ++ int i; ++ ++ for (i = 6; i < 20; i++) ++ hash ^= (hash_addr >> i) & 1; ++ ++ return (int)hash ^ intlv; ++} ++ ++static u64 decode_chan_addr(u64 addr, int intlv_bit) ++{ ++ u64 chan_addr; ++ ++ /* Remove the interleave bit and shift upper part down to fill gap */ ++ chan_addr = GET_BITFIELD(addr, intlv_bit + 1, 63) << intlv_bit; ++ chan_addr |= GET_BITFIELD(addr, 0, intlv_bit - 1); ++ ++ return chan_addr; ++} ++ ++static void decode_addr(u64 addr, u32 hash, u64 s_size, int l_map, ++ int *sel, u64 *sub_addr) ++{ ++ int intlv_bit = IGEN6_HASH_LSB_MASK_BIT(hash) + 6; ++ ++ if (addr > 2 * s_size) { ++ *sub_addr = addr - s_size; ++ *sel = l_map; ++ return; ++ } ++ ++ if (IGEN6_HASH_MODE(hash)) { ++ *sub_addr = decode_chan_addr(addr, intlv_bit); ++ *sel = decode_chan_idx(addr, IGEN6_HASH_MASK(hash), intlv_bit); ++ } else { ++ *sub_addr = decode_chan_addr(addr, 6); ++ *sel = GET_BITFIELD(addr, 6, 6); ++ } ++} ++ ++static int igen6_decode(struct decoded_addr *res) ++{ ++ u64 addr = res->mem_addr, sub_addr, s_size; ++ struct igen6_pvt *pvt = 
igen6_pvt; ++ int sel, l_map; ++ u32 hash; ++ ++ if (addr >= igen6_tom) { ++ edac_dbg(0, "Address 0x%llx out of range\n", addr); ++ return -EINVAL; ++ } ++ ++ /* Decode channel */ ++ hash = igen6_getreg(u32, IGEN6_HASH_OFF); ++ s_size = pvt->ch_s_size; ++ l_map = pvt->ch_l_map; ++ decode_addr(addr, hash, s_size, l_map, &sel, &sub_addr); ++ res->chan = sel; ++ res->chan_addr = sub_addr; ++ ++ /* Decode sub-channel/DIMM */ ++ hash = igen6_getreg(u32, IGEN6_EHASH_OFF); ++ s_size = pvt->dimm_s_size[sel]; ++ l_map = pvt->dimm_l_map[sel]; ++ decode_addr(res->chan_addr, hash, s_size, l_map, &sel, &sub_addr); ++ res->sub_chan = sel; ++ res->sub_chan_addr = sub_addr; ++ ++ return 0; ++} ++ ++static void igen6_output_error(struct decoded_addr *res, u64 ecclog) ++{ ++ enum hw_event_mc_err_type type = ecclog & IGEN6_ECCERRLOG_UE ? ++ HW_EVENT_ERR_UNCORRECTED : ++ HW_EVENT_ERR_CORRECTED; ++ ++ edac_mc_handle_error(type, igen6_pvt->mci, 1, ++ res->sys_addr >> PAGE_SHIFT, ++ res->sys_addr & ~PAGE_MASK, ++ IGEN6_ECCERRLOG_SYND(ecclog), ++ res->chan, res->sub_chan, ++ -1, "", ""); ++} ++ ++static struct gen_pool *ecclog_gen_pool_create(void) ++{ ++ struct gen_pool *pool; ++ ++ pool = gen_pool_create(ilog2(sizeof(struct ecclog_node)), -1); ++ if (!pool) ++ return NULL; ++ ++ if (gen_pool_add(pool, (unsigned long)ecclog_buf, ECCLOG_POOLSZ, -1)) { ++ gen_pool_destroy(pool); ++ return NULL; ++ } ++ ++ return pool; ++} ++ ++static int ecclog_gen_pool_add(u64 ecclog) ++{ ++ struct ecclog_node *node; ++ ++ node = (void *)gen_pool_alloc(ecclog_pool, sizeof(*node)); ++ if (!node) ++ return -ENOMEM; ++ ++ node->ecclog = ecclog; ++ llist_add(&node->llnode, &ecclog_llist); ++ ++ return 0; ++} ++ ++static u64 ecclog_read(void) ++{ ++ u64 ecclog = igen6_getreg(u64, IGEN6_ECCERRLOG_OFF); ++ ++ if (ecclog & (IGEN6_ECCERRLOG_CE | IGEN6_ECCERRLOG_UE)) ++ return ecclog; ++ ++ return 0; ++} ++ ++static void ecclog_clear(u64 ecclog) ++{ ++ /* Clear CE/UE bits in IBECC register by writing 1 to it */ ++ 
ecclog |= IGEN6_ECCERRLOG_CE | IGEN6_ECCERRLOG_UE; ++ igen6_setreg(u64, IGEN6_ECCERRLOG_OFF, ecclog); ++} ++ ++static void errsts_clear(void) ++{ ++ u16 errsts; ++ ++ if (pci_read_config_word(igen6_pvt->pdev, IGEN6_ERRSTS_OFF, &errsts)) { ++ igen6_printk(KERN_ERR, "Failed to read ERRSTS\n"); ++ return; ++ } ++ ++ if (!(errsts & (IGEN6_ERRSTS_CE | IGEN6_ERRSTS_UE))) ++ return; ++ ++ /* Clear CE/UE bits in PCI ERRSTS register by writing 1 to it */ ++ errsts |= IGEN6_ERRSTS_CE | IGEN6_ERRSTS_UE; ++ pci_write_config_word(igen6_pvt->pdev, IGEN6_ERRSTS_OFF, errsts); ++} ++ ++static u64 ecclog_check(void) ++{ ++ u64 ecclog = ecclog_read(); ++ ++ if (ecclog) ++ ecclog_clear(ecclog); ++ /* errsts_clear() is not NMI safe, delay it in irq_work */ ++ ++ return ecclog; ++} ++ ++static void ecclog_work_cb(struct work_struct *work) ++{ ++ struct mem_ctl_info *mci = igen6_pvt->mci; ++ struct ecclog_node *node, *tmp; ++ struct llist_node *head; ++ struct decoded_addr res; ++ ++ head = llist_del_all(&ecclog_llist); ++ if (!head) ++ return; ++ ++ llist_for_each_entry_safe(node, tmp, head, llnode) { ++ memset(&res, 0, sizeof(res)); ++ res.sys_addr = IGEN6_ECCERRLOG_ADDR(node->ecclog) << ++ IGEN6_ECCERRLOG_ADDR_SHIFT; ++ res.mem_addr = convert_saddr_to_maddr(res.sys_addr); ++ ++ edac_dbg(2, "ecc_error_log = 0x%llx\n", node->ecclog); ++ igen6_mc_printk(mci, KERN_DEBUG, "HANDLING IBECC MEMORY ERROR\n"); ++ igen6_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", res.sys_addr); ++ ++ if (!igen6_decode(&res)) ++ igen6_output_error(&res, node->ecclog); ++ ++ gen_pool_free(ecclog_pool, (unsigned long)node, sizeof(*node)); ++ } ++} ++ ++static void ecclog_irq_work_cb(struct irq_work *irq_work) ++{ ++ errsts_clear(); ++ ++ if (!llist_empty(&ecclog_llist)) ++ schedule_work(&ecclog_work); ++} ++ ++static int ecclog_nmi_handler(unsigned int cmd, struct pt_regs *regs) ++{ ++ u64 delta, ecclog; ++ ++ raw_spin_lock(&ecclog_lock); ++ ++ ecclog = ecclog_check(); ++ if (!ecclog) { ++ delta = jiffies - 
last_handle_jiffies; ++ raw_spin_unlock(&ecclog_lock); ++ /* ++ * When a CE/UE error occurs, an NMI is delivered to all CPU ++ * cores. Only one core handles the error, and the rest cores ++ * see no error so that they complain they receive NMIs for ++ * unknown reason. A workaround for the complaint is to get a ++ * core to see if another core had "recently" handled the error. ++ * If it did, then return value from the handler could be faked ++ * to say this core handled one too. ++ */ ++ return delta < MAX_NMI_GAP_JIFFIES ? NMI_HANDLED : NMI_DONE; ++ } ++ ++ if (!ecclog_gen_pool_add(ecclog)) ++ irq_work_queue(&ecclog_irq_work); ++ last_handle_jiffies = jiffies; ++ ++ raw_spin_unlock(&ecclog_lock); ++ ++ return NMI_HANDLED; ++} ++ ++static bool igen6_check_ecc(void) ++{ ++ u32 activate = igen6_getreg(u32, IGEN6_ECCACTIVATE_OFF); ++ ++ return !!(activate & IGEN6_ECCACTIVATE_EN); ++} ++ ++static int igen6_get_dimm_config(struct mem_ctl_info *mci) ++{ ++ struct igen6_pvt *pvt = mci->pvt_info; ++ u32 mad_inter, mad_intra, mad_dimm; ++ int i, j, ndimms, tot_dimms = 0; ++ struct dimm_info *dimm; ++ enum mem_type mtype; ++ enum dev_type dtype; ++ u64 dsize; ++ bool ecc; ++ ++ mad_inter = igen6_getreg(u32, IGEN6_MAD_INTER_OFF); ++ mtype = get_memory_type(mad_inter); ++ ecc = igen6_check_ecc(); ++ pvt->ch_s_size = IGEN6_MAD_INTER_CH_S_SIZE(mad_inter); ++ pvt->ch_l_map = IGEN6_MAD_INTER_CH_L_MAP(mad_inter); ++ ++ for (i = 0; i < NUM_CHANNELS; i++) { ++ mad_intra = igen6_getreg(u32, IGEN6_MAD_INTRA_OFF + i * 4); ++ mad_dimm = igen6_getreg(u32, IGEN6_MAD_DIMM_OFF + i * 4); ++ ++ pvt->dimm_l_size[i] = IGEN6_DIMM_CH_DIMM_L_SIZE(mad_dimm); ++ pvt->dimm_s_size[i] = IGEN6_DIMM_CH_DIMM_S_SIZE(mad_dimm); ++ pvt->dimm_l_map[i] = IGEN6_MAD_INTRA_DIMM_L_MAP(mad_intra); ++ ndimms = 0; ++ ++ for (j = 0; j < NUM_DIMMS; j++) { ++ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, ++ mci->n_layers, i, j, 0); ++ ++ if (j ^ pvt->dimm_l_map[i]) { ++ dtype = get_width(0, mad_dimm); ++ dsize = 
pvt->dimm_s_size[i]; ++ } else { ++ dtype = get_width(1, mad_dimm); ++ dsize = pvt->dimm_l_size[i]; ++ } ++ ++ if (!dsize) ++ continue; ++ ++ dimm->grain = 32; ++ dimm->mtype = mtype; ++ dimm->dtype = dtype; ++ dimm->nr_pages = MiB_TO_PAGES(dsize >> 20); ++ dimm->edac_mode = EDAC_SECDED; ++ snprintf(dimm->label, sizeof(dimm->label), ++ "Chan#%d_DIMM#%d", i, j); ++ edac_dbg(0, "Channel %d, DIMM %d, Size %llu MiB (%u pages)\n", ++ i, j, dsize >> 20, dimm->nr_pages); ++ ++ ndimms++; ++ } ++ ++ if (ndimms && !ecc) { ++ igen6_printk(KERN_ERR, "ECC is disabled\n"); ++ return -ENODEV; ++ } ++ ++ tot_dimms += ndimms; ++ } ++ ++ if (!tot_dimms) { ++ igen6_printk(KERN_ERR, "No DIMMs found\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++static void __iomem *igen6_pci_setup(struct pci_dev *pdev) ++{ ++ union { ++ u64 v; ++ struct { ++ u32 v_lo; ++ u32 v_hi; ++ }; ++ } u; ++ void __iomem *mchbar; ++ ++ edac_dbg(2, "\n"); ++ ++ if (pci_enable_device(pdev)) { ++ igen6_printk(KERN_ERR, "Failed to enable device %04x:%04x\n", ++ pdev->vendor, pdev->device); ++ return NULL; ++ } ++ ++ if (pci_read_config_dword(pdev, IGEN6_CAPID_C_OFF, &u.v_lo)) { ++ igen6_printk(KERN_ERR, "Failed to read CAPID_C\n"); ++ goto fail; ++ } ++ if (!ibecc_cfg->ibecc_available(u.v_lo)) { ++ edac_dbg(2, "No In-Band ECC IP\n"); ++ goto fail; ++ } ++ ++ if (pci_read_config_dword(pdev, IGEN6_TOUUD_OFF, &u.v_lo)) { ++ igen6_printk(KERN_ERR, "Failed to read TOUUD low part\n"); ++ goto fail; ++ } ++ if (pci_read_config_dword(pdev, IGEN6_TOUUD_OFF + 4, &u.v_hi)) { ++ igen6_printk(KERN_ERR, "Failed to read TOUUD high part\n"); ++ goto fail; ++ } ++ igen6_touud = u.v & GENMASK_ULL(38, 20); ++ ++ if (pci_read_config_dword(pdev, IGEN6_TOLUD_OFF, &igen6_tolud)) { ++ igen6_printk(KERN_ERR, "Failed to read TOLUD\n"); ++ goto fail; ++ } ++ igen6_tolud &= GENMASK(31, 20); ++ ++ if (pci_read_config_dword(pdev, IGEN6_TOM_OFF, &u.v_lo)) { ++ igen6_printk(KERN_ERR, "Failed to read TOM low part\n"); ++ goto fail; ++ } ++ 
if (pci_read_config_dword(pdev, IGEN6_TOM_OFF + 4, &u.v_hi)) { ++ igen6_printk(KERN_ERR, "Failed to read TOM high part\n"); ++ goto fail; ++ } ++ igen6_tom = u.v & GENMASK_ULL(38, 20); ++ ++ if (pci_read_config_dword(pdev, IGEN6_MCHBAR_LO_OFF, &u.v_lo)) { ++ igen6_printk(KERN_ERR, "Failed to read MCHBAR\n"); ++ goto fail; ++ } ++ if (pci_read_config_dword(pdev, IGEN6_MCHBAR_HI_OFF, &u.v_hi)) { ++ igen6_printk(KERN_ERR, "Failed to read MCHBAR1\n"); ++ goto fail; ++ } ++ if (!(u.v & IGEN6_MCHBAR_EN)) { ++ igen6_printk(KERN_ERR, "MCHBAR is disabled\n"); ++ goto fail; ++ } ++ mchbar = ioremap_nocache(IGEN6_MCHBAR_BASE(u.v), IGEN6_MCHBAR_SIZE); ++ if (!mchbar) { ++ igen6_printk(KERN_ERR, "Failed to ioremap mchbar 0x%llx\n", ++ IGEN6_MCHBAR_BASE(u.v)); ++ goto fail; ++ } ++ ++ return mchbar; ++fail: ++ pci_disable_device(pdev); ++ return NULL; ++} ++ ++#ifdef CONFIG_EDAC_DEBUG ++static void igen6_reg_dump(void) ++{ ++ int i; ++ ++ edac_dbg(2, "Hash : 0x%x\n", ++ igen6_getreg(u32, IGEN6_HASH_OFF)); ++ edac_dbg(2, "Ehash : 0x%x\n", ++ igen6_getreg(u32, IGEN6_EHASH_OFF)); ++ edac_dbg(2, "Mad_inter : 0x%x\n", ++ igen6_getreg(u32, IGEN6_MAD_INTER_OFF)); ++ edac_dbg(2, "Eccerrlog : 0x%llx\n", ++ igen6_getreg(u64, IGEN6_ECCERRLOG_OFF)); ++ ++ for (i = 0; i < NUM_CHANNELS; i++) { ++ edac_dbg(2, "Mad_intra_%d : 0x%x\n", i, ++ igen6_getreg(u32, IGEN6_MAD_INTRA_OFF + i * 4)); ++ edac_dbg(2, "Mad_dimm_%d : 0x%x\n", i, ++ igen6_getreg(u32, IGEN6_MAD_DIMM_OFF + i * 4)); ++ } ++ edac_dbg(2, "Touud : 0x%llx", igen6_touud); ++ edac_dbg(2, "Tolud : 0x%x", igen6_tolud); ++ edac_dbg(2, "Tom : 0x%llx", igen6_tom); ++} ++#else ++static void igen6_reg_dump(void) {} ++#endif ++ ++static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ struct edac_mc_layer layers[2]; ++ struct mem_ctl_info *mci; ++ void __iomem *mchbar; ++ struct igen6_pvt *pvt; ++ int rc = -ENODEV; ++ u64 ecclog; ++ ++ edac_dbg(2, "\n"); ++ ++ ibecc_cfg = (struct ibecc_config *)ent->driver_data; ++ 
mchbar = igen6_pci_setup(pdev); ++ if (!mchbar) ++ return -ENODEV; ++ ++ layers[0].type = EDAC_MC_LAYER_CHANNEL; ++ layers[0].size = NUM_CHANNELS; ++ layers[0].is_virt_csrow = false; ++ layers[1].type = EDAC_MC_LAYER_SLOT; ++ layers[1].size = NUM_DIMMS; ++ layers[1].is_virt_csrow = true; ++ ++ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); ++ if (!mci) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ ++ mci->ctl_name = "Intel_client_SoC"; ++ mci->mtype_cap = MEM_FLAG_LPDDR4 | MEM_FLAG_DDR4; ++ mci->edac_ctl_cap = EDAC_FLAG_SECDED; ++ mci->edac_cap = EDAC_FLAG_SECDED; ++ mci->mod_name = EDAC_MOD_STR; ++ mci->dev_name = pci_name(pdev); ++ mci->pdev = &pdev->dev; ++ pvt = mci->pvt_info; ++ pvt->mci = mci; ++ pvt->mchbar = mchbar; ++ pvt->pdev = pdev; ++ igen6_pvt = pvt; ++ ++ igen6_reg_dump(); ++ ++ rc = igen6_get_dimm_config(mci); ++ if (rc) ++ goto fail0; ++ ++ rc = edac_mc_add_mc(mci); ++ if (rc) { ++ igen6_printk(KERN_ERR, "Failed to register mci\n"); ++ goto fail0; ++ } ++ ++ ecclog_pool = ecclog_gen_pool_create(); ++ if (!ecclog_pool) { ++ rc = -ENOMEM; ++ goto fail1; ++ } ++ ++ INIT_WORK(&ecclog_work, ecclog_work_cb); ++ init_irq_work(&ecclog_irq_work, ecclog_irq_work_cb); ++ ++ /* Check if any pending error before registering the NMI handler */ ++ ecclog = ecclog_check(); ++ if (ecclog) { ++ if (!ecclog_gen_pool_add(ecclog)) ++ irq_work_queue(&ecclog_irq_work); ++ last_handle_jiffies = jiffies; ++ } ++ ++ rc = register_nmi_handler(NMI_LOCAL, ecclog_nmi_handler, ++ 0, IGEN6_NMI_NAME); ++ if (rc) { ++ igen6_printk(KERN_ERR, "Failed to register nmi handler\n"); ++ goto fail2; ++ } ++ ++ return 0; ++ ++fail2: ++ gen_pool_destroy(ecclog_pool); ++fail1: ++ edac_mc_del_mc(mci->pdev); ++fail0: ++ edac_mc_free(mci); ++fail: ++ iounmap(mchbar); ++ return rc; ++} ++ ++static void igen6_remove(struct pci_dev *pdev) ++{ ++ struct mem_ctl_info *mci; ++ struct igen6_pvt *pvt; ++ ++ edac_dbg(2, "\n"); ++ ++ unregister_nmi_handler(NMI_LOCAL, IGEN6_NMI_NAME); ++ 
irq_work_sync(&ecclog_irq_work); ++ flush_work(&ecclog_work); ++ gen_pool_destroy(ecclog_pool); ++ mci = edac_mc_del_mc(&pdev->dev); ++ if (!mci) { ++ edac_dbg(0, "mci should not be null\n"); ++ return; ++ } ++ pvt = mci->pvt_info; ++ edac_mc_free(mci); ++ iounmap(pvt->mchbar); ++ pci_disable_device(pdev); ++} ++ ++static struct pci_driver igen6_driver = { ++ .name = EDAC_MOD_STR, ++ .probe = igen6_probe, ++ .remove = igen6_remove, ++ .id_table = igen6_pci_tbl, ++}; ++ ++static int __init igen6_init(void) ++{ ++ const char *owner; ++ int rc; ++ ++ edac_dbg(2, "\n"); ++ ++ owner = edac_get_owner(); ++ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) ++ return -ENODEV; ++ ++ edac_op_state = EDAC_OPSTATE_NMI; ++ ++ rc = pci_register_driver(&igen6_driver); ++ if (rc) ++ return rc; ++ ++ igen6_printk(KERN_INFO, "%s\n", IGEN6_REVISION); ++ ++ return 0; ++} ++ ++static void __exit igen6_exit(void) ++{ ++ edac_dbg(2, "\n"); ++ ++ pci_unregister_driver(&igen6_driver); ++} ++ ++module_init(igen6_init); ++module_exit(igen6_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Qiuxu Zhuo"); ++MODULE_DESCRIPTION("MC Driver for Intel client SoC using In-Band ECC"); +-- +2.17.1 + diff --git a/patches/0002-Fix-for-socwatch-build-error-that-occurs-if-C.sep-socwatch b/patches/0002-Fix-for-socwatch-build-error-that-occurs-if-C.sep-socwatch new file mode 100644 index 0000000000..988d1757b2 --- /dev/null +++ b/patches/0002-Fix-for-socwatch-build-error-that-occurs-if-C.sep-socwatch @@ -0,0 +1,61 @@ +From 1632253d56c2f5cf17bdc5799ffebe9da67254e0 Mon Sep 17 00:00:00 2001 +From: Jon Moeller +Date: Tue, 13 Nov 2018 13:31:51 -0600 +Subject: [PATCH 02/27] Fix for socwatch build error that occurs if + CONFIG_TRACEPOINTS is disabled. 
+ +Signed-off-by: Jon Moeller +--- + .../platform/x86/socwatch/sw_trace_notifier_provider.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +index 0c414423de09..a20e0566cfca 100644 +--- a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c ++++ b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +@@ -123,7 +123,7 @@ struct cpu_workqueue_struct; // Forward declaration to avoid compiler warnings + * Tracepoint probe register/unregister functions and + * helper macros. + */ +-#ifdef CONFIG_TRACEPOINTS ++#if IS_ENABLED(CONFIG_TRACEPOINTS) + #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) + #define DO_REGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + WARN_ON(register_trace_##name(probe)) +@@ -532,7 +532,7 @@ static const struct sw_trace_notifier_name s_notifier_names[] = { + "HOTCPU-NOTIFIER" }, + }; + +-#ifdef CONFIG_TRACEPOINTS ++#if IS_ENABLED(CONFIG_TRACEPOINTS) + /* + * A list of supported tracepoints. + */ +@@ -2131,7 +2131,7 @@ static void sw_extract_tracepoint_callback(struct tracepoint *tp, void *priv) + int sw_extract_trace_notifier_providers(void) + { + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) && \ +- defined(CONFIG_TRACEPOINTS) ++ IS_ENABLED(CONFIG_TRACEPOINTS) + int numCallbacks = 0; + + for_each_kernel_tracepoint(&sw_extract_tracepoint_callback, +@@ -2212,6 +2212,7 @@ int sw_add_trace_notifier_providers(void) + return -EIO; + } + } ++#if IS_ENABLED(CONFIG_TRACEPOINTS) + /* + * Add the cpu hot plug notifier. 
+ */ +@@ -2223,6 +2224,7 @@ int sw_add_trace_notifier_providers(void) + return -EIO; + } + } ++#endif // CONFIG_TRACEPOINTS + return PW_SUCCESS; + } + /* +-- +2.17.1 + diff --git a/patches/0002-SPI-designware-pci-Switch-over-to-MSI-interrup.felipeb-5.4 b/patches/0002-SPI-designware-pci-Switch-over-to-MSI-interrup.felipeb-5.4 new file mode 100644 index 0000000000..612058f8dd --- /dev/null +++ b/patches/0002-SPI-designware-pci-Switch-over-to-MSI-interrup.felipeb-5.4 @@ -0,0 +1,65 @@ +From a203a23fa5a8efb41d56058a0022ef7a4cc61eb8 Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Thu, 2 May 2019 08:34:31 +0300 +Subject: [PATCH 02/14] SPI: designware: pci: Switch over to MSI interrupts + +Some devices support MSI interrupts. Let's at least try to use them in +platforms that provide MSI capability. + +Signed-off-by: Felipe Balbi +--- + drivers/spi/spi-dw-pci.c | 13 +++++++++++-- + 1 file changed, 11 insertions(+), 2 deletions(-) + +diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c +index 140644913e6c..7ab53f4d04b9 100644 +--- a/drivers/spi/spi-dw-pci.c ++++ b/drivers/spi/spi-dw-pci.c +@@ -57,13 +57,18 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + + /* Get basic io resource and map it */ + dws->paddr = pci_resource_start(pdev, pci_bar); ++ pci_set_master(pdev); + + ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev)); + if (ret) + return ret; + ++ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); ++ if (ret < 0) ++ return ret; ++ + dws->regs = pcim_iomap_table(pdev)[pci_bar]; +- dws->irq = pdev->irq; ++ dws->irq = pci_irq_vector(pdev, 0); + + /* + * Specific handling for platforms, like dma setup, +@@ -80,12 +85,15 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + return ret; + } + } else { ++ pci_free_irq_vectors(pdev); + return -ENODEV; + } + + ret = dw_spi_add_host(&pdev->dev, dws); +- if (ret) ++ if (ret) { ++ pci_free_irq_vectors(pdev); + return ret; ++ } + + 
/* PCI hook and SPI hook use the same drv data */ + pci_set_drvdata(pdev, dws); +@@ -101,6 +109,7 @@ static void spi_pci_remove(struct pci_dev *pdev) + struct dw_spi *dws = pci_get_drvdata(pdev); + + dw_spi_remove_host(dws); ++ pci_free_irq_vectors(pdev); + } + + #ifdef CONFIG_PM_SLEEP +-- +2.17.1 + diff --git a/patches/0002-TEE-OPTEE-Adds-a-kernel-internal-TEE-client-inter.security b/patches/0002-TEE-OPTEE-Adds-a-kernel-internal-TEE-client-inter.security new file mode 100644 index 0000000000..9b5ab004ec --- /dev/null +++ b/patches/0002-TEE-OPTEE-Adds-a-kernel-internal-TEE-client-inter.security @@ -0,0 +1,293 @@ +From 5ad758460d27485437d1eb9320081c39947e04dc Mon Sep 17 00:00:00 2001 +From: Vincent Cao +Date: Tue, 19 Jun 2018 16:49:01 -0700 +Subject: [PATCH 02/65] TEE/OPTEE: Adds a kernel internal TEE client interface + +Enable TEE core additional kernel interface client interface to be +used by other drivers. + +all patch credited to Jens Wiklander +https://patchwork.kernel.org/patch/9845351/ + +Change-Id: Ibd4f50350d10e005fc7930f2230f689b0ecf3854 +Signed-off-by: Vincent Cao +Signed-off-by: Jens Wiklander +--- + README.intel.optee | 13 +++++++ + drivers/tee/tee_core.c | 80 +++-------------------------------------- + include/linux/tee_drv.h | 74 +++++++------------------------------- + 3 files changed, 30 insertions(+), 137 deletions(-) + create mode 100644 README.intel.optee + +diff --git a/README.intel.optee b/README.intel.optee +new file mode 100644 +index 000000000000..0945d23cc882 +--- /dev/null ++++ b/README.intel.optee +@@ -0,0 +1,13 @@ ++IP Block: OPTEE core driver ++Platform Affect: (ARM64) IOTG KeemBay ++BugFix: None ++ ++This branch pulls in driver interface enhancement to OPTEE TEE driver to expose ++an kernel internal TEE client interface to be used by other drivers. ++ ++This patch was pushed to the upstream kernel mailinglist by Jens Wiklander (OPTEE maintainer) ++nearly a year ago but no effort to include yet in any recently release. 
++ ++https://patchwork.kernel.org/patch/9845351/ ++ ++CONFIG_TEE and CONFIG_OPTEE in defconfig must be enabled for driver to compile. +diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c +index 3ca3d2ea92f8..7f33ba2dd5fe 100644 +--- a/drivers/tee/tee_core.c ++++ b/drivers/tee/tee_core.c +@@ -6,6 +6,7 @@ + #define pr_fmt(fmt) "%s: " fmt, __func__ + + #include ++#include + #include + #include + #include +@@ -54,7 +55,6 @@ static struct tee_context *teedev_open(struct tee_device *teedev) + kfree(ctx); + tee_device_put(teedev); + return ERR_PTR(rc); +- + } + + void teedev_ctx_get(struct tee_context *ctx) +@@ -96,11 +96,6 @@ static int tee_open(struct inode *inode, struct file *filp) + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + +- /* +- * Default user-space behaviour is to wait for tee-supplicant +- * if not present for any requests in this context. +- */ +- ctx->supp_nowait = false; + filp->private_data = ctx; + return 0; + } +@@ -977,16 +972,6 @@ tee_client_open_context(struct tee_context *start, + } while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM); + + put_device(put_dev); +- /* +- * Default behaviour for in kernel client is to not wait for +- * tee-supplicant if not present for any requests in this context. +- * Also this flag could be configured again before call to +- * tee_client_open_session() if any in kernel client requires +- * different behaviour. 
+- */ +- if (!IS_ERR(ctx)) +- ctx->supp_nowait = true; +- + return ctx; + } + EXPORT_SYMBOL_GPL(tee_client_open_context); +@@ -1032,49 +1017,6 @@ int tee_client_invoke_func(struct tee_context *ctx, + } + EXPORT_SYMBOL_GPL(tee_client_invoke_func); + +-int tee_client_cancel_req(struct tee_context *ctx, +- struct tee_ioctl_cancel_arg *arg) +-{ +- if (!ctx->teedev->desc->ops->cancel_req) +- return -EINVAL; +- return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id, +- arg->session); +-} +-EXPORT_SYMBOL_GPL(tee_client_cancel_req); +- +-static int tee_client_device_match(struct device *dev, +- struct device_driver *drv) +-{ +- const struct tee_client_device_id *id_table; +- struct tee_client_device *tee_device; +- +- id_table = to_tee_client_driver(drv)->id_table; +- tee_device = to_tee_client_device(dev); +- +- while (!uuid_is_null(&id_table->uuid)) { +- if (uuid_equal(&tee_device->id.uuid, &id_table->uuid)) +- return 1; +- id_table++; +- } +- +- return 0; +-} +- +-static int tee_client_device_uevent(struct device *dev, +- struct kobj_uevent_env *env) +-{ +- uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid; +- +- return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id); +-} +- +-struct bus_type tee_bus_type = { +- .name = "tee", +- .match = tee_client_device_match, +- .uevent = tee_client_device_uevent, +-}; +-EXPORT_SYMBOL_GPL(tee_bus_type); +- + static int __init tee_init(void) + { + int rc; +@@ -1088,32 +1030,18 @@ static int __init tee_init(void) + rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee"); + if (rc) { + pr_err("failed to allocate char dev region\n"); +- goto out_unreg_class; +- } +- +- rc = bus_register(&tee_bus_type); +- if (rc) { +- pr_err("failed to register tee bus\n"); +- goto out_unreg_chrdev; ++ class_destroy(tee_class); ++ tee_class = NULL; + } + +- return 0; +- +-out_unreg_chrdev: +- unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES); +-out_unreg_class: +- class_destroy(tee_class); +- tee_class = NULL; +- + return rc; + } + + 
static void __exit tee_exit(void) + { +- bus_unregister(&tee_bus_type); +- unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES); + class_destroy(tee_class); + tee_class = NULL; ++ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES); + } + + subsys_initcall(tee_init); +diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h +index 7a03f68fb982..7ce4dd8c76ae 100644 +--- a/include/linux/tee_drv.h ++++ b/include/linux/tee_drv.h +@@ -6,14 +6,11 @@ + #ifndef __TEE_DRV_H + #define __TEE_DRV_H + +-#include ++#include + #include + #include + #include +-#include + #include +-#include +-#include + + /* + * The file describes the API provided by the generic TEE driver to the +@@ -41,11 +38,6 @@ struct tee_shm_pool; + * @releasing: flag that indicates if context is being released right now. + * It is needed to break circular dependency on context during + * shared memory release. +- * @supp_nowait: flag that indicates that requests in this context should not +- * wait for tee-supplicant daemon to be started if not present +- * and just return with an error code. It is needed for requests +- * that arises from TEE based kernel drivers that should be +- * non-blocking in nature. 
+ */ + struct tee_context { + struct tee_device *teedev; +@@ -53,7 +45,6 @@ struct tee_context { + void *data; + struct kref refcount; + bool releasing; +- bool supp_nowait; + }; + + struct tee_param_memref { +@@ -453,6 +444,18 @@ static inline int tee_shm_get_id(struct tee_shm *shm) + */ + struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); + ++static inline bool tee_param_is_memref(struct tee_param *param) ++{ ++ switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { ++ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: ++ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: ++ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: ++ return true; ++ default: ++ return false; ++ } ++} ++ + /** + * tee_client_open_context() - Open a TEE context + * @start: if not NULL, continue search after this context +@@ -526,55 +529,4 @@ int tee_client_invoke_func(struct tee_context *ctx, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); + +-/** +- * tee_client_cancel_req() - Request cancellation of the previous open-session +- * or invoke-command operations in a Trusted Application +- * @ctx: TEE Context +- * @arg: Cancellation arguments, see description of +- * struct tee_ioctl_cancel_arg +- * +- * Returns < 0 on error else 0 if the cancellation was successfully requested. 
+- */ +-int tee_client_cancel_req(struct tee_context *ctx, +- struct tee_ioctl_cancel_arg *arg); +- +-static inline bool tee_param_is_memref(struct tee_param *param) +-{ +- switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { +- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: +- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: +- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: +- return true; +- default: +- return false; +- } +-} +- +-extern struct bus_type tee_bus_type; +- +-/** +- * struct tee_client_device - tee based device +- * @id: device identifier +- * @dev: device structure +- */ +-struct tee_client_device { +- struct tee_client_device_id id; +- struct device dev; +-}; +- +-#define to_tee_client_device(d) container_of(d, struct tee_client_device, dev) +- +-/** +- * struct tee_client_driver - tee client driver +- * @id_table: device id table supported by this driver +- * @driver: driver structure +- */ +-struct tee_client_driver { +- const struct tee_client_device_id *id_table; +- struct device_driver driver; +-}; +- +-#define to_tee_client_driver(d) \ +- container_of(d, struct tee_client_driver, driver) +- + #endif /*__TEE_DRV_H*/ +-- +2.17.1 + diff --git a/patches/0002-VHM-add-vhm-char-device-driver.acrn b/patches/0002-VHM-add-vhm-char-device-driver.acrn new file mode 100644 index 0000000000..6a83032f0d --- /dev/null +++ b/patches/0002-VHM-add-vhm-char-device-driver.acrn @@ -0,0 +1,1057 @@ +From 097bff613390947468d4f9e692049b513288069f Mon Sep 17 00:00:00 2001 +From: liang ding +Date: Fri, 31 Aug 2018 10:58:55 +0800 +Subject: [PATCH 002/150] VHM: add vhm char device driver + +VHM(virtio and hypervisor service module) is the important middle layer +to run virtio and hypervisor services in linux kernel for ACRN hypervisor. +The vhm char device is the main interface. It provides ioctls to +applications and interacts with ACRN hypervisor through different +hypercalls. 
+ +This patch enable ACRN vhm service based on CONFIG_ACRN; added a basic vhm +char device which contains services for VM management in drivers/char/vhm; +and located vhm service lib in drivers/vhm. + +Change-Id: Ib6c95d810581abd226692cbec9649a24b466a93b +Tracked-On: 218445 +Signed-off-by: liang ding +Signed-off-by: Jason Zeng +Signed-off-by: Xiao Zheng +Signed-off-by: Jason Chen CJ +Signed-off-by: Jack Ren +Signed-off-by: Mingqiang Chi +Reviewed-on: +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/Makefile | 1 + + drivers/char/Makefile | 1 + + drivers/char/vhm/Makefile | 1 + + drivers/char/vhm/vhm_dev.c | 247 +++++++++++++++++++++++++++++ + drivers/vhm/Makefile | 1 + + drivers/vhm/vhm_hypercall.c | 134 ++++++++++++++++ + drivers/vhm/vhm_vm_mngt.c | 107 +++++++++++++ + include/linux/vhm/acrn_common.h | 71 +++++++++ + include/linux/vhm/acrn_hv_defs.h | 81 ++++++++++ + include/linux/vhm/vhm_hypercall.h | 148 +++++++++++++++++ + include/linux/vhm/vhm_ioctl_defs.h | 67 ++++++++ + include/linux/vhm/vhm_vm_mngt.h | 78 +++++++++ + 12 files changed, 937 insertions(+) + create mode 100644 drivers/char/vhm/Makefile + create mode 100644 drivers/char/vhm/vhm_dev.c + create mode 100644 drivers/vhm/Makefile + create mode 100644 drivers/vhm/vhm_hypercall.c + create mode 100644 drivers/vhm/vhm_vm_mngt.c + create mode 100644 include/linux/vhm/acrn_common.h + create mode 100644 include/linux/vhm/acrn_hv_defs.h + create mode 100644 include/linux/vhm/vhm_hypercall.h + create mode 100644 include/linux/vhm/vhm_ioctl_defs.h + create mode 100644 include/linux/vhm/vhm_vm_mngt.h + +Index: kernel-coe-tracker/drivers/Makefile +=================================================================== +--- kernel-coe-tracker.orig/drivers/Makefile ++++ kernel-coe-tracker/drivers/Makefile +@@ -182,6 +182,7 @@ obj-$(CONFIG_FPGA) += fpga/ + obj-$(CONFIG_FSI) += fsi/ + obj-$(CONFIG_TEE) += tee/ + obj-$(CONFIG_MULTIPLEXER) += mux/ ++obj-$(CONFIG_ACRN_GUEST) += vhm/ + 
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/ + obj-$(CONFIG_SIOX) += siox/ + obj-$(CONFIG_GNSS) += gnss/ +Index: kernel-coe-tracker/drivers/char/Makefile +=================================================================== +--- kernel-coe-tracker.orig/drivers/char/Makefile ++++ kernel-coe-tracker/drivers/char/Makefile +@@ -53,3 +53,4 @@ obj-$(CONFIG_XILLYBUS) += xillybus/ + obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o + obj-$(CONFIG_ADI) += adi.o + obj-$(CONFIG_RPMB_SUPPORT) += rpmb/ ++obj-$(CONFIG_ACRN_GUEST) += vhm/ +Index: kernel-coe-tracker/drivers/char/vhm/Makefile +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/drivers/char/vhm/Makefile +@@ -0,0 +1 @@ ++obj-y += vhm_dev.o +Index: kernel-coe-tracker/drivers/char/vhm/vhm_dev.c +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/drivers/char/vhm/vhm_dev.c +@@ -0,0 +1,247 @@ ++/* ++ * virtio and hyperviosr service module (VHM): main framework ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * ++ * Liang Ding ++ * Jason Zeng ++ * Xiao Zheng ++ * Jason Chen CJ ++ * Jack Ren ++ * Mingqiang Chi ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#define DEVICE_NAME "acrn_vhm" ++#define CLASS_NAME "vhm" ++ ++static int major; ++static struct class *vhm_class; ++static struct device *vhm_device; ++ ++static int vhm_dev_open(struct inode *inodep, struct file *filep) ++{ ++ struct vhm_vm *vm; ++ ++ vm = kzalloc(sizeof(struct vhm_vm), GFP_KERNEL); ++ pr_info("vhm_dev_open: opening device node\n"); ++ ++ if (!vm) ++ return -ENOMEM; ++ vm->vmid = ACRN_INVALID_VMID; ++ vm->dev = vhm_device; ++ ++ vm_mutex_lock(&vhm_vm_list_lock); ++ vm->refcnt = 1; ++ vm_list_add(&vm->list); ++ vm_mutex_unlock(&vhm_vm_list_lock); ++ filep->private_data = vm; ++ return 0; ++} ++ ++static ssize_t vhm_dev_read(struct file *filep, char *buffer, size_t len, ++ loff_t *offset) ++{ ++ /* Does Nothing */ ++ pr_info("vhm_dev_read: reading device node\n"); ++ return 0; ++} ++ ++static ssize_t vhm_dev_write(struct file *filep, const char *buffer, ++ size_t len, loff_t *offset) ++{ ++ /* Does Nothing */ ++ pr_info("vhm_dev_read: writing device node\n"); ++ return 0; ++} ++ ++static long vhm_dev_ioctl(struct file *filep, ++ unsigned int ioctl_num, unsigned long ioctl_param) ++{ ++ long ret = 0; ++ struct vhm_vm *vm; ++ ++ trace_printk("[%s] ioctl_num=0x%x\n", __func__, ioctl_num); ++ ++ vm = (struct vhm_vm *)filep->private_data; ++ if (vm == NULL) { ++ pr_err("vhm: invalid VM !\n"); ++ return -EFAULT; ++ } ++ if ((vm->vmid == ACRN_INVALID_VMID) && (ioctl_num != IC_CREATE_VM)) { ++ pr_err("vhm: invalid VM ID !\n"); ++ return -EFAULT; ++ } ++ ++ switch (ioctl_num) { ++ case IC_CREATE_VM: ++ ret = vhm_create_vm(vm, ioctl_param); ++ break; ++ ++ case IC_RESUME_VM: ++ ret = 
vhm_resume_vm(vm); ++ break; ++ ++ case IC_PAUSE_VM: ++ ret = vhm_pause_vm(vm); ++ break; ++ ++ case IC_DESTROY_VM: ++ ret = vhm_destroy_vm(vm); ++ break; ++ ++ case IC_QUERY_VMSTATE: ++ ret = vhm_query_vm_state(vm); ++ break; ++ ++ default: ++ pr_warn("Unknown IOCTL 0x%x\n", ioctl_num); ++ ret = 0; ++ break; ++ } ++ ++ return ret; ++} ++ ++static int vhm_dev_release(struct inode *inodep, struct file *filep) ++{ ++ struct vhm_vm *vm = filep->private_data; ++ ++ if (vm == NULL) { ++ pr_err("vhm: invalid VM !\n"); ++ return -EFAULT; ++ } ++ put_vm(vm); ++ filep->private_data = NULL; ++ return 0; ++} ++ ++static const struct file_operations fops = { ++ .open = vhm_dev_open, ++ .read = vhm_dev_read, ++ .write = vhm_dev_write, ++ .release = vhm_dev_release, ++ .unlocked_ioctl = vhm_dev_ioctl, ++}; ++ ++static int __init vhm_init(void) ++{ ++ pr_info("vhm: initializing\n"); ++ ++ /* Try to dynamically allocate a major number for the device */ ++ major = register_chrdev(0, DEVICE_NAME, &fops); ++ if (major < 0) { ++ pr_warn("vhm: failed to register a major number\n"); ++ return major; ++ } ++ pr_info("vhm: registered correctly with major number %d\n", major); ++ ++ /* Register the device class */ ++ vhm_class = class_create(THIS_MODULE, CLASS_NAME); ++ if (IS_ERR(vhm_class)) { ++ unregister_chrdev(major, DEVICE_NAME); ++ pr_warn("vhm: failed to register device class\n"); ++ return PTR_ERR(vhm_class); ++ } ++ pr_info("vhm: device class registered correctly\n"); ++ ++ /* Register the device driver */ ++ vhm_device = device_create(vhm_class, NULL, MKDEV(major, 0), ++ NULL, DEVICE_NAME); ++ if (IS_ERR(vhm_device)) { ++ class_destroy(vhm_class); ++ unregister_chrdev(major, DEVICE_NAME); ++ pr_warn("vhm: failed to create the device\n"); ++ return PTR_ERR(vhm_device); ++ } ++ ++ pr_info("vhm: Virtio & Hypervisor service module initialized\n"); ++ return 0; ++} ++static void __exit vhm_exit(void) ++{ ++ device_destroy(vhm_class, MKDEV(major, 0)); ++ class_unregister(vhm_class); 
++ class_destroy(vhm_class); ++ unregister_chrdev(major, DEVICE_NAME); ++ pr_info("vhm: exit\n"); ++} ++ ++module_init(vhm_init); ++module_exit(vhm_exit); ++ ++MODULE_AUTHOR("Intel"); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("This is a char device driver, acts as a route " ++ "responsible for transferring IO requsts from other modules " ++ "either in user-space or in kernel to and from hypervisor"); ++MODULE_VERSION("0.1"); +Index: kernel-coe-tracker/drivers/vhm/Makefile +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/drivers/vhm/Makefile +@@ -0,0 +1 @@ ++obj-y += vhm_vm_mngt.o vhm_hypercall.o +Index: kernel-coe-tracker/drivers/vhm/vhm_hypercall.c +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/drivers/vhm/vhm_hypercall.c +@@ -0,0 +1,134 @@ ++/* ++ * virtio and hyperviosr service module (VHM): hypercall wrap ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * ++ */ ++#include ++#include ++#include ++#include ++ ++inline long vhm_create_vm(struct vhm_vm *vm, unsigned long ioctl_param) ++{ ++ long ret = 0; ++ struct acrn_create_vm created_vm; ++ ++ if (copy_from_user(&created_vm, (void *)ioctl_param, ++ sizeof(struct acrn_create_vm))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_CREATE_VM, 0, ++ virt_to_phys(&created_vm)); ++ if ((ret < 0) || ++ (created_vm.vmid == ACRN_INVALID_VMID)) { ++ pr_err("vhm: failed to create VM from Hypervisor !\n"); ++ return -EFAULT; ++ } ++ ++ if (copy_to_user((void *)ioctl_param, &created_vm, ++ sizeof(struct acrn_create_vm))) ++ return -EFAULT; ++ ++ vm->vmid = created_vm.vmid; ++ pr_info("vhm: VM %ld created\n", created_vm.vmid); ++ ++ return ret; ++} ++ ++inline long vhm_resume_vm(struct vhm_vm *vm) ++{ ++ long ret = 0; ++ ++ ret = acrn_hypercall1(HC_RESUME_VM, vm->vmid); ++ if (ret < 0) { ++ pr_err("vhm: failed to start VM %ld!\n", vm->vmid); ++ return -EFAULT; ++ } ++ ++ return ret; ++} ++ ++inline long vhm_pause_vm(struct vhm_vm *vm) ++{ ++ long ret = 0; ++ ++ ret = acrn_hypercall1(HC_PAUSE_VM, vm->vmid); ++ if (ret < 0) { ++ pr_err("vhm: failed to pause VM %ld!\n", vm->vmid); ++ return -EFAULT; ++ } ++ ++ return ret; ++} ++ ++inline long vhm_destroy_vm(struct vhm_vm *vm) ++{ ++ long ret = 0; ++ ++ ret = acrn_hypercall1(HC_DESTROY_VM, vm->vmid); ++ if (ret < 0) { ++ pr_err("failed to destroy VM %ld\n", vm->vmid); ++ return -EFAULT; ++ } ++ vm->vmid = ACRN_INVALID_VMID; ++ ++ return ret; ++} ++ ++inline long vhm_query_vm_state(struct vhm_vm *vm) ++{ ++ long ret = 0; ++ ++ ret = acrn_hypercall1(HC_QUERY_VMSTATE, vm->vmid); ++ if (ret < 0) { ++ pr_err("vhm: failed to query VM State%ld!\n", vm->vmid); ++ return -EFAULT; ++ } ++ ++ return ret; ++} +Index: kernel-coe-tracker/drivers/vhm/vhm_vm_mngt.c +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/drivers/vhm/vhm_vm_mngt.c +@@ -0,0 +1,107 @@ ++/* ++ * virtio and 
hyperviosr service module (VHM): vm management ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * Liang Ding ++ * Jason Zeng ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++LIST_HEAD(vhm_vm_list); ++DEFINE_MUTEX(vhm_vm_list_lock); ++ ++struct vhm_vm *find_get_vm(unsigned long vmid) ++{ ++ struct vhm_vm *vm; ++ ++ mutex_lock(&vhm_vm_list_lock); ++ list_for_each_entry(vm, &vhm_vm_list, list) { ++ if (vm->vmid == vmid) { ++ vm->refcnt++; ++ mutex_unlock(&vhm_vm_list_lock); ++ return vm; ++ } ++ } ++ mutex_unlock(&vhm_vm_list_lock); ++ return NULL; ++} ++ ++void put_vm(struct vhm_vm *vm) ++{ ++ mutex_lock(&vhm_vm_list_lock); ++ vm->refcnt--; ++ if (vm->refcnt == 0) { ++ list_del(&vm->list); ++ kfree(vm); ++ pr_info("vhm: freed vm\n"); ++ } ++ mutex_unlock(&vhm_vm_list_lock); ++} ++ ++void vm_list_add(struct list_head *list) ++{ ++ list_add(list, &vhm_vm_list); ++} ++ ++void vm_mutex_lock(struct mutex *mlock) ++{ ++ mutex_lock(mlock); ++} ++ ++void vm_mutex_unlock(struct mutex *mlock) ++{ ++ mutex_unlock(mlock); ++} +Index: kernel-coe-tracker/include/linux/vhm/acrn_common.h +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/include/linux/vhm/acrn_common.h +@@ -0,0 +1,71 @@ ++/* ++ * virtio and hyperviosr service module (VHM): commom.h ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. 
++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ */ ++ ++#ifndef ACRN_COMMON_H ++#define ACRN_COMMON_H ++ ++#ifdef __KERNEL__ ++#include ++#else ++#include ++#endif ++ ++/* ++ * Commmon structures for ACRN/VHM/DM ++ */ ++ ++/* Common API params */ ++struct acrn_create_vm { ++ unsigned long vmid; /* OUT: HV return vmid to VHM */ ++ unsigned long vcpu_num; /* IN: VM vcpu number */ ++} __attribute__((aligned(8))); ++ ++#endif /* ACRN_COMMON_H */ +Index: kernel-coe-tracker/include/linux/vhm/acrn_hv_defs.h +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/include/linux/vhm/acrn_hv_defs.h +@@ -0,0 +1,81 @@ ++/* ++ * virtio and hyperviosr service module (VHM): hypercall header ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * ++ */ ++ ++#ifndef ACRN_HV_DEFS_H ++#define ACRN_HV_DEFS_H ++ ++/* ++ * Commmon structures for ACRN/VHM/DM ++ */ ++#include "acrn_common.h" ++ ++/* ++ * Commmon structures for HV/VHM ++ */ ++ ++#define _HC_ID(x, y) (((x)<<24)|(y)) ++ ++#define HC_ID 0x7FUL ++ ++/* VM management */ ++#define HC_ID_VM_BASE 0x0UL ++#define HC_GET_API_VERSION _HC_ID(HC_ID, HC_ID_VM_BASE + 0x00) ++#define HC_CREATE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x01) ++#define HC_DESTROY_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x02) ++#define HC_RESUME_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03) ++#define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04) ++#define HC_QUERY_VMSTATE _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05) ++ ++#define ACRN_DOM0_VMID (0UL) ++#define ACRN_INVALID_VMID (-1UL) ++#define ACRN_INVALID_HPA (-1UL) ++ ++#endif /* ACRN_HV_DEFS_H */ +Index: kernel-coe-tracker/include/linux/vhm/vhm_hypercall.h +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/include/linux/vhm/vhm_hypercall.h +@@ -0,0 +1,148 @@ ++/* ++ * virtio and hyperviosr service module (VHM): hypercall.h ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * ++ */ ++ ++#ifndef VHM_HYPERCALL_H ++#define VHM_HYPERCALL_H ++ ++#include ++ ++static inline long acrn_hypercall0(unsigned long hyp_id) ++{ ++ ++ /* x86-64 System V ABI register usage */ ++ register signed long result asm("rax"); ++ register unsigned long r8 asm("r8") = hyp_id; ++ ++ /* Execute vmcall */ ++ asm volatile(".byte 0x0F,0x01,0xC1\n" ++ : "=r"(result) ++ : "r"(r8)); ++ ++ /* Return result to caller */ ++ return result; ++} ++ ++static inline long acrn_hypercall1(unsigned long hyp_id, unsigned long param1) ++{ ++ ++ /* x86-64 System V ABI register usage */ ++ register signed long result asm("rax"); ++ register unsigned long r8 asm("r8") = hyp_id; ++ ++ /* Execute vmcall */ ++ asm volatile(".byte 0x0F,0x01,0xC1\n" ++ : "=r"(result) ++ : "D"(param1), "r"(r8)); ++ ++ /* Return result to caller */ ++ return result; ++} ++ ++static inline long acrn_hypercall2(unsigned long hyp_id, unsigned long param1, ++ unsigned long param2) ++{ ++ ++ /* x86-64 System V ABI register usage */ ++ register signed long result asm("rax"); ++ register unsigned long r8 asm("r8") = hyp_id; ++ ++ /* Execute vmcall */ ++ asm volatile(".byte 0x0F,0x01,0xC1\n" ++ : "=r"(result) ++ : "D"(param1), "S"(param2), "r"(r8)); ++ ++ /* Return result to caller */ ++ return result; ++} ++ ++static inline long acrn_hypercall3(unsigned long hyp_id, unsigned long param1, ++ unsigned long param2, unsigned long param3) ++{ ++ ++ /* x86-64 System V ABI register usage */ ++ register signed long result asm("rax"); ++ register unsigned long r8 asm("r8") = hyp_id; ++ ++ /* Execute vmcall */ ++ asm volatile(".byte 0x0F,0x01,0xC1\n" ++ : "=r"(result) ++ : "D"(param1), "S"(param2), "d"(param3), "r"(r8)); ++ ++ /* Return result to caller */ ++ return result; ++} ++ ++static inline long acrn_hypercall4(unsigned long hyp_id, unsigned long param1, ++ unsigned long param2, unsigned long param3, ++ unsigned long param4) ++{ ++ ++ /* x86-64 System V ABI register usage */ ++ register signed long result 
asm("rax"); ++ register unsigned long r8 asm("r8") = hyp_id; ++ ++ /* Execute vmcall */ ++ asm volatile(".byte 0x0F,0x01,0xC1\n" ++ : "=r"(result) ++ : "D"(param1), "S"(param2), "d"(param3), ++ "c"(param4), "r"(r8)); ++ ++ /* Return result to caller */ ++ return result; ++} ++ ++inline long vhm_create_vm(struct vhm_vm *vm, unsigned long ioctl_param); ++inline long vhm_resume_vm(struct vhm_vm *vm); ++inline long vhm_pause_vm(struct vhm_vm *vm); ++inline long vhm_destroy_vm(struct vhm_vm *vm); ++inline long vhm_query_vm_state(struct vhm_vm *vm); ++ ++#endif /* VHM_HYPERCALL_H */ +Index: kernel-coe-tracker/include/linux/vhm/vhm_ioctl_defs.h +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/include/linux/vhm/vhm_ioctl_defs.h +@@ -0,0 +1,67 @@ ++/* ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * 1. Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * 2. 
Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND ++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE ++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS ++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF ++ * SUCH DAMAGE. ++ * ++ * $FreeBSD$ ++ */ ++ ++#ifndef _VHM_IOCTL_DEFS_H_ ++#define _VHM_IOCTL_DEFS_H_ ++ ++/* Commmon structures for ACRN/VHM/DM */ ++#include "acrn_common.h" ++ ++/* ++ * Commmon IOCTL ID defination for VHM/DM ++ */ ++#define _IC_ID(x, y) (((x)<<24)|(y)) ++#define IC_ID 0x5FUL ++ ++/* VM management */ ++#define IC_ID_VM_BASE 0x0UL ++#define IC_GET_API_VERSION _IC_ID(IC_ID, IC_ID_VM_BASE + 0x00) ++#define IC_CREATE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x01) ++#define IC_DESTROY_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x02) ++#define IC_RESUME_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03) ++#define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) ++#define IC_QUERY_VMSTATE _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) ++ ++#endif /* VHM_IOCTL_DEFS_H */ +Index: kernel-coe-tracker/include/linux/vhm/vhm_vm_mngt.h +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/include/linux/vhm/vhm_vm_mngt.h +@@ -0,0 +1,78 @@ ++/* ++ * virtio and hyperviosr service module (VHM): 
vm management ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * Liang Ding ++ * Jason Zeng ++ * Xiao Zheng ++ * Jason Chen CJ ++ * ++ */ ++#ifndef VHM_VM_MNGT_H ++#define VHM_VM_MNGT_H ++ ++#include ++ ++extern struct list_head vhm_vm_list; ++extern struct mutex vhm_vm_list_lock; ++ ++struct vhm_vm { ++ struct device *dev; ++ struct list_head list; ++ unsigned long vmid; ++ long refcnt; ++}; ++ ++struct vhm_vm *find_get_vm(unsigned long vmid); ++void put_vm(struct vhm_vm *vm); ++ ++void vm_list_add(struct list_head *list); ++void vm_mutex_lock(struct mutex *mlock); ++void vm_mutex_unlock(struct mutex *mlock); ++ ++#endif diff --git a/patches/0002-drm-i915-Add-transcoder-restriction-to-PSR2.drm b/patches/0002-drm-i915-Add-transcoder-restriction-to-PSR2.drm new file mode 100644 index 0000000000..cc6eaf9bc4 --- /dev/null +++ b/patches/0002-drm-i915-Add-transcoder-restriction-to-PSR2.drm @@ -0,0 +1,69 @@ +From 91eae8b39d6a2f33a152b19319b161a95d5d54d7 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= +Date: Tue, 20 Aug 2019 15:33:24 -0700 +Subject: [PATCH 002/690] drm/i915: Add transcoder restriction to PSR2 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +According to PSR2_CTL definition in BSpec there is only one instance +of PSR2_CTL. Platforms gen < 12 with EDP transcoder only support PSR2 +on TRANSCODER_EDP while on TGL PSR2 is only supported by +TRANSCODER_A. 
+ +Since BDW PSR is allowed on any port, but we need to restrict by +transcoder. + +v8: Renamed _psr2_supported_in_trans() to psr2_supported() (Lucas) + +v9: Renamed psr2_supported() to transcoder_has_psr2() (Ville) + +BSpec: 7713 +BSpec: 20584 +Cc: Dhinakaran Pandiyan +Cc: Rodrigo Vivi +Reviewed-by: Lucas De Marchi +Signed-off-by: José Roberto de Souza +Signed-off-by: Lucas De Marchi +Reviewed-by: Anshuman Gupta +Link: https://patchwork.freedesktop.org/patch/msgid/20190820223325.27490-2-jose.souza@intel.com +--- + drivers/gpu/drm/i915/display/intel_psr.c | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c +index 77232f6bca17..771d9a40bf12 100644 +--- a/drivers/gpu/drm/i915/display/intel_psr.c ++++ b/drivers/gpu/drm/i915/display/intel_psr.c +@@ -534,6 +534,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) + I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); + } + ++static bool ++transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) ++{ ++ if (INTEL_GEN(dev_priv) >= 12) ++ return trans == TRANSCODER_A; ++ else ++ return trans == TRANSCODER_EDP; ++} ++ + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) + { +@@ -545,6 +554,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, + if (!dev_priv->psr.sink_psr2_support) + return false; + ++ if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { ++ DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n", ++ transcoder_name(crtc_state->cpu_transcoder)); ++ return false; ++ } ++ + /* + * DSC and PSR2 cannot be enabled simultaneously. 
If a requested + * resolution requires DSC to be enabled, priority is given to DSC +-- +2.17.1 + diff --git a/patches/0002-mfd-intel_soc_pmic_bxtwc-Create-connection-for-t.usb-typec b/patches/0002-mfd-intel_soc_pmic_bxtwc-Create-connection-for-t.usb-typec new file mode 100644 index 0000000000..cdff6113e6 --- /dev/null +++ b/patches/0002-mfd-intel_soc_pmic_bxtwc-Create-connection-for-t.usb-typec @@ -0,0 +1,62 @@ +From a4d7a0f85ecef0fa4d24d07235f0e56bb77a5c58 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Mon, 5 Aug 2019 14:54:37 +0300 +Subject: [PATCH 02/18] mfd: intel_soc_pmic_bxtwc: Create connection for the + USB role switch + +Intel WhiskeyCove PMIC can detect both VBUS and ID pin level +changes. That information can be used for determining the +USB role with micro-AB connectors. The device connection +is describede between the wcove charger and Intel xHCI USB +role switch devices. + +Signed-off-by: Heikki Krogerus +--- + drivers/mfd/intel_soc_pmic_bxtwc.c | 17 +++++++++++++++++ + 1 file changed, 17 insertions(+) + +diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c +index 739cfb5b69fe..de79901d8c93 100644 +--- a/drivers/mfd/intel_soc_pmic_bxtwc.c ++++ b/drivers/mfd/intel_soc_pmic_bxtwc.c +@@ -225,6 +225,18 @@ static struct resource tmu_resources[] = { + DEFINE_RES_IRQ_NAMED(BXTWC_TMU_IRQ, "TMU"), + }; + ++static struct software_node_ref_args usb_mux_ref; ++ ++static const struct software_node_reference charger_refs[] = { ++ { "usb-role-switch", 1, &usb_mux_ref }, ++ { } ++}; ++ ++static const struct software_node charger_node = { ++ .name = "charger", ++ .references = charger_refs ++}; ++ + static struct mfd_cell bxt_wc_dev[] = { + { + .name = "bxt_wcove_gpadc", +@@ -245,6 +257,7 @@ static struct mfd_cell bxt_wc_dev[] = { + .name = "bxt_wcove_ext_charger", + .num_resources = ARRAY_SIZE(charger_resources), + .resources = charger_resources, ++ .node = &charger_node + }, + { + .name = "bxt_wcove_bcu", +@@ -546,6 +559,10 @@ 
static int bxtwc_probe(struct platform_device *pdev) + return ret; + } + ++ usb_mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw"); ++ if (!usb_mux_ref.node) ++ return -EPROBE_DEFER; ++ + ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev, + ARRAY_SIZE(bxt_wc_dev), NULL, 0, NULL); + if (ret) { +-- +2.17.1 + diff --git a/patches/0002-net-stmmac-Fix-missing-IFF_MULTICAST-check-in.connectivity b/patches/0002-net-stmmac-Fix-missing-IFF_MULTICAST-check-in.connectivity new file mode 100644 index 0000000000..00ffa88aac --- /dev/null +++ b/patches/0002-net-stmmac-Fix-missing-IFF_MULTICAST-check-in.connectivity @@ -0,0 +1,34 @@ +From e9ec2f3daeca7b9148fb442033c3a15769045886 Mon Sep 17 00:00:00 2001 +From: "Verma, Aashish" +Date: Thu, 25 Jul 2019 01:06:19 +0800 +Subject: [PATCH 002/108] net: stmmac: Fix missing IFF_MULTICAST check in + dwmac4_set_filter + +Without checking for IFF_MULTICAST flag, it is wrong to assume multicast +filtering is always enabled. As a result, we cannot disable multicast +filtering in MAC. 
+ +Fixes: 477286b53f55 stmmac: add GMAC4 core support + +Signed-off-by: Verma, Aashish +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 5a7b0aca1d31..422fd1c54dfe 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -420,7 +420,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, + value |= GMAC_PACKET_FILTER_PM; + /* Set all the bits of the HASH tab */ + memset(mc_filter, 0xff, sizeof(mc_filter)); +- } else if (!netdev_mc_empty(dev)) { ++ } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { + struct netdev_hw_addr *ha; + + /* Hash filter for multicast */ +-- +2.17.1 + diff --git a/patches/0002-perf-x86-intel-Add-Elkhart-Lake-support.core-ehl b/patches/0002-perf-x86-intel-Add-Elkhart-Lake-support.core-ehl new file mode 100644 index 0000000000..4991f88aa8 --- /dev/null +++ b/patches/0002-perf-x86-intel-Add-Elkhart-Lake-support.core-ehl @@ -0,0 +1,55 @@ +From 9903063d52d3aae1bbda0fd11ea893695b54ae13 Mon Sep 17 00:00:00 2001 +From: Kan Liang +Date: Tue, 1 Oct 2019 06:20:52 -0700 +Subject: [PATCH 02/12] perf/x86/intel: Add Elkhart Lake support + +Add topdown support as well + +Signed-off-by: Kan Liang +--- + arch/x86/events/intel/core.c | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index fcef678c3423..3f426aff32a8 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -1890,6 +1890,19 @@ static __initconst const u64 tnt_hw_cache_extra_regs + }, + }; + ++EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0"); ++EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0x72,umask=0x0"); ++EVENT_ATTR_STR(topdown-bad-spec, 
td_bad_spec_tnt, "event=0x73,umask=0x0"); ++EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0"); ++ ++static struct attribute *tnt_events_attrs[] = { ++ EVENT_PTR(td_fe_bound_tnt), ++ EVENT_PTR(td_retiring_tnt), ++ EVENT_PTR(td_bad_spec_tnt), ++ EVENT_PTR(td_be_bound_tnt), ++ NULL ++}; ++ + static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0), +@@ -4746,6 +4759,7 @@ __init int intel_pmu_init(void) + break; + + case INTEL_FAM6_ATOM_TREMONT_D: ++ case INTEL_FAM6_ATOM_TREMONT: + x86_pmu.late_ack = true; + memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); +@@ -4766,6 +4780,7 @@ __init int intel_pmu_init(void) + x86_pmu.lbr_pt_coexist = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.get_event_constraints = tnt_get_event_constraints; ++ td_attr = tnt_events_attrs;; + extra_attr = slm_format_attr; + pr_cont("Tremont events, "); + name = "Tremont"; +-- +2.17.1 + diff --git a/patches/0002-pinctrl-intel-Add-Coffee-Lake-S-ACPI-ID.lpss b/patches/0002-pinctrl-intel-Add-Coffee-Lake-S-ACPI-ID.lpss new file mode 100644 index 0000000000..bccb886350 --- /dev/null +++ b/patches/0002-pinctrl-intel-Add-Coffee-Lake-S-ACPI-ID.lpss @@ -0,0 +1,29 @@ +From e72f8266f4696e7368d2abb3085aa6a826664791 Mon Sep 17 00:00:00 2001 +From: Mika Westerberg +Date: Tue, 26 Sep 2017 13:10:23 +0300 +Subject: [PATCH 02/40] pinctrl: intel: Add Coffee Lake-S ACPI ID + +Intel Coffee Lake-S PCH has the same GPIO hardware than Sunrisepoint-H +PCH but the ACPI ID is different. Add this new ACPI ID to the list of +supported devices. 
+ +Signed-off-by: Mika Westerberg +--- + drivers/pinctrl/intel/pinctrl-sunrisepoint.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +index 44d7f50bbc82..11c23cf48dc2 100644 +--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c ++++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +@@ -589,6 +589,7 @@ static const struct intel_pinctrl_soc_data spth_soc_data = { + static const struct acpi_device_id spt_pinctrl_acpi_match[] = { + { "INT344B", (kernel_ulong_t)&sptlp_soc_data }, + { "INT345D", (kernel_ulong_t)&spth_soc_data }, ++ { "INT3451", (kernel_ulong_t)&spth_soc_data }, + { } + }; + MODULE_DEVICE_TABLE(acpi, spt_pinctrl_acpi_match); +-- +2.17.1 + diff --git a/patches/0002-trusty-Add-notifier-before-and-after-every-smc-call.trusty b/patches/0002-trusty-Add-notifier-before-and-after-every-smc-call.trusty new file mode 100644 index 0000000000..124500e0d5 --- /dev/null +++ b/patches/0002-trusty-Add-notifier-before-and-after-every-smc-call.trusty @@ -0,0 +1,98 @@ +From c28c3cdfa2ffe5db15d216352dbcb163d125b924 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Thu, 5 May 2016 15:43:44 -0700 +Subject: [PATCH 02/63] trusty: Add notifier before and after every smc call. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Called with local interrupts disabled. 
+ +Change-Id: I5d2b15ce0fee29f067d8403a6f7127046fc185e9 +Signed-off-by: Arve Hjønnevåg +--- + drivers/trusty/trusty.c | 26 ++++++++++++++++++++++++++ + include/linux/trusty/trusty.h | 10 ++++++++++ + 2 files changed, 36 insertions(+) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 59ecf60fc050..7efcff89610c 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -26,6 +26,7 @@ + + struct trusty_state { + struct mutex smc_lock; ++ struct atomic_notifier_head notifier; + }; + + #ifdef CONFIG_ARM64 +@@ -123,7 +124,14 @@ static ulong trusty_std_call_helper(struct device *dev, ulong smcnr, + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + while (true) { ++ local_irq_disable(); ++ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE, ++ NULL); + ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2); ++ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED, ++ NULL); ++ local_irq_enable(); ++ + if ((int)ret != SM_ERR_BUSY) + break; + +@@ -178,6 +186,23 @@ s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + } + EXPORT_SYMBOL(trusty_std_call32); + ++int trusty_call_notifier_register(struct device *dev, struct notifier_block *n) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return atomic_notifier_chain_register(&s->notifier, n); ++} ++EXPORT_SYMBOL(trusty_call_notifier_register); ++ ++int trusty_call_notifier_unregister(struct device *dev, ++ struct notifier_block *n) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return atomic_notifier_chain_unregister(&s->notifier, n); ++} ++EXPORT_SYMBOL(trusty_call_notifier_unregister); ++ + static int trusty_remove_child(struct device *dev, void *data) + { + platform_device_unregister(to_platform_device(dev)); +@@ -201,6 +226,7 @@ static int trusty_probe(struct platform_device *pdev) + goto err_allocate_state; + } + mutex_init(&s->smc_lock); ++ 
ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); + platform_set_drvdata(pdev, s); + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index 30d4300ba301..ce00c1d46a5e 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -43,4 +43,14 @@ static inline s64 trusty_fast_call64(struct device *dev, + #endif + #endif + ++struct notifier_block; ++enum { ++ TRUSTY_CALL_PREPARE, ++ TRUSTY_CALL_RETURNED, ++}; ++int trusty_call_notifier_register(struct device *dev, ++ struct notifier_block *n); ++int trusty_call_notifier_unregister(struct device *dev, ++ struct notifier_block *n); ++ + #endif +-- +2.17.1 + diff --git a/patches/0002-xhci-Apply-XHCI_TRUST_TX_LENGTH-quirk-for-Tiger-L.usb-xhci b/patches/0002-xhci-Apply-XHCI_TRUST_TX_LENGTH-quirk-for-Tiger-L.usb-xhci new file mode 100644 index 0000000000..5bc17eda7f --- /dev/null +++ b/patches/0002-xhci-Apply-XHCI_TRUST_TX_LENGTH-quirk-for-Tiger-L.usb-xhci @@ -0,0 +1,40 @@ +From 813252799c8a86160c69c138782dcb4d5b70fe77 Mon Sep 17 00:00:00 2001 +From: Zhengjun Xing +Date: Thu, 22 Mar 2018 11:05:45 +0800 +Subject: [PATCH 2/4] xhci: Apply XHCI_TRUST_TX_LENGTH quirk for Tiger Lake + +Intel Tiger Lake platform was verified to require +XHCI_TRUST_TX_LENGTH quirk. 
+ +Signed-off-by: Zhengjun Xing +Signed-off-by: Mathias Nyman +--- + drivers/usb/host/xhci-pci.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 1e0236e90687..40a8454075fb 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -48,6 +48,7 @@ + #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9 + #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec + #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0 ++#define PCI_DEVICE_ID_INTEL_TGL_XHCI 0x9a13 + + #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba +@@ -215,6 +216,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI)) + xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; + ++ if (pdev->vendor == PCI_VENDOR_ID_INTEL && ++ pdev->device == PCI_DEVICE_ID_INTEL_TGL_XHCI) ++ xhci->quirks |= XHCI_TRUST_TX_LENGTH; ++ + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_EJ168) { + xhci->quirks |= XHCI_RESET_ON_RESUME; +-- +2.17.1 + diff --git a/patches/0003-ASoC-Intel-Skylake-Parse-module-configuration-from-m.audio b/patches/0003-ASoC-Intel-Skylake-Parse-module-configuration-from-m.audio new file mode 100644 index 0000000000..34cb8c0ed2 --- /dev/null +++ b/patches/0003-ASoC-Intel-Skylake-Parse-module-configuration-from-m.audio @@ -0,0 +1,115 @@ +From bc70b7012e5fbb36bcb850ceac0e42ac5a35cca0 Mon Sep 17 00:00:00 2001 +From: Piotr Maziarz +Date: Fri, 10 May 2019 16:28:07 +0200 +Subject: [PATCH 003/193] ASoC: Intel: Skylake: Parse module configuration from + manifest + +Module configuration contains valuable information about module, +primarly obs, ibs and is_bytes. 
+ +Change-Id: I9b4b452010a508084e1ed2f63bfc7c9a519ba3c1 +Signed-off-by: Piotr Maziarz +--- + sound/soc/intel/skylake/skl-sst-dsp.h | 13 +++++++++ + sound/soc/intel/skylake/skl-sst-utils.c | 36 ++++++++++++++++++++++--- + 2 files changed, 46 insertions(+), 3 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index ecd33f1d0064..1295ca1cc5c3 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -166,6 +166,17 @@ struct skl_dsp_loader_ops { + int stream_tag); + }; + ++struct adsp_module_config { ++ u32 par[4]; /* module parameters */ ++ u32 is_bytes; /* actual size of instance .bss (bytes) */ ++ u32 cps; /* cycles per second */ ++ u32 ibs; /* input buffer size (bytes) */ ++ u32 obs; /* output buffer size (bytes) */ ++ u32 module_flags; /* flags, res for future use */ ++ u32 cpc; /* cycles per single run */ ++ u32 obls; /* output block size, res for future use */ ++} __packed; ++ + #define MAX_INSTANCE_BUFF 2 + + struct uuid_module { +@@ -175,6 +186,8 @@ struct uuid_module { + int max_instance; + u64 pvt_id[MAX_INSTANCE_BUFF]; + int *instance_id; ++ struct adsp_module_config *configs; ++ int num_configs; + + struct list_head list; + }; +diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index ff24d57ebbda..d0f750eb1f9c 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -235,6 +235,7 @@ int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, + struct uuid_module *module; + struct firmware stripped_fw; + unsigned int safe_file; ++ struct adsp_module_config *mod_configs; + + /* Get the FW pointer to derive ADSP header */ + stripped_fw.data = fw->data; +@@ -268,13 +269,24 @@ int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, + + mod_entry = (struct adsp_module_entry *) + (buf + offset + adsp_hdr->len); ++ mod_configs = (struct 
adsp_module_config *) ++ (buf + safe_file); + + /* +- * Read the UUID(GUID) from FW Manifest. ++ * Read modules data from FW Manifest. + * + * The 16 byte UUID format is: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX +- * Populate the UUID table to store module_id and loadable flags +- * for the module. ++ * Populate the table to store module_id, loadable flags and ++ * configurations array for the module. ++ * ++ * Manifest structure: ++ * header ++ * N * module entry (N specified in header) ++ * M * module configuration ++ * ++ * Each module entry can have 0 or more configurations. Configurations ++ * are linked to entries by offset and counter stored in entry ++ * (offset + conter <= M). + */ + + for (i = 0; i < num_entry; i++, mod_entry++) { +@@ -296,6 +308,24 @@ int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, + return -ENOMEM; + } + ++ if (mod_entry->cfg_count) { ++ size = sizeof(*mod_configs) * (mod_entry->cfg_offset ++ + mod_entry->cfg_count); ++ if (stripped_fw.size <= safe_file + size) { ++ dev_err(ctx->dev, "Small fw file size, no space for module cfgs\n"); ++ return -EINVAL; ++ } ++ module->num_configs = mod_entry->cfg_count; ++ size = sizeof(*mod_configs) * mod_entry->cfg_count; ++ module->configs = devm_kmemdup(ctx->dev, ++ &mod_configs[mod_entry->cfg_offset], ++ size, GFP_KERNEL); ++ if (!module->configs) { ++ list_del_init(&skl->module_list); ++ return -ENOMEM; ++ } ++ } ++ + list_add_tail(&module->list, &skl->module_list); + + dev_dbg(ctx->dev, +-- +2.17.1 + diff --git a/patches/0003-EDAC-igen6-Add-debugfs-interface-for-Intel-client-SoC.edac b/patches/0003-EDAC-igen6-Add-debugfs-interface-for-Intel-client-SoC.edac new file mode 100644 index 0000000000..8f9f556908 --- /dev/null +++ b/patches/0003-EDAC-igen6-Add-debugfs-interface-for-Intel-client-SoC.edac @@ -0,0 +1,94 @@ +From 6936a2ba734f69f35159e415d401fdd1c124b88e Mon Sep 17 00:00:00 2001 +From: Qiuxu Zhuo +Date: Mon, 5 Nov 2018 16:49:14 +0800 +Subject: [PATCH 3/5] EDAC, igen6: Add 
debugfs interface for Intel client SoC + EDAC driver + +This patch adds debugfs support to fake memory correctable errors +to test the error reporting path and the error address decoding +logic in the igen6_edac driver. + +Please note that the fake errors are also reported to EDAC core and +then the CE counter in EDAC sysfs is also increased. + +Signed-off-by: Qiuxu Zhuo +--- + drivers/edac/igen6_edac.c | 46 +++++++++++++++++++++++++++++++++++++++ + 1 file changed, 46 insertions(+) + +diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c +index fb020b8fa989..405f3dad73aa 100644 +--- a/drivers/edac/igen6_edac.c ++++ b/drivers/edac/igen6_edac.c +@@ -654,8 +654,52 @@ static void igen6_reg_dump(void) + edac_dbg(2, "Tolud : 0x%x", igen6_tolud); + edac_dbg(2, "Tom : 0x%llx", igen6_tom); + } ++ ++static struct dentry *igen6_test; ++ ++static int debugfs_u64_set(void *data, u64 val) ++{ ++ u64 ecclog; ++ ++ if ((val >= igen6_tolud && val < _4GB) || val >= igen6_touud) { ++ edac_dbg(0, "Address 0x%llx out of range\n", val); ++ return 0; ++ } ++ ++ pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val); ++ ++ val >>= IGEN6_ECCERRLOG_ADDR_SHIFT; ++ ecclog = (val << IGEN6_ECCERRLOG_ADDR_SHIFT) | IGEN6_ECCERRLOG_CE; ++ ++ if (!ecclog_gen_pool_add(ecclog)) ++ irq_work_queue(&ecclog_irq_work); ++ ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); ++ ++static void igen6_debug_setup(void) ++{ ++ igen6_test = edac_debugfs_create_dir("igen6_test"); ++ if (!igen6_test) ++ return; ++ ++ if (!edac_debugfs_create_file("addr", 0200, igen6_test, ++ NULL, &fops_u64_wo)) { ++ debugfs_remove(igen6_test); ++ igen6_test = NULL; ++ } ++} ++ ++static void igen6_debug_teardown(void) ++{ ++ debugfs_remove_recursive(igen6_test); ++} ++ + #else + static void igen6_reg_dump(void) {} ++static void igen6_debug_setup(void) {} ++static void igen6_debug_teardown(void) {} + #endif + + static int igen6_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent) +@@ -736,6 +780,7 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + goto fail2; + } + ++ igen6_debug_setup(); + return 0; + + fail2: +@@ -756,6 +801,7 @@ static void igen6_remove(struct pci_dev *pdev) + + edac_dbg(2, "\n"); + ++ igen6_debug_teardown(); + unregister_nmi_handler(NMI_LOCAL, IGEN6_NMI_NAME); + irq_work_sync(&ecclog_irq_work); + flush_work(&ecclog_work); +-- +2.17.1 + diff --git a/patches/0003-SEP-some-cleanup.sep-socwatch b/patches/0003-SEP-some-cleanup.sep-socwatch new file mode 100644 index 0000000000..c572bbf9b9 --- /dev/null +++ b/patches/0003-SEP-some-cleanup.sep-socwatch @@ -0,0 +1,196 @@ +From a83e7c90f997d8a4877393e44f74a916f625a19b Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Tue, 13 Nov 2018 12:27:51 -0800 +Subject: [PATCH 03/27] SEP: some cleanup + +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/sepdk/inc/utility.h | 2 +- + drivers/platform/x86/sepdk/sep/linuxos.c | 3 ++- + drivers/platform/x86/sepdk/sep/pebs.c | 3 --- + drivers/platform/x86/sepdk/sep/sys_info.c | 3 --- + drivers/platform/x86/sepdk/sep/unc_common.c | 3 --- + drivers/platform/x86/sepdk/sep/unc_gt.c | 2 -- + drivers/platform/x86/sepdk/sep/unc_mmio.c | 12 +----------- + 7 files changed, 4 insertions(+), 24 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/inc/utility.h b/drivers/platform/x86/sepdk/inc/utility.h +index c5eca9612b00..d470a656a4a1 100644 +--- a/drivers/platform/x86/sepdk/inc/utility.h ++++ b/drivers/platform/x86/sepdk/inc/utility.h +@@ -53,7 +53,7 @@ extern DISPATCH_NODE unc_power_dispatch; + #include + + #define SYS_MMIO_Read32(base, offset) \ +- ((base) ? readl((void __iomem *)((UIOP)(base) + (offset))) : 0) ++ ((base) ? 
readl((void __iomem *)(base) + (offset)) : 0) + extern U64 SYS_MMIO_Read64(U64 baseAddress, U64 offset); + + extern U64 SYS_Read_MSR(U32 msr); +diff --git a/drivers/platform/x86/sepdk/sep/linuxos.c b/drivers/platform/x86/sepdk/sep/linuxos.c +index 08da10e614d8..fa29d7f0a3a8 100755 +--- a/drivers/platform/x86/sepdk/sep/linuxos.c ++++ b/drivers/platform/x86/sepdk/sep/linuxos.c +@@ -263,6 +263,7 @@ static DRV_BOOL linuxos_Equal_VM_Exe_File(struct vm_area_struct *vma) + */ + static S32 linuxos_Map_Kernel_Modules(void) + { ++#if defined(CONFIG_MODULES) + struct module *current_module; + struct list_head *modules; + U16 exec_mode; +@@ -274,7 +275,7 @@ static S32 linuxos_Map_Kernel_Modules(void) + + SEP_DRV_LOG_TRACE_IN(""); + +-#if defined(CONFIG_MODULES) ++ + mutex_lock(&module_mutex); + + #if defined(DRV_EM64T) +diff --git a/drivers/platform/x86/sepdk/sep/pebs.c b/drivers/platform/x86/sepdk/sep/pebs.c +index 7537c1136ec4..0a428dc5a7bd 100755 +--- a/drivers/platform/x86/sepdk/sep/pebs.c ++++ b/drivers/platform/x86/sepdk/sep/pebs.c +@@ -961,7 +961,6 @@ VOID PEBS_Flush_Buffer(VOID *param) + U64 pebs_overflow_status = 0; + U64 lbr_tos_from_ip = 0ULL; + DRV_BOOL counter_overflowed = FALSE; +- // ECB pecb; + CPU_STATE pcpu; + EVENT_DESC evt_desc; + BUFFER_DESC bd; +@@ -1003,8 +1002,6 @@ VOID PEBS_Flush_Buffer(VOID *param) + SEP_DRV_LOG_TRACE("Pebs_overflow_status = 0x%llx, i=%d.", + pebs_overflow_status, i); + +- // pecb = LWPMU_DEVICE_PMU_register_data( +- // &devices[dev_idx])[cur_grp]; + FOR_EACH_DATA_REG(pecb, j) + { + if ((!DEV_CONFIG_enable_adaptive_pebs(pcfg) && +diff --git a/drivers/platform/x86/sepdk/sep/sys_info.c b/drivers/platform/x86/sepdk/sep/sys_info.c +index b72ce2894c82..cf5a90c6c543 100755 +--- a/drivers/platform/x86/sepdk/sep/sys_info.c ++++ b/drivers/platform/x86/sepdk/sep/sys_info.c +@@ -317,7 +317,6 @@ static void sys_info_Fill_CPUID(U32 num_cpuids, U32 basic_funcs, + VTSA_CPUID *cpuid_el; + U32 shift_nbits_core = 0; + U32 shift_nbits_pkg = 0; +- // U32 
family = 0; + U32 model = 0; + DRV_BOOL ht_supported = FALSE; + U32 apic_id = 0; +@@ -490,11 +489,9 @@ static void sys_info_Fill_CPUID(U32 num_cpuids, U32 basic_funcs, + MSR_FB_PCARD_ID_FUSE); + } + } else if (cpuid_function == 1) { +- // family = (U32)(rax >> 8 & 0x0f); + /* extended model bits */ + model = (U32)(rax >> 12 & 0xf0) | + (U32)(rax >> 4 & 0x0f); +- // model |= (U32)(rax >> 4 & 0x0f); + ht_supported = (rdx >> 28) & 1 ? TRUE : FALSE; + num_logical_per_physical = + (U32)((rbx & 0xff0000) >> 16); +diff --git a/drivers/platform/x86/sepdk/sep/unc_common.c b/drivers/platform/x86/sepdk/sep/unc_common.c +index 5442734a91f7..9ad1632aaafb 100755 +--- a/drivers/platform/x86/sepdk/sep/unc_common.c ++++ b/drivers/platform/x86/sepdk/sep/unc_common.c +@@ -266,7 +266,6 @@ VOID UNC_COMMON_PCI_Scan_For_Uncore(PVOID param, U32 dev_node, + VOID UNC_COMMON_Get_Platform_Topology(U32 dev_node) + { + U32 num_registers = 0; +- // U32 device_index = 0; + U32 bus_num = 0; + U32 i = 0; + U32 func_num = 0; +@@ -298,8 +297,6 @@ VOID UNC_COMMON_Get_Platform_Topology(U32 dev_node) + &platform_topology_prog_node, dev_node); + topology_regs = PLATFORM_TOPOLOGY_PROG_topology_topology_regs( + &platform_topology_prog_node, dev_node); +- // device_index = PLATFORM_TOPOLOGY_PROG_topology_device_device_index( +- // &platform_topology_prog_node, dev_node); + + for (i = 0; i < num_pkgs; i++) { + for (len = 0; len < num_registers; len++) { +diff --git a/drivers/platform/x86/sepdk/sep/unc_gt.c b/drivers/platform/x86/sepdk/sep/unc_gt.c +index 34e7650da94b..3d07888dac33 100755 +--- a/drivers/platform/x86/sepdk/sep/unc_gt.c ++++ b/drivers/platform/x86/sepdk/sep/unc_gt.c +@@ -399,7 +399,6 @@ static VOID unc_gt_Read_PMU_Data(PVOID param) + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; +- // U32 cur_grp; + U32 offset_delta; + U32 tmp_value_lo = 0; + U32 tmp_value_hi = 0; +@@ -418,7 +417,6 @@ static VOID unc_gt_Read_PMU_Data(PVOID param) + } + + package_num = core_to_package_map[this_cpu]; +- // 
cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + + FOR_EACH_PCI_DATA_REG_RAW(pecb, i, dev_idx) + { +diff --git a/drivers/platform/x86/sepdk/sep/unc_mmio.c b/drivers/platform/x86/sepdk/sep/unc_mmio.c +index b1d997d0f405..148925dea263 100755 +--- a/drivers/platform/x86/sepdk/sep/unc_mmio.c ++++ b/drivers/platform/x86/sepdk/sep/unc_mmio.c +@@ -251,9 +251,7 @@ static void unc_mmio_Enable_PMU(PVOID param) + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; +- // U32 num_events = 0; + U32 entry = 0; +- // U32 num_pkgs = num_packages; + U32 dev_node = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); +@@ -282,16 +280,11 @@ static void unc_mmio_Enable_PMU(PVOID param) + return; + } + +- // if (DEV_UNC_CONFIG_device_type(pcfg_unc) == DRV_SINGLE_INSTANCE) { +- // num_pkgs = 1; +- // } +- + virtual_addr = virtual_address_table(dev_node, entry); + + // NOTE THAT the enable function currently captures previous values + // for EMON collection to avoid unnecessary memory copy. 
+ if (DRV_CONFIG_emon_mode(drv_cfg)) { +- // num_events = ECB_num_events(pecb); + idx_w = ECB_operations_register_start(pecb, + PMU_OPERATION_WRITE); + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, +@@ -577,7 +570,7 @@ static VOID unc_mmio_Trigger_Read(PVOID param, U32 id) + } + value = SYS_MMIO_Read64(virtual_addr, offset_delta); + } else { +- value = SYS_MMIO_Read32((volatile unsigned int *)virtual_addr, offset_delta); ++ value = SYS_MMIO_Read32(virtual_addr, offset_delta); + } + value &= (U64)ECB_entries_max_bits(pecb, idx); + +@@ -640,7 +633,6 @@ static VOID unc_mmio_Read_PMU_Data(PVOID param) + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; +- // U32 num_events = 0; + U32 package_num; + U32 entry = 0; + U32 dev_node = 0; +@@ -673,8 +665,6 @@ static VOID unc_mmio_Read_PMU_Data(PVOID param) + + virtual_addr = virtual_address_table(dev_node, entry); + +- // num_events = ECB_num_events(pecb); +- + idx_w = ECB_operations_register_start(pecb, PMU_OPERATION_WRITE); + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) +-- +2.17.1 + diff --git a/patches/0003-VHM-add-guest-memory-management-support.acrn b/patches/0003-VHM-add-guest-memory-management-support.acrn new file mode 100644 index 0000000000..4a07df142c --- /dev/null +++ b/patches/0003-VHM-add-guest-memory-management-support.acrn @@ -0,0 +1,715 @@ +From b4213766de30cffe459102fbb1a746818a39fd4d Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:55 +0800 +Subject: [PATCH 003/150] VHM: add guest memory management support + +VHM provides guest memory management services for application. +It allocates/frees contiguous physical memory for guest based on dma-cma, +and provides corresponding EPT mapping for the allocated memory segment. 
+ +Change-Id: Ibbe26b0ccf8436700f44bca899b1ee38c2e4ef72 +Tracked-On: 218445 +Signed-off-by: liang ding +Signed-off-by: Jason Chen CJ +Signed-off-by: Jason Zeng +Signed-off-by: Xiao Zheng +Signed-off-by: Mingqiang Chi +Reviewed-on: +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/char/vhm/vhm_dev.c | 26 +++ + drivers/vhm/Makefile | 2 +- + drivers/vhm/vhm_hypercall.c | 5 + + drivers/vhm/vhm_mm.c | 364 +++++++++++++++++++++++++++++ + drivers/vhm/vhm_vm_mngt.c | 2 + + include/linux/vhm/acrn_hv_defs.h | 26 +++ + include/linux/vhm/acrn_vhm_mm.h | 88 +++++++ + include/linux/vhm/vhm_hypercall.h | 1 + + include/linux/vhm/vhm_ioctl_defs.h | 40 ++++ + include/linux/vhm/vhm_vm_mngt.h | 2 + + 10 files changed, 555 insertions(+), 1 deletion(-) + create mode 100644 drivers/vhm/vhm_mm.c + create mode 100644 include/linux/vhm/acrn_vhm_mm.h + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 527e90b187cf..3ea8de27cb3e 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -78,6 +78,7 @@ + + #include + #include ++#include + #include + #include + +@@ -100,6 +101,9 @@ static int vhm_dev_open(struct inode *inodep, struct file *filep) + vm->vmid = ACRN_INVALID_VMID; + vm->dev = vhm_device; + ++ INIT_LIST_HEAD(&vm->memseg_list); ++ mutex_init(&vm->seg_lock); ++ + vm_mutex_lock(&vhm_vm_list_lock); + vm->refcnt = 1; + vm_list_add(&vm->list); +@@ -163,6 +167,27 @@ static long vhm_dev_ioctl(struct file *filep, + ret = vhm_query_vm_state(vm); + break; + ++ case IC_ALLOC_MEMSEG: { ++ struct vm_memseg memseg; ++ ++ if (copy_from_user(&memseg, (void *)ioctl_param, ++ sizeof(struct vm_memseg))) ++ return -EFAULT; ++ ++ return alloc_guest_memseg(vm, &memseg); ++ } ++ ++ case IC_SET_MEMSEG: { ++ struct vm_memmap memmap; ++ ++ if (copy_from_user(&memmap, (void *)ioctl_param, ++ sizeof(struct vm_memmap))) ++ return -EFAULT; ++ ++ ret = map_guest_memseg(vm, &memmap); ++ break; ++ } ++ + default: + pr_warn("Unknown IOCTL 0x%x\n", 
ioctl_num); + ret = 0; +@@ -189,6 +214,7 @@ static const struct file_operations fops = { + .open = vhm_dev_open, + .read = vhm_dev_read, + .write = vhm_dev_write, ++ .mmap = vhm_dev_mmap, + .release = vhm_dev_release, + .unlocked_ioctl = vhm_dev_ioctl, + }; +diff --git a/drivers/vhm/Makefile b/drivers/vhm/Makefile +index 220697aaccb7..7e5ec421fbc7 100644 +--- a/drivers/vhm/Makefile ++++ b/drivers/vhm/Makefile +@@ -1 +1 @@ +-obj-y += vhm_vm_mngt.o vhm_hypercall.o ++obj-y += vhm_mm.o vhm_vm_mngt.o vhm_hypercall.o +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index ddc085d0fa11..d80087bcb5fb 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -53,6 +53,11 @@ + #include + #include + ++inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) ++{ ++ return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); ++} ++ + inline long vhm_create_vm(struct vhm_vm *vm, unsigned long ioctl_param) + { + long ret = 0; +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +new file mode 100644 +index 000000000000..9dd0b9414d3a +--- /dev/null ++++ b/drivers/vhm/vhm_mm.c +@@ -0,0 +1,364 @@ ++/* ++ * virtio and hyperviosr service module (VHM): memory map ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. 
All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * ++ * ++ * Jason Zeng ++ * Jason Chen CJ ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++struct guest_memseg { ++ struct list_head list; ++ int segid; ++ u64 base; ++ size_t len; ++ char name[SPECNAMELEN + 1]; ++ u64 gpa; ++ int prot; /* RWX */ ++ long vma_count; ++}; ++ ++static u64 _alloc_memblk(struct device *dev, size_t len) ++{ ++ unsigned int count; ++ struct page *page; ++ ++ if (!PAGE_ALIGNED(len)) { ++ pr_warn("alloc size of memblk must be page aligned\n"); ++ return 0ULL; ++ } ++ ++ count = PAGE_ALIGN(len) >> PAGE_SHIFT; ++ page = dma_alloc_from_contiguous(dev, count, 1, GFP_KERNEL); ++ if (page) ++ return page_to_phys(page); ++ else ++ return 0ULL; ++} ++ ++static bool _free_memblk(struct device *dev, u64 base, size_t len) ++{ ++ unsigned int count = PAGE_ALIGN(len) >> PAGE_SHIFT; ++ struct page *page = pfn_to_page(base >> PAGE_SHIFT); ++ ++ return dma_release_from_contiguous(dev, page, count); ++} ++ ++int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) ++{ ++ struct guest_memseg *seg; ++ u64 base; ++ ++ seg = kzalloc(sizeof(struct guest_memseg), GFP_KERNEL); ++ if (seg == NULL) ++ return -ENOMEM; ++ ++ base = _alloc_memblk(vm->dev, memseg->len); ++ if (base == 0ULL) { ++ kfree(seg); ++ return -ENOMEM; ++ } ++ ++ seg->segid = memseg->segid; ++ seg->base = base; ++ seg->len = memseg->len; ++ strncpy(seg->name, memseg->name, SPECNAMELEN + 1); ++ seg->gpa = memseg->gpa; ++ ++ pr_info("VHM: alloc memseg[%s] with len=0x%lx, base=0x%llx," ++ " and its guest gpa = 0x%llx\n", ++ seg->name, seg->len, seg->base, seg->gpa); ++ ++ seg->vma_count = 0; ++ mutex_lock(&vm->seg_lock); ++ list_add(&seg->list, &vm->memseg_list); ++ mutex_unlock(&vm->seg_lock); ++ ++ return 0; ++} ++ ++static int _mem_set_memmap(unsigned long vmid, unsigned long 
guest_gpa, ++ unsigned long host_gpa, unsigned long len, int prot, int type) ++{ ++ struct vm_set_memmap set_memmap; ++ ++ set_memmap.type = type; ++ set_memmap.foreign_gpa = guest_gpa; ++ set_memmap.hvm_gpa = host_gpa; ++ set_memmap.length = len; ++ set_memmap.prot = prot; ++ ++ /* hypercall to notify hv the guest EPT setting*/ ++ if (hcall_set_memmap(vmid, ++ virt_to_phys(&set_memmap)) < 0) { ++ pr_err("vhm: failed to set memmap %ld!\n", vmid); ++ return -EFAULT; ++ } ++ ++ pr_debug("VHM: set ept for mem map[type=0x%x, host_gpa=0x%lx," ++ "guest_gpa=0x%lx,len=0x%lx, prot=0x%x]\n", ++ type, host_gpa, guest_gpa, len, prot); ++ ++ return 0; ++} ++ ++int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, ++ unsigned long host_gpa, unsigned long len, int prot) ++{ ++ return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, ++ prot, MAP_MMIO); ++} ++ ++int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, ++ unsigned long host_gpa, unsigned long len, int prot) ++{ ++ return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, ++ prot, MAP_UNMAP); ++} ++ ++int update_mem_map(unsigned long vmid, unsigned long guest_gpa, ++ unsigned long host_gpa, unsigned long len, int prot) ++{ ++ return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, ++ prot, MAP_UPDATE); ++} ++ ++int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) ++{ ++ struct guest_memseg *seg = NULL; ++ struct vm_set_memmap set_memmap; ++ ++ mutex_lock(&vm->seg_lock); ++ ++ if (memmap->segid != VM_MMIO) { ++ list_for_each_entry(seg, &vm->memseg_list, list) { ++ if (seg->segid == memmap->segid ++ && seg->gpa == memmap->mem.gpa ++ && seg->len == memmap->mem.len) ++ break; ++ } ++ if (&seg->list == &vm->memseg_list) { ++ mutex_unlock(&vm->seg_lock); ++ return -EINVAL; ++ } ++ seg->prot = memmap->mem.prot; ++ set_memmap.type = MAP_MEM; ++ set_memmap.foreign_gpa = seg->gpa; ++ set_memmap.hvm_gpa = seg->base; ++ set_memmap.length = seg->len; ++ set_memmap.prot = seg->prot; ++ set_memmap.prot |= 
MMU_MEM_ATTR_WB_CACHE; ++ } else { ++ set_memmap.type = MAP_MMIO; ++ set_memmap.foreign_gpa = memmap->mmio.gpa; ++ set_memmap.hvm_gpa = memmap->mmio.hpa; ++ set_memmap.length = memmap->mmio.len; ++ set_memmap.prot = memmap->mmio.prot; ++ set_memmap.prot |= MMU_MEM_ATTR_UNCACHED; ++ } ++ ++ /* hypercall to notify hv the guest EPT setting*/ ++ if (hcall_set_memmap(vm->vmid, virt_to_phys(&set_memmap)) < 0) { ++ pr_err("vhm: failed to set memmap %ld!\n", vm->vmid); ++ mutex_unlock(&vm->seg_lock); ++ return -EFAULT; ++ } ++ ++ mutex_unlock(&vm->seg_lock); ++ ++ if (memmap->segid != VM_MMIO) ++ pr_debug("VHM: set ept for memseg [hvm_gpa=0x%llx," ++ "guest_gpa=0x%llx,len=0x%lx, prot=0x%x]\n", ++ seg->base, seg->gpa, seg->len, seg->prot); ++ else ++ pr_debug("VHM: set ept for mmio [hpa=0x%llx," ++ "gpa=0x%llx,len=0x%lx, prot=0x%x]\n", ++ memmap->mmio.hpa, memmap->mmio.gpa, ++ memmap->mmio.len, memmap->mmio.prot); ++ ++ return 0; ++} ++ ++void free_guest_mem(struct vhm_vm *vm) ++{ ++ struct guest_memseg *seg; ++ ++ mutex_lock(&vm->seg_lock); ++ while (!list_empty(&vm->memseg_list)) { ++ seg = list_first_entry(&vm->memseg_list, ++ struct guest_memseg, list); ++ if (!_free_memblk(vm->dev, seg->base, seg->len)) ++ pr_warn("failed to free memblk\n"); ++ list_del(&seg->list); ++ kfree(seg); ++ } ++ mutex_unlock(&vm->seg_lock); ++} ++ ++int check_guest_mem(struct vhm_vm *vm) ++{ ++ struct guest_memseg *seg; ++ ++ mutex_lock(&vm->seg_lock); ++ list_for_each_entry(seg, &vm->memseg_list, list) { ++ if (seg->segid != VM_SYSMEM) ++ continue; ++ ++ if (seg->vma_count == 0) ++ continue; ++ ++ mutex_unlock(&vm->seg_lock); ++ return -EAGAIN; ++ } ++ mutex_unlock(&vm->seg_lock); ++ return 0; ++} ++ ++static void guest_vm_open(struct vm_area_struct *vma) ++{ ++ struct vhm_vm *vm = vma->vm_file->private_data; ++ struct guest_memseg *seg = vma->vm_private_data; ++ ++ mutex_lock(&vm->seg_lock); ++ seg->vma_count++; ++ mutex_unlock(&vm->seg_lock); ++} ++ ++static void guest_vm_close(struct 
vm_area_struct *vma) ++{ ++ struct vhm_vm *vm = vma->vm_file->private_data; ++ struct guest_memseg *seg = vma->vm_private_data; ++ ++ mutex_lock(&vm->seg_lock); ++ seg->vma_count--; ++ BUG_ON(seg->vma_count < 0); ++ mutex_unlock(&vm->seg_lock); ++} ++ ++static const struct vm_operations_struct guest_vm_ops = { ++ .open = guest_vm_open, ++ .close = guest_vm_close, ++}; ++ ++static int do_mmap_guest(struct file *file, ++ struct vm_area_struct *vma, struct guest_memseg *seg) ++{ ++ struct page *page; ++ size_t size = seg->len; ++ unsigned long pfn; ++ unsigned long start_addr; ++ ++ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTCOPY; ++ pfn = seg->base >> PAGE_SHIFT; ++ start_addr = vma->vm_start; ++ while (size > 0) { ++ page = pfn_to_page(pfn); ++ if (vm_insert_page(vma, start_addr, page)) ++ return -EINVAL; ++ size -= PAGE_SIZE; ++ start_addr += PAGE_SIZE; ++ pfn++; ++ } ++ seg->vma_count++; ++ vma->vm_ops = &guest_vm_ops; ++ vma->vm_private_data = (void *)seg; ++ ++ pr_info("VHM: mmap for memseg [seg base=0x%llx, gpa=0x%llx] " ++ "to start addr 0x%lx\n", ++ seg->base, seg->gpa, start_addr); ++ ++ return 0; ++} ++ ++int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ struct vhm_vm *vm = file->private_data; ++ struct guest_memseg *seg; ++ u64 offset = vma->vm_pgoff << PAGE_SHIFT; ++ size_t len = vma->vm_end - vma->vm_start; ++ int ret; ++ ++ mutex_lock(&vm->seg_lock); ++ list_for_each_entry(seg, &vm->memseg_list, list) { ++ if (seg->segid != VM_SYSMEM) ++ continue; ++ ++ if (seg->gpa != offset || seg->len != len) ++ continue; ++ ++ ret = do_mmap_guest(file, vma, seg); ++ mutex_unlock(&vm->seg_lock); ++ return ret; ++ } ++ mutex_unlock(&vm->seg_lock); ++ return -EINVAL; ++} +diff --git a/drivers/vhm/vhm_vm_mngt.c b/drivers/vhm/vhm_vm_mngt.c +index 61db04b57362..3c4e6d2b2f23 100644 +--- a/drivers/vhm/vhm_vm_mngt.c ++++ b/drivers/vhm/vhm_vm_mngt.c +@@ -58,6 +58,7 @@ + #include + #include + #include ++#include + #include + + 
LIST_HEAD(vhm_vm_list); +@@ -85,6 +86,7 @@ void put_vm(struct vhm_vm *vm) + vm->refcnt--; + if (vm->refcnt == 0) { + list_del(&vm->list); ++ free_guest_mem(vm); + kfree(vm); + pr_info("vhm: freed vm\n"); + } +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index f338a8fbad3d..ab6554d017cb 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -74,8 +74,34 @@ + #define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04) + #define HC_QUERY_VMSTATE _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05) + ++/* Guest memory management */ ++#define HC_ID_MEM_BASE 0x300UL ++#define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00) ++ + #define ACRN_DOM0_VMID (0UL) + #define ACRN_INVALID_VMID (-1UL) + #define ACRN_INVALID_HPA (-1UL) + ++enum vm_memmap_type { ++ MAP_MEM = 0, ++ MAP_MMIO, ++ MAP_UNMAP, ++ MAP_UPDATE, ++}; ++ ++struct vm_set_memmap { ++ enum vm_memmap_type type; ++ /* IN: beginning guest GPA to map */ ++ unsigned long foreign_gpa; ++ ++ /* IN: VM0's GPA which foreign gpa will be mapped to */ ++ unsigned long hvm_gpa; ++ ++ /* IN: length of the range */ ++ unsigned long length; ++ ++ /* IN: not used right now */ ++ int prot; ++} __attribute__((aligned(8))); ++ + #endif /* ACRN_HV_DEFS_H */ +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +new file mode 100644 +index 000000000000..325f2b2026e8 +--- /dev/null ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -0,0 +1,88 @@ ++/* ++ * virtio and hyperviosr service module (VHM): memory map ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * ++ * Jason Chen CJ ++ * ++ */ ++ ++#ifndef __ACRN_VHM_MM_H__ ++#define __ACRN_VHM_MM_H__ ++ ++#include ++#include ++ ++#define MMU_MEM_ATTR_READ 0x00000001 ++#define MMU_MEM_ATTR_WRITE 0x00000002 ++#define MMU_MEM_ATTR_EXECUTE 0x00000004 ++#define MMU_MEM_ATTR_WB_CACHE 0x00000040 ++#define MMU_MEM_ATTR_WT_CACHE 0x00000080 ++#define MMU_MEM_ATTR_UNCACHED 0x00000100 ++#define MMU_MEM_ATTR_WC 0x00000200 ++ ++#define MMU_MEM_ATTR_ALL 0x00000007 ++#define MMU_MEM_ATTR_WP 0x00000005 ++#define MMU_MEM_ATTR_ALL_WB 0x00000047 ++#define MMU_MEM_ATTR_ALL_WC 0x00000207 ++ ++int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, ++ unsigned long host_gpa, unsigned long len, int prot); ++int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, ++ unsigned long host_gpa, unsigned long len, int prot); ++int update_mem_map(unsigned long vmid, unsigned long guest_gpa, ++ unsigned long host_gpa, unsigned long len, int prot); ++ ++int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma); ++ ++int check_guest_mem(struct vhm_vm *vm); ++void free_guest_mem(struct vhm_vm *vm); ++ ++int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg); ++int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap); ++ ++#endif +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index c07163dbc3bd..e098a1f959bf 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -139,6 +139,7 @@ static inline long acrn_hypercall4(unsigned long hyp_id, unsigned long param1, + return result; + } + ++inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap); + inline long vhm_create_vm(struct vhm_vm *vm, unsigned long ioctl_param); + inline long vhm_resume_vm(struct vhm_vm *vm); + inline long vhm_pause_vm(struct vhm_vm *vm); +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index d8c81b6e9306..872092490259 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ 
b/include/linux/vhm/vhm_ioctl_defs.h +@@ -64,4 +64,44 @@ + #define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) + #define IC_QUERY_VMSTATE _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) + ++/* Guest memory management */ ++#define IC_ID_MEM_BASE 0x300UL ++#define IC_ALLOC_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x00) ++#define IC_SET_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x01) ++ ++#define SPECNAMELEN 63 ++ ++enum { ++ VM_SYSMEM, ++ VM_BOOTROM, ++ VM_FRAMEBUFFER, ++ VM_MMIO, ++}; ++ ++struct vm_memseg { ++ int segid; ++ size_t len; ++ char name[SPECNAMELEN + 1]; ++ unsigned long gpa; ++}; ++ ++struct vm_memmap { ++ int segid; /* memory segment */ ++ union { ++ struct { ++ uint64_t gpa; ++ uint64_t segoff; /* offset into memory segment */ ++ size_t len; /* mmap length */ ++ int prot; /* RWX */ ++ int flags; ++ } mem; ++ struct { ++ uint64_t gpa; ++ uint64_t hpa; ++ size_t len; ++ int prot; ++ } mmio; ++ }; ++}; ++ + #endif /* VHM_IOCTL_DEFS_H */ +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index dcb246af561a..4f1a0db2c54d 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -66,6 +66,8 @@ struct vhm_vm { + struct list_head list; + unsigned long vmid; + long refcnt; ++ struct mutex seg_lock; ++ struct list_head memseg_list; + }; + + struct vhm_vm *find_get_vm(unsigned long vmid); +-- +2.17.1 + diff --git a/patches/0003-counter-add-support-for-Quadrature-x4-with-swa.felipeb-5.4 b/patches/0003-counter-add-support-for-Quadrature-x4-with-swa.felipeb-5.4 new file mode 100644 index 0000000000..92eb63f324 --- /dev/null +++ b/patches/0003-counter-add-support-for-Quadrature-x4-with-swa.felipeb-5.4 @@ -0,0 +1,63 @@ +From cce4858d495162d350341f3dc934bb8533b1448c Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Thu, 19 Sep 2019 10:41:21 +0300 +Subject: [PATCH 03/14] counter: add support for Quadrature x4 with swapped + inputs + +Some Quadrature Encoders can swap phase inputs A and B +internally. 
This new function will allow drivers to configure input +swap mode. + +Signed-off-by: Felipe Balbi +--- + Documentation/ABI/testing/sysfs-bus-counter | 4 ++++ + drivers/counter/counter.c | 3 ++- + include/linux/counter.h | 3 ++- + 3 files changed, 8 insertions(+), 2 deletions(-) + +diff --git a/Documentation/ABI/testing/sysfs-bus-counter b/Documentation/ABI/testing/sysfs-bus-counter +index 566bd99fe0a5..8f1e3de88c77 100644 +--- a/Documentation/ABI/testing/sysfs-bus-counter ++++ b/Documentation/ABI/testing/sysfs-bus-counter +@@ -146,6 +146,10 @@ Description: + updates the respective count. Quadrature encoding + determines the direction. + ++ quadrature x4 swapped: ++ Same as quadrature x4, however Phase A and Phase B ++ signals are swapped. ++ + What: /sys/bus/counter/devices/counterX/countY/name + KernelVersion: 5.2 + Contact: linux-iio@vger.kernel.org +diff --git a/drivers/counter/counter.c b/drivers/counter/counter.c +index 106bc7180cd8..b818ae9e85f2 100644 +--- a/drivers/counter/counter.c ++++ b/drivers/counter/counter.c +@@ -823,7 +823,8 @@ static const char *const counter_count_function_str[] = { + [COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b", + [COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a", + [COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b", +- [COUNTER_COUNT_FUNCTION_QUADRATURE_X4] = "quadrature x4" ++ [COUNTER_COUNT_FUNCTION_QUADRATURE_X4] = "quadrature x4", ++ [COUNTER_COUNT_FUNCTION_QUADRATURE_X4_SWAPPED] = "quadrature x4 swapped" + }; + + static ssize_t counter_function_show(struct device *dev, +diff --git a/include/linux/counter.h b/include/linux/counter.h +index a061cdcdef7c..860769250f89 100644 +--- a/include/linux/counter.h ++++ b/include/linux/counter.h +@@ -170,7 +170,8 @@ enum counter_count_function { + COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B, + COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A, + COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B, +- COUNTER_COUNT_FUNCTION_QUADRATURE_X4 ++ COUNTER_COUNT_FUNCTION_QUADRATURE_X4, 
++ COUNTER_COUNT_FUNCTION_QUADRATURE_X4_SWAPPED, + }; + + /** +-- +2.17.1 + diff --git a/patches/0003-drm-i915-Do-not-unmask-PSR-interruption-in-IRQ-postins.drm b/patches/0003-drm-i915-Do-not-unmask-PSR-interruption-in-IRQ-postins.drm new file mode 100644 index 0000000000..7c42f94283 --- /dev/null +++ b/patches/0003-drm-i915-Do-not-unmask-PSR-interruption-in-IRQ-postins.drm @@ -0,0 +1,92 @@ +From 481ae7edfe94aee9fe36f2cf827e474c316a17b2 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= +Date: Tue, 20 Aug 2019 15:33:25 -0700 +Subject: [PATCH 003/690] drm/i915: Do not unmask PSR interruption in IRQ + postinstall +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +No need to unmask PSR interrutpion if PSR is not enabled, better move +the call to intel_psr_enable_source(). + +v2: Renamed intel_psr_irq_control() to psr_irq_control() (Lucas) + +Cc: Rodrigo Vivi +Cc: Dhinakaran Pandiyan +Signed-off-by: José Roberto de Souza +Signed-off-by: Lucas De Marchi +Reviewed-by: Lucas De Marchi +Link: https://patchwork.freedesktop.org/patch/msgid/20190820223325.27490-3-jose.souza@intel.com +--- + drivers/gpu/drm/i915/display/intel_psr.c | 6 ++++-- + drivers/gpu/drm/i915/display/intel_psr.h | 1 - + drivers/gpu/drm/i915/i915_irq.c | 2 -- + 3 files changed, 4 insertions(+), 5 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c +index 771d9a40bf12..28b62e587204 100644 +--- a/drivers/gpu/drm/i915/display/intel_psr.c ++++ b/drivers/gpu/drm/i915/display/intel_psr.c +@@ -105,7 +105,7 @@ static int edp_psr_shift(enum transcoder cpu_transcoder) + } + } + +-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) ++static void psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) + { + u32 debug_mask, mask; + enum transcoder cpu_transcoder; +@@ -736,6 +736,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, + mask |= 
EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; + + I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask); ++ ++ psr_irq_control(dev_priv, dev_priv->psr.debug); + } + + static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, +@@ -1108,7 +1110,7 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) + + old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; + dev_priv->psr.debug = val; +- intel_psr_irq_control(dev_priv, dev_priv->psr.debug); ++ psr_irq_control(dev_priv, dev_priv->psr.debug); + + mutex_unlock(&dev_priv->psr.lock); + +diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h +index dc818826f36d..46e4de8b8cd5 100644 +--- a/drivers/gpu/drm/i915/display/intel_psr.h ++++ b/drivers/gpu/drm/i915/display/intel_psr.h +@@ -30,7 +30,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, + void intel_psr_init(struct drm_i915_private *dev_priv); + void intel_psr_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state); +-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug); + void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); + void intel_psr_short_pulse(struct intel_dp *intel_dp); + int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 37e3dd3c1a9d..77391d8325bf 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -3684,7 +3684,6 @@ static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv) + + if (IS_HASWELL(dev_priv)) { + gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); +- intel_psr_irq_control(dev_priv, dev_priv->psr.debug); + display_mask |= DE_EDP_PSR_INT_HSW; + } + +@@ -3795,7 +3794,6 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) + de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; + + gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); +- 
intel_psr_irq_control(dev_priv, dev_priv->psr.debug); + + for_each_pipe(dev_priv, pipe) { + dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; +-- +2.17.1 + diff --git a/patches/0003-intel_idle-Add-Elkhartlake-support.core-ehl b/patches/0003-intel_idle-Add-Elkhartlake-support.core-ehl new file mode 100644 index 0000000000..5b4f44ca70 --- /dev/null +++ b/patches/0003-intel_idle-Add-Elkhartlake-support.core-ehl @@ -0,0 +1,29 @@ +From 1949f7a2ef72229581e579d8b3f060467bc3ecb0 Mon Sep 17 00:00:00 2001 +From: Gayatri Kammela +Date: Mon, 30 Sep 2019 18:20:50 -0700 +Subject: [PATCH 03/12] intel_idle: Add Elkhartlake support + +This adds ELKHARTLAKE CPU support to intel idle driver and uses +GEMINILAKE CSTATE table. + +Signed-off-by: Rajneesh Bhardwaj +Signed-off-by: Gayatri Kammela +--- + drivers/idle/intel_idle.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c +index 347b08b56042..203117e887e5 100644 +--- a/drivers/idle/intel_idle.c ++++ b/drivers/idle/intel_idle.c +@@ -1092,6 +1092,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { + INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt), + INTEL_CPU_FAM6(ATOM_GOLDMONT_D, idle_cpu_dnv), + INTEL_CPU_FAM6(ATOM_TREMONT_D, idle_cpu_dnv), ++ INTEL_CPU_FAM6(ATOM_TREMONT, idle_cpu_bxt), + {} + }; + +-- +2.17.1 + diff --git a/patches/0003-mfd-intel_soc_pmic_bxtwc-Add-device-for-the-I2C-.usb-typec b/patches/0003-mfd-intel_soc_pmic_bxtwc-Add-device-for-the-I2C-.usb-typec new file mode 100644 index 0000000000..a547731dce --- /dev/null +++ b/patches/0003-mfd-intel_soc_pmic_bxtwc-Add-device-for-the-I2C-.usb-typec @@ -0,0 +1,63 @@ +From 094099dfad187e42b998addbbdb5469b6b3e3c71 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Mon, 5 Aug 2019 14:54:37 +0300 +Subject: [PATCH 03/18] mfd: intel_soc_pmic_bxtwc: Add device for the I2C + master + +Intel WhiskeyCove PMIC has also I2C master interface that is +designed to be used for controlling a discrete battery 
+charger IC. + +Signed-off-by: Heikki Krogerus +--- + drivers/mfd/intel_soc_pmic_bxtwc.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c +index de79901d8c93..21c77aa69f98 100644 +--- a/drivers/mfd/intel_soc_pmic_bxtwc.c ++++ b/drivers/mfd/intel_soc_pmic_bxtwc.c +@@ -84,6 +84,7 @@ enum bxtwc_irqs_adc { + + enum bxtwc_irqs_chgr { + BXTWC_USBC_IRQ = 0, ++ BXTWC_I2C_IRQ, + BXTWC_CHGR0_IRQ, + BXTWC_CHGR1_IRQ, + }; +@@ -121,7 +122,8 @@ static const struct regmap_irq bxtwc_regmap_irqs_adc[] = { + + static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = { + REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20), +- REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f), ++ REGMAP_IRQ_REG(BXTWC_I2C_IRQ, 0, 0x0f), ++ REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x10), + REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f), + }; + +@@ -208,6 +210,10 @@ static struct resource usbc_resources[] = { + DEFINE_RES_IRQ(BXTWC_USBC_IRQ), + }; + ++static struct resource i2c_resources[] = { ++ DEFINE_RES_IRQ(BXTWC_I2C_IRQ), ++}; ++ + static struct resource charger_resources[] = { + DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "CHARGER"), + DEFINE_RES_IRQ_NAMED(BXTWC_CHGR1_IRQ, "CHARGER1"), +@@ -253,6 +259,11 @@ static struct mfd_cell bxt_wc_dev[] = { + .num_resources = ARRAY_SIZE(usbc_resources), + .resources = usbc_resources, + }, ++ { ++ .name = "bxt_wcove_i2c", ++ .num_resources = ARRAY_SIZE(i2c_resources), ++ .resources = i2c_resources, ++ }, + { + .name = "bxt_wcove_ext_charger", + .num_resources = ARRAY_SIZE(charger_resources), +-- +2.17.1 + diff --git a/patches/0003-net-stmmac-Fix-incorrect-location-to-set-real.connectivity b/patches/0003-net-stmmac-Fix-incorrect-location-to-set-real.connectivity new file mode 100644 index 0000000000..1f0e3cab11 --- /dev/null +++ b/patches/0003-net-stmmac-Fix-incorrect-location-to-set-real.connectivity @@ -0,0 +1,48 @@ +From 4eed5f7ec6cd72552f54984ab5f6ee04ee463f07 Mon Sep 17 00:00:00 2001 
+From: Aashish Verma +Date: Thu, 25 Jul 2019 02:21:38 +0800 +Subject: [PATCH 003/108] net: stmmac: Fix incorrect location to set + real_num_rx|tx_queues + +netif_set_real_num_tx_queues() & netif_set_real_num_rx_queues() should be +used to inform network stack about the real Tx & Rx queue (active) number +in both stmmac_open() and stmmac_resume(), therefore, we move the code +from stmmac_dvr_probe() to stmmac_hw_setup(). + +Fixes: c02b7a914551 net: stmmac: use netif_set_real_num_{rx,tx}_queues + +Signed-off-by: Aashish Verma +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index e433fa8d7b7a..23c9570bbe59 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2631,6 +2631,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) + if (priv->dma_cap.vlins) + stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); + ++ /* Configure real RX and TX queues */ ++ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); ++ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); ++ + /* Start the ball rolling... 
*/ + stmmac_start_all_dma(priv); + +@@ -4487,10 +4491,6 @@ int stmmac_dvr_probe(struct device *device, + + stmmac_check_ether_addr(priv); + +- /* Configure real RX and TX queues */ +- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); +- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); +- + ndev->netdev_ops = &stmmac_netdev_ops; + + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | +-- +2.17.1 + diff --git a/patches/0003-pinctrl-intel-Add-Intel-Elkhart-Lake-pin-controller-s.lpss b/patches/0003-pinctrl-intel-Add-Intel-Elkhart-Lake-pin-controller-s.lpss new file mode 100644 index 0000000000..0ea8a2f983 --- /dev/null +++ b/patches/0003-pinctrl-intel-Add-Intel-Elkhart-Lake-pin-controller-s.lpss @@ -0,0 +1,524 @@ +From 05a783c95b44797877b17edeb509ab182f398079 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Fri, 27 Jan 2017 13:07:16 +0300 +Subject: [PATCH 03/40] pinctrl: intel: Add Intel Elkhart Lake pin controller + support + +This driver adds pinctrl/GPIO support for Intel Elkhart Lake SoC. The +GPIO controller is based on the next generation GPIO hardware but still +compatible with the one supported by the Intel core pinctrl/GPIO driver. 
+ +TODO: Populate groups and functions, test on HW + +TO BE FOLDED: pinctrl: elkhartlake: Update groups (soc-mcc-a0-19ww22_RDL) +TO BE FOLDED: pinctrl: elkhartlake: Update pin list according to B0 v1.0 + +Cc: Mika Westerberg +Signed-off-by: Andy Shevchenko +--- + drivers/pinctrl/intel/Kconfig | 8 + + drivers/pinctrl/intel/Makefile | 1 + + drivers/pinctrl/intel/pinctrl-elkhartlake.c | 460 ++++++++++++++++++++ + 3 files changed, 469 insertions(+) + create mode 100644 drivers/pinctrl/intel/pinctrl-elkhartlake.c + +diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig +index 452a14f78707..e20d83af0b4c 100644 +--- a/drivers/pinctrl/intel/Kconfig ++++ b/drivers/pinctrl/intel/Kconfig +@@ -82,6 +82,14 @@ config PINCTRL_DENVERTON + This pinctrl driver provides an interface that allows configuring + of Intel Denverton SoC pins and using them as GPIOs. + ++config PINCTRL_ELKHARTLAKE ++ tristate "Intel Elkhart Lake SoC pinctrl and GPIO driver" ++ depends on ACPI ++ select PINCTRL_INTEL ++ help ++ This pinctrl driver provides an interface that allows configuring ++ of Intel Elkhart Lake SoC pins and using them as GPIOs. 
++ + config PINCTRL_GEMINILAKE + tristate "Intel Gemini Lake SoC pinctrl and GPIO driver" + depends on ACPI +diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile +index cb491e655749..ec353993d545 100644 +--- a/drivers/pinctrl/intel/Makefile ++++ b/drivers/pinctrl/intel/Makefile +@@ -9,6 +9,7 @@ obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o + obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o + obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o + obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o ++obj-$(CONFIG_PINCTRL_ELKHARTLAKE) += pinctrl-elkhartlake.o + obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o + obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o + obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o +diff --git a/drivers/pinctrl/intel/pinctrl-elkhartlake.c b/drivers/pinctrl/intel/pinctrl-elkhartlake.c +new file mode 100644 +index 000000000000..de31a8cc1c0f +--- /dev/null ++++ b/drivers/pinctrl/intel/pinctrl-elkhartlake.c +@@ -0,0 +1,460 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Intel Elkhart Lake PCH pinctrl/GPIO driver ++ * ++ * Copyright (C) 2019, Intel Corporation ++ * Author: Andy Shevchenko ++ */ ++ ++#include ++#include ++#include ++ ++#include ++ ++#include "pinctrl-intel.h" ++ ++#define EHL_PAD_OWN 0x020 ++#define EHL_PADCFGLOCK 0x080 ++#define EHL_HOSTSW_OWN 0x0b0 ++#define EHL_GPI_IS 0x100 ++#define EHL_GPI_IE 0x120 ++ ++#define EHL_GPP(r, s, e) \ ++ { \ ++ .reg_num = (r), \ ++ .base = (s), \ ++ .size = ((e) - (s) + 1), \ ++ } ++ ++#define EHL_COMMUNITY(b, s, e, g) \ ++ { \ ++ .barno = (b), \ ++ .padown_offset = EHL_PAD_OWN, \ ++ .padcfglock_offset = EHL_PADCFGLOCK, \ ++ .hostown_offset = EHL_HOSTSW_OWN, \ ++ .is_offset = EHL_GPI_IS, \ ++ .ie_offset = EHL_GPI_IE, \ ++ .pin_base = (s), \ ++ .npins = ((e) - (s) + 1), \ ++ .gpps = (g), \ ++ .ngpps = ARRAY_SIZE(g), \ ++ } ++ ++/* Elkhart Lake */ ++static const struct pinctrl_pin_desc ehl_pins[] = { ++ /* GPP_B */ ++ PINCTRL_PIN(0, 
"CORE_VID_0"), ++ PINCTRL_PIN(1, "CORE_VID_1"), ++ PINCTRL_PIN(2, "VRALERTB"), ++ PINCTRL_PIN(3, "CPU_GP_2"), ++ PINCTRL_PIN(4, "CPU_GP_3"), ++ PINCTRL_PIN(5, "OSE_I2C0_SCLK"), ++ PINCTRL_PIN(6, "OSE_I2C0_SDAT"), ++ PINCTRL_PIN(7, "OSE_I2C1_SCLK"), ++ PINCTRL_PIN(8, "OSE_I2C1_SDAT"), ++ PINCTRL_PIN(9, "I2C5_SDA"), ++ PINCTRL_PIN(10, "I2C5_SCL"), ++ PINCTRL_PIN(11, "PMCALERTB"), ++ PINCTRL_PIN(12, "SLP_S0B"), ++ PINCTRL_PIN(13, "PLTRSTB"), ++ PINCTRL_PIN(14, "SPKR"), ++ PINCTRL_PIN(15, "GSPI0_CS0B"), ++ PINCTRL_PIN(16, "GSPI0_CLK"), ++ PINCTRL_PIN(17, "GSPI0_MISO"), ++ PINCTRL_PIN(18, "GSPI0_MOSI"), ++ PINCTRL_PIN(19, "GSPI1_CS0B"), ++ PINCTRL_PIN(20, "GSPI1_CLK"), ++ PINCTRL_PIN(21, "GSPI1_MISO"), ++ PINCTRL_PIN(22, "GSPI1_MOSI"), ++ PINCTRL_PIN(23, "GPPC_B_23"), ++ PINCTRL_PIN(24, "GSPI0_CLK_LOOPBK"), ++ PINCTRL_PIN(25, "GSPI1_CLK_LOOPBK"), ++ /* GPP_T */ ++ PINCTRL_PIN(26, "OSE_QEPA_2"), ++ PINCTRL_PIN(27, "OSE_QEPB_2"), ++ PINCTRL_PIN(28, "OSE_QEPI_2"), ++ PINCTRL_PIN(29, "GPPC_T_3"), ++ PINCTRL_PIN(30, "RGMII0_INT"), ++ PINCTRL_PIN(31, "RGMII0_RESETB"), ++ PINCTRL_PIN(32, "RGMII0_AUXTS"), ++ PINCTRL_PIN(33, "RGMII0_PPS"), ++ PINCTRL_PIN(34, "USB2_OCB_2"), ++ PINCTRL_PIN(35, "OSE_HSUART2_EN"), ++ PINCTRL_PIN(36, "OSE_HSUART2_RE"), ++ PINCTRL_PIN(37, "USB2_OCB_3"), ++ PINCTRL_PIN(38, "OSE_UART2_RXD"), ++ PINCTRL_PIN(39, "OSE_UART2_TXD"), ++ PINCTRL_PIN(40, "OSE_UART2_RTSB"), ++ PINCTRL_PIN(41, "OSE_UART2_CTSB"), ++ /* GPP_G */ ++ PINCTRL_PIN(42, "SD3_CMD"), ++ PINCTRL_PIN(43, "SD3_D0"), ++ PINCTRL_PIN(44, "SD3_D1"), ++ PINCTRL_PIN(45, "SD3_D2"), ++ PINCTRL_PIN(46, "SD3_D3"), ++ PINCTRL_PIN(47, "SD3_CDB"), ++ PINCTRL_PIN(48, "SD3_CLK"), ++ PINCTRL_PIN(49, "I2S2_SCLK"), ++ PINCTRL_PIN(50, "I2S2_SFRM"), ++ PINCTRL_PIN(51, "I2S2_TXD"), ++ PINCTRL_PIN(52, "I2S2_RXD"), ++ PINCTRL_PIN(53, "I2S3_SCLK"), ++ PINCTRL_PIN(54, "I2S3_SFRM"), ++ PINCTRL_PIN(55, "I2S3_TXD"), ++ PINCTRL_PIN(56, "I2S3_RXD"), ++ PINCTRL_PIN(57, "ESPI_IO_0"), ++ PINCTRL_PIN(58, "ESPI_IO_1"), ++ 
PINCTRL_PIN(59, "ESPI_IO_2"), ++ PINCTRL_PIN(60, "ESPI_IO_3"), ++ PINCTRL_PIN(61, "I2S1_SCLK"), ++ PINCTRL_PIN(62, "ESPI_CSB"), ++ PINCTRL_PIN(63, "ESPI_CLK"), ++ PINCTRL_PIN(64, "ESPI_RESETB"), ++ PINCTRL_PIN(65, "SD3_WP"), ++ PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"), ++ /* GPP_V */ ++ PINCTRL_PIN(67, "EMMC_CMD"), ++ PINCTRL_PIN(68, "EMMC_DATA0"), ++ PINCTRL_PIN(69, "EMMC_DATA1"), ++ PINCTRL_PIN(70, "EMMC_DATA2"), ++ PINCTRL_PIN(71, "EMMC_DATA3"), ++ PINCTRL_PIN(72, "EMMC_DATA4"), ++ PINCTRL_PIN(73, "EMMC_DATA5"), ++ PINCTRL_PIN(74, "EMMC_DATA6"), ++ PINCTRL_PIN(75, "EMMC_DATA7"), ++ PINCTRL_PIN(76, "EMMC_RCLK"), ++ PINCTRL_PIN(77, "EMMC_CLK"), ++ PINCTRL_PIN(78, "EMMC_RESETB"), ++ PINCTRL_PIN(79, "OSE_TGPIO0"), ++ PINCTRL_PIN(80, "OSE_TGPIO1"), ++ PINCTRL_PIN(81, "OSE_TGPIO2"), ++ PINCTRL_PIN(82, "OSE_TGPIO3"), ++ /* GPP_H */ ++ PINCTRL_PIN(83, "RGMII1_INT"), ++ PINCTRL_PIN(84, "RGMII1_RESETB"), ++ PINCTRL_PIN(85, "RGMII1_AUXTS"), ++ PINCTRL_PIN(86, "RGMII1_PPS"), ++ PINCTRL_PIN(87, "I2C2_SDA"), ++ PINCTRL_PIN(88, "I2C2_SCL"), ++ PINCTRL_PIN(89, "I2C3_SDA"), ++ PINCTRL_PIN(90, "I2C3_SCL"), ++ PINCTRL_PIN(91, "I2C4_SDA"), ++ PINCTRL_PIN(92, "I2C4_SCL"), ++ PINCTRL_PIN(93, "SRCCLKREQB_4"), ++ PINCTRL_PIN(94, "SRCCLKREQB_5"), ++ PINCTRL_PIN(95, "OSE_UART1_RXD"), ++ PINCTRL_PIN(96, "OSE_UART1_TXD"), ++ PINCTRL_PIN(97, "GPPC_H_14"), ++ PINCTRL_PIN(98, "OSE_UART1_CTSB"), ++ PINCTRL_PIN(99, "PCIE_LNK_DOWN"), ++ PINCTRL_PIN(100, "SD_PWR_EN_B"), ++ PINCTRL_PIN(101, "CPU_C10_GATEB"), ++ PINCTRL_PIN(102, "GPPC_H_19"), ++ PINCTRL_PIN(103, "OSE_PWM7"), ++ PINCTRL_PIN(104, "OSE_HSUART1_DE"), ++ PINCTRL_PIN(105, "OSE_HSUART1_RE"), ++ PINCTRL_PIN(106, "OSE_HSUART1_EN"), ++ /* GPP_D */ ++ PINCTRL_PIN(107, "OSE_QEPA_0"), ++ PINCTRL_PIN(108, "OSE_QEPB_0"), ++ PINCTRL_PIN(109, "OSE_QEPI_0"), ++ PINCTRL_PIN(110, "OSE_PWM6"), ++ PINCTRL_PIN(111, "OSE_PWM2"), ++ PINCTRL_PIN(112, "SRCCLKREQB_0"), ++ PINCTRL_PIN(113, "SRCCLKREQB_1"), ++ PINCTRL_PIN(114, "SRCCLKREQB_2"), ++ PINCTRL_PIN(115, 
"SRCCLKREQB_3"), ++ PINCTRL_PIN(116, "OSE_SPI0_CSB"), ++ PINCTRL_PIN(117, "OSE_SPI0_SCLK"), ++ PINCTRL_PIN(118, "OSE_SPI0_MISO"), ++ PINCTRL_PIN(119, "OSE_SPI0_MOSI"), ++ PINCTRL_PIN(120, "OSE_QEPA_1"), ++ PINCTRL_PIN(121, "OSE_QEPB_1"), ++ PINCTRL_PIN(122, "OSE_PWM3"), ++ PINCTRL_PIN(123, "OSE_QEPI_1"), ++ PINCTRL_PIN(124, "OSE_PWM4"), ++ PINCTRL_PIN(125, "OSE_PWM5"), ++ PINCTRL_PIN(126, "I2S_MCLK1_OUT"), ++ PINCTRL_PIN(127, "GSPI2_CLK_LOOPBK"), ++ /* GPP_U */ ++ PINCTRL_PIN(128, "RGMII2_INT"), ++ PINCTRL_PIN(129, "RGMII2_RESETB"), ++ PINCTRL_PIN(130, "RGMII2_PPS"), ++ PINCTRL_PIN(131, "RGMII2_AUXTS"), ++ PINCTRL_PIN(132, "ISI_SPIM_CS"), ++ PINCTRL_PIN(133, "ISI_SPIM_SCLK"), ++ PINCTRL_PIN(134, "ISI_SPIM_MISO"), ++ PINCTRL_PIN(135, "OSE_QEPA_3"), ++ PINCTRL_PIN(136, "ISI_SPIS_CS"), ++ PINCTRL_PIN(137, "ISI_SPIS_SCLK"), ++ PINCTRL_PIN(138, "ISI_SPIS_MISO"), ++ PINCTRL_PIN(139, "OSE_QEPB_3"), ++ PINCTRL_PIN(140, "ISI_CHX_OKNOK_0"), ++ PINCTRL_PIN(141, "ISI_CHX_OKNOK_1"), ++ PINCTRL_PIN(142, "ISI_CHX_RLY_SWTCH"), ++ PINCTRL_PIN(143, "ISI_CHX_PMIC_EN"), ++ PINCTRL_PIN(144, "ISI_OKNOK_0"), ++ PINCTRL_PIN(145, "ISI_OKNOK_1"), ++ PINCTRL_PIN(146, "ISI_ALERT"), ++ PINCTRL_PIN(147, "OSE_QEPI_3"), ++ PINCTRL_PIN(148, "GSPI3_CLK_LOOPBK"), ++ PINCTRL_PIN(149, "GSPI4_CLK_LOOPBK"), ++ PINCTRL_PIN(150, "GSPI5_CLK_LOOPBK"), ++ PINCTRL_PIN(151, "GSPI6_CLK_LOOPBK"), ++ /* vGPIO */ ++ PINCTRL_PIN(152, "CNV_BTEN"), ++ PINCTRL_PIN(153, "CNV_BT_HOST_WAKEB"), ++ PINCTRL_PIN(154, "CNV_BT_IF_SELECT"), ++ PINCTRL_PIN(155, "vCNV_BT_UART_TXD"), ++ PINCTRL_PIN(156, "vCNV_BT_UART_RXD"), ++ PINCTRL_PIN(157, "vCNV_BT_UART_CTS_B"), ++ PINCTRL_PIN(158, "vCNV_BT_UART_RTS_B"), ++ PINCTRL_PIN(159, "vCNV_MFUART1_TXD"), ++ PINCTRL_PIN(160, "vCNV_MFUART1_RXD"), ++ PINCTRL_PIN(161, "vCNV_MFUART1_CTS_B"), ++ PINCTRL_PIN(162, "vCNV_MFUART1_RTS_B"), ++ PINCTRL_PIN(163, "vUART0_TXD"), ++ PINCTRL_PIN(164, "vUART0_RXD"), ++ PINCTRL_PIN(165, "vUART0_CTS_B"), ++ PINCTRL_PIN(166, "vUART0_RTS_B"), ++ 
PINCTRL_PIN(167, "vOSE_UART0_TXD"), ++ PINCTRL_PIN(168, "vOSE_UART0_RXD"), ++ PINCTRL_PIN(169, "vOSE_UART0_CTS_B"), ++ PINCTRL_PIN(170, "vOSE_UART0_RTS_B"), ++ PINCTRL_PIN(171, "vCNV_BT_I2S_BCLK"), ++ PINCTRL_PIN(172, "vCNV_BT_I2S_WS_SYNC"), ++ PINCTRL_PIN(173, "vCNV_BT_I2S_SDO"), ++ PINCTRL_PIN(174, "vCNV_BT_I2S_SDI"), ++ PINCTRL_PIN(175, "vI2S2_SCLK"), ++ PINCTRL_PIN(176, "vI2S2_SFRM"), ++ PINCTRL_PIN(177, "vI2S2_TXD"), ++ PINCTRL_PIN(178, "vI2S2_RXD"), ++ PINCTRL_PIN(179, "vSD3_CD_B"), ++ /* CPU */ ++ PINCTRL_PIN(180, "HDACPU_SDI"), ++ PINCTRL_PIN(181, "HDACPU_SDO"), ++ PINCTRL_PIN(182, "HDACPU_BCLK"), ++ PINCTRL_PIN(183, "PM_SYNC"), ++ PINCTRL_PIN(184, "PECI"), ++ PINCTRL_PIN(185, "CPUPWRGD"), ++ PINCTRL_PIN(186, "THRMTRIPB"), ++ PINCTRL_PIN(187, "PLTRST_CPUB"), ++ PINCTRL_PIN(188, "PM_DOWN"), ++ PINCTRL_PIN(189, "TRIGGER_IN"), ++ PINCTRL_PIN(190, "TRIGGER_OUT"), ++ PINCTRL_PIN(191, "UFS_RESETB"), ++ PINCTRL_PIN(192, "CLKOUT_CPURTC"), ++ PINCTRL_PIN(193, "VCCST_OVERRIDE"), ++ PINCTRL_PIN(194, "C10_WAKE"), ++ PINCTRL_PIN(195, "PROCHOTB"), ++ PINCTRL_PIN(196, "CATERRB"), ++ /* GPP_S */ ++ PINCTRL_PIN(197, "UFS_REF_CLK_0"), ++ PINCTRL_PIN(198, "UFS_REF_CLK_1"), ++ /* GPP_A */ ++ PINCTRL_PIN(199, "RGMII0_TXDATA_3"), ++ PINCTRL_PIN(200, "RGMII0_TXDATA_2"), ++ PINCTRL_PIN(201, "RGMII0_TXDATA_1"), ++ PINCTRL_PIN(202, "RGMII0_TXDATA_0"), ++ PINCTRL_PIN(203, "RGMII0_TXCLK"), ++ PINCTRL_PIN(204, "RGMII0_TXCTL"), ++ PINCTRL_PIN(205, "RGMII0_RXCLK"), ++ PINCTRL_PIN(206, "RGMII0_RXDATA_3"), ++ PINCTRL_PIN(207, "RGMII0_RXDATA_2"), ++ PINCTRL_PIN(208, "RGMII0_RXDATA_1"), ++ PINCTRL_PIN(209, "RGMII0_RXDATA_0"), ++ PINCTRL_PIN(210, "RGMII1_TXDATA_3"), ++ PINCTRL_PIN(211, "RGMII1_TXDATA_2"), ++ PINCTRL_PIN(212, "RGMII1_TXDATA_1"), ++ PINCTRL_PIN(213, "RGMII1_TXDATA_0"), ++ PINCTRL_PIN(214, "RGMII1_TXCLK"), ++ PINCTRL_PIN(215, "RGMII1_TXCTL"), ++ PINCTRL_PIN(216, "RGMII1_RXCLK"), ++ PINCTRL_PIN(217, "RGMII1_RXCTL"), ++ PINCTRL_PIN(218, "RGMII1_RXDATA_3"), ++ PINCTRL_PIN(219, 
"RGMII1_RXDATA_2"), ++ PINCTRL_PIN(220, "RGMII1_RXDATA_1"), ++ PINCTRL_PIN(221, "RGMII1_RXDATA_0"), ++ PINCTRL_PIN(222, "RGMII0_RXCTL"), ++ /* vGPIO_3 */ ++ PINCTRL_PIN(223, "ESPI_USB_OCB_0"), ++ PINCTRL_PIN(224, "ESPI_USB_OCB_1"), ++ PINCTRL_PIN(225, "ESPI_USB_OCB_2"), ++ PINCTRL_PIN(226, "ESPI_USB_OCB_3"), ++ /* GPP_C */ ++ PINCTRL_PIN(227, "SMBCLK"), ++ PINCTRL_PIN(228, "SMBDATA"), ++ PINCTRL_PIN(229, "OSE_PWM0"), ++ PINCTRL_PIN(230, "RGMII0_MDC"), ++ PINCTRL_PIN(231, "RGMII0_MDIO"), ++ PINCTRL_PIN(232, "OSE_PWM1"), ++ PINCTRL_PIN(233, "RGMII1_MDC"), ++ PINCTRL_PIN(234, "RGMII1_MDIO"), ++ PINCTRL_PIN(235, "OSE_TGPIO4"), ++ PINCTRL_PIN(236, "OSE_HSUART0_EN"), ++ PINCTRL_PIN(237, "OSE_TGPIO5"), ++ PINCTRL_PIN(238, "OSE_HSUART0_RE"), ++ PINCTRL_PIN(239, "OSE_UART0_RXD"), ++ PINCTRL_PIN(240, "OSE_UART0_TXD"), ++ PINCTRL_PIN(241, "OSE_UART0_RTSB"), ++ PINCTRL_PIN(242, "OSE_UART0_CTSB"), ++ PINCTRL_PIN(243, "RGMII2_MDIO"), ++ PINCTRL_PIN(244, "RGMII2_MDC"), ++ PINCTRL_PIN(245, "OSE_I2C4_SDAT"), ++ PINCTRL_PIN(246, "OSE_I2C4_SCLK"), ++ PINCTRL_PIN(247, "OSE_UART4_RXD"), ++ PINCTRL_PIN(248, "OSE_UART4_TXD"), ++ PINCTRL_PIN(249, "OSE_UART4_RTSB"), ++ PINCTRL_PIN(250, "OSE_UART4_CTSB"), ++ /* GPP_F */ ++ PINCTRL_PIN(251, "CNV_BRI_DT"), ++ PINCTRL_PIN(252, "CNV_BRI_RSP"), ++ PINCTRL_PIN(253, "CNV_RGI_DT"), ++ PINCTRL_PIN(254, "CNV_RGI_RSP"), ++ PINCTRL_PIN(255, "CNV_RF_RESET_B"), ++ PINCTRL_PIN(256, "EMMC_HIP_MON"), ++ PINCTRL_PIN(257, "CNV_PA_BLANKING"), ++ PINCTRL_PIN(258, "OSE_I2S1_SCLK"), ++ PINCTRL_PIN(259, "I2S_MCLK2_INOUT"), ++ PINCTRL_PIN(260, "BOOTMPC"), ++ PINCTRL_PIN(261, "OSE_I2S1_SFRM"), ++ PINCTRL_PIN(262, "GPPC_F_11"), ++ PINCTRL_PIN(263, "GSXDOUT"), ++ PINCTRL_PIN(264, "GSXSLOAD"), ++ PINCTRL_PIN(265, "GSXDIN"), ++ PINCTRL_PIN(266, "GSXSRESETB"), ++ PINCTRL_PIN(267, "GSXCLK"), ++ PINCTRL_PIN(268, "GPPC_F_17"), ++ PINCTRL_PIN(269, "OSE_I2S1_TXD"), ++ PINCTRL_PIN(270, "OSE_I2S1_RXD"), ++ PINCTRL_PIN(271, "EXT_PWR_GATEB"), ++ PINCTRL_PIN(272, 
"EXT_PWR_GATE2B"), ++ PINCTRL_PIN(273, "VNN_CTRL"), ++ PINCTRL_PIN(274, "V1P05_CTRL"), ++ PINCTRL_PIN(275, "GPPF_CLK_LOOPBACK"), ++ /* HVCMOS */ ++ PINCTRL_PIN(276, "L_BKLTEN"), ++ PINCTRL_PIN(277, "L_BKLTCTL"), ++ PINCTRL_PIN(278, "L_VDDEN"), ++ PINCTRL_PIN(279, "SYS_PWROK"), ++ PINCTRL_PIN(280, "SYS_RESETB"), ++ PINCTRL_PIN(281, "MLK_RSTB"), ++ /* GPP_E */ ++ PINCTRL_PIN(282, "SATA_LEDB"), ++ PINCTRL_PIN(283, "GPPC_E_1"), ++ PINCTRL_PIN(284, "GPPC_E_2"), ++ PINCTRL_PIN(285, "DDSP_HPD_B"), ++ PINCTRL_PIN(286, "SATA_DEVSLP_0"), ++ PINCTRL_PIN(287, "DDPB_CTRLDATA"), ++ PINCTRL_PIN(288, "GPPC_E_6"), ++ PINCTRL_PIN(289, "DDPB_CTRLCLK"), ++ PINCTRL_PIN(290, "GPPC_E_8"), ++ PINCTRL_PIN(291, "USB2_OCB_0"), ++ PINCTRL_PIN(292, "GPPC_E_10"), ++ PINCTRL_PIN(293, "GPPC_E_11"), ++ PINCTRL_PIN(294, "GPPC_E_12"), ++ PINCTRL_PIN(295, "GPPC_E_13"), ++ PINCTRL_PIN(296, "DDSP_HPD_A"), ++ PINCTRL_PIN(297, "OSE_I2S0_RXD"), ++ PINCTRL_PIN(298, "OSE_I2S0_TXD"), ++ PINCTRL_PIN(299, "DDSP_HPD_C"), ++ PINCTRL_PIN(300, "DDPA_CTRLDATA"), ++ PINCTRL_PIN(301, "DDPA_CTRLCLK"), ++ PINCTRL_PIN(302, "OSE_I2S0_SCLK"), ++ PINCTRL_PIN(303, "OSE_I2S0_SFRM"), ++ PINCTRL_PIN(304, "DDPC_CTRLDATA"), ++ PINCTRL_PIN(305, "DDPC_CTRLCLK"), ++ PINCTRL_PIN(306, "SPI1_CLK_LOOPBK"), ++ /* GPP_R */ ++ PINCTRL_PIN(307, "HDA_BCLK"), ++ PINCTRL_PIN(308, "HDA_SYNC"), ++ PINCTRL_PIN(309, "HDA_SDO"), ++ PINCTRL_PIN(310, "HDA_SDI_0"), ++ PINCTRL_PIN(311, "HDA_RSTB"), ++ PINCTRL_PIN(312, "HDA_SDI_1"), ++ PINCTRL_PIN(313, "GPP_R_6"), ++ PINCTRL_PIN(314, "GPP_R_7"), ++}; ++ ++static const struct intel_padgroup ehl_community0_gpps[] = { ++ EHL_GPP(0, 0, 25), /* GPP_B */ ++ EHL_GPP(1, 26, 41), /* GPP_T */ ++ EHL_GPP(2, 42, 66), /* GPP_G */ ++}; ++ ++static const struct intel_padgroup ehl_community1_gpps[] = { ++ EHL_GPP(0, 67, 82), /* GPP_V */ ++ EHL_GPP(1, 83, 106), /* GPP_H */ ++ EHL_GPP(2, 107, 127), /* GPP_D */ ++ EHL_GPP(3, 128, 151), /* GPP_U */ ++ EHL_GPP(4, 152, 179), /* vGPIO */ ++}; ++ ++static const struct 
intel_padgroup ehl_community3_gpps[] = { ++ EHL_GPP(0, 180, 196), /* CPU */ ++ EHL_GPP(1, 197, 198), /* GPP_S */ ++ EHL_GPP(2, 199, 222), /* GPP_A */ ++ EHL_GPP(3, 223, 226), /* vGPIO_3 */ ++}; ++ ++static const struct intel_padgroup ehl_community4_gpps[] = { ++ EHL_GPP(0, 227, 250), /* GPP_C */ ++ EHL_GPP(1, 251, 275), /* GPP_F */ ++ EHL_GPP(2, 276, 281), /* HVCMOS */ ++ EHL_GPP(3, 282, 306), /* GPP_E */ ++}; ++ ++static const struct intel_padgroup ehl_community5_gpps[] = { ++ EHL_GPP(0, 307, 314), /* GPP_R */ ++}; ++ ++static const struct intel_community ehl_communities[] = { ++ EHL_COMMUNITY(0, 0, 66, ehl_community0_gpps), ++ EHL_COMMUNITY(1, 67, 179, ehl_community1_gpps), ++ EHL_COMMUNITY(2, 180, 226, ehl_community3_gpps), ++ EHL_COMMUNITY(3, 227, 306, ehl_community4_gpps), ++ EHL_COMMUNITY(4, 307, 314, ehl_community5_gpps), ++}; ++ ++static const struct intel_pingroup ehl_groups[] = { ++ /* PLACE HOLDER */ ++}; ++ ++static const struct intel_function ehl_functions[] = { ++ /* PLACE HOLDER */ ++}; ++ ++static const struct intel_pinctrl_soc_data ehl_soc_data = { ++ .pins = ehl_pins, ++ .npins = ARRAY_SIZE(ehl_pins), ++ .groups = ehl_groups, ++ .ngroups = ARRAY_SIZE(ehl_groups), ++ .functions = ehl_functions, ++ .nfunctions = ARRAY_SIZE(ehl_functions), ++ .communities = ehl_communities, ++ .ncommunities = ARRAY_SIZE(ehl_communities), ++}; ++ ++static const struct acpi_device_id ehl_pinctrl_acpi_match[] = { ++ { "INTC1020", (kernel_ulong_t)&ehl_soc_data }, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, ehl_pinctrl_acpi_match); ++ ++static INTEL_PINCTRL_PM_OPS(ehl_pinctrl_pm_ops); ++ ++static struct platform_driver ehl_pinctrl_driver = { ++ .probe = intel_pinctrl_probe_by_hid, ++ .driver = { ++ .name = "elkhartlake-pinctrl", ++ .acpi_match_table = ehl_pinctrl_acpi_match, ++ .pm = &ehl_pinctrl_pm_ops, ++ }, ++}; ++ ++module_platform_driver(ehl_pinctrl_driver); ++ ++MODULE_AUTHOR("Andy Shevchenko "); ++MODULE_DESCRIPTION("Intel Elkhart Lake PCH pinctrl/GPIO driver"); 
++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0003-scsi-ufs-Add-UME-support.security b/patches/0003-scsi-ufs-Add-UME-support.security new file mode 100644 index 0000000000..f534bd168d --- /dev/null +++ b/patches/0003-scsi-ufs-Add-UME-support.security @@ -0,0 +1,574 @@ +From 21a8bbc4401b7743719ff15e3d2449574e884e96 Mon Sep 17 00:00:00 2001 +From: Adrian Hunter +Date: Tue, 26 Feb 2019 14:49:18 +0200 +Subject: [PATCH 03/65] scsi: ufs: Add UME support + +Add support for UFS Unified Memory Extension. + +This patch assumes memory is allocated which means using CMA e.g. + + Kernel config: + CONFIG_CMA + CONFIG_DMA_CMA + + Kernel command line: + cma=128M@4G + +Signed-off-by: Adrian Hunter +--- + drivers/scsi/ufs/ufs-sysfs.c | 2 +- + drivers/scsi/ufs/ufs.h | 17 ++- + drivers/scsi/ufs/ufshcd.c | 251 +++++++++++++++++++++++++++++++++-- + drivers/scsi/ufs/ufshcd.h | 10 ++ + drivers/scsi/ufs/ufshci.h | 13 ++ + 5 files changed, 281 insertions(+), 12 deletions(-) + +diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c +index 969a36b15897..0926b1c0c3f7 100644 +--- a/drivers/scsi/ufs/ufs-sysfs.c ++++ b/drivers/scsi/ufs/ufs-sysfs.c +@@ -129,7 +129,7 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) + if (hba->ahit == ahit) + goto out_unlock; + hba->ahit = ahit; +- if (!pm_runtime_suspended(hba->dev)) ++ if (!pm_runtime_suspended(hba->dev) && !hba->ahit_disabled) + ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); + out_unlock: + spin_unlock_irqrestore(hba->host->host_lock, flags); +diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h +index 3327981ef894..ed54eac4e6d7 100644 +--- a/drivers/scsi/ufs/ufs.h ++++ b/drivers/scsi/ufs/ufs.h +@@ -139,8 +139,10 @@ enum flag_idn { + QUERY_FLAG_IDN_RESERVED2 = 0x07, + QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08, + QUERY_FLAG_IDN_BUSY_RTC = 0x09, +- QUERY_FLAG_IDN_RESERVED3 = 0x0A, ++ QUERY_FLAG_IDN_UNIFIED_MEMORY = 0x0A, + QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE 
= 0x0B, ++ QUERY_FLAG_IDN_SUSPEND_UM = 0x0C, ++ QUERY_FLAG_IDN_UM_SUSPENDED = 0x0D, + }; + + /* Attribute idn for Query requests */ +@@ -163,8 +165,8 @@ enum attr_idn { + QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F, + QUERY_ATTR_IDN_CNTX_CONF = 0x10, + QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11, +- QUERY_ATTR_IDN_RESERVED2 = 0x12, +- QUERY_ATTR_IDN_RESERVED3 = 0x13, ++ QUERY_ATTR_IDN_UM_AREA_SIZE = 0x12, ++ QUERY_ATTR_IDN_MAX_UMPIU_REQS = 0x13, + QUERY_ATTR_IDN_FFU_STATUS = 0x14, + QUERY_ATTR_IDN_PSA_STATE = 0x15, + QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16, +@@ -258,8 +260,13 @@ enum device_desc_param { + DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25, + DEVICE_DESC_PARAM_PSA_TMT = 0x29, + DEVICE_DESC_PARAM_PRDCT_REV = 0x2A, ++ DEVICE_DESC_PARAM_MIN_UMA_SZ = 0x31, + }; + ++#define UFS_DEVICE_SUB_CLASS_NONBOOTABLE 0x01 ++#define UFS_DEVICE_SUB_CLASS_REMOVABLE 0x02 ++#define UFS_DEVICE_SUB_CLASS_UM_SUPPORT 0x04 ++ + /* Interconnect descriptor parameters offsets in bytes*/ + enum interconnect_desc_param { + INTERCONNECT_DESC_PARAM_LEN = 0x0, +@@ -536,10 +543,14 @@ struct ufs_dev_info { + /** + * ufs_dev_desc - ufs device details from the device descriptor + * ++ * @subclass: device subclass ++ * @min_uma_sz: minimum UM area size + * @wmanufacturerid: card details + * @model: card model + */ + struct ufs_dev_desc { ++ u8 subclass; ++ u32 min_uma_sz; + u16 wmanufacturerid; + u8 *model; + }; +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 11a87f51c442..7789b6f9ad1b 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -403,7 +403,13 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba, + + static void ufshcd_print_host_regs(struct ufs_hba *hba) + { +- ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); ++ unsigned int sz; ++ ++ sz = hba->capabilities & MASK_DEVICE_BUS_MASTER_MODE_SUPPORT ? 
++ UFSHCI_UMA_REG_SPACE_SIZE : ++ UFSHCI_REG_SPACE_SIZE; ++ ++ ufshcd_dump_regs(hba, 0, sz, "host_regs: "); + dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n", + hba->ufs_version, hba->capabilities); + dev_err(hba->dev, +@@ -2839,11 +2845,13 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + * @index: index field + * @selector: selector field + * @attr_val: the attribute value after the query request completes ++ * @quiet: suppress error message + * + * Returns 0 for success, non-zero in case of failure + */ +-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, +- enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) ++int __ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, ++ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val, ++ bool quiet) + { + struct ufs_query_req *request = NULL; + struct ufs_query_res *response = NULL; +@@ -2881,7 +2889,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + + if (err) { +- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", ++ if (!quiet) ++ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", + __func__, opcode, idn, index, err); + goto out_unlock; + } +@@ -2895,6 +2904,13 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + return err; + } + ++int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, ++ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) ++{ ++ return __ufshcd_query_attr(hba, opcode, idn, index, selector, attr_val, ++ false); ++} ++ + /** + * ufshcd_query_attr_retry() - API function for sending query + * attribute with retries +@@ -3871,6 +3887,39 @@ static int ufshcd_link_recovery(struct ufs_hba *hba) + return ret; + } + ++static int ufshcd_uma_suspend(struct ufs_hba *hba) ++{ ++ bool flag_res = 0; ++ int ret; ++ int i; ++ ++ ret = 
ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, ++ QUERY_FLAG_IDN_SUSPEND_UM, NULL); ++ ++ /* Poll for max. 1000 iterations for fUMSuspended flag to set */ ++ for (i = 0; i < 1000 && !ret && !flag_res; i++) ++ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, ++ QUERY_FLAG_IDN_UM_SUSPENDED, &flag_res); ++ if (!ret && !flag_res) ++ ret = -ETIMEDOUT; ++ if (ret) ++ dev_err(hba->dev, "UMA suspend failed. ret = %d\n", ret); ++ ++ return ret; ++} ++ ++static int ufshcd_uma_unsuspend(struct ufs_hba *hba) ++{ ++ int ret; ++ ++ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, ++ QUERY_FLAG_IDN_SUSPEND_UM, NULL); ++ if (ret) ++ dev_err(hba->dev, "UMA unsuspend failed. ret = %d\n", ret); ++ ++ return ret; ++} ++ + static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) + { + int ret; +@@ -3904,6 +3953,15 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) + static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) + { + int ret = 0, retries; ++ bool uma_suspended = false; ++ ++ /* UMA must be suspended prior to hibernate */ ++ if (hba->uma && ufshcd_is_ufs_dev_active(hba)) { ++ ret = ufshcd_uma_suspend(hba); ++ if (ret) ++ return ret; ++ uma_suspended = true; ++ } + + for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) { + ret = __ufshcd_uic_hibern8_enter(hba); +@@ -3911,6 +3969,9 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) + goto out; + } + out: ++ if (ret && ret != -ENOLINK && uma_suspended) ++ ufshcd_uma_unsuspend(hba); ++ + return ret; + } + +@@ -3931,28 +3992,50 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) + dev_err(hba->dev, "%s: hibern8 exit failed. 
ret = %d\n", + __func__, ret); + ret = ufshcd_link_recovery(hba); ++ ++ if (!ret && hba->uma && ufshcd_is_ufs_dev_active(hba)) ++ ufshcd_uma_unsuspend(hba); + } else { + ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, + POST_CHANGE); + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get(); + hba->ufs_stats.hibern8_exit_cnt++; ++ ++ /* UMA is suspended prior to hibernate and unsuspended here */ ++ if (hba->uma && ufshcd_is_ufs_dev_active(hba)) ++ ufshcd_uma_unsuspend(hba); + } + + return ret; + } + +-static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) ++static void __ufshcd_auto_hibern8_disable(struct ufs_hba *hba, bool disable) + { + unsigned long flags; ++ u32 val; + +- if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit) ++ if (!ufshcd_is_auto_hibern8_supported(hba)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); +- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); ++ hba->ahit_disabled = disable; ++ if (hba->ahit) { ++ val = disable ? 0 : hba->ahit; ++ ufshcd_writel(hba, val, REG_AUTO_HIBERNATE_IDLE_TIMER); ++ } + spin_unlock_irqrestore(hba->host->host_lock, flags); + } + ++static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) ++{ ++ __ufshcd_auto_hibern8_disable(hba, false); ++} ++ ++static void ufshcd_auto_hibern8_disable(struct ufs_hba *hba) ++{ ++ __ufshcd_auto_hibern8_disable(hba, true); ++} ++ + /** + * ufshcd_init_pwr_info - setting the POR (power on reset) + * values in hba power info +@@ -6499,13 +6582,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba, + size_t buff_len; + u8 model_index; + u8 *desc_buf; ++ __be32 val; + + if (!dev_desc) + return -EINVAL; + + buff_len = max_t(size_t, hba->desc_size.dev_desc, + QUERY_DESC_MAX_SIZE + 1); +- desc_buf = kmalloc(buff_len, GFP_KERNEL); ++ desc_buf = kzalloc(buff_len, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; +@@ -6518,6 +6602,11 @@ static int ufs_get_device_desc(struct ufs_hba *hba, + goto out; + } + ++ dev_desc->subclass = 
desc_buf[DEVICE_DESC_PARAM_DEVICE_SUB_CLASS]; ++ ++ memcpy(&val, desc_buf + DEVICE_DESC_PARAM_MIN_UMA_SZ, sizeof(val)); ++ dev_desc->min_uma_sz = be32_to_cpu(val); ++ + /* + * getting vendor (manufacturerID) and Bank Index in big endian + * format +@@ -6855,6 +6944,129 @@ static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) + return err; + } + ++static int __ufshcd_uma_reenable(struct ufs_hba *hba, bool init_dev) ++{ ++ u32 max_umpiu_reqs; ++ u32 config; ++ int err; ++ ++ config = ufshcd_readl(hba, REG_UMA_CONFIG); ++ ++ if (!(config & UFSHCI_UMA_ENABLE)) { ++ /* Set UMA address and size for the host controller */ ++ ufshcd_writel(hba, lower_32_bits(hba->uma_addr), ++ REG_UMA_BASE_ADDR_L); ++ ufshcd_writel(hba, upper_32_bits(hba->uma_addr), ++ REG_UMA_BASE_ADDR_H); ++ ufshcd_writel(hba, hba->uma_size, REG_UMA_OFFSET_MAX); ++ } ++ ++ if (init_dev) { ++ u32 retries; ++ ++ /* ++ * Set UMA size for the device. This is valid only after a power ++ * cycle or hardware reset. Here it is assumed that if it has ++ * already been written, then it has the correct value. ++ */ ++ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { ++ err = __ufshcd_query_attr(hba, ++ UPIU_QUERY_OPCODE_WRITE_ATTR, ++ QUERY_ATTR_IDN_UM_AREA_SIZE, 0, 0, ++ &hba->uma_size, true); ++ if (err == QUERY_RESULT_ALREADY_WRITTEN) { ++ err = 0; ++ break; ++ } ++ } ++ if (err) ++ return err; ++ ++ /* Set max. 
UMPIU requests */ ++ max_umpiu_reqs = ufshcd_readl(hba, REG_UMA_CAP) & ++ UFSHCI_UMA_MNOOUR; ++ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, ++ QUERY_ATTR_IDN_MAX_UMPIU_REQS, 0, ++ 0, &max_umpiu_reqs); ++ if (err) ++ return err; ++ } ++ ++ /* Enable UMA for the host controller */ ++ if (!(config & UFSHCI_UMA_ENABLE)) { ++ config |= UFSHCI_UMA_ENABLE; ++ ufshcd_writel(hba, config, REG_UMA_CONFIG); ++ } ++ ++ if (init_dev) { ++ bool flag_res = 1; ++ int i; ++ ++ /* Enable UMA for the device */ ++ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, ++ QUERY_FLAG_IDN_UNIFIED_MEMORY, NULL); ++ ++ /* Poll for max. 1000 iterations for fUM flag to clear */ ++ for (i = 0; i < 1000 && !err && flag_res; i++) ++ err = ufshcd_query_flag_retry(hba, ++ UPIU_QUERY_OPCODE_READ_FLAG, ++ QUERY_FLAG_IDN_UNIFIED_MEMORY, ++ &flag_res); ++ if (err) ++ return err; ++ ++ if (flag_res) ++ dev_err(hba->dev, "fUM was not cleared by the device\n"); ++ } ++ ++ return 0; ++} ++ ++static void ufshcd_uma_reenable(struct ufs_hba *hba) ++{ ++ if (!hba->uma) ++ return; ++ ++ __ufshcd_uma_reenable(hba, false); ++} ++ ++static int ufshcd_uma_enable(struct ufs_hba *hba, struct ufs_dev_desc *dev_desc) ++{ ++ int err; ++ ++ if (!(hba->capabilities & MASK_DEVICE_BUS_MASTER_MODE_SUPPORT) || ++ !(dev_desc->subclass & UFS_DEVICE_SUB_CLASS_UM_SUPPORT)) ++ return 0; ++ ++ if (!hba->uma_size) { ++ gfp_t flags = GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY; ++ u32 sz = PAGE_ALIGN(dev_desc->min_uma_sz); ++ ++ if (!sz) ++ return 0; ++ ++ hba->uma_base_addr = dmam_alloc_coherent(hba->dev, sz, ++ &hba->uma_addr, flags); ++ if (!hba->uma_base_addr) { ++ dev_err(hba->dev, "Failed to allocate unified memory area\n"); ++ return -ENOMEM; ++ } ++ ++ hba->uma_size = sz; ++ } ++ ++ err = __ufshcd_uma_reenable(hba, true); ++ if (err) ++ return err; ++ ++ hba->uma = true; ++ ++ dev_info(hba->dev, "Enabled %u KiB unified memory area\n", ++ hba->uma_size / 1024); ++ ++ return 0; ++} ++ + /** + * 
ufshcd_probe_hba - probe hba to detect device and initialize + * @hba: per-adapter instance +@@ -6907,6 +7119,12 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) + + ufshcd_tune_unipro_params(hba); + ++ ret = ufshcd_uma_enable(hba, &card); ++ if (ret) { ++ dev_err(hba->dev, "Failed to enable unified memory area\n"); ++ goto out; ++ } ++ + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); + ufshcd_force_reset_auto_bkops(hba); +@@ -7537,6 +7755,9 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) + * + * Returns 0 if requested power mode is set successfully + * Returns non-zero if failed to set the requested power mode ++ * ++ * Note for UMA, power transitions have IMMED==0 so no polling is needed to ++ * prevent subsequently entering HIBERNATE while there are outstanding UMPIUs. + */ + static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, + enum ufs_dev_pwr_mode pwr_mode) +@@ -7811,6 +8032,13 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) + !ufshcd_is_runtime_pm(pm_op))) { + /* ensure that bkops is disabled */ + ufshcd_disable_auto_bkops(hba); ++ /* ++ * If UMA is enabled, auto-hibernate is not permitted during ++ * power mode transitions. 
++ */ ++ if (hba->uma) ++ ufshcd_auto_hibern8_disable(hba); ++ + ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); + if (ret) + goto enable_gating; +@@ -7865,6 +8093,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) + ufshcd_resume_clkscaling(hba); + hba->clk_gating.is_suspended = false; + ufshcd_release(hba); ++ if (hba->uma) ++ ufshcd_auto_hibern8_enable(hba); + out: + hba->pm_op_in_progress = 0; + if (ret) +@@ -7886,6 +8116,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) + { + int ret; + enum uic_link_state old_link_state; ++ bool reinit = true; + + hba->pm_op_in_progress = 1; + old_link_state = hba->uic_link_state; +@@ -7928,8 +8159,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) + */ + if (ret || !ufshcd_is_link_active(hba)) + goto vendor_suspend; ++ reinit = false; + } + ++ if (reinit) ++ ufshcd_uma_reenable(hba); ++ + if (!ufshcd_is_ufs_dev_active(hba)) { + ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); + if (ret) +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h +index c94cfda52829..3b9a3b56c6b9 100644 +--- a/drivers/scsi/ufs/ufshcd.h ++++ b/drivers/scsi/ufs/ufshcd.h +@@ -559,6 +559,13 @@ struct ufs_hba { + + /* Auto-Hibernate Idle Timer register value */ + u32 ahit; ++ bool ahit_disabled; ++ ++ /* Unified memory area */ ++ void *uma_base_addr; ++ dma_addr_t uma_addr; ++ u32 uma_size; ++ bool uma; + + struct ufshcd_lrb *lrb; + unsigned long lrb_in_use; +@@ -911,6 +918,9 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, + u8 param_offset, + u8 *param_read_buf, + u8 param_size); ++int __ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, ++ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val, ++ bool quiet); + int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, u32 *attr_val); + int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, +diff --git 
a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h +index dbb75cd28dc8..e7e86ba12cb6 100644 +--- a/drivers/scsi/ufs/ufshci.h ++++ b/drivers/scsi/ufs/ufshci.h +@@ -45,6 +45,7 @@ enum { + /* UFSHCI Registers */ + enum { + REG_CONTROLLER_CAPABILITIES = 0x00, ++ REG_UMA_CAP = 0x04, + REG_UFS_VERSION = 0x08, + REG_CONTROLLER_DEV_ID = 0x10, + REG_CONTROLLER_PROD_ID = 0x14, +@@ -76,6 +77,13 @@ enum { + + UFSHCI_REG_SPACE_SIZE = 0xA0, + ++ REG_UMA_BASE_ADDR_L = 0xB0, ++ REG_UMA_BASE_ADDR_H = 0xB4, ++ REG_UMA_OFFSET_MAX = 0xB8, ++ REG_UMA_CONFIG = 0xBC, ++ ++ UFSHCI_UMA_REG_SPACE_SIZE = 0xC0, ++ + REG_UFS_CCAP = 0x100, + REG_UFS_CRYPTOCAP = 0x104, + +@@ -90,6 +98,7 @@ enum { + MASK_64_ADDRESSING_SUPPORT = 0x01000000, + MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000, + MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, ++ MASK_DEVICE_BUS_MASTER_MODE_SUPPORT = 0x08000000, + }; + + #define UFS_MASK(mask, offset) ((mask) << (offset)) +@@ -244,6 +253,10 @@ enum { + #define COMMAND_OPCODE_MASK 0xFF + #define GEN_SELECTOR_INDEX_MASK 0xFFFF + ++/* UMA - Unified Memory Area */ ++#define UFSHCI_UMA_MNOOUR GENMASK(2, 0) ++#define UFSHCI_UMA_ENABLE 0x1 ++ + #define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16) + #define RESET_LEVEL 0xFF + +-- +2.17.1 + diff --git a/patches/0003-trusty-Get-version-string-from-trusty.trusty b/patches/0003-trusty-Get-version-string-from-trusty.trusty new file mode 100644 index 0000000000..b46784b93f --- /dev/null +++ b/patches/0003-trusty-Get-version-string-from-trusty.trusty @@ -0,0 +1,155 @@ +From acecc64cf48fa17e9f088ff571a1ae6cf74268bb Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Thu, 9 Oct 2014 21:24:17 -0700 +Subject: [PATCH 03/63] trusty: Get version string from trusty +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Print trusty version to kernel log on startup, and export it in sysfs. 
+ +Change-Id: Ib8e3d856ed9cf86a71d334f5ab753af1ec8a8bd3 +Signed-off-by: Arve Hjønnevåg +--- + drivers/trusty/trusty.c | 66 +++++++++++++++++++++++++++++++++++ + include/linux/trusty/smcall.h | 1 + + include/linux/trusty/trusty.h | 2 +- + 3 files changed, 68 insertions(+), 1 deletion(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 7efcff89610c..16c595bf5e29 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -27,6 +28,7 @@ + struct trusty_state { + struct mutex smc_lock; + struct atomic_notifier_head notifier; ++ char *version_str; + }; + + #ifdef CONFIG_ARM64 +@@ -209,6 +211,60 @@ static int trusty_remove_child(struct device *dev, void *data) + return 0; + } + ++ssize_t trusty_version_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return scnprintf(buf, PAGE_SIZE, "%s\n", s->version_str); ++} ++ ++DEVICE_ATTR(trusty_version, S_IRUSR, trusty_version_show, NULL); ++ ++const char *trusty_version_str_get(struct device *dev) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return s->version_str; ++} ++EXPORT_SYMBOL(trusty_version_str_get); ++ ++static void trusty_init_version(struct trusty_state *s, struct device *dev) ++{ ++ int ret; ++ int i; ++ int version_str_len; ++ ++ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0); ++ if (ret <= 0) ++ goto err_get_size; ++ ++ version_str_len = ret; ++ ++ s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL); ++ for (i = 0; i < version_str_len; i++) { ++ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0); ++ if (ret < 0) ++ goto err_get_char; ++ s->version_str[i] = ret; ++ } ++ s->version_str[i] = '\0'; ++ ++ dev_info(dev, "trusty version: %s\n", s->version_str); ++ ++ ret = device_create_file(dev, 
&dev_attr_trusty_version); ++ if (ret) ++ goto err_create_file; ++ return; ++ ++err_create_file: ++err_get_char: ++ kfree(s->version_str); ++ s->version_str = NULL; ++err_get_size: ++ dev_err(dev, "failed to get version: %d\n", ret); ++} ++ + static int trusty_probe(struct platform_device *pdev) + { + int ret; +@@ -229,6 +285,8 @@ static int trusty_probe(struct platform_device *pdev) + ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); + platform_set_drvdata(pdev, s); + ++ trusty_init_version(s, &pdev->dev); ++ + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add children: %d\n", ret); +@@ -238,6 +296,10 @@ static int trusty_probe(struct platform_device *pdev) + return 0; + + err_add_children: ++ if (s->version_str) { ++ device_remove_file(&pdev->dev, &dev_attr_trusty_version); ++ kfree(s->version_str); ++ } + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); + mutex_destroy(&s->smc_lock); + kfree(s); +@@ -251,6 +313,10 @@ static int trusty_remove(struct platform_device *pdev) + + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); + mutex_destroy(&s->smc_lock); ++ if (s->version_str) { ++ device_remove_file(&pdev->dev, &dev_attr_trusty_version); ++ kfree(s->version_str); ++ } + kfree(s); + return 0; + } +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index 278a4b256fbc..4344683f6c61 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -71,5 +71,6 @@ + #define SMC_FC_CPU_RESUME SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8) + + #define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9) ++#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10) + + #endif /* __LINUX_TRUSTY_SMCALL_H */ +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index ce00c1d46a5e..abb77f1db74d 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -52,5 
+52,5 @@ int trusty_call_notifier_register(struct device *dev, + struct notifier_block *n); + int trusty_call_notifier_unregister(struct device *dev, + struct notifier_block *n); +- ++const char *trusty_version_str_get(struct device *dev); + #endif +-- +2.17.1 + diff --git a/patches/0003-usb-host-xhci-Support-running-urb-giveback-in-tas.usb-xhci b/patches/0003-usb-host-xhci-Support-running-urb-giveback-in-tas.usb-xhci new file mode 100644 index 0000000000..d73f4ae1fa --- /dev/null +++ b/patches/0003-usb-host-xhci-Support-running-urb-giveback-in-tas.usb-xhci @@ -0,0 +1,143 @@ +From f520118456d4db73652494a481ea8882ec108f75 Mon Sep 17 00:00:00 2001 +From: Suwan Kim +Date: Mon, 1 Apr 2019 23:16:11 +0900 +Subject: [PATCH 3/4] usb: host: xhci: Support running urb giveback in tasklet + context + +Patch "USB: HCD: support giveback of URB in tasklet context"[1] +introduced giveback of urb in tasklet context. [1] This patch was +applied to ehci but not xhci. [2] This patch significantly reduces +the hard irq time of xhci. Especially for uvc driver, the hard irq +including the uvc completion function runs quite long but applying +this patch reduces the hard irq time of xhci. + +I have tested four SS devices to check if performance degradation +occurs when urb completion functions run in the tasklet context. + +As a result of the test, all devices works well and shows very +similar performance with the upstream kernel. Moreover, usb ethernet +adapter show better performance than the upstream kernel about 5% for +RX and 2% for TX. Four SS devices is as follows. + +SS devices for test + +1. WD My Passport 2TB (external hard drive) +2. Sandisk Ultra Flair USB 3.0 32GB +3. Logitech Brio webcam +4. Iptime 1gigabit ethernet adapter (Mediatek RTL8153) + +Test description + +1. Mass storage (hard drive) performance test +- run below command 10 times and compute the average performance + + dd if=/dev/sdN iflag=direct of=/dev/null bs=1G count=1 + +2. 
Mass storage (flash memory) performance test +- run below command 10 times and compute the average performance + + dd if=/dev/sdN iflag=direct of=/dev/null bs=1G count=1 + +3. Webcam streaming performance test +- run simple capture program and get the average frame rate per second +- capture 1500 frames +- program link + + https://github.com/asfaca/Webcam-performance-analyzing-tool + +- video resolution : 4096 X 2160 (4K) at 30 or 24 fps +- device (Logitech Brio) spec url for the highest resolution and fps + + https://support.logitech.com/en_gb/product/brio-stream/specs + +4. USB Ethernet adapter performance test +- directly connect two linux machines with ethernet cable +- run pktgen of linux kernel and send 1500 bytes packets +- run vnstat to measure the network bandwidth for 180 seconds + +Test machine + +- CPU : Intel i5-7600 @ 3.5GHz + +Test results + +1. Mass storage (hard drive) performance test + + WD My Passport 2TB (external hard drive) +-------------------------------------------------------------------- + xhci without tasklet | xhci with tasklet +-------------------------------------------------------------------- + 103.667MB/s | 103.692MB/s +-------------------------------------------------------------------- + +2. Mass storage (flash memory) performance test + + Sandisk Ultra Flair USB 3.0 32GB +-------------------------------------------------------------------- + xhci without tasklet | xhci with tasklet +-------------------------------------------------------------------- + 129.727MB/s | 130.2MB/s +-------------------------------------------------------------------- + +3. Webcam streaming performance test + + Logitech Brio webcam +-------------------------------------------------------------------- + xhci without tasklet | xhci with tasklet +-------------------------------------------------------------------- + 26.4451 fps | 26.3949 fps +-------------------------------------------------------------------- + +4. 
USB Ethernet adapter performance test + + Iptime 1gigabit ethernet adapter (Mediatek RTL8153) +-------------------------------------------------------------------- + xhci without tasklet | xhci with tasklet +-------------------------------------------------------------------- +RX 933.86 Mbit/s | 983.86 Mbit/s +-------------------------------------------------------------------- +TX 830.18 Mbit/s | 882.75 Mbit/s +-------------------------------------------------------------------- + +[1], https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94dfd7edfd5c9b605caf7b562de7a813d216e011 +[2], https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=428aac8a81058e2303677a8fbf26670229e51d3a + +Signed-off-by: Suwan Kim +Signed-off-by: Mathias Nyman +--- + drivers/usb/host/xhci-ring.c | 2 -- + drivers/usb/host/xhci.c | 3 ++- + 2 files changed, 2 insertions(+), 3 deletions(-) + +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 85ceb43e3405..019de58e2d51 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -651,10 +651,8 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, + } + xhci_urb_free_priv(urb_priv); + usb_hcd_unlink_urb_from_ep(hcd, urb); +- spin_unlock(&xhci->lock); + trace_xhci_urb_giveback(urb); + usb_hcd_giveback_urb(hcd, urb, status); +- spin_lock(&xhci->lock); + } + + static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 517ec3206f6e..6c10dc0fd7dc 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -5266,7 +5266,8 @@ static const struct hc_driver xhci_hc_driver = { + * generic hardware linkage + */ + .irq = xhci_irq, +- .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED, ++ .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED | ++ HCD_BH, + + /* + * basic lifecycle operations +-- +2.17.1 + diff --git 
a/patches/0004-ASoC-Intel-Skylake-Skip-adding-NULL-UUID-from-FW-Man.audio b/patches/0004-ASoC-Intel-Skylake-Skip-adding-NULL-UUID-from-FW-Man.audio new file mode 100644 index 0000000000..86a0dc1c93 --- /dev/null +++ b/patches/0004-ASoC-Intel-Skylake-Skip-adding-NULL-UUID-from-FW-Man.audio @@ -0,0 +1,52 @@ +From 9c6d18ae40d185d7999805e84e019fcd24085ffe Mon Sep 17 00:00:00 2001 +From: Szymon Mielczarek +Date: Mon, 21 Jan 2019 15:29:42 +0100 +Subject: [PATCH 004/193] ASoC: Intel: Skylake: Skip adding NULL UUID from FW + Manifest + +Intel libraries contain a special purpose module with zero UUID. This +module should be loaded to memory but is not used by the driver. +Moreover, when more than one external library is being loaded then we +will have modules with the same UUID in the modules list what leads +to duplicated filenames when creating sysfs directories. + +Change-Id: Ib63ebb7b7b9f71fd040b8ffc9c1313af8d1f772e +Signed-off-by: Szymon Mielczarek +--- + sound/soc/intel/skylake/skl-sst-utils.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index d0f750eb1f9c..24004b754e0e 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -230,6 +230,7 @@ int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, + struct adsp_fw_hdr *adsp_hdr; + struct adsp_module_entry *mod_entry; + int i, num_entry, size; ++ guid_t *uuid_bin; + const char *buf; + struct skl_dev *skl = ctx->thread_context; + struct uuid_module *module; +@@ -290,13 +291,17 @@ int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, + */ + + for (i = 0; i < num_entry; i++, mod_entry++) { ++ uuid_bin = (guid_t *)mod_entry->uuid; ++ if (guid_is_null(uuid_bin)) ++ continue; ++ + module = devm_kzalloc(ctx->dev, sizeof(*module), GFP_KERNEL); + if (!module) { + list_del_init(&skl->module_list); + return -ENOMEM; + } + +- 
guid_copy(&module->uuid, (guid_t *)&mod_entry->uuid); ++ guid_copy(&module->uuid, uuid_bin); + + module->id = (i | (index << 12)); + module->is_loadable = mod_entry->type.load_type; +-- +2.17.1 + diff --git a/patches/0004-EDAC-igen6-Add-registration-APIs-for-In-Band-ECC-erro.edac b/patches/0004-EDAC-igen6-Add-registration-APIs-for-In-Band-ECC-erro.edac new file mode 100644 index 0000000000..f49cdc5bae --- /dev/null +++ b/patches/0004-EDAC-igen6-Add-registration-APIs-for-In-Band-ECC-erro.edac @@ -0,0 +1,105 @@ +From e703c6c0ed7e109256b49ce25db05ce3e8c8d893 Mon Sep 17 00:00:00 2001 +From: Qiuxu Zhuo +Date: Fri, 10 May 2019 23:04:28 +0800 +Subject: [PATCH 4/5] EDAC, igen6: Add registration APIs for In-Band ECC error + notification + +There are some external modules which want to be notified about the +In-Band ECC error for specific error handling. Because the In-Band ECC +error event is captured by the igen6_edac driver via NMI, so add the +registration APIs for the In-Band ECC error event that those modules +can be notified for further error handling. 
+ +Signed-off-by: Qiuxu Zhuo +--- + drivers/edac/igen6_edac.c | 23 +++++++++++++++++++++++ + drivers/edac/igen6_edac.h | 22 ++++++++++++++++++++++ + 2 files changed, 45 insertions(+) + create mode 100644 drivers/edac/igen6_edac.h + +diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c +index 405f3dad73aa..ae8c1afe672c 100644 +--- a/drivers/edac/igen6_edac.c ++++ b/drivers/edac/igen6_edac.c +@@ -30,6 +30,7 @@ + + #include "edac_mc.h" + #include "edac_module.h" ++#include "igen6_edac.h" + + #define IGEN6_REVISION "v1.1.5" + +@@ -193,6 +194,20 @@ static const struct pci_device_id igen6_pci_tbl[] = { + }; + MODULE_DEVICE_TABLE(pci, igen6_pci_tbl); + ++static BLOCKING_NOTIFIER_HEAD(ibecc_err_handler_chain); ++ ++int ibecc_err_register_notifer(struct notifier_block *nb) ++{ ++ return blocking_notifier_chain_register(&ibecc_err_handler_chain, nb); ++} ++EXPORT_SYMBOL_GPL(ibecc_err_register_notifer); ++ ++int ibecc_err_unregister_notifer(struct notifier_block *nb) ++{ ++ return blocking_notifier_chain_unregister(&ibecc_err_handler_chain, nb); ++} ++EXPORT_SYMBOL_GPL(ibecc_err_unregister_notifer); ++ + static enum dev_type get_width(int dimm_l, u32 mad_dimm) + { + u32 w = dimm_l ? IGEN6_DIMM_CH_DLW(mad_dimm) : +@@ -323,6 +338,7 @@ static void igen6_output_error(struct decoded_addr *res, u64 ecclog) + enum hw_event_mc_err_type type = ecclog & IGEN6_ECCERRLOG_UE ? 
+ HW_EVENT_ERR_UNCORRECTED : + HW_EVENT_ERR_CORRECTED; ++ struct ibecc_err_info e; + + edac_mc_handle_error(type, igen6_pvt->mci, 1, + res->sys_addr >> PAGE_SHIFT, +@@ -330,6 +346,13 @@ static void igen6_output_error(struct decoded_addr *res, u64 ecclog) + IGEN6_ECCERRLOG_SYND(ecclog), + res->chan, res->sub_chan, + -1, "", ""); ++ ++ /* Notify other handlers for further IBECC error handling */ ++ memset(&e, 0, sizeof(e)); ++ e.type = type; ++ e.sys_addr = res->sys_addr; ++ e.ecc_log = ecclog; ++ blocking_notifier_call_chain(&ibecc_err_handler_chain, 0, &e); + } + + static struct gen_pool *ecclog_gen_pool_create(void) +diff --git a/drivers/edac/igen6_edac.h b/drivers/edac/igen6_edac.h +new file mode 100644 +index 000000000000..9175b0ef433c +--- /dev/null ++++ b/drivers/edac/igen6_edac.h +@@ -0,0 +1,22 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Registration for IBECC error notification ++ * Copyright (C) 2019 Intel Corporation ++ */ ++ ++#ifndef _IGEN6_EDAC_H ++#define _IGEN6_EDAC_H ++ ++#include ++#include ++ ++struct ibecc_err_info { ++ enum hw_event_mc_err_type type; ++ u64 sys_addr; ++ u64 ecc_log; ++}; ++ ++int ibecc_err_register_notifer(struct notifier_block *nb); ++int ibecc_err_unregister_notifer(struct notifier_block *nb); ++ ++#endif /* _IGEN6_EDAC_H */ +-- +2.17.1 + diff --git a/patches/0004-HACK-scsi-ufs-Add-module-parameters-max_gear-dflt.security b/patches/0004-HACK-scsi-ufs-Add-module-parameters-max_gear-dflt.security new file mode 100644 index 0000000000..a74d5e8da6 --- /dev/null +++ b/patches/0004-HACK-scsi-ufs-Add-module-parameters-max_gear-dflt.security @@ -0,0 +1,76 @@ +From aff345f26ac2fa896b217e5aa85ba8ff4176e9e4 Mon Sep 17 00:00:00 2001 +From: Adrian Hunter +Date: Tue, 22 May 2018 10:44:51 +0300 +Subject: [PATCH 04/65] HACK: scsi: ufs: Add module parameters max_gear, + dflt_hs_rate and dflt_hs_mode + +Signed-off-by: Adrian Hunter +--- + drivers/scsi/ufs/ufshcd.c | 30 +++++++++++++++++++++++++++--- + 1 file changed, 27 insertions(+), 
3 deletions(-) + +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 7789b6f9ad1b..35767da207de 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -237,6 +237,10 @@ static struct ufs_dev_fix ufs_fixups[] = { + END_FIX + }; + ++static int max_gear; ++static int dflt_hs_rate; ++static int dflt_hs_mode; ++ + static void ufshcd_tmc_handler(struct ufs_hba *hba); + static void ufshcd_async_scan(void *data, async_cookie_t cookie); + static int ufshcd_reset_and_restore(struct ufs_hba *hba); +@@ -4063,9 +4067,15 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) + if (hba->max_pwr_info.is_valid) + return 0; + +- pwr_info->pwr_tx = FAST_MODE; +- pwr_info->pwr_rx = FAST_MODE; +- pwr_info->hs_rate = PA_HS_MODE_B; ++ if (dflt_hs_mode != FAST_MODE && dflt_hs_mode != FASTAUTO_MODE) ++ dflt_hs_mode = FAST_MODE; ++ ++ if (dflt_hs_rate != PA_HS_MODE_A && dflt_hs_rate != PA_HS_MODE_B) ++ dflt_hs_rate = PA_HS_MODE_B; ++ ++ pwr_info->pwr_tx = dflt_hs_mode; ++ pwr_info->pwr_rx = dflt_hs_mode; ++ pwr_info->hs_rate = dflt_hs_rate; + + /* Get the connected lane count */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), +@@ -4111,6 +4121,12 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) + pwr_info->pwr_tx = SLOW_MODE; + } + ++ if (max_gear > 0 && ++ (pwr_info->gear_rx > max_gear || pwr_info->gear_tx > max_gear)) { ++ pwr_info->gear_rx = max_gear; ++ pwr_info->gear_tx = max_gear; ++ } ++ + hba->max_pwr_info.is_valid = true; + return 0; + } +@@ -8669,6 +8685,14 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) + } + EXPORT_SYMBOL_GPL(ufshcd_init); + ++module_param(max_gear, int, 0444); ++module_param(dflt_hs_rate, int, 0444); ++module_param(dflt_hs_mode, int, 0444); ++ ++MODULE_PARM_DESC(, "Maximum gear: 1, 2 , 3 ..."); ++MODULE_PARM_DESC(, "Default high speed rate series : 1 (= rate A), 2 (= rate B)"); ++MODULE_PARM_DESC(, "Default high speed power mode: 1 (= FAST), 4 (= 
FASTAUTO)"); ++ + MODULE_AUTHOR("Santosh Yaragnavi "); + MODULE_AUTHOR("Vinayak Holikatti "); + MODULE_DESCRIPTION("Generic UFS host controller driver Core"); +-- +2.17.1 + diff --git a/patches/0004-VHM-add-guest-memory-remote-mapping-support.acrn b/patches/0004-VHM-add-guest-memory-remote-mapping-support.acrn new file mode 100644 index 0000000000..9a28bc2cd0 --- /dev/null +++ b/patches/0004-VHM-add-guest-memory-remote-mapping-support.acrn @@ -0,0 +1,127 @@ +From d716141ab7ce42e0b3c483cf1de83cc9404575c9 Mon Sep 17 00:00:00 2001 +From: Jason Zeng +Date: Fri, 31 Aug 2018 10:58:55 +0800 +Subject: [PATCH 004/150] VHM: add guest memory remote mapping support + +There is use case which needs do data operation based on guest physical +address. This patch added such support to do remote mapping for guest +physical memory. + +Change-Id: I37755ddcf742129d272f535e99a070965e01c01e +Tracked-On: 218445 +Signed-off-by: Jason Zeng +Signed-off-by: liang ding +Signed-off-by: Jason Chen CJ +Signed-off-by: Min He +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/vhm/vhm_mm.c | 79 +++++++++++++++++++++++++++++++++ + include/linux/vhm/acrn_vhm_mm.h | 2 + + 2 files changed, 81 insertions(+) + +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index 9dd0b9414d3a..ea7604b19aaf 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -362,3 +362,82 @@ int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma) + mutex_unlock(&vm->seg_lock); + return -EINVAL; + } ++ ++static void *do_map_guest_phys(struct vhm_vm *vm, u64 guest_phys, size_t size) ++{ ++ struct guest_memseg *seg; ++ ++ mutex_lock(&vm->seg_lock); ++ list_for_each_entry(seg, &vm->memseg_list, list) { ++ if (seg->segid != VM_SYSMEM) ++ continue; ++ ++ if (seg->gpa > guest_phys || ++ guest_phys >= seg->gpa + seg->len) ++ continue; ++ ++ if (guest_phys + size > seg->gpa + seg->len) { ++ mutex_unlock(&vm->seg_lock); ++ return NULL; ++ } ++ ++ 
mutex_unlock(&vm->seg_lock); ++ return phys_to_virt(seg->base + guest_phys - seg->gpa); ++ } ++ mutex_unlock(&vm->seg_lock); ++ return NULL; ++} ++ ++void *map_guest_phys(unsigned long vmid, u64 guest_phys, size_t size) ++{ ++ struct vhm_vm *vm; ++ void *ret; ++ ++ vm = find_get_vm(vmid); ++ if (vm == NULL) ++ return NULL; ++ ++ ret = do_map_guest_phys(vm, guest_phys, size); ++ ++ put_vm(vm); ++ ++ return ret; ++} ++EXPORT_SYMBOL(map_guest_phys); ++ ++static int do_unmap_guest_phys(struct vhm_vm *vm, u64 guest_phys) ++{ ++ struct guest_memseg *seg; ++ ++ mutex_lock(&vm->seg_lock); ++ list_for_each_entry(seg, &vm->memseg_list, list) { ++ if (seg->segid != VM_SYSMEM) ++ continue; ++ ++ if (seg->gpa <= guest_phys && ++ guest_phys < seg->gpa + seg->len) { ++ mutex_unlock(&vm->seg_lock); ++ return 0; ++ } ++ } ++ mutex_unlock(&vm->seg_lock); ++ ++ return -ESRCH; ++} ++ ++int unmap_guest_phys(unsigned long vmid, u64 guest_phys) ++{ ++ struct vhm_vm *vm; ++ int ret; ++ ++ vm = find_get_vm(vmid); ++ if (vm == NULL) { ++ pr_warn("vm_list corrupted\n"); ++ return -ESRCH; ++ } ++ ++ ret = do_unmap_guest_phys(vm, guest_phys); ++ put_vm(vm); ++ return ret; ++} ++EXPORT_SYMBOL(unmap_guest_phys); +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index 325f2b2026e8..e701254bc249 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -70,6 +70,8 @@ + #define MMU_MEM_ATTR_ALL_WB 0x00000047 + #define MMU_MEM_ATTR_ALL_WC 0x00000207 + ++void *map_guest_phys(unsigned long vmid, u64 uos_phys, size_t size); ++int unmap_guest_phys(unsigned long vmid, u64 uos_phys); + int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, int prot); + int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, +-- +2.17.1 + diff --git a/patches/0004-cpufreq-intel_pstate-Add-Elkhart-lake-support-to-.core-ehl b/patches/0004-cpufreq-intel_pstate-Add-Elkhart-lake-support-to-.core-ehl new file 
mode 100644 index 0000000000..803edd7d35 --- /dev/null +++ b/patches/0004-cpufreq-intel_pstate-Add-Elkhart-lake-support-to-.core-ehl @@ -0,0 +1,30 @@ +From 2db31317fd86cad028b7f05bbde8f2246e662f3e Mon Sep 17 00:00:00 2001 +From: Gayatri Kammela +Date: Tue, 6 Aug 2019 21:10:40 -0700 +Subject: [PATCH 04/12] cpufreq: intel_pstate: Add Elkhart lake support to + intel_pstate + +Add Atom based EHL support to intel_pstate. + +Cc: David E Box +Cc: Rajneesh Bharadwaj +Signed-off-by: Gayatri Kammela +--- + drivers/cpufreq/intel_pstate.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index 9f02de9a1b47..2c9fead4ca85 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -1935,6 +1935,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { + ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), + ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs), + ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs), ++ ICPU(INTEL_FAM6_ATOM_TREMONT, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), + {} + }; +-- +2.17.1 + diff --git a/patches/0004-drm-dp-dsc-Add-Support-for-all-BPCs-supported-by-TGL.drm b/patches/0004-drm-dp-dsc-Add-Support-for-all-BPCs-supported-by-TGL.drm new file mode 100644 index 0000000000..0bb1044abd --- /dev/null +++ b/patches/0004-drm-dp-dsc-Add-Support-for-all-BPCs-supported-by-TGL.drm @@ -0,0 +1,67 @@ +From 9a09766d3a5a0d8eac6ae856672714b892277778 Mon Sep 17 00:00:00 2001 +From: Anusha Srivatsa +Date: Tue, 20 Aug 2019 15:30:59 -0700 +Subject: [PATCH 004/690] drm/dp/dsc: Add Support for all BPCs supported by TGL +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +DSC engine on ICL supports only 8 and 10 BPC as the input +BPC. But DSC engine in TGL supports 8, 10 and 12 BPC. +Add 12 BPC support for DSC while calculating compression +configuration. 
+ +v2: Remove the separate define TGL_DP_DSC_MAX_SUPPORTED_BPC +and use the value directly.(More such defines can be removed +as part of future patches). (Ville) + +v3: Use values directly instead of accessing the defines +everytime for min and max DSC BPC. + +Cc: Ville Syrjälä +Cc: Manasi Navare +Signed-off-by: Anusha Srivatsa +Reviewed-by: Manasi Navare +Signed-off-by: Manasi Navare +Link: https://patchwork.freedesktop.org/patch/msgid/20190820223059.18052-1-anusha.srivatsa@intel.com +--- + drivers/gpu/drm/i915/display/intel_dp.c | 14 +++++++++----- + 1 file changed, 9 insertions(+), 5 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c +index 57e9f0ba331b..842d8b016638 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp.c ++++ b/drivers/gpu/drm/i915/display/intel_dp.c +@@ -70,8 +70,6 @@ + + /* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */ + #define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440 +-#define DP_DSC_MIN_SUPPORTED_BPC 8 +-#define DP_DSC_MAX_SUPPORTED_BPC 10 + + /* DP DSC throughput values used for slice count calculations KPixels/s */ + #define DP_DSC_PEAK_PIXEL_RATE 2720000 +@@ -2006,11 +2004,17 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, + if (!intel_dp_supports_dsc(intel_dp, pipe_config)) + return -EINVAL; + +- dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC, +- conn_state->max_requested_bpc); ++ /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ ++ if (INTEL_GEN(dev_priv) >= 12) ++ dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); ++ else ++ dsc_max_bpc = min_t(u8, 10, ++ conn_state->max_requested_bpc); + + pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); +- if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) { ++ ++ /* Min Input BPC for ICL+ is 8 */ ++ if (pipe_bpp < 8 * 3) { + DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); + return -EINVAL; + } +-- +2.17.1 + diff --git 
a/patches/0004-net-stmmac-fix-missing-netdev-features-in-stm.connectivity b/patches/0004-net-stmmac-fix-missing-netdev-features-in-stm.connectivity new file mode 100644 index 0000000000..762e20dede --- /dev/null +++ b/patches/0004-net-stmmac-fix-missing-netdev-features-in-stm.connectivity @@ -0,0 +1,29 @@ +From c8772d44369c6a6d5b2253a1392414ed331d40d3 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Sun, 28 Jul 2019 14:46:49 +0800 +Subject: [PATCH 004/108] net: stmmac: fix missing netdev->features in + stmmac_set_features + +Fixes: d2afb5bdffdei stmmac: fix the rx csum feature + +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 23c9570bbe59..ff93b270ef47 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -3814,6 +3814,8 @@ static int stmmac_set_features(struct net_device *netdev, + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + ++ netdev->features = features; ++ + return 0; + } + +-- +2.17.1 + diff --git a/patches/0004-pinctrl-intel-Add-Intel-Tiger-Lake-pin-controller-sup.lpss b/patches/0004-pinctrl-intel-Add-Intel-Tiger-Lake-pin-controller-sup.lpss new file mode 100644 index 0000000000..50cecf0bfb --- /dev/null +++ b/patches/0004-pinctrl-intel-Add-Intel-Tiger-Lake-pin-controller-sup.lpss @@ -0,0 +1,955 @@ +From 83d7d7c9db778fab7e6bfcaaad31862d338346c6 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Thu, 15 Feb 2018 18:10:14 +0300 +Subject: [PATCH 04/40] pinctrl: intel: Add Intel Tiger Lake pin controller + support + +This driver adds pinctrl/GPIO support for Intel Tiger Lake SoC. 
The +GPIO controller is based on the next generation GPIO hardware but still +compatible with the one supported by the Intel core pinctrl/GPIO driver. + +Signed-off-by: Andy Shevchenko +Signed-off-by: Mika Westerberg +--- + drivers/pinctrl/intel/Kconfig | 7 + + drivers/pinctrl/intel/Makefile | 1 + + drivers/pinctrl/intel/pinctrl-tigerlake.c | 902 ++++++++++++++++++++++ + 3 files changed, 910 insertions(+) + create mode 100644 drivers/pinctrl/intel/pinctrl-tigerlake.c + +diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig +index e20d83af0b4c..c091eb2f0aeb 100644 +--- a/drivers/pinctrl/intel/Kconfig ++++ b/drivers/pinctrl/intel/Kconfig +@@ -123,4 +123,11 @@ config PINCTRL_SUNRISEPOINT + provides an interface that allows configuring of PCH pins and + using them as GPIOs. + ++config PINCTRL_TIGERLAKE ++ tristate "Intel Tiger Lake pinctrl and GPIO driver" ++ depends on ACPI ++ select PINCTRL_INTEL ++ help ++ This pinctrl driver provides an interface that allows configuring ++ of Intel Tiger Lake PCH pins and using them as GPIOs. 
+ endif +diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile +index ec353993d545..5e92aba018ac 100644 +--- a/drivers/pinctrl/intel/Makefile ++++ b/drivers/pinctrl/intel/Makefile +@@ -14,3 +14,4 @@ obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o + obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o + obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o + obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o ++obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o +diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c +new file mode 100644 +index 000000000000..db92db288636 +--- /dev/null ++++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c +@@ -0,0 +1,902 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Intel Tiger Lake PCH pinctrl/GPIO driver ++ * ++ * Copyright (C) 2018, Intel Corporation ++ * Authors: Andy Shevchenko ++ * Mika Westerberg ++ */ ++ ++#include ++#include ++#include ++ ++#include ++ ++#include "pinctrl-intel.h" ++ ++#define TGL_PAD_OWN 0x020 ++#define TGL_PADCFGLOCK 0x080 ++#define TGL_HOSTSW_OWN 0x0b0 ++#define TGL_GPI_IS 0x100 ++#define TGL_GPI_IE 0x120 ++ ++#define TGL_GPP(r, s, e) \ ++ { \ ++ .reg_num = (r), \ ++ .base = (s), \ ++ .size = ((e) - (s) + 1), \ ++ } ++ ++#define TGL_COMMUNITY(b, s, e, g) \ ++ { \ ++ .barno = (b), \ ++ .padown_offset = TGL_PAD_OWN, \ ++ .padcfglock_offset = TGL_PADCFGLOCK, \ ++ .hostown_offset = TGL_HOSTSW_OWN, \ ++ .is_offset = TGL_GPI_IS, \ ++ .ie_offset = TGL_GPI_IE, \ ++ .pin_base = (s), \ ++ .npins = ((e) - (s) + 1), \ ++ .gpps = (g), \ ++ .ngpps = ARRAY_SIZE(g), \ ++ } ++ ++/* Tiger Lake-LP */ ++static const struct pinctrl_pin_desc tgllp_pins[] = { ++ /* GPP_B */ ++ PINCTRL_PIN(0, "CORE_VID_0"), ++ PINCTRL_PIN(1, "CORE_VID_1"), ++ PINCTRL_PIN(2, "VRALERTB"), ++ PINCTRL_PIN(3, "CPU_GP_2"), ++ PINCTRL_PIN(4, "CPU_GP_3"), ++ PINCTRL_PIN(5, "ISH_I2C0_SDA"), ++ PINCTRL_PIN(6, "ISH_I2C0_SCL"), ++ PINCTRL_PIN(7, "ISH_I2C1_SDA"), ++ 
PINCTRL_PIN(8, "ISH_I2C1_SCL"), ++ PINCTRL_PIN(9, "I2C5_SDA"), ++ PINCTRL_PIN(10, "I2C5_SCL"), ++ PINCTRL_PIN(11, "PMCALERTB"), ++ PINCTRL_PIN(12, "SLP_S0B"), ++ PINCTRL_PIN(13, "PLTRSTB"), ++ PINCTRL_PIN(14, "SPKR"), ++ PINCTRL_PIN(15, "GSPI0_CS0B"), ++ PINCTRL_PIN(16, "GSPI0_CLK"), ++ PINCTRL_PIN(17, "GSPI0_MISO"), ++ PINCTRL_PIN(18, "GSPI0_MOSI"), ++ PINCTRL_PIN(19, "GSPI1_CS0B"), ++ PINCTRL_PIN(20, "GSPI1_CLK"), ++ PINCTRL_PIN(21, "GSPI1_MISO"), ++ PINCTRL_PIN(22, "GSPI1_MOSI"), ++ PINCTRL_PIN(23, "SML1ALERTB"), ++ PINCTRL_PIN(24, "GSPI0_CLK_LOOPBK"), ++ PINCTRL_PIN(25, "GSPI1_CLK_LOOPBK"), ++ /* GPP_T */ ++ PINCTRL_PIN(26, "I2C6_SDA"), ++ PINCTRL_PIN(27, "I2C6_SCL"), ++ PINCTRL_PIN(28, "I2C7_SDA"), ++ PINCTRL_PIN(29, "I2C7_SCL"), ++ PINCTRL_PIN(30, "UART4_RXD"), ++ PINCTRL_PIN(31, "UART4_TXD"), ++ PINCTRL_PIN(32, "UART4_RTSB"), ++ PINCTRL_PIN(33, "UART4_CTSB"), ++ PINCTRL_PIN(34, "UART5_RXD"), ++ PINCTRL_PIN(35, "UART5_TXD"), ++ PINCTRL_PIN(36, "UART5_RTSB"), ++ PINCTRL_PIN(37, "UART5_CTSB"), ++ PINCTRL_PIN(38, "UART6_RXD"), ++ PINCTRL_PIN(39, "UART6_TXD"), ++ PINCTRL_PIN(40, "UART6_RTSB"), ++ PINCTRL_PIN(41, "UART6_CTSB"), ++ /* GPP_A */ ++ PINCTRL_PIN(42, "ESPI_IO_0"), ++ PINCTRL_PIN(43, "ESPI_IO_1"), ++ PINCTRL_PIN(44, "ESPI_IO_2"), ++ PINCTRL_PIN(45, "ESPI_IO_3"), ++ PINCTRL_PIN(46, "ESPI_CSB"), ++ PINCTRL_PIN(47, "ESPI_CLK"), ++ PINCTRL_PIN(48, "ESPI_RESETB"), ++ PINCTRL_PIN(49, "I2S2_SCLK"), ++ PINCTRL_PIN(50, "I2S2_SFRM"), ++ PINCTRL_PIN(51, "I2S2_TXD"), ++ PINCTRL_PIN(52, "I2S2_RXD"), ++ PINCTRL_PIN(53, "PMC_I2C_SDA"), ++ PINCTRL_PIN(54, "SATAXPCIE_1"), ++ PINCTRL_PIN(55, "PMC_I2C_SCL"), ++ PINCTRL_PIN(56, "USB2_OCB_1"), ++ PINCTRL_PIN(57, "USB2_OCB_2"), ++ PINCTRL_PIN(58, "USB2_OCB_3"), ++ PINCTRL_PIN(59, "DDSP_HPD_C"), ++ PINCTRL_PIN(60, "DDSP_HPD_B"), ++ PINCTRL_PIN(61, "DDSP_HPD_1"), ++ PINCTRL_PIN(62, "DDSP_HPD_2"), ++ PINCTRL_PIN(63, "GPPC_A_21"), ++ PINCTRL_PIN(64, "GPPC_A_22"), ++ PINCTRL_PIN(65, "I2S1_SCLK"), ++ PINCTRL_PIN(66, 
"ESPI_CLK_LOOPBK"), ++ /* GPP_S */ ++ PINCTRL_PIN(67, "SNDW0_CLK"), ++ PINCTRL_PIN(68, "SNDW0_DATA"), ++ PINCTRL_PIN(69, "SNDW1_CLK"), ++ PINCTRL_PIN(70, "SNDW1_DATA"), ++ PINCTRL_PIN(71, "SNDW2_CLK"), ++ PINCTRL_PIN(72, "SNDW2_DATA"), ++ PINCTRL_PIN(73, "SNDW3_CLK"), ++ PINCTRL_PIN(74, "SNDW3_DATA"), ++ /* GPP_H */ ++ PINCTRL_PIN(75, "GPPC_H_0"), ++ PINCTRL_PIN(76, "GPPC_H_1"), ++ PINCTRL_PIN(77, "GPPC_H_2"), ++ PINCTRL_PIN(78, "SX_EXIT_HOLDOFFB"), ++ PINCTRL_PIN(79, "I2C2_SDA"), ++ PINCTRL_PIN(80, "I2C2_SCL"), ++ PINCTRL_PIN(81, "I2C3_SDA"), ++ PINCTRL_PIN(82, "I2C3_SCL"), ++ PINCTRL_PIN(83, "I2C4_SDA"), ++ PINCTRL_PIN(84, "I2C4_SCL"), ++ PINCTRL_PIN(85, "SRCCLKREQB_4"), ++ PINCTRL_PIN(86, "SRCCLKREQB_5"), ++ PINCTRL_PIN(87, "M2_SKT2_CFG_0"), ++ PINCTRL_PIN(88, "M2_SKT2_CFG_1"), ++ PINCTRL_PIN(89, "M2_SKT2_CFG_2"), ++ PINCTRL_PIN(90, "M2_SKT2_CFG_3"), ++ PINCTRL_PIN(91, "DDPB_CTRLCLK"), ++ PINCTRL_PIN(92, "DDPB_CTRLDATA"), ++ PINCTRL_PIN(93, "CPU_C10_GATEB"), ++ PINCTRL_PIN(94, "TIME_SYNC_0"), ++ PINCTRL_PIN(95, "IMGCLKOUT_1"), ++ PINCTRL_PIN(96, "IMGCLKOUT_2"), ++ PINCTRL_PIN(97, "IMGCLKOUT_3"), ++ PINCTRL_PIN(98, "IMGCLKOUT_4"), ++ /* GPP_D */ ++ PINCTRL_PIN(99, "ISH_GP_0"), ++ PINCTRL_PIN(100, "ISH_GP_1"), ++ PINCTRL_PIN(101, "ISH_GP_2"), ++ PINCTRL_PIN(102, "ISH_GP_3"), ++ PINCTRL_PIN(103, "IMGCLKOUT_0"), ++ PINCTRL_PIN(104, "SRCCLKREQB_0"), ++ PINCTRL_PIN(105, "SRCCLKREQB_1"), ++ PINCTRL_PIN(106, "SRCCLKREQB_2"), ++ PINCTRL_PIN(107, "SRCCLKREQB_3"), ++ PINCTRL_PIN(108, "ISH_SPI_CSB"), ++ PINCTRL_PIN(109, "ISH_SPI_CLK"), ++ PINCTRL_PIN(110, "ISH_SPI_MISO"), ++ PINCTRL_PIN(111, "ISH_SPI_MOSI"), ++ PINCTRL_PIN(112, "ISH_UART0_RXD"), ++ PINCTRL_PIN(113, "ISH_UART0_TXD"), ++ PINCTRL_PIN(114, "ISH_UART0_RTSB"), ++ PINCTRL_PIN(115, "ISH_UART0_CTSB"), ++ PINCTRL_PIN(116, "ISH_GP_4"), ++ PINCTRL_PIN(117, "ISH_GP_5"), ++ PINCTRL_PIN(118, "I2S_MCLK1_OUT"), ++ PINCTRL_PIN(119, "GSPI2_CLK_LOOPBK"), ++ /* GPP_U */ ++ PINCTRL_PIN(120, "UART3_RXD"), ++ PINCTRL_PIN(121, 
"UART3_TXD"), ++ PINCTRL_PIN(122, "UART3_RTSB"), ++ PINCTRL_PIN(123, "UART3_CTSB"), ++ PINCTRL_PIN(124, "GSPI3_CS0B"), ++ PINCTRL_PIN(125, "GSPI3_CLK"), ++ PINCTRL_PIN(126, "GSPI3_MISO"), ++ PINCTRL_PIN(127, "GSPI3_MOSI"), ++ PINCTRL_PIN(128, "GSPI4_CS0B"), ++ PINCTRL_PIN(129, "GSPI4_CLK"), ++ PINCTRL_PIN(130, "GSPI4_MISO"), ++ PINCTRL_PIN(131, "GSPI4_MOSI"), ++ PINCTRL_PIN(132, "GSPI5_CS0B"), ++ PINCTRL_PIN(133, "GSPI5_CLK"), ++ PINCTRL_PIN(134, "GSPI5_MISO"), ++ PINCTRL_PIN(135, "GSPI5_MOSI"), ++ PINCTRL_PIN(136, "GSPI6_CS0B"), ++ PINCTRL_PIN(137, "GSPI6_CLK"), ++ PINCTRL_PIN(138, "GSPI6_MISO"), ++ PINCTRL_PIN(139, "GSPI6_MOSI"), ++ PINCTRL_PIN(140, "GSPI3_CLK_LOOPBK"), ++ PINCTRL_PIN(141, "GSPI4_CLK_LOOPBK"), ++ PINCTRL_PIN(142, "GSPI5_CLK_LOOPBK"), ++ PINCTRL_PIN(143, "GSPI6_CLK_LOOPBK"), ++ /* vGPIO */ ++ PINCTRL_PIN(144, "CNV_BTEN"), ++ PINCTRL_PIN(145, "CNV_BT_HOST_WAKEB"), ++ PINCTRL_PIN(146, "CNV_BT_IF_SELECT"), ++ PINCTRL_PIN(147, "vCNV_BT_UART_TXD"), ++ PINCTRL_PIN(148, "vCNV_BT_UART_RXD"), ++ PINCTRL_PIN(149, "vCNV_BT_UART_CTS_B"), ++ PINCTRL_PIN(150, "vCNV_BT_UART_RTS_B"), ++ PINCTRL_PIN(151, "vCNV_MFUART1_TXD"), ++ PINCTRL_PIN(152, "vCNV_MFUART1_RXD"), ++ PINCTRL_PIN(153, "vCNV_MFUART1_CTS_B"), ++ PINCTRL_PIN(154, "vCNV_MFUART1_RTS_B"), ++ PINCTRL_PIN(155, "vUART0_TXD"), ++ PINCTRL_PIN(156, "vUART0_RXD"), ++ PINCTRL_PIN(157, "vUART0_CTS_B"), ++ PINCTRL_PIN(158, "vUART0_RTS_B"), ++ PINCTRL_PIN(159, "vISH_UART0_TXD"), ++ PINCTRL_PIN(160, "vISH_UART0_RXD"), ++ PINCTRL_PIN(161, "vISH_UART0_CTS_B"), ++ PINCTRL_PIN(162, "vISH_UART0_RTS_B"), ++ PINCTRL_PIN(163, "vCNV_BT_I2S_BCLK"), ++ PINCTRL_PIN(164, "vCNV_BT_I2S_WS_SYNC"), ++ PINCTRL_PIN(165, "vCNV_BT_I2S_SDO"), ++ PINCTRL_PIN(166, "vCNV_BT_I2S_SDI"), ++ PINCTRL_PIN(167, "vI2S2_SCLK"), ++ PINCTRL_PIN(168, "vI2S2_SFRM"), ++ PINCTRL_PIN(169, "vI2S2_TXD"), ++ PINCTRL_PIN(170, "vI2S2_RXD"), ++ /* GPP_C */ ++ PINCTRL_PIN(171, "SMBCLK"), ++ PINCTRL_PIN(172, "SMBDATA"), ++ PINCTRL_PIN(173, "SMBALERTB"), ++ 
PINCTRL_PIN(174, "SML0CLK"), ++ PINCTRL_PIN(175, "SML0DATA"), ++ PINCTRL_PIN(176, "SML0ALERTB"), ++ PINCTRL_PIN(177, "SML1CLK"), ++ PINCTRL_PIN(178, "SML1DATA"), ++ PINCTRL_PIN(179, "UART0_RXD"), ++ PINCTRL_PIN(180, "UART0_TXD"), ++ PINCTRL_PIN(181, "UART0_RTSB"), ++ PINCTRL_PIN(182, "UART0_CTSB"), ++ PINCTRL_PIN(183, "UART1_RXD"), ++ PINCTRL_PIN(184, "UART1_TXD"), ++ PINCTRL_PIN(185, "UART1_RTSB"), ++ PINCTRL_PIN(186, "UART1_CTSB"), ++ PINCTRL_PIN(187, "I2C0_SDA"), ++ PINCTRL_PIN(188, "I2C0_SCL"), ++ PINCTRL_PIN(189, "I2C1_SDA"), ++ PINCTRL_PIN(190, "I2C1_SCL"), ++ PINCTRL_PIN(191, "UART2_RXD"), ++ PINCTRL_PIN(192, "UART2_TXD"), ++ PINCTRL_PIN(193, "UART2_RTSB"), ++ PINCTRL_PIN(194, "UART2_CTSB"), ++ /* GPP_F */ ++ PINCTRL_PIN(195, "CNV_BRI_DT"), ++ PINCTRL_PIN(196, "CNV_BRI_RSP"), ++ PINCTRL_PIN(197, "CNV_RGI_DT"), ++ PINCTRL_PIN(198, "CNV_RGI_RSP"), ++ PINCTRL_PIN(199, "CNV_RF_RESET_B"), ++ PINCTRL_PIN(200, "GPPC_F_5"), ++ PINCTRL_PIN(201, "CNV_PA_BLANKING"), ++ PINCTRL_PIN(202, "GPPC_F_7"), ++ PINCTRL_PIN(203, "I2S_MCLK2_INOUT"), ++ PINCTRL_PIN(204, "BOOTMPC"), ++ PINCTRL_PIN(205, "GPPC_F_10"), ++ PINCTRL_PIN(206, "GPPC_F_11"), ++ PINCTRL_PIN(207, "GSXDOUT"), ++ PINCTRL_PIN(208, "GSXSLOAD"), ++ PINCTRL_PIN(209, "GSXDIN"), ++ PINCTRL_PIN(210, "GSXSRESETB"), ++ PINCTRL_PIN(211, "GSXCLK"), ++ PINCTRL_PIN(212, "GPPC_F_17"), ++ PINCTRL_PIN(213, "GPPC_F_18"), ++ PINCTRL_PIN(214, "SRCCLKREQB_6"), ++ PINCTRL_PIN(215, "EXT_PWR_GATEB"), ++ PINCTRL_PIN(216, "EXT_PWR_GATE2B"), ++ PINCTRL_PIN(217, "VNN_CTRL"), ++ PINCTRL_PIN(218, "V1P05_CTRL"), ++ PINCTRL_PIN(219, "GPPF_CLK_LOOPBACK"), ++ /* HVCMOS */ ++ PINCTRL_PIN(220, "L_BKLTEN"), ++ PINCTRL_PIN(221, "L_BKLTCTL"), ++ PINCTRL_PIN(222, "L_VDDEN"), ++ PINCTRL_PIN(223, "SYS_PWROK"), ++ PINCTRL_PIN(224, "SYS_RESETB"), ++ PINCTRL_PIN(225, "MLK_RSTB"), ++ /* GPP_E */ ++ PINCTRL_PIN(226, "SATAXPCIE_0"), ++ PINCTRL_PIN(227, "SPI1_IO_2"), ++ PINCTRL_PIN(228, "SPI1_IO_3"), ++ PINCTRL_PIN(229, "CPU_GP_0"), ++ PINCTRL_PIN(230, 
"SATA_DEVSLP_0"), ++ PINCTRL_PIN(231, "SATA_DEVSLP_1"), ++ PINCTRL_PIN(232, "GPPC_E_6"), ++ PINCTRL_PIN(233, "CPU_GP_1"), ++ PINCTRL_PIN(234, "SPI1_CS1B"), ++ PINCTRL_PIN(235, "USB2_OCB_0"), ++ PINCTRL_PIN(236, "SPI1_CSB"), ++ PINCTRL_PIN(237, "SPI1_CLK"), ++ PINCTRL_PIN(238, "SPI1_MISO_IO_1"), ++ PINCTRL_PIN(239, "SPI1_MOSI_IO_0"), ++ PINCTRL_PIN(240, "DDSP_HPD_A"), ++ PINCTRL_PIN(241, "ISH_GP_6"), ++ PINCTRL_PIN(242, "ISH_GP_7"), ++ PINCTRL_PIN(243, "GPPC_E_17"), ++ PINCTRL_PIN(244, "DDP1_CTRLCLK"), ++ PINCTRL_PIN(245, "DDP1_CTRLDATA"), ++ PINCTRL_PIN(246, "DDP2_CTRLCLK"), ++ PINCTRL_PIN(247, "DDP2_CTRLDATA"), ++ PINCTRL_PIN(248, "DDPA_CTRLCLK"), ++ PINCTRL_PIN(249, "DDPA_CTRLDATA"), ++ PINCTRL_PIN(250, "SPI1_CLK_LOOPBK"), ++ /* JTAG */ ++ PINCTRL_PIN(251, "JTAG_TDO"), ++ PINCTRL_PIN(252, "JTAGX"), ++ PINCTRL_PIN(253, "PRDYB"), ++ PINCTRL_PIN(254, "PREQB"), ++ PINCTRL_PIN(255, "CPU_TRSTB"), ++ PINCTRL_PIN(256, "JTAG_TDI"), ++ PINCTRL_PIN(257, "JTAG_TMS"), ++ PINCTRL_PIN(258, "JTAG_TCK"), ++ PINCTRL_PIN(259, "DBG_PMODE"), ++ /* GPP_R */ ++ PINCTRL_PIN(260, "HDA_BCLK"), ++ PINCTRL_PIN(261, "HDA_SYNC"), ++ PINCTRL_PIN(262, "HDA_SDO"), ++ PINCTRL_PIN(263, "HDA_SDI_0"), ++ PINCTRL_PIN(264, "HDA_RSTB"), ++ PINCTRL_PIN(265, "HDA_SDI_1"), ++ PINCTRL_PIN(266, "GPP_R_6"), ++ PINCTRL_PIN(267, "GPP_R_7"), ++ /* SPI */ ++ PINCTRL_PIN(268, "SPI0_IO_2"), ++ PINCTRL_PIN(269, "SPI0_IO_3"), ++ PINCTRL_PIN(270, "SPI0_MOSI_IO_0"), ++ PINCTRL_PIN(271, "SPI0_MISO_IO_1"), ++ PINCTRL_PIN(272, "SPI0_TPM_CSB"), ++ PINCTRL_PIN(273, "SPI0_FLASH_0_CSB"), ++ PINCTRL_PIN(274, "SPI0_FLASH_1_CSB"), ++ PINCTRL_PIN(275, "SPI0_CLK"), ++ PINCTRL_PIN(276, "SPI0_CLK_LOOPBK"), ++}; ++ ++static const struct intel_padgroup tgllp_community0_gpps[] = { ++ TGL_GPP(0, 0, 25), /* GPP_B */ ++ TGL_GPP(1, 26, 41), /* GPP_T */ ++ TGL_GPP(2, 42, 66), /* GPP_A */ ++}; ++ ++static const struct intel_padgroup tgllp_community1_gpps[] = { ++ TGL_GPP(0, 67, 74), /* GPP_S */ ++ TGL_GPP(1, 75, 98), /* GPP_H */ ++ 
TGL_GPP(2, 99, 119), /* GPP_D */ ++ TGL_GPP(3, 120, 143), /* GPP_U */ ++ TGL_GPP(4, 144, 170), /* vGPIO */ ++}; ++ ++static const struct intel_padgroup tgllp_community4_gpps[] = { ++ TGL_GPP(0, 171, 194), /* GPP_C */ ++ TGL_GPP(1, 195, 219), /* GPP_F */ ++ TGL_GPP(2, 220, 225), /* HVCMOS */ ++ TGL_GPP(3, 226, 250), /* GPP_E */ ++ TGL_GPP(4, 251, 259), /* JTAG */ ++}; ++ ++static const struct intel_padgroup tgllp_community5_gpps[] = { ++ TGL_GPP(0, 260, 267), /* GPP_R */ ++ TGL_GPP(1, 268, 276), /* SPI */ ++}; ++ ++static const struct intel_community tgllp_communities[] = { ++ TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps), ++ TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps), ++ TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps), ++ TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps), ++}; ++ ++static const struct intel_pinctrl_soc_data tgllp_soc_data = { ++ .pins = tgllp_pins, ++ .npins = ARRAY_SIZE(tgllp_pins), ++ .communities = tgllp_communities, ++ .ncommunities = ARRAY_SIZE(tgllp_communities), ++}; ++ ++/* Tiger Lake-H */ ++static const struct pinctrl_pin_desc tglh_pins[] = { ++ /* SPI */ ++ PINCTRL_PIN(0, "SPI0_IO_2"), ++ PINCTRL_PIN(1, "SPI0_IO_3"), ++ PINCTRL_PIN(2, "SPI0_MOSI_IO_0"), ++ PINCTRL_PIN(3, "SPI0_MISO_IO_1"), ++ PINCTRL_PIN(4, "SPI0_TPM_CSB"), ++ PINCTRL_PIN(5, "SPI0_FLASH_0_CSB"), ++ PINCTRL_PIN(6, "SPI0_FLASH_1_CSB"), ++ PINCTRL_PIN(7, "SPI0_CLK"), ++ PINCTRL_PIN(8, "SPI0_CLK_LOOPBK"), ++ /* GPP_A */ ++ PINCTRL_PIN(9, "ESPI_ALERT1B"), ++ PINCTRL_PIN(10, "ESPI_IO_0"), ++ PINCTRL_PIN(11, "ESPI_IO_1"), ++ PINCTRL_PIN(12, "ESPI_IO_2"), ++ PINCTRL_PIN(13, "ESPI_IO_3"), ++ PINCTRL_PIN(14, "ESPI_CS0B"), ++ PINCTRL_PIN(15, "ESPI_CS1B"), ++ PINCTRL_PIN(16, "ESPI_ALERT0B"), ++ PINCTRL_PIN(17, "PCIE_LNK_DOWN"), ++ PINCTRL_PIN(18, "ESPI_CLK"), ++ PINCTRL_PIN(19, "GPP_A_10"), ++ PINCTRL_PIN(20, "ISH_UART0_RTSB"), ++ PINCTRL_PIN(21, "SX_EXIT_HOLDOFFB"), ++ PINCTRL_PIN(22, "SUSWARNB_SUSPWRDNACK"), ++ PINCTRL_PIN(23, "ESPI_RESETB"), ++ PINCTRL_PIN(24, "SUSACKB"), 
++ PINCTRL_PIN(25, "CLKOUT_48"), ++ PINCTRL_PIN(26, "ISH_GP_7"), ++ PINCTRL_PIN(27, "ISH_GP_0"), ++ PINCTRL_PIN(28, "ISH_GP_1"), ++ PINCTRL_PIN(29, "ISH_GP_2"), ++ PINCTRL_PIN(30, "ISH_GP_3"), ++ PINCTRL_PIN(31, "ISH_GP_4"), ++ PINCTRL_PIN(32, "ISH_GP_5"), ++ PINCTRL_PIN(33, "ESPI_CLK_LOOPBK"), ++ /* GPP_B */ ++ PINCTRL_PIN(34, "GSPI0_CS1B"), ++ PINCTRL_PIN(35, "GSPI1_CS1B"), ++ PINCTRL_PIN(36, "VRALERTB"), ++ PINCTRL_PIN(37, "CPU_GP_2"), ++ PINCTRL_PIN(38, "CPU_GP_3"), ++ PINCTRL_PIN(39, "SRCCLKREQB_0"), ++ PINCTRL_PIN(40, "SRCCLKREQB_1"), ++ PINCTRL_PIN(41, "SRCCLKREQB_2"), ++ PINCTRL_PIN(42, "SRCCLKREQB_3"), ++ PINCTRL_PIN(43, "SRCCLKREQB_4"), ++ PINCTRL_PIN(44, "SRCCLKREQB_5"), ++ PINCTRL_PIN(45, "I2S_MCLK"), ++ PINCTRL_PIN(46, "SLP_S0B"), ++ PINCTRL_PIN(47, "PLTRSTB"), ++ PINCTRL_PIN(48, "SPKR"), ++ PINCTRL_PIN(49, "GSPI0_CS0B"), ++ PINCTRL_PIN(50, "GSPI0_CLK"), ++ PINCTRL_PIN(51, "GSPI0_MISO"), ++ PINCTRL_PIN(52, "GSPI0_MOSI"), ++ PINCTRL_PIN(53, "GSPI1_CS0B"), ++ PINCTRL_PIN(54, "GSPI1_CLK"), ++ PINCTRL_PIN(55, "GSPI1_MISO"), ++ PINCTRL_PIN(56, "GSPI1_MOSI"), ++ PINCTRL_PIN(57, "SML1ALERTB"), ++ PINCTRL_PIN(58, "GSPI0_CLK_LOOPBK"), ++ PINCTRL_PIN(59, "GSPI1_CLK_LOOPBK"), ++ /* vGPIO_0 */ ++ PINCTRL_PIN(60, "ESPI_USB_OCB_0"), ++ PINCTRL_PIN(61, "ESPI_USB_OCB_1"), ++ PINCTRL_PIN(62, "ESPI_USB_OCB_2"), ++ PINCTRL_PIN(63, "ESPI_USB_OCB_3"), ++ PINCTRL_PIN(64, "USB_CPU_OCB_0"), ++ PINCTRL_PIN(65, "USB_CPU_OCB_1"), ++ PINCTRL_PIN(66, "USB_CPU_OCB_2"), ++ PINCTRL_PIN(67, "USB_CPU_OCB_3"), ++ /* GPP_C */ ++ PINCTRL_PIN(68, "SMBCLK"), ++ PINCTRL_PIN(69, "SMBDATA"), ++ PINCTRL_PIN(70, "SMBALERTB"), ++ PINCTRL_PIN(71, "ISH_UART0_RXD"), ++ PINCTRL_PIN(72, "ISH_UART0_TXD"), ++ PINCTRL_PIN(73, "SML0ALERTB"), ++ PINCTRL_PIN(74, "ISH_I2C2_SDA"), ++ PINCTRL_PIN(75, "ISH_I2C2_SCL"), ++ PINCTRL_PIN(76, "UART0_RXD"), ++ PINCTRL_PIN(77, "UART0_TXD"), ++ PINCTRL_PIN(78, "UART0_RTSB"), ++ PINCTRL_PIN(79, "UART0_CTSB"), ++ PINCTRL_PIN(80, "UART1_RXD"), ++ PINCTRL_PIN(81, 
"UART1_TXD"), ++ PINCTRL_PIN(82, "UART1_RTSB"), ++ PINCTRL_PIN(83, "UART1_CTSB"), ++ PINCTRL_PIN(84, "I2C0_SDA"), ++ PINCTRL_PIN(85, "I2C0_SCL"), ++ PINCTRL_PIN(86, "I2C1_SDA"), ++ PINCTRL_PIN(87, "I2C1_SCL"), ++ PINCTRL_PIN(88, "UART2_RXD"), ++ PINCTRL_PIN(89, "UART2_TXD"), ++ PINCTRL_PIN(90, "UART2_RTSB"), ++ PINCTRL_PIN(91, "UART2_CTSB"), ++ /* GPP_D */ ++ PINCTRL_PIN(92, "SPI1_CSB"), ++ PINCTRL_PIN(93, "SPI1_CLK"), ++ PINCTRL_PIN(94, "SPI1_MISO_IO_1"), ++ PINCTRL_PIN(95, "SPI1_MOSI_IO_0"), ++ PINCTRL_PIN(96, "SML1CLK"), ++ PINCTRL_PIN(97, "I2S2_SFRM"), ++ PINCTRL_PIN(98, "I2S2_TXD"), ++ PINCTRL_PIN(99, "I2S2_RXD"), ++ PINCTRL_PIN(100, "I2S2_SCLK"), ++ PINCTRL_PIN(101, "SML0CLK"), ++ PINCTRL_PIN(102, "SML0DATA"), ++ PINCTRL_PIN(103, "I2S1_SCLK"), ++ PINCTRL_PIN(104, "ISH_UART0_CTSB"), ++ PINCTRL_PIN(105, "SPI1_IO_2"), ++ PINCTRL_PIN(106, "SPI1_IO_3"), ++ PINCTRL_PIN(107, "SML1DATA"), ++ PINCTRL_PIN(108, "GSPI3_CS0B"), ++ PINCTRL_PIN(109, "GSPI3_CLK"), ++ PINCTRL_PIN(110, "GSPI3_MISO"), ++ PINCTRL_PIN(111, "GSPI3_MOSI"), ++ PINCTRL_PIN(112, "UART3_RXD"), ++ PINCTRL_PIN(113, "UART3_TXD"), ++ PINCTRL_PIN(114, "UART3_RTSB"), ++ PINCTRL_PIN(115, "UART3_CTSB"), ++ PINCTRL_PIN(116, "GSPI2_CLK_LOOPBK"), ++ PINCTRL_PIN(117, "SPI1_CLK_LOOPBK"), ++ /* GPP_R */ ++ PINCTRL_PIN(118, "HDA_BCLK"), ++ PINCTRL_PIN(119, "HDA_SYNC"), ++ PINCTRL_PIN(120, "HDA_SDO"), ++ PINCTRL_PIN(121, "HDA_SDI_0"), ++ PINCTRL_PIN(122, "HDA_RSTB"), ++ PINCTRL_PIN(123, "HDA_SDI_1"), ++ PINCTRL_PIN(124, "GPP_R_6"), ++ PINCTRL_PIN(125, "GPP_R_7"), ++ /* GPP_S */ ++ PINCTRL_PIN(126, "SNDW1_CLK"), ++ PINCTRL_PIN(127, "SNDW1_DATA"), ++ PINCTRL_PIN(128, "SNDW2_CLK"), ++ PINCTRL_PIN(129, "SNDW2_DATA"), ++ PINCTRL_PIN(130, "SNDW3_CLK"), ++ PINCTRL_PIN(131, "SNDW3_DATA"), ++ PINCTRL_PIN(132, "SNDW4_CLK"), ++ PINCTRL_PIN(133, "SNDW4_DATA"), ++ /* GPP_G */ ++ PINCTRL_PIN(134, "DDPA_CTRLCLK"), ++ PINCTRL_PIN(135, "DDPA_CTRLDATA"), ++ PINCTRL_PIN(136, "DNX_FORCE_RELOAD"), ++ PINCTRL_PIN(137, "GPPC_G_3"), ++ 
PINCTRL_PIN(138, "GPPC_G_4"), ++ PINCTRL_PIN(139, "GPPC_G_5"), ++ PINCTRL_PIN(140, "GPPC_G_6"), ++ PINCTRL_PIN(141, "GPPC_G_7"), ++ PINCTRL_PIN(142, "ISH_SPI_CSB"), ++ PINCTRL_PIN(143, "ISH_SPI_CLK"), ++ PINCTRL_PIN(144, "ISH_SPI_MISO"), ++ PINCTRL_PIN(145, "ISH_SPI_MOSI"), ++ PINCTRL_PIN(146, "DDP1_CTRLCLK"), ++ PINCTRL_PIN(147, "DDP1_CTRLDATA"), ++ PINCTRL_PIN(148, "DDP2_CTRLCLK"), ++ PINCTRL_PIN(149, "DDP2_CTRLDATA"), ++ /* vGPIO */ ++ PINCTRL_PIN(150, "CNV_BTEN"), ++ PINCTRL_PIN(151, "CNV_BT_HOST_WAKEB"), ++ PINCTRL_PIN(152, "CNV_BT_IF_SELECT"), ++ PINCTRL_PIN(153, "vCNV_BT_UART_TXD"), ++ PINCTRL_PIN(154, "vCNV_BT_UART_RXD"), ++ PINCTRL_PIN(155, "vCNV_BT_UART_CTS_B"), ++ PINCTRL_PIN(156, "vCNV_BT_UART_RTS_B"), ++ PINCTRL_PIN(157, "vCNV_MFUART1_TXD"), ++ PINCTRL_PIN(158, "vCNV_MFUART1_RXD"), ++ PINCTRL_PIN(159, "vCNV_MFUART1_CTS_B"), ++ PINCTRL_PIN(160, "vCNV_MFUART1_RTS_B"), ++ PINCTRL_PIN(161, "vUART0_TXD"), ++ PINCTRL_PIN(162, "vUART0_RXD"), ++ PINCTRL_PIN(163, "vUART0_CTS_B"), ++ PINCTRL_PIN(164, "vUART0_RTS_B"), ++ PINCTRL_PIN(165, "vISH_UART0_TXD"), ++ PINCTRL_PIN(166, "vISH_UART0_RXD"), ++ PINCTRL_PIN(167, "vISH_UART0_CTS_B"), ++ PINCTRL_PIN(168, "vISH_UART0_RTS_B"), ++ PINCTRL_PIN(169, "vCNV_BT_I2S_BCLK"), ++ PINCTRL_PIN(170, "vCNV_BT_I2S_WS_SYNC"), ++ PINCTRL_PIN(171, "vCNV_BT_I2S_SDO"), ++ PINCTRL_PIN(172, "vCNV_BT_I2S_SDI"), ++ PINCTRL_PIN(173, "vI2S2_SCLK"), ++ PINCTRL_PIN(174, "vI2S2_SFRM"), ++ PINCTRL_PIN(175, "vI2S2_TXD"), ++ PINCTRL_PIN(176, "vI2S2_RXD"), ++ /* GPP_E */ ++ PINCTRL_PIN(177, "SATAXPCIE_0"), ++ PINCTRL_PIN(178, "SATAXPCIE_1"), ++ PINCTRL_PIN(179, "SATAXPCIE_2"), ++ PINCTRL_PIN(180, "CPU_GP_0"), ++ PINCTRL_PIN(181, "SATA_DEVSLP_0"), ++ PINCTRL_PIN(182, "SATA_DEVSLP_1"), ++ PINCTRL_PIN(183, "SATA_DEVSLP_2"), ++ PINCTRL_PIN(184, "CPU_GP_1"), ++ PINCTRL_PIN(185, "SATA_LEDB"), ++ PINCTRL_PIN(186, "USB2_OCB_0"), ++ PINCTRL_PIN(187, "USB2_OCB_1"), ++ PINCTRL_PIN(188, "USB2_OCB_2"), ++ PINCTRL_PIN(189, "USB2_OCB_3"), ++ /* GPP_F */ ++ 
PINCTRL_PIN(190, "SATAXPCIE_3"), ++ PINCTRL_PIN(191, "SATAXPCIE_4"), ++ PINCTRL_PIN(192, "SATAXPCIE_5"), ++ PINCTRL_PIN(193, "SATAXPCIE_6"), ++ PINCTRL_PIN(194, "SATAXPCIE_7"), ++ PINCTRL_PIN(195, "SATA_DEVSLP_3"), ++ PINCTRL_PIN(196, "SATA_DEVSLP_4"), ++ PINCTRL_PIN(197, "SATA_DEVSLP_5"), ++ PINCTRL_PIN(198, "SATA_DEVSLP_6"), ++ PINCTRL_PIN(199, "SATA_DEVSLP_7"), ++ PINCTRL_PIN(200, "SATA_SCLOCK"), ++ PINCTRL_PIN(201, "SATA_SLOAD"), ++ PINCTRL_PIN(202, "SATA_SDATAOUT1"), ++ PINCTRL_PIN(203, "SATA_SDATAOUT0"), ++ PINCTRL_PIN(204, "PS_ONB"), ++ PINCTRL_PIN(205, "M2_SKT2_CFG_0"), ++ PINCTRL_PIN(206, "M2_SKT2_CFG_1"), ++ PINCTRL_PIN(207, "M2_SKT2_CFG_2"), ++ PINCTRL_PIN(208, "M2_SKT2_CFG_3"), ++ PINCTRL_PIN(209, "L_VDDEN"), ++ PINCTRL_PIN(210, "L_BKLTEN"), ++ PINCTRL_PIN(211, "L_BKLTCTL"), ++ PINCTRL_PIN(212, "VNN_CTRL"), ++ PINCTRL_PIN(213, "GPP_F_23"), ++ /* GPP_H */ ++ PINCTRL_PIN(214, "SRCCLKREQB_6"), ++ PINCTRL_PIN(215, "SRCCLKREQB_7"), ++ PINCTRL_PIN(216, "SRCCLKREQB_8"), ++ PINCTRL_PIN(217, "SRCCLKREQB_9"), ++ PINCTRL_PIN(218, "SRCCLKREQB_10"), ++ PINCTRL_PIN(219, "SRCCLKREQB_11"), ++ PINCTRL_PIN(220, "SRCCLKREQB_12"), ++ PINCTRL_PIN(221, "SRCCLKREQB_13"), ++ PINCTRL_PIN(222, "SRCCLKREQB_14"), ++ PINCTRL_PIN(223, "SRCCLKREQB_15"), ++ PINCTRL_PIN(224, "SML2CLK"), ++ PINCTRL_PIN(225, "SML2DATA"), ++ PINCTRL_PIN(226, "SML2ALERTB"), ++ PINCTRL_PIN(227, "SML3CLK"), ++ PINCTRL_PIN(228, "SML3DATA"), ++ PINCTRL_PIN(229, "SML3ALERTB"), ++ PINCTRL_PIN(230, "SML4CLK"), ++ PINCTRL_PIN(231, "SML4DATA"), ++ PINCTRL_PIN(232, "SML4ALERTB"), ++ PINCTRL_PIN(233, "ISH_I2C0_SDA"), ++ PINCTRL_PIN(234, "ISH_I2C0_SCL"), ++ PINCTRL_PIN(235, "ISH_I2C1_SDA"), ++ PINCTRL_PIN(236, "ISH_I2C1_SCL"), ++ PINCTRL_PIN(237, "TIME_SYNC_0"), ++ /* GPP_K */ ++ PINCTRL_PIN(238, "GSXDOUT"), ++ PINCTRL_PIN(239, "GSXSLOAD"), ++ PINCTRL_PIN(240, "GSXDIN"), ++ PINCTRL_PIN(241, "GSXSRESETB"), ++ PINCTRL_PIN(242, "GSXCLK"), ++ PINCTRL_PIN(243, "ADR_COMPLETE"), ++ PINCTRL_PIN(244, "DDSP_HPD_A"), ++ 
PINCTRL_PIN(245, "DDSP_HPD_B"), ++ PINCTRL_PIN(246, "CORE_VID_0"), ++ PINCTRL_PIN(247, "CORE_VID_1"), ++ PINCTRL_PIN(248, "DDSP_HPD_C"), ++ PINCTRL_PIN(249, "GPP_K_11"), ++ /* GPP_J */ ++ PINCTRL_PIN(250, "CNV_PA_BLANKING"), ++ PINCTRL_PIN(251, "CPU_C10_GATEB"), ++ PINCTRL_PIN(252, "CNV_BRI_DT"), ++ PINCTRL_PIN(253, "CNV_BRI_RSP"), ++ PINCTRL_PIN(254, "CNV_RGI_DT"), ++ PINCTRL_PIN(255, "CNV_RGI_RSP"), ++ PINCTRL_PIN(256, "CNV_MFUART2_RXD"), ++ PINCTRL_PIN(257, "CNV_MFUART2_TXD"), ++ PINCTRL_PIN(258, "GPP_J_8"), ++ PINCTRL_PIN(259, "GPP_J_9"), ++ /* GPP_I */ ++ PINCTRL_PIN(260, "PMCALERTB"), ++ PINCTRL_PIN(261, "DDSP_HPD_1"), ++ PINCTRL_PIN(262, "DDSP_HPD_2"), ++ PINCTRL_PIN(263, "DDSP_HPD_3"), ++ PINCTRL_PIN(264, "DDSP_HPD_4"), ++ PINCTRL_PIN(265, "DDPB_CTRLCLK"), ++ PINCTRL_PIN(266, "DDPB_CTRLDATA"), ++ PINCTRL_PIN(267, "DDPC_CTRLCLK"), ++ PINCTRL_PIN(268, "DDPC_CTRLDATA"), ++ PINCTRL_PIN(269, "FUSA_DIAGTEST_EN"), ++ PINCTRL_PIN(270, "FUSA_DIAGTEST_MODE"), ++ PINCTRL_PIN(271, "USB2_OCB_4"), ++ PINCTRL_PIN(272, "USB2_OCB_5"), ++ PINCTRL_PIN(273, "USB2_OCB_6"), ++ PINCTRL_PIN(274, "USB2_OCB_7"), ++ PINCTRL_PIN(275, "SYS_PWROK"), ++ PINCTRL_PIN(276, "SYS_RESETB"), ++ PINCTRL_PIN(277, "MLK_RSTB"), ++ /* JTAG */ ++ PINCTRL_PIN(278, "JTAG_TDO"), ++ PINCTRL_PIN(279, "JTAGX"), ++ PINCTRL_PIN(280, "PRDYB"), ++ PINCTRL_PIN(281, "PREQB"), ++ PINCTRL_PIN(282, "CPU_TRSTB"), ++ PINCTRL_PIN(283, "JTAG_TDI"), ++ PINCTRL_PIN(284, "JTAG_TMS"), ++ PINCTRL_PIN(285, "JTAG_TCK"), ++ PINCTRL_PIN(286, "DBG_PMODE"), ++}; ++ ++static const struct intel_padgroup tglh_community0_gpps[] = { ++ TGL_GPP(0, 0, 8), /* SPI */ ++ TGL_GPP(1, 9, 33), /* GPP_A */ ++ TGL_GPP(2, 34, 59), /* GPP_B */ ++ TGL_GPP(3, 60, 67), /* vGPIO_0 */ ++}; ++ ++static const struct intel_padgroup tglh_community1_gpps[] = { ++ TGL_GPP(0, 68, 91), /* GPP_C */ ++ TGL_GPP(1, 92, 117), /* GPP_D */ ++ TGL_GPP(2, 118, 125), /* GPP_R */ ++ TGL_GPP(3, 126, 133), /* GPP_S */ ++ TGL_GPP(4, 134, 149), /* GPP_G */ ++ TGL_GPP(5, 150, 
176), /* vGPIO */ ++}; ++ ++static const struct intel_padgroup tglh_community3_gpps[] = { ++ TGL_GPP(0, 177, 189), /* GPP_E */ ++ TGL_GPP(1, 190, 213), /* GPP_F */ ++}; ++ ++static const struct intel_padgroup tglh_community4_gpps[] = { ++ TGL_GPP(0, 214, 237), /* GPP_H */ ++ TGL_GPP(1, 238, 249), /* GPP_K */ ++ TGL_GPP(2, 250, 259), /* GPP_J */ ++}; ++ ++static const struct intel_padgroup tglh_community5_gpps[] = { ++ TGL_GPP(0, 260, 277), /* GPP_I */ ++ TGL_GPP(1, 278, 286), /* JTAG */ ++}; ++ ++static const struct intel_community tglh_communities[] = { ++ TGL_COMMUNITY(0, 0, 67, tglh_community0_gpps), ++ TGL_COMMUNITY(1, 68, 176, tglh_community1_gpps), ++ TGL_COMMUNITY(2, 177, 213, tglh_community3_gpps), ++ TGL_COMMUNITY(3, 214, 259, tglh_community4_gpps), ++ TGL_COMMUNITY(4, 260, 286, tglh_community5_gpps), ++}; ++ ++static const struct intel_pinctrl_soc_data tglh_soc_data = { ++ .pins = tglh_pins, ++ .npins = ARRAY_SIZE(tglh_pins), ++ .communities = tglh_communities, ++ .ncommunities = ARRAY_SIZE(tglh_communities), ++}; ++ ++/* Tiger Lake-K */ ++static const struct pinctrl_pin_desc tglk_pins[] = { ++ /* GPP_A */ ++ PINCTRL_PIN(0, "ESPI_IO_0"), ++ PINCTRL_PIN(1, "ESPI_IO_1"), ++ PINCTRL_PIN(2, "ESPI_IO_2"), ++ PINCTRL_PIN(3, "ESPI_IO_3"), ++ PINCTRL_PIN(4, "ESPI_CSB"), ++ PINCTRL_PIN(5, "ESPI_CLK"), ++ PINCTRL_PIN(6, "ESPI_RESETB"), ++ PINCTRL_PIN(7, "SMBCLK"), ++ PINCTRL_PIN(8, "SMBDATA"), ++ PINCTRL_PIN(9, "SMBALERTB"), ++ PINCTRL_PIN(10, "CPU_GP_0"), ++ PINCTRL_PIN(11, "CPU_GP_1"), ++ PINCTRL_PIN(12, "TIME_SYNC_0"), ++ PINCTRL_PIN(13, "TIME_SYNC_1"), ++ PINCTRL_PIN(14, "SML0CLK"), ++ PINCTRL_PIN(15, "SML0DATA"), ++ PINCTRL_PIN(16, "CPU_C10_GATEB"), ++ PINCTRL_PIN(17, "DDSP_HPD_A"), ++ PINCTRL_PIN(18, "L_BKLTEN"), ++ PINCTRL_PIN(19, "L_BKLTCTL"), ++ PINCTRL_PIN(20, "L_VDDEN"), ++ PINCTRL_PIN(21, "SYS_PWROK"), ++ PINCTRL_PIN(22, "SYS_RESETB"), ++ PINCTRL_PIN(23, "ESPI_CLK_LOOPBK"), ++ /* GPP_B */ ++ PINCTRL_PIN(24, "CORE_VID_0"), ++ PINCTRL_PIN(25, 
"CORE_VID_1"), ++ PINCTRL_PIN(26, "VRALERTB"), ++ PINCTRL_PIN(27, "CPU_GP_2"), ++ PINCTRL_PIN(28, "CPU_GP_3"), ++ PINCTRL_PIN(29, "SRCCLKREQB_0"), ++ PINCTRL_PIN(30, "SRCCLKREQB_1"), ++ PINCTRL_PIN(31, "SRCCLKREQB_2"), ++ PINCTRL_PIN(32, "SRCCLKREQB_3"), ++ PINCTRL_PIN(33, "SRCCLKREQB_4"), ++ PINCTRL_PIN(34, "SRCCLKREQB_5"), ++ PINCTRL_PIN(35, "PMCALERTB"), ++ PINCTRL_PIN(36, "SLP_S0B"), ++ PINCTRL_PIN(37, "PLTRSTB"), ++ PINCTRL_PIN(38, "UART0_RTSB"), ++ PINCTRL_PIN(39, "UART0_RXD"), ++ PINCTRL_PIN(40, "UART0_TXD"), ++ PINCTRL_PIN(41, "UART0_CTSB"), ++ PINCTRL_PIN(42, "UART2_RXD"), ++ PINCTRL_PIN(43, "UART2_TXD"), ++ PINCTRL_PIN(44, "USB2_OCB_0"), ++ PINCTRL_PIN(45, "USB2_OCB_1"), ++ PINCTRL_PIN(46, "GPP_B_22"), ++ PINCTRL_PIN(47, "GPP_B_23"), ++ /* GPP_C */ ++ PINCTRL_PIN(48, "GPP_C_0"), ++ PINCTRL_PIN(49, "GPP_C_1"), ++ PINCTRL_PIN(50, "GPP_C_2"), ++ PINCTRL_PIN(51, "GPP_C_3"), ++ PINCTRL_PIN(52, "GPP_C_4"), ++ PINCTRL_PIN(53, "GPP_C_5"), ++ PINCTRL_PIN(54, "GPP_C_6"), ++ PINCTRL_PIN(55, "GPP_C_7"), ++ PINCTRL_PIN(56, "GPP_C_8"), ++ PINCTRL_PIN(57, "TBT_LSX0_A"), ++ PINCTRL_PIN(58, "TBT_LSX0_B"), ++ PINCTRL_PIN(59, "TBT_LSX1_A"), ++ PINCTRL_PIN(60, "TBT_LSX1_B"), ++ PINCTRL_PIN(61, "TBT_LSX2_A"), ++ PINCTRL_PIN(62, "TBT_LSX2_B"), ++ PINCTRL_PIN(63, "TBT_LSX3_A"), ++ PINCTRL_PIN(64, "TBT_LSX3_B"), ++ PINCTRL_PIN(65, "TBT_LSX4_A"), ++ PINCTRL_PIN(66, "TBT_LSX4_B"), ++ PINCTRL_PIN(67, "TBT_LSX5_A"), ++ PINCTRL_PIN(68, "TBT_LSX5_B"), ++ PINCTRL_PIN(69, "PCIE_LNK_DOWN"), ++ PINCTRL_PIN(70, "PCHHOTB"), ++ PINCTRL_PIN(71, "DNX_FORCE_RELOAD"), ++ /* GPP_D */ ++ PINCTRL_PIN(72, "GPP_D_0"), ++ PINCTRL_PIN(73, "GPP_D_1"), ++ PINCTRL_PIN(74, "GPP_D_2"), ++ PINCTRL_PIN(75, "GPP_D_3"), ++ PINCTRL_PIN(76, "GPP_D_4"), ++ PINCTRL_PIN(77, "GPP_D_5"), ++ PINCTRL_PIN(78, "GPP_D_6"), ++ PINCTRL_PIN(79, "GPP_D_7"), ++ PINCTRL_PIN(80, "GPP_D_8"), ++ PINCTRL_PIN(81, "GPP_D_9"), ++ PINCTRL_PIN(82, "GPP_D_10"), ++ PINCTRL_PIN(83, "GPP_D_11"), ++ PINCTRL_PIN(84, "GPP_D_12"), ++ 
PINCTRL_PIN(85, "GPP_D_13"), ++ PINCTRL_PIN(86, "GPP_D_14"), ++ PINCTRL_PIN(87, "GPP_D_15"), ++ PINCTRL_PIN(88, "GPP_D_16"), ++ PINCTRL_PIN(89, "GPP_D_17"), ++ PINCTRL_PIN(90, "GPP_D_18"), ++ PINCTRL_PIN(91, "GPP_D_19"), ++ PINCTRL_PIN(92, "EXT_PWR_GATEB"), ++ PINCTRL_PIN(93, "EXT_PWR_GATE2B"), ++ PINCTRL_PIN(94, "VNN_CTRL"), ++ PINCTRL_PIN(95, "V1P05_CTRL"), ++ /* JTAG */ ++ PINCTRL_PIN(96, "JTAG_TDO"), ++ PINCTRL_PIN(97, "JTAGX"), ++ PINCTRL_PIN(98, "PRDYB"), ++ PINCTRL_PIN(99, "PREQB"), ++ PINCTRL_PIN(100, "CPU_TRSTB"), ++ PINCTRL_PIN(101, "JTAG_TDI"), ++ PINCTRL_PIN(102, "JTAG_TMS"), ++ PINCTRL_PIN(103, "JTAG_TCK"), ++ PINCTRL_PIN(104, "DBG_PMODE"), ++}; ++ ++static const struct intel_padgroup tglk_community0_gpps[] = { ++ TGL_GPP(0, 0, 23), /* GPP_A */ ++ TGL_GPP(1, 24, 47), /* GPP_B */ ++}; ++ ++static const struct intel_padgroup tglk_community1_gpps[] = { ++ TGL_GPP(0, 48, 71), /* GPP_C */ ++ TGL_GPP(1, 72, 95), /* GPP_D */ ++}; ++ ++static const struct intel_padgroup tglk_community4_gpps[] = { ++ TGL_GPP(0, 96, 104), /* JTAG */ ++}; ++ ++static const struct intel_community tglk_communities[] = { ++ TGL_COMMUNITY(0, 0, 47, tglk_community0_gpps), ++ TGL_COMMUNITY(1, 48, 95, tglk_community1_gpps), ++ TGL_COMMUNITY(2, 96, 104, tglk_community4_gpps), ++}; ++ ++static const struct intel_pinctrl_soc_data tglk_soc_data = { ++ .pins = tglk_pins, ++ .npins = ARRAY_SIZE(tglk_pins), ++ .communities = tglk_communities, ++ .ncommunities = ARRAY_SIZE(tglk_communities), ++}; ++ ++static const struct acpi_device_id tgl_pinctrl_acpi_match[] = { ++ { "INT34C5", (kernel_ulong_t)&tgllp_soc_data }, ++ { "INT34C6", (kernel_ulong_t)&tglh_soc_data }, ++ { "INT34C7", (kernel_ulong_t)&tglk_soc_data }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match); ++ ++static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops); ++ ++static struct platform_driver tgl_pinctrl_driver = { ++ .probe = intel_pinctrl_probe_by_hid, ++ .driver = { ++ .name = "tigerlake-pinctrl", ++ .acpi_match_table = 
tgl_pinctrl_acpi_match, ++ .pm = &tgl_pinctrl_pm_ops, ++ }, ++}; ++ ++module_platform_driver(tgl_pinctrl_driver); ++ ++MODULE_AUTHOR("Andy Shevchenko "); ++MODULE_AUTHOR("Mika Westerberg "); ++MODULE_DESCRIPTION("Intel Tiger Lake PCH pinctrl/GPIO driver"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0004-platform-x86-Fix-SEP-driver-license-header-So.sep-socwatch b/patches/0004-platform-x86-Fix-SEP-driver-license-header-So.sep-socwatch new file mode 100644 index 0000000000..74004b1278 --- /dev/null +++ b/patches/0004-platform-x86-Fix-SEP-driver-license-header-So.sep-socwatch @@ -0,0 +1,1768 @@ +From 68f8fe68cce17a6055f734ca4ac81aae8a47ec94 Mon Sep 17 00:00:00 2001 +From: Jon Moeller +Date: Mon, 26 Nov 2018 15:10:46 -0600 +Subject: [PATCH 04/27] platform/x86: Fix SEP driver license header, SoCWatch + warnings + +Fix wrong license header on SEP lwpmudrv header files. +Fix a few warnings in SoCWatch driver when built with CONFIG_TRACEPOINTS + or CCONFIG_X86_LOCAL_APIC disabled. 
+Change inline comments to C-style in sw_trace_notifier_provider.c + +Signed-off-by: Jon Moeller +--- + .../x86/sepdk/include/error_reporting_utils.h | 35 +- + .../x86/sepdk/include/lwpmudrv_chipset.h | 34 +- + .../x86/sepdk/include/lwpmudrv_defines.h | 34 +- + .../platform/x86/sepdk/include/lwpmudrv_ecb.h | 35 +- + .../platform/x86/sepdk/include/lwpmudrv_gfx.h | 34 +- + .../x86/sepdk/include/lwpmudrv_ioctl.h | 34 +- + .../platform/x86/sepdk/include/lwpmudrv_pwr.h | 34 +- + .../x86/sepdk/include/lwpmudrv_struct.h | 34 +- + .../x86/sepdk/include/lwpmudrv_types.h | 34 +- + .../x86/sepdk/include/lwpmudrv_version.h | 35 +- + .../platform/x86/sepdk/include/pax_shared.h | 34 +- + .../platform/x86/sepdk/include/rise_errors.h | 34 +- + .../x86/socwatch/sw_trace_notifier_provider.c | 369 ++++++++++++------ + 13 files changed, 526 insertions(+), 254 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/include/error_reporting_utils.h b/drivers/platform/x86/sepdk/include/error_reporting_utils.h +index c1e90c441cc1..9df1bf3380cd 100644 +--- a/drivers/platform/x86/sepdk/include/error_reporting_utils.h ++++ b/drivers/platform/x86/sepdk/include/error_reporting_utils.h +@@ -1,14 +1,27 @@ +- +-/*** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2002-2018 Intel Corporation. All Rights Reserved. +- * ------------------------------------------------------------------------- +- ***/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ + + #ifndef __ERROR_REPORTING_UTILS_H__ + #define __ERROR_REPORTING_UTILS_H__ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h b/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h +index 82531312af75..755d2799bcd5 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_chipset.h +@@ -1,13 +1,27 @@ +-/*** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2007-2018 Intel Corporation. 
All Rights Reserved. +- * ------------------------------------------------------------------------- +- ***/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ + + #ifndef _LWPMUDRV_CHIPSET_UTILS_H_ + #define _LWPMUDRV_CHIPSET_UTILS_H_ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h +index d6889982ada0..aeee9516bef2 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h +@@ -1,13 +1,27 @@ +-/*** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. +- * ------------------------------------------------------------------------- +- ***/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ + + #ifndef _LWPMUDRV_DEFINES_H_ + #define _LWPMUDRV_DEFINES_H_ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h b/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h +index 792ae65191b3..a8b5fced897d 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h +@@ -1,14 +1,27 @@ +- +-/*** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. +- * ------------------------------------------------------------------------- +- ***/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. 
++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ + + #ifndef _LWPMUDRV_ECB_UTILS_H_ + #define _LWPMUDRV_ECB_UTILS_H_ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h b/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h +index fe6583e2c44c..3d8cf5290f72 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h +@@ -1,13 +1,27 @@ +-/*** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2011-2018 Intel Corporation. All Rights Reserved. +- * ------------------------------------------------------------------------- +- ***/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ + + #ifndef _LWPMUDRV_GFX_H_ + #define _LWPMUDRV_GFX_H_ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h +index a8d32466a4bd..3b60274826c6 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h +@@ -1,13 +1,27 @@ +-/**** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. 
+- * ------------------------------------------------------------------------- +- ****/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** ++ */ + + #ifndef _LWPMUDRV_IOCTL_H_ + #define _LWPMUDRV_IOCTL_H_ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h b/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h +index e26a478a9bb1..d67f5ea1ffa4 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_pwr.h +@@ -1,13 +1,27 @@ +-/**** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2011-2018 Intel Corporation. All Rights Reserved. +- * ------------------------------------------------------------------------- +-****/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ + + #ifndef _LWPMUDRV_PWR_H_ + #define _LWPMUDRV_PWR_H_ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +index c76ef5fa0e67..82819e5e11b7 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +@@ -1,13 +1,27 @@ +-/*** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. +- * ------------------------------------------------------------------------- +-***/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. 
++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ + + #ifndef _LWPMUDRV_STRUCT_UTILS_H_ + #define _LWPMUDRV_STRUCT_UTILS_H_ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_types.h b/drivers/platform/x86/sepdk/include/lwpmudrv_types.h +index 7fe842eee890..05574ada85ec 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_types.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_types.h +@@ -1,13 +1,27 @@ +-/*** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2007-2018 Intel Corporation. All Rights Reserved. +- * ------------------------------------------------------------------------- +- ***/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. 
++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ + + #ifndef _LWPMUDRV_TYPES_H_ + #define _LWPMUDRV_TYPES_H_ +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_version.h b/drivers/platform/x86/sepdk/include/lwpmudrv_version.h +index a2cbedd44573..364fcc38048b 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_version.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_version.h +@@ -1,15 +1,26 @@ +-/**** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2010-2018 Intel Corporation. All Rights Reserved. 
+- * ------------------------------------------------------------------------- +-****/ +-/* +- * File : lwpmudrv_version.h ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. 
++ * **************************************************************************** + */ + + #ifndef _LWPMUDRV_VERSION_H_ +diff --git a/drivers/platform/x86/sepdk/include/pax_shared.h b/drivers/platform/x86/sepdk/include/pax_shared.h +index a706232c9b4a..6f35197a51fc 100644 +--- a/drivers/platform/x86/sepdk/include/pax_shared.h ++++ b/drivers/platform/x86/sepdk/include/pax_shared.h +@@ -1,13 +1,27 @@ +-/**** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. +- * ------------------------------------------------------------------------- +-****/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. 
Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ + + /* + * +diff --git a/drivers/platform/x86/sepdk/include/rise_errors.h b/drivers/platform/x86/sepdk/include/rise_errors.h +index 29fb278def7d..7db811e855d6 100644 +--- a/drivers/platform/x86/sepdk/include/rise_errors.h ++++ b/drivers/platform/x86/sepdk/include/rise_errors.h +@@ -1,13 +1,27 @@ +-/*** +- * ------------------------------------------------------------------------- +- * INTEL CORPORATION PROPRIETARY INFORMATION +- * This software is supplied under the terms of the accompanying license +- * agreement or nondisclosure agreement with Intel Corporation and may not +- * be copied or disclosed except in accordance with the terms of that +- * agreement. +- * Copyright(C) 2004-2018 Intel Corporation. All Rights Reserved. +- * ------------------------------------------------------------------------- +-***/ ++/* **************************************************************************** ++ * Copyright(C) 2009-2018 Intel Corporation. All Rights Reserved. ++ * ++ * This file is part of SEP Development Kit ++ * ++ * SEP Development Kit is free software; you can redistribute it ++ * and/or modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * SEP Development Kit is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ * GNU General Public License for more details. ++ * ++ * As a special exception, you may use this file as part of a free software ++ * library without restriction. Specifically, if other files instantiate ++ * templates or use macros or inline functions from this file, or you ++ * compile this file and link it with other files to produce an executable ++ * this file does not by itself cause the resulting executable to be ++ * covered by the GNU General Public License. This exception does not ++ * however invalidate any other reasons why the executable file might be ++ * covered by the GNU General Public License. ++ * **************************************************************************** ++ */ + + #ifndef _RISE_ERRORS_H_ + #define _RISE_ERRORS_H_ +diff --git a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +index a20e0566cfca..d6ff34b65638 100644 +--- a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c ++++ b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +@@ -53,7 +53,7 @@ + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + */ +-#include // "LINUX_VERSION_CODE" ++#include /* "LINUX_VERSION_CODE" */ + #include + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) + #include +@@ -69,14 +69,16 @@ + #include + #include + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) +-#include // for the various APIC vector tracepoints (e.g. "thermal_apic", "local_timer" etc.) +-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) +-struct pool_workqueue; // Forward declaration to avoid compiler warnings +-struct cpu_workqueue_struct; // Forward declaration to avoid compiler warnings ++#include /* for the various APIC vector tracepoints ++ * (e.g. "thermal_apic", ++ * "local_timer" etc.) 
*/ ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ ++struct pool_workqueue; ++struct cpu_workqueue_struct; + #include +-#include // for 'pm_notifier' +-#include // for "cpufreq_notifier" +-#include // for 'CPU_UP_PREPARE' etc ++#include /* for 'pm_notifier' */ ++#include /* for "cpufreq_notifier" */ ++#include /* for 'CPU_UP_PREPARE' etc */ + + #include "sw_kernel_defines.h" + #include "sw_collector.h" +@@ -92,11 +94,11 @@ struct cpu_workqueue_struct; // Forward declaration to avoid compiler warnings + */ + #ifndef __get_cpu_var + /* +- * Kernels >= 3.19 don't include a definition +- * of '__get_cpu_var'. Create one now. +- */ ++ * Kernels >= 3.19 don't include a definition ++ * of '__get_cpu_var'. Create one now. ++ */ + #define __get_cpu_var(var) (*this_cpu_ptr(&var)) +-#endif // __get_cpu_var ++#endif /* __get_cpu_var */ + + #define BEGIN_LOCAL_IRQ_STATS_READ(p) \ + do { \ +@@ -115,10 +117,11 @@ struct cpu_workqueue_struct; // Forward declaration to avoid compiler warnings + */ + #ifdef CONFIG_TIMER_STATS + #define GET_TIMER_THREAD_ID(t) \ +- ((t)->start_pid) /* 'start_pid' is actually the thread ID of the thread that initialized the timer */ ++ ((t)->start_pid) /* 'start_pid' is actually the thread ID ++ * of the thread that initialized the timer */ + #else + #define GET_TIMER_THREAD_ID(t) (-1) +-#endif // CONFIG_TIMER_STATS ++#endif /* CONFIG_TIMER_STATS */ + /* + * Tracepoint probe register/unregister functions and + * helper macros. +@@ -140,10 +143,10 @@ struct cpu_workqueue_struct; // Forward declaration to avoid compiler warnings + #define DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + tracepoint_probe_unregister(node->tp, probe, NULL) + #endif +-#else // CONFIG_TRACEPOINTS ++#else /* CONFIG_TRACEPOINTS */ + #define DO_REGISTER_SW_TRACEPOINT_PROBE(...) /* NOP */ + #define DO_UNREGISTER_SW_TRACEPOINT_PROBE(...) 
/* NOP */ +-#endif // CONFIG_TRACEPOINTS ++#endif /* CONFIG_TRACEPOINTS */ + #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) + #define _DEFINE_PROBE_FUNCTION(name, ...) static void name(__VA_ARGS__) + #else +@@ -196,8 +199,8 @@ struct cpu_workqueue_struct; // Forward declaration to avoid compiler warnings + #define PROBE_WAKE_UNLOCK_PARAMS \ + sw_probe_wakeup_source_deactivate_i, const char *name, \ + unsigned int state +-#endif // version +-#endif // CONFIG_ANDROID ++#endif /* version */ ++#endif /* CONFIG_ANDROID */ + + #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) + #define PROBE_WORKQUEUE_PARAMS \ +@@ -252,7 +255,8 @@ struct cpu_workqueue_struct; // Forward declaration to avoid compiler warnings + (node = &s_notifier_collector_lists[idx]); \ + ++idx) + /* +- * Use these macros if all tracepoint ID numbers ARE contiguous from 0 -- max tracepoint ID # ++ * Use these macros if all tracepoint ID numbers ++ * ARE contiguous from 0 -- max tracepoint ID # + */ + #if 0 + #define IS_VALID_TRACE_NOTIFIER_ID(id) \ +@@ -260,9 +264,10 @@ struct cpu_workqueue_struct; // Forward declaration to avoid compiler warnings + #define GET_COLLECTOR_TRACE_NODE(id) (&s_trace_collector_lists[id]) + #define FOR_EACH_trace_notifier_id(idx) \ + for (idx = 0; idx < SW_ARRAY_SIZE(s_trace_collector_lists); ++idx) +-#endif // if 0 ++#endif /* if 0 */ + /* +- * Use these macros if all tracepoint ID numbers are NOT contiguous from 0 -- max tracepoint ID # ++ * Use these macros if all tracepoint ID numbers ++ * are NOT contiguous from 0 -- max tracepoint ID # + */ + #define GET_COLLECTOR_TRACE_NODE(idx) \ + ({ \ +@@ -308,7 +313,8 @@ int sw_unregister_trace_cpu_idle_i(struct sw_trace_notifier_data *node); + int sw_register_trace_cpu_frequency_i(struct sw_trace_notifier_data *node); + int sw_unregister_trace_cpu_frequency_i(struct sw_trace_notifier_data *node); + int sw_register_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node); +-int 
sw_unregister_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_irq_handler_entry_i(struct sw_trace_notifier_data ++ *node); + int sw_register_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node); + int sw_unregister_trace_timer_expire_entry_i( + struct sw_trace_notifier_data *node); +@@ -329,15 +335,16 @@ int sw_register_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node); + int sw_unregister_trace_thermal_apic_entry_i( + struct sw_trace_notifier_data *node); + int sw_register_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node); +-int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node); +-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data ++ *node); ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ + #if IS_ENABLED(CONFIG_ANDROID) + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) + int sw_register_trace_wake_lock_i(struct sw_trace_notifier_data *node); + int sw_unregister_trace_wake_lock_i(struct sw_trace_notifier_data *node); + int sw_register_trace_wake_unlock_i(struct sw_trace_notifier_data *node); + int sw_unregister_trace_wake_unlock_i(struct sw_trace_notifier_data *node); +-#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ + int sw_register_trace_wakeup_source_activate_i( + struct sw_trace_notifier_data *node); + int sw_unregister_trace_wakeup_source_activate_i( +@@ -346,9 +353,10 @@ int sw_register_trace_wakeup_source_deactivate_i( + struct sw_trace_notifier_data *node); + int sw_unregister_trace_wakeup_source_deactivate_i( + struct sw_trace_notifier_data *node); +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) +-#endif // CONFIG_ANDROID +-int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data *node); ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ ++#endif /* CONFIG_ANDROID 
*/ ++int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data ++ *node); + int sw_unregister_trace_workqueue_execution_i( + struct sw_trace_notifier_data *node); + int sw_register_trace_sched_switch_i(struct sw_trace_notifier_data *node); +@@ -399,15 +407,16 @@ void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node); + * For overhead measurements. + */ + DECLARE_OVERHEAD_VARS( +- sw_handle_timer_wakeup_helper_i); // for the "timer_expire" family of probes +-DECLARE_OVERHEAD_VARS(sw_handle_irq_wakeup_i); // for IRQ wakeups +-DECLARE_OVERHEAD_VARS(sw_handle_sched_wakeup_i); // for SCHED +-DECLARE_OVERHEAD_VARS(sw_tps_i); // for TPS +-DECLARE_OVERHEAD_VARS(sw_tpf_i); // for TPF ++ sw_handle_timer_wakeup_helper_i); /* for the "timer_expire" ++ family of probes */ ++DECLARE_OVERHEAD_VARS(sw_handle_irq_wakeup_i); /* for IRQ wakeups */ ++DECLARE_OVERHEAD_VARS(sw_handle_sched_wakeup_i); /* for SCHED */ ++DECLARE_OVERHEAD_VARS(sw_tps_i); /* for TPS */ ++DECLARE_OVERHEAD_VARS(sw_tpf_i); /* for TPF */ + DECLARE_OVERHEAD_VARS(sw_process_fork_exit_helper_i); + #if IS_ENABLED(CONFIG_ANDROID) +-DECLARE_OVERHEAD_VARS(sw_handle_wakelock_i); // for wake lock/unlock +-#endif // CONFIG_ANDROID ++DECLARE_OVERHEAD_VARS(sw_handle_wakelock_i); /* for wake lock/unlock */ ++#endif /* CONFIG_ANDROID */ + DECLARE_OVERHEAD_VARS(sw_handle_workqueue_wakeup_helper_i); + DECLARE_OVERHEAD_VARS(sw_handle_sched_switch_helper_i); + /* +@@ -428,10 +437,14 @@ static DEFINE_PER_CPU(u64, sw_num_local_apic_timer_inters); + * Set to 'false' in TPS probe. + */ + static bool sw_wakeup_event_flag = true; ++ ++#if IS_ENABLED(CONFIG_TRACEPOINTS) + /* + * Scheduler-based polling emulation. 
+ */ + static DEFINE_PER_CPU(unsigned long, sw_pcpu_polling_jiff); ++#endif /* CONFIG_TRACEPOINTS */ ++ + pw_u16_t sw_min_polling_interval_msecs; + + /* +@@ -454,11 +467,12 @@ enum sw_trace_id { + SW_TRACE_ID_WORKQUEUE_EXECUTE_START, + SW_TRACE_ID_SCHED_SWITCH, + }; ++ + /* + * IDs for supported notifiers. + */ + enum sw_notifier_id { +- SW_NOTIFIER_ID_SUSPEND, // TODO: change name? ++ SW_NOTIFIER_ID_SUSPEND, /* TODO: change name? */ + SW_NOTIFIER_ID_SUSPEND_ENTER, + SW_NOTIFIER_ID_SUSPEND_EXIT, + SW_NOTIFIER_ID_HIBERNATE, +@@ -468,6 +482,7 @@ enum sw_notifier_id { + SW_NOTIFIER_ID_CPUFREQ, + SW_NOTIFIER_ID_HOTCPU, + }; ++ + /* + * Names for supported tracepoints. A tracepoint + * 'name' consists of two strings: a "kernel" string +@@ -494,12 +509,12 @@ static const struct sw_trace_notifier_name s_trace_names[] = { + "THERMAL-THROTTLE-ENTRY" }, + [SW_TRACE_ID_THERMAL_APIC_EXIT] = { "thermal_apic_exit", + "THERMAL-THROTTLE-EXIT" }, +-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ + #if IS_ENABLED(CONFIG_ANDROID) + #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) + [SW_TRACE_ID_WAKE_LOCK] = { "wake_lock", "WAKE-LOCK" }, + [SW_TRACE_ID_WAKE_UNLOCK] = { "wake_unlock", "WAKE-UNLOCK" }, +-#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ + [SW_TRACE_ID_WAKE_LOCK] = { "wakeup_source_activate", "WAKE-LOCK" }, + [SW_TRACE_ID_WAKE_UNLOCK] = { "wakeup_source_deactivate", + "WAKE-UNLOCK" }, +@@ -588,7 +603,7 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + &s_trace_names[SW_TRACE_ID_THERMAL_APIC_EXIT], + &sw_register_trace_thermal_apic_exit_i, + &sw_unregister_trace_thermal_apic_exit_i, NULL }, +-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ + /* Wakelocks have multiple tracepoints, depending on kernel version */ + #if IS_ENABLED(CONFIG_ANDROID) + #if 
LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) +@@ -599,7 +614,7 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + &s_trace_names[SW_TRACE_ID_WAKE_UNLOCK], + &sw_register_trace_wake_unlock_i, &sw_unregister_trace_wake_unlock_i, + NULL }, +-#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ + { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_WAKE_LOCK], + &sw_register_trace_wakeup_source_activate_i, + &sw_unregister_trace_wakeup_source_activate_i, NULL }, +@@ -607,8 +622,8 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + &s_trace_names[SW_TRACE_ID_WAKE_UNLOCK], + &sw_register_trace_wakeup_source_deactivate_i, + &sw_unregister_trace_wakeup_source_deactivate_i, NULL }, +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) +-#endif // CONFIG_ANDROID ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) */ ++#endif /* CONFIG_ANDROID */ + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_WORKQUEUE_EXECUTE_START], + &sw_register_trace_workqueue_execution_i, +@@ -618,6 +633,7 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + &sw_register_trace_sched_switch_i, + &sw_unregister_trace_sched_switch_i, NULL }, + }; ++ + /* + * List of supported notifiers. + */ +@@ -645,6 +661,7 @@ static struct sw_trace_notifier_data s_notifier_collector_lists[] = { + &s_notifier_names[SW_NOTIFIER_ID_CPUFREQ], + &sw_register_cpufreq_notifier_i, &sw_unregister_cpufreq_notifier_i }, + }; ++ + /* + * Special entry for CPU notifier (i.e. "hotplug" notifier) + * We don't want these to be visible to the user. +@@ -657,8 +674,7 @@ static struct sw_trace_notifier_data s_hotplug_notifier_data = { + NULL, + true /* always register */ + }; +- +-#else // !CONFIG_TRACEPOINTS ++#else /* !CONFIG_TRACEPOINTS */ + /* + * A list of supported tracepoints. 
+ */ +@@ -670,7 +686,7 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + static struct sw_trace_notifier_data s_notifier_collector_lists[] = { + /* EMPTY */ }; + +-#endif // CONFIG_TRACEPOINTS ++#endif /* CONFIG_TRACEPOINTS */ + + /* + * Macros to retrieve tracepoint and notifier IDs. +@@ -687,6 +703,7 @@ static struct sw_trace_notifier_data s_notifier_collector_lists[] = { + * Function definitions. + * ------------------------------------------------- + */ ++ + /* + * Retrieve a TSC value + */ +@@ -697,6 +714,7 @@ static inline u64 sw_tscval(void) + asm volatile("rdtsc" : "=a"(low), "=d"(high)); + return low | ((unsigned long long)high) << 32; + }; ++ + u64 sw_timestamp(void) + { + struct timespec ts; +@@ -704,6 +722,7 @@ u64 sw_timestamp(void) + getnstimeofday(&ts); + return (ts.tv_sec * 1000000000ULL + ts.tv_nsec); + } ++ + /* + * Basically the same as arch/x86/kernel/irq.c --> "arch_irq_stat_cpu(cpu)" + */ +@@ -718,16 +737,17 @@ u64 sw_my_local_arch_irq_stats_cpu_i(void) + { + #ifndef __arm__ + sum += stats->__nmi_count; +- // #ifdef CONFIG_X86_LOCAL_APIC ++#if IS_ENABLED(CONFIG_X86_LOCAL_APIC) + sum += stats->apic_timer_irqs; +-// #endif ++ sum += stats->irq_spurious_count; ++#endif + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34) + sum += stats->x86_platform_ipis; +-#endif // 2,6,34 ++#endif /* 2,6,34 */ + sum += stats->apic_perf_irqs; + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) + sum += stats->apic_irq_work_irqs; +-#endif // 3,5,0 ++#endif /* 3,5,0 */ + #ifdef CONFIG_SMP + sum += stats->irq_call_count; + sum += stats->irq_resched_count; +@@ -736,7 +756,7 @@ u64 sw_my_local_arch_irq_stats_cpu_i(void) + #ifdef CONFIG_X86_THERMAL_VECTOR + sum += stats->irq_thermal_count; + #endif +- sum += stats->irq_spurious_count; // should NEVER be non-zero!!! 
++ + #else + sum += stats->__softirq_pending; + #ifdef CONFIG_SMP +@@ -769,6 +789,7 @@ void sw_handle_trace_notifier_i(struct sw_trace_notifier_data *node) + sw_handle_per_cpu_msg(curr); + } + }; ++ + /* + * Generic tracepoint/notifier handling function. + */ +@@ -784,6 +805,7 @@ void sw_handle_trace_notifier_on_cpu_i(int cpu, + sw_handle_per_cpu_msg_on_cpu(cpu, curr); + } + }; ++ + void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node) + { + struct sw_collector_data *curr = NULL; +@@ -797,32 +819,11 @@ void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node) + sw_schedule_work(&curr->cpumask, &sw_handle_per_cpu_msg, curr); + } + } ++ + /* + * Tracepoint helpers. + */ +-/* +- * IRQ wakeup handling function. +- */ +-static void sw_handle_irq_wakeup_i(struct sw_collector_data *node, int irq) +-{ +- int cpu = RAW_CPU(); +- sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, +- node->per_msg_payload_size); +- // char *dst_vals = (char *)(unsigned long)msg->p_payload; +- char *dst_vals = msg->p_payload; + +- // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer +- msg->cpuidx = cpu; +- +- /* +- * IRQ handling ==> only return the irq number +- */ +- *((int *)dst_vals) = irq; +- +- if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { +- pw_pr_warn("WARNING: could NOT produce message!\n"); +- } +-}; + /* + * TIMER wakeup handling function. 
+ */ +@@ -832,10 +833,11 @@ static void sw_handle_timer_wakeup_i(struct sw_collector_data *node, pid_t pid, + int cpu = RAW_CPU(); + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); +- // char *dst_vals = (char *)(unsigned long)msg->p_payload; ++ /* char *dst_vals = (char *)(unsigned long)msg->p_payload; */ + char *dst_vals = msg->p_payload; + +- // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer ++ /* msg->tsc = sw_timestamp(); */ ++ /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + + /* +@@ -850,6 +852,7 @@ static void sw_handle_timer_wakeup_i(struct sw_collector_data *node, pid_t pid, + } + pw_pr_debug("HANDLED timer expire for %d, %d\n", pid, tid); + }; ++ + /* + * Helper function for {hr}timer expires. Required for overhead tracking. + */ +@@ -872,6 +875,7 @@ void sw_handle_timer_wakeup_helper_i(struct sw_collector_data *curr, + sw_handle_timer_wakeup_i(curr, pid, tid); + } + }; ++ + /* + * SCHED wakeup handling function. 
+ */ +@@ -881,10 +885,11 @@ void sw_handle_sched_wakeup_i(struct sw_collector_data *node, int source_cpu, + int cpu = source_cpu; + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); +- // char *dst_vals = (char *)(unsigned long)msg->p_payload; ++ /* char *dst_vals = (char *)(unsigned long)msg->p_payload; */ + char *dst_vals = msg->p_payload; + +- // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer ++ /* msg->tsc = sw_timestamp(); */ ++ /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = source_cpu; + + /* +@@ -898,6 +903,7 @@ void sw_handle_sched_wakeup_i(struct sw_collector_data *node, int source_cpu, + pw_pr_warn("WARNING: could NOT produce message!\n"); + } + }; ++ + /* + * APIC timer wakeup + */ +@@ -909,9 +915,10 @@ void sw_handle_apic_timer_wakeup_i(struct sw_collector_data *node) + int cpu = RAW_CPU(); + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); +- // char *dst_vals = (char *)(unsigned long)msg->p_payload; ++ /* char *dst_vals = (char *)(unsigned long)msg->p_payload; */ + +- // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer ++ /* msg->tsc = sw_timestamp(); */ ++ /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + + if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { +@@ -919,6 +926,7 @@ void sw_handle_apic_timer_wakeup_i(struct sw_collector_data *node) + } + pw_pr_debug("HANDLED APIC timer wakeup for cpu = %d\n", cpu); + }; ++ + /* + * Helper function for workqueue executions. Required for overhead tracking. 
+ */ +@@ -928,7 +936,8 @@ void sw_handle_workqueue_wakeup_helper_i(int cpu, + sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, + node->per_msg_payload_size); + +- // msg->tsc = sw_timestamp(); // msg TSC assigned when msg is written to buffer ++ /* msg->tsc = sw_timestamp(); */ ++ /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + + /* +@@ -938,6 +947,7 @@ void sw_handle_workqueue_wakeup_helper_i(int cpu, + pw_pr_error("WARNING: could NOT produce message!\n"); + } + }; ++ + /* + * Helper function for sched_switch. Required for overhead tracking. + */ +@@ -1003,9 +1013,11 @@ void sw_handle_sched_switch_helper_i(void) + /* + * Probe functions. + */ ++ + /* + * 1. TPS + */ ++ + /* + * Check IPI wakeups within the cpu_idle tracepoint. + */ +@@ -1039,6 +1051,7 @@ void sw_tps_apic_i(int cpu) + } + } + }; ++ + /* + * Perform any user-defined tasks within the + * cpu_idle tracepoint. +@@ -1053,6 +1066,7 @@ void sw_tps_tps_i(int cpu) + } + sw_handle_trace_notifier_i(tps_node); + }; ++ + /* + * Perform any wakeup-related tasks within the + * cpu_idle tracepoint. +@@ -1067,6 +1081,7 @@ void sw_tps_wakeup_i(int cpu) + sw_wakeup_event_flag = false; + RESET_VALID_WAKEUP_EVENT_COUNTER(cpu); + }; ++ + void sw_tps_i(void) + { + /* +@@ -1080,19 +1095,10 @@ void sw_tps_i(void) + sw_tps_wakeup_i(cpu); + }; + +-DEFINE_PROBE_FUNCTION(PROBE_TPS_PARAMS) +-{ +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) +- if (state == PWR_EVENT_EXIT) { +- return; +- } +-#endif +- DO_PER_CPU_OVERHEAD_FUNC(sw_tps_i); +-}; +- + /* + * 2. TPF + */ ++ + /* + * Helper function for overhead measurements. 
+ */ +@@ -1101,11 +1107,22 @@ void sw_tpf_i(int cpu, struct sw_trace_notifier_data *node) + sw_handle_trace_notifier_on_cpu_i((int)cpu, node); + }; + ++#if IS_ENABLED(CONFIG_TRACEPOINTS) ++DEFINE_PROBE_FUNCTION(PROBE_TPS_PARAMS) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) ++ if (state == PWR_EVENT_EXIT) { ++ return; ++ } ++#endif ++ DO_PER_CPU_OVERHEAD_FUNC(sw_tps_i); ++}; ++ + DEFINE_PROBE_FUNCTION(PROBE_TPF_PARAMS) + { + #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + int cpu = RAW_CPU(); +-#endif // version < 2.6.38 ++#endif /* version < 2.6.38 */ + static struct sw_trace_notifier_data *node; + + if (unlikely(node == NULL)) { +@@ -1115,6 +1132,31 @@ DEFINE_PROBE_FUNCTION(PROBE_TPF_PARAMS) + DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); + }; + ++/* ++ * IRQ wakeup handling function. ++ */ ++static void sw_handle_irq_wakeup_i(struct sw_collector_data *node, int irq) ++{ ++ int cpu = RAW_CPU(); ++ sw_driver_msg_t *msg = GET_MSG_SLOT_FOR_CPU(node->msg, cpu, ++ node->per_msg_payload_size); ++ /* char *dst_vals = (char *)(unsigned long)msg->p_payload; */ ++ char *dst_vals = msg->p_payload; ++ ++ /* msg->tsc = sw_timestamp(); */ ++ /* msg TSC assigned when msg is written to buffer */ ++ msg->cpuidx = cpu; ++ ++ /* ++ * IRQ handling ==> only return the irq number ++ */ ++ *((int *)dst_vals) = irq; ++ ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ pw_pr_warn("WARNING: could NOT produce message!\n"); ++ } ++}; ++ + /* + * 3. IRQ handler entry + */ +@@ -1136,6 +1178,7 @@ DEFINE_PROBE_FUNCTION(PROBE_IRQ_PARAMS) + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_irq_wakeup_i, curr, irq); + } + }; ++ + /* + * 4. TIMER expire + */ +@@ -1158,6 +1201,7 @@ DEFINE_PROBE_FUNCTION(PROBE_TIMER_ARGS) + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_timer_wakeup_helper_i, curr, node, + tid); + }; ++ + /* + * 5. 
HRTIMER expire + */ +@@ -1180,6 +1224,7 @@ DEFINE_PROBE_FUNCTION(PROBE_HRTIMER_PARAMS) + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_timer_wakeup_helper_i, curr, node, + tid); + }; ++ + /* + * 6. SCHED wakeup + */ +@@ -1206,14 +1251,16 @@ DEFINE_PROBE_FUNCTION(PROBE_SCHED_WAKEUP_PARAMS) + return; + } + list_for_each_entry(curr, &node->list, list) { +- // sw_handle_sched_wakeup_i(curr, source_cpu, target_cpu); ++ /* sw_handle_sched_wakeup_i(curr, source_cpu, target_cpu); */ + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_sched_wakeup_i, curr, + source_cpu, target_cpu); + } + }; ++ + /* + * 8. PROCESS fork + */ ++ + /* + * Helper for PROCESS fork, PROCESS exit + */ +@@ -1266,6 +1313,7 @@ DEFINE_PROBE_FUNCTION(PROBE_PROCESS_FORK_PARAMS) + child, true /* true ==> fork */); + } + }; ++ + /* + * 9. PROCESS exit + */ +@@ -1302,6 +1350,7 @@ DEFINE_PROBE_FUNCTION(PROBE_THERMAL_APIC_ENTRY_PARAMS) + } + DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); + }; ++ + /* + * 10. THERMAL_APIC exit + */ +@@ -1316,12 +1365,13 @@ DEFINE_PROBE_FUNCTION(PROBE_THERMAL_APIC_EXIT_PARAMS) + } + DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); + }; +-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ + + #if IS_ENABLED(CONFIG_ANDROID) + /* + * 11. WAKE lock / WAKEUP source activate. + */ ++ + /* + * Helper function to produce wake lock/unlock messages. + */ +@@ -1358,6 +1408,7 @@ void sw_produce_wakelock_msg_i(int cpu, struct sw_collector_data *node, + pw_pr_warn("WARNING: could NOT produce message!\n"); + } + }; ++ + /* + * Helper function to handle wake lock/unlock callbacks. 
+ */ +@@ -1377,6 +1428,7 @@ void sw_handle_wakelock_i(int cpu, struct sw_trace_notifier_data *node, + tid, proc_name); + } + }; ++ + DEFINE_PROBE_FUNCTION(PROBE_WAKE_LOCK_PARAMS) + { + int cpu = RAW_CPU(); +@@ -1400,10 +1452,11 @@ DEFINE_PROBE_FUNCTION(PROBE_WAKE_LOCK_PARAMS) + type = SW_WAKE_LOCK_TIMEOUT; + timeout = jiffies_to_msecs(lock->expires - jiffies); + } +-#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_wakelock_i, cpu, node, name, + (int)type, timeout); + }; ++ + /* + * 11. WAKE unlock / WAKEUP source deactivate. + */ +@@ -1423,7 +1476,7 @@ DEFINE_PROBE_FUNCTION(PROBE_WAKE_UNLOCK_PARAMS) + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_wakelock_i, cpu, node, name, + (int)type, 0 /*timeout*/); + }; +-#endif // CONFIG_ANDROID ++#endif /* CONFIG_ANDROID */ + + /* + * 12. WORKQUEUE +@@ -1474,10 +1527,10 @@ static void sw_send_pm_notification_i(int value) + msg = (struct sw_driver_msg *)buffer; + msg->tsc = sw_timestamp(); + msg->cpuidx = RAW_CPU(); +- msg->plugin_id = 0; // "0" indicates a system message +- msg->metric_id = 1; // "1" indicates a suspend/resume message (TODO) +- msg->msg_id = +- 0; /* don't care; TODO: use the 'msg_id' to encode the 'value'? */ ++ msg->plugin_id = 0; /* "0" indicates a system message */ ++ msg->metric_id = 1; /* "1" indicates a suspend/resume message (TODO) */ ++ msg->msg_id = 0; ++ /* don't care; TODO: use the 'msg_id' to encode the 'value'? 
*/ + msg->payload_len = sizeof(value); + msg->p_payload = buffer + sizeof(*msg); + *((int *)msg->p_payload) = value; +@@ -1501,6 +1554,7 @@ static bool sw_is_reset_i(void) + + return is_reset; + } ++ + static void sw_probe_pm_helper_i(int id, int both_id, bool is_enter, + enum sw_pm_action action, enum sw_pm_mode mode) + { +@@ -1541,8 +1595,9 @@ static bool sw_is_suspend_via_firmware(void) + return true; + } + +-static int sw_probe_pm_notifier_i(struct notifier_block *block, unsigned long state, +- void *dummy) ++static int sw_probe_pm_notifier_i(struct notifier_block *block, ++ unsigned long state, ++ void *dummy) + { + static const struct { + enum sw_pm_action action; +@@ -1582,8 +1637,9 @@ static int sw_probe_pm_notifier_i(struct notifier_block *block, unsigned long st + return NOTIFY_DONE; + } + +-static void sw_store_topology_change_i(enum cpu_action type, int cpu, int core_id, +- int pkg_id) ++static void sw_store_topology_change_i(enum cpu_action type, ++ int cpu, int core_id, ++ int pkg_id) + { + struct sw_topology_node *node = sw_kmalloc(sizeof(*node), GFP_ATOMIC); + +@@ -1601,6 +1657,7 @@ static void sw_store_topology_change_i(enum cpu_action type, int cpu, int core_i + SW_LIST_ADD(&sw_topology_list, node, list); + ++sw_num_topology_entries; + } ++ + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) + int sw_probe_hotplug_notifier_i(struct notifier_block *block, + unsigned long action, void *pcpu) +@@ -1679,6 +1736,7 @@ static void sw_probe_cpuhp_helper_i(unsigned int cpu, enum cpu_action action) + break; + } + } ++ + static int sw_probe_cpu_offline_i(unsigned int cpu) + { + printk(KERN_INFO "DEBUG: offline notification for cpu %u at %llu\n", +@@ -1686,6 +1744,7 @@ static int sw_probe_cpu_offline_i(unsigned int cpu) + sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_OFFLINE); + return 0; + } ++ + static int sw_probe_cpu_online_i(unsigned int cpu) + { + printk(KERN_INFO "DEBUG: online notification for cpu %u at %llu\n", cpu, +@@ -1694,7 +1753,7 @@ static int 
sw_probe_cpu_online_i(unsigned int cpu) + sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_ONLINE); + return 0; + } +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */ + + /* + * 2. CPUFREQ notifier +@@ -1722,6 +1781,7 @@ static int sw_probe_cpufreq_notifier_i(struct notifier_block *block, + } + return NOTIFY_DONE; + } ++ + /* + * 1. TPS. + */ +@@ -1730,21 +1790,23 @@ int sw_register_trace_cpu_idle_i(struct sw_trace_notifier_data *node) + #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + DO_REGISTER_SW_TRACEPOINT_PROBE(node, power_start, + sw_probe_power_start_i); +-#else // kernel version >= 2.6.38 ++#else /* kernel version >= 2.6.38 */ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, cpu_idle, sw_probe_cpu_idle_i); +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_cpu_idle_i(struct sw_trace_notifier_data *node) + { + #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, power_start, + sw_probe_power_start_i); +-#else // kernel version >= 2.6.38 ++#else /* kernel version >= 2.6.38 */ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, cpu_idle, sw_probe_cpu_idle_i); +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ + return PW_SUCCESS; + }; ++ + /* + * 2. 
TPF + */ +@@ -1753,23 +1815,25 @@ int sw_register_trace_cpu_frequency_i(struct sw_trace_notifier_data *node) + #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + DO_REGISTER_SW_TRACEPOINT_PROBE(node, power_frequency, + sw_probe_power_frequency_i); +-#else // kernel version >= 2.6.38 ++#else /* kernel version >= 2.6.38 */ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, cpu_frequency, + sw_probe_cpu_frequency_i); +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_cpu_frequency_i(struct sw_trace_notifier_data *node) + { + #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, power_frequency, + sw_probe_power_frequency_i); +-#else // kernel version >= 2.6.38 ++#else /* kernel version >= 2.6.38 */ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, cpu_frequency, + sw_probe_cpu_frequency_i); +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ + return PW_SUCCESS; + }; ++ + /* + * 3. IRQ handler entry + */ +@@ -1779,12 +1843,14 @@ int sw_register_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node) + sw_probe_irq_handler_entry_i); + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, irq_handler_entry, + sw_probe_irq_handler_entry_i); + return PW_SUCCESS; + }; ++ + /* + * 4. TIMER expire. 
+ */ +@@ -1794,21 +1860,26 @@ int sw_register_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node) + sw_probe_timer_expire_entry_i); + return PW_SUCCESS; + }; +-int sw_unregister_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node) ++ ++int sw_unregister_trace_timer_expire_entry_i(struct sw_trace_notifier_data ++ *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, timer_expire_entry, + sw_probe_timer_expire_entry_i); + return PW_SUCCESS; + }; ++ + /* + * 5. HRTIMER expire. + */ +-int sw_register_trace_hrtimer_expire_entry_i(struct sw_trace_notifier_data *node) ++int sw_register_trace_hrtimer_expire_entry_i(struct sw_trace_notifier_data ++ *node) + { + DO_REGISTER_SW_TRACEPOINT_PROBE(node, hrtimer_expire_entry, + sw_probe_hrtimer_expire_entry_i); + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_hrtimer_expire_entry_i( + struct sw_trace_notifier_data *node) + { +@@ -1816,6 +1887,7 @@ int sw_unregister_trace_hrtimer_expire_entry_i( + sw_probe_hrtimer_expire_entry_i); + return PW_SUCCESS; + }; ++ + /* + * 6. SCHED wakeup + */ +@@ -1825,12 +1897,14 @@ int sw_register_trace_sched_wakeup_i(struct sw_trace_notifier_data *node) + sw_probe_sched_wakeup_i); + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_sched_wakeup_i(struct sw_trace_notifier_data *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_wakeup, + sw_probe_sched_wakeup_i); + return PW_SUCCESS; + }; ++ + /* + * 8. PROCESS fork + */ +@@ -1840,12 +1914,15 @@ int sw_register_trace_sched_process_fork_i(struct sw_trace_notifier_data *node) + sw_probe_sched_process_fork_i); + return PW_SUCCESS; + }; +-int sw_unregister_trace_sched_process_fork_i(struct sw_trace_notifier_data *node) ++ ++int sw_unregister_trace_sched_process_fork_i(struct sw_trace_notifier_data ++ *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_process_fork, + sw_probe_sched_process_fork_i); + return PW_SUCCESS; + }; ++ + /* + * 9. 
PROCESS exit + */ +@@ -1855,12 +1932,15 @@ int sw_register_trace_sched_process_exit_i(struct sw_trace_notifier_data *node) + sw_probe_sched_process_exit_i); + return PW_SUCCESS; + }; +-int sw_unregister_trace_sched_process_exit_i(struct sw_trace_notifier_data *node) ++ ++int sw_unregister_trace_sched_process_exit_i(struct sw_trace_notifier_data ++ *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_process_exit, + sw_probe_sched_process_exit_i); + return PW_SUCCESS; + }; ++ + /* + * 10. THERMAL_APIC entry + */ +@@ -1871,12 +1951,15 @@ int sw_register_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node) + sw_probe_thermal_apic_entry_i); + return PW_SUCCESS; + }; +-int sw_unregister_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node) ++ ++int sw_unregister_trace_thermal_apic_entry_i(struct sw_trace_notifier_data ++ *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_entry, + sw_probe_thermal_apic_entry_i); + return PW_SUCCESS; + }; ++ + /* + * 10. THERMAL_APIC exit + */ +@@ -1886,13 +1969,15 @@ int sw_register_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node) + sw_probe_thermal_apic_exit_i); + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_exit, + sw_probe_thermal_apic_exit_i); + return PW_SUCCESS; + }; +-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ ++ + /* + * 11. WAKE lock / WAKEUP source activate. 
+ */ +@@ -1903,13 +1988,14 @@ int sw_register_trace_wake_lock_i(struct sw_trace_notifier_data *node) + DO_REGISTER_SW_TRACEPOINT_PROBE(node, wake_lock, sw_probe_wake_lock_i); + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_wake_lock_i(struct sw_trace_notifier_data *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wake_lock, + sw_probe_wake_lock_i); + return PW_SUCCESS; + }; +-#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ + int sw_register_trace_wakeup_source_activate_i( + struct sw_trace_notifier_data *node) + { +@@ -1917,6 +2003,7 @@ int sw_register_trace_wakeup_source_activate_i( + sw_probe_wakeup_source_activate_i); + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_wakeup_source_activate_i( + struct sw_trace_notifier_data *node) + { +@@ -1924,7 +2011,8 @@ int sw_unregister_trace_wakeup_source_activate_i( + sw_probe_wakeup_source_activate_i); + return PW_SUCCESS; + }; +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ ++ + /* + * 11. WAKE unlock / WAKEUP source deactivate. 
+ */ +@@ -1935,13 +2023,15 @@ int sw_register_trace_wake_unlock_i(struct sw_trace_notifier_data *node) + sw_probe_wake_unlock_i); + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_wake_unlock_i(struct sw_trace_notifier_data *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, wake_unlock, + sw_probe_wake_unlock_i); + return PW_SUCCESS; + }; +-#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) ++ ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ + int sw_register_trace_wakeup_source_deactivate_i( + struct sw_trace_notifier_data *node) + { +@@ -1949,6 +2039,7 @@ int sw_register_trace_wakeup_source_deactivate_i( + sw_probe_wakeup_source_deactivate_i); + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_wakeup_source_deactivate_i( + struct sw_trace_notifier_data *node) + { +@@ -1956,8 +2047,9 @@ int sw_unregister_trace_wakeup_source_deactivate_i( + sw_probe_wakeup_source_deactivate_i); + return PW_SUCCESS; + }; +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) +-#endif // CONFIG_ANDROID ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ ++#endif /* CONFIG_ANDROID */ ++ + /* + * 12. WORKQUEUE execution. + */ +@@ -1972,6 +2064,7 @@ int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data *node) + #endif + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_workqueue_execution_i( + struct sw_trace_notifier_data *node) + { +@@ -1984,6 +2077,7 @@ int sw_unregister_trace_workqueue_execution_i( + #endif + return PW_SUCCESS; + }; ++ + /* + * 13. SCHED switch + */ +@@ -2005,49 +2099,58 @@ int sw_register_trace_sched_switch_i(struct sw_trace_notifier_data *node) + sw_probe_sched_switch_i); + return PW_SUCCESS; + }; ++ + int sw_unregister_trace_sched_switch_i(struct sw_trace_notifier_data *node) + { + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, sched_switch, + sw_probe_sched_switch_i); + return PW_SUCCESS; + }; ++ + /* + * Notifier register/unregister functions. + */ ++ + /* + * 1. SUSPEND notifier. 
+ */ + static struct notifier_block sw_pm_notifier = { + .notifier_call = &sw_probe_pm_notifier_i, + }; ++ + int sw_register_pm_notifier_i(struct sw_trace_notifier_data *node) + { + register_pm_notifier(&sw_pm_notifier); + return PW_SUCCESS; + }; ++ + int sw_unregister_pm_notifier_i(struct sw_trace_notifier_data *node) + { + unregister_pm_notifier(&sw_pm_notifier); + return PW_SUCCESS; + }; ++ + /* + * 2. CPUFREQ notifier. + */ + static struct notifier_block sw_cpufreq_notifier = { + .notifier_call = &sw_probe_cpufreq_notifier_i, + }; ++ + int sw_register_cpufreq_notifier_i(struct sw_trace_notifier_data *node) + { + cpufreq_register_notifier(&sw_cpufreq_notifier, + CPUFREQ_TRANSITION_NOTIFIER); + return PW_SUCCESS; + }; ++ + int sw_unregister_cpufreq_notifier_i(struct sw_trace_notifier_data *node) + { + cpufreq_unregister_notifier(&sw_cpufreq_notifier, + CPUFREQ_TRANSITION_NOTIFIER); + return PW_SUCCESS; + }; ++ + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) + /* + * 3. CPU hot plug notifier. 
+@@ -2061,12 +2164,14 @@ int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + register_hotcpu_notifier(&sw_cpu_hotplug_notifier); + return PW_SUCCESS; + }; ++ + int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + { + unregister_hotcpu_notifier(&sw_cpu_hotplug_notifier); + return PW_SUCCESS; + }; +-#else // LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) ++ ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) */ + static int sw_cpuhp_state = -1; + int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + { +@@ -2080,6 +2185,7 @@ int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + } + return 0; + }; ++ + int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + { + if (sw_cpuhp_state >= 0) { +@@ -2087,7 +2193,7 @@ int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + } + return 0; + }; +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */ + + /* + * Tracepoint extraction routines. +@@ -2122,11 +2228,12 @@ static void sw_extract_tracepoint_callback(struct tracepoint *tp, void *priv) + } + } + }; +-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) */ ++#endif /* CONFIG_TRACEPOINTS */ + + /* +- * Retrieve the list of tracepoint structs to use when registering and unregistering +- * tracepoint handlers. ++ * Retrieve the list of tracepoint structs to use ++ * when registering and unregistering tracepoint handlers. 
+ */ + int sw_extract_trace_notifier_providers(void) + { +@@ -2143,7 +2250,7 @@ int sw_extract_trace_notifier_providers(void) + printk(KERN_WARNING + "WARNING: Could NOT find tracepoint structs for some tracepoints!\n"); + } +-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) */ + return PW_SUCCESS; + }; + +@@ -2183,12 +2290,13 @@ void sw_print_trace_notifier_provider_overheads(void) + #if IS_ENABLED(CONFIG_ANDROID) + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_wakelock_i, + "WAKE LOCK/UNLOCK"); +-#endif // CONFIG_ANDROID ++#endif /* CONFIG_ANDROID */ + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_workqueue_wakeup_helper_i, + "WORKQUEUE"); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_handle_sched_switch_helper_i, + "SCHED SWITCH"); + }; ++ + /* + * Add all trace/notifier providers. + */ +@@ -2224,9 +2332,10 @@ int sw_add_trace_notifier_providers(void) + return -EIO; + } + } +-#endif // CONFIG_TRACEPOINTS ++#endif /* CONFIG_TRACEPOINTS */ + return PW_SUCCESS; + } ++ + /* + * Remove previously added providers. + */ +-- +2.17.1 + diff --git a/patches/0004-power-supply-Driver-for-Intel-WhiskeyCove-PMIC-C.usb-typec b/patches/0004-power-supply-Driver-for-Intel-WhiskeyCove-PMIC-C.usb-typec new file mode 100644 index 0000000000..378a989af3 --- /dev/null +++ b/patches/0004-power-supply-Driver-for-Intel-WhiskeyCove-PMIC-C.usb-typec @@ -0,0 +1,434 @@ +From 1193f4b3dfbb9a5349b0bf33a70257d472a147a5 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Mon, 5 Aug 2019 14:54:37 +0300 +Subject: [PATCH 04/18] power: supply: Driver for Intel WhiskeyCove PMIC + Charger + +Intel WhiskeyCove PMIC Charger is part of the "battery +subsystem" on the WhiskeyCove used on Intel Broxton based +platforms, and its main purpose is to detect the input +source. Both mains DC adapters and USB chargers (BC1.2) are +supported. The driver will register separate psy for both of +them. 
+ +On top of input source detection, WhiskeyCove charger +can also detect ID pin level changes with USB micro-AB +connectors. The driver will use that information to +configure the USB role switch if the system uses it. + +Signed-off-by: Heikki Krogerus +--- + drivers/power/supply/Kconfig | 8 + + drivers/power/supply/Makefile | 1 + + drivers/power/supply/wcove_charger.c | 373 +++++++++++++++++++++++++++ + 3 files changed, 382 insertions(+) + create mode 100644 drivers/power/supply/wcove_charger.c + +diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig +index c84a7b1caeb6..a2f404157d71 100644 +--- a/drivers/power/supply/Kconfig ++++ b/drivers/power/supply/Kconfig +@@ -710,4 +710,12 @@ config CHARGER_WILCO + information can be found in + Documentation/ABI/testing/sysfs-class-power-wilco + ++config CHARGER_WCOVE ++ tristate "Intel WhiskeyCove PMIC charger driver" ++ depends on INTEL_SOC_PMIC ++ depends on INTEL_PMC_IPC ++ help ++ Say Y to enable support for Intel WhiskeyCove PMIC on Intel Broxton ++ platforms. 
++ + endif # POWER_SUPPLY +diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile +index 6c7da920ea83..fb9ee621c24c 100644 +--- a/drivers/power/supply/Makefile ++++ b/drivers/power/supply/Makefile +@@ -92,3 +92,4 @@ obj-$(CONFIG_FUEL_GAUGE_SC27XX) += sc27xx_fuel_gauge.o + obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o + obj-$(CONFIG_CHARGER_BD70528) += bd70528-charger.o + obj-$(CONFIG_CHARGER_WILCO) += wilco-charger.o ++obj-$(CONFIG_CHARGER_WCOVE) += wcove_charger.o +diff --git a/drivers/power/supply/wcove_charger.c b/drivers/power/supply/wcove_charger.c +new file mode 100644 +index 000000000000..91f3b924e54c +--- /dev/null ++++ b/drivers/power/supply/wcove_charger.c +@@ -0,0 +1,373 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/** ++ * Intel WhiskeyCove PMIC Charger Driver ++ * ++ * Copyright (C) 2019 Intel Corporation ++ * Author: Heikki Krogerus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Registers */ ++#define WCOVE_CHRGIRQ0 0x4e09 ++#define WCOVE_CHRGIRQ1 0x4e0a ++#define WCOVE_PWRSRC 0x4e20 ++#define WCOVE_USBSRC 0x4e29 ++#define WCOVE_USBC_STATUS1 0x7007 ++ ++/* IRQ masks */ ++#define WCOVE_CHRGIRQ0_MASK 0x10 ++#define WCOVE_CHRGIRQ1_MASK 0x1f ++ ++/* Register Bits */ ++ ++#define WCOVE_CHRGIRQ1_VBUS BIT(0) ++#define WCOVE_CHRGIRQ1_DC BIT(1) ++#define WCOVE_CHRGIRQ1_BATT BIT(2) ++#define WCOVE_CHRGIRQ1_USBIDFLD BIT(3) ++#define WCOVE_CHRGIRQ1_USBIDGND BIT(4) ++ ++#define WCOVE_PWRSRC_VBUS BIT(0) ++#define WCOVE_PWRSRC_DC BIT(1) ++#define WCOVE_PWRSRC_BATT BIT(2) ++#define WCOVE_PWRSRC_USBID(p) (GENMASK(4, 3) >> 3) ++#define WCOVE_PWRSRC_USBID_ACA 0 ++#define WCOVE_PWRSRC_USBID_GND 1 ++#define WCOVE_PWRSRC_USBID_FLOAT 2 ++ ++#define WCOVE_USBSRC_DET(p) GENMASK(1, 0) ++#define WCOVE_USBSRC_DET_NOT_STARTED 0 ++#define WCOVE_USBSRC_DET_ON_GOING 1 ++#define WCOVE_USBSRC_DET_COMPLETE 2 ++#define WCOVE_USBSRC_DET_FAILED 3 ++#define WCOVE_USBSRC_RESULT(p) (GENMASK(5, 2) >> 2) ++#define 
WCOVE_USBSRC_RESULT_NOT_DET 0 ++#define WCOVE_USBSRC_RESULT_SDP 1 ++#define WCOVE_USBSRC_RESULT_DCP 2 ++#define WCOVE_USBSRC_RESULT_CDP 3 ++#define WCOVE_USBSRC_RESULT_ACA 4 ++#define WCOVE_USBSRC_RESULT_SE1 5 ++#define WCOVE_USBSRC_RESULT_MHL 6 ++#define WCOVE_USBSRC_RESULT_FLOAT 7 ++#define WCOVE_USBSRC_RESULT_DCP_EXT 9 ++ ++struct wcove_charger { ++ struct mutex lock; /* device lock */ ++ struct device *dev; ++ struct regmap *regmap; ++ struct usb_role_switch *sw; ++ ++ struct power_supply *usb; ++ struct power_supply *mains; ++}; ++ ++static irqreturn_t wcove_chrg_irq(int irq, void *data) ++{ ++ struct wcove_charger *wcove = data; ++ u32 chrgirq; ++ u32 usbsrc; ++ int ret; ++ ++ mutex_lock(&wcove->lock); ++ ++ ret = regmap_read(wcove->regmap, WCOVE_CHRGIRQ0, &chrgirq); ++ if (ret) ++ goto err_unlock; ++ ++ ret = regmap_read(wcove->regmap, WCOVE_USBSRC, &usbsrc); ++ if (ret) ++ goto err_clear_irq; ++ ++ if (WCOVE_USBSRC_DET(usbsrc) != WCOVE_USBSRC_DET_COMPLETE) ++ goto err_clear_irq; ++ ++ switch (WCOVE_USBSRC_DET(usbsrc)) { ++ case WCOVE_USBSRC_RESULT_DCP: ++ case WCOVE_USBSRC_RESULT_DCP_EXT: ++ if (usb_role_switch_set_role(wcove->sw, USB_ROLE_NONE)) ++ dev_err(wcove->dev, "failed to set USB role\n"); ++ break; ++ default: ++ break; ++ } ++ ++ power_supply_changed(wcove->usb); ++ ++err_clear_irq: ++ regmap_write(wcove->regmap, WCOVE_CHRGIRQ0, ++ chrgirq & WCOVE_CHRGIRQ0_MASK); ++ ++err_unlock: ++ mutex_unlock(&wcove->lock); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t wcove_chrg1_irq(int irq, void *data) ++{ ++ struct wcove_charger *wcove = data; ++ enum usb_role role = USB_ROLE_NONE; ++ u32 chrgirq1; ++ u32 pwrsrc; ++ u32 typec; ++ int ret; ++ ++ mutex_lock(&wcove->lock); ++ ++ ret = regmap_read(wcove->regmap, WCOVE_CHRGIRQ1, &chrgirq1); ++ if (ret) ++ goto err_unlock; ++ ++ ret = regmap_read(wcove->regmap, WCOVE_PWRSRC, &pwrsrc); ++ if (ret) ++ goto err_clear_irq; ++ ++ if (chrgirq1 & WCOVE_CHRGIRQ1_DC) ++ power_supply_changed(wcove->mains); ++ ++ /* 
USB Type-C connector is handled separately. */ ++ ret = regmap_read(wcove->regmap, WCOVE_USBC_STATUS1, &typec); ++ if (!ret && typec) ++ goto err_clear_irq; ++ ++ if (chrgirq1 & WCOVE_CHRGIRQ1_USBIDGND && ++ WCOVE_PWRSRC_USBID(pwrsrc) == WCOVE_PWRSRC_USBID_GND) { ++ role = USB_ROLE_HOST; ++ } else if (chrgirq1 & WCOVE_CHRGIRQ1_VBUS) { ++ if (pwrsrc & WCOVE_PWRSRC_VBUS) ++ role = USB_ROLE_DEVICE; ++ power_supply_changed(wcove->usb); ++ } ++ ++ if (usb_role_switch_set_role(wcove->sw, role)) ++ dev_err(wcove->dev, "failed to set USB role\n"); ++ ++err_clear_irq: ++ regmap_write(wcove->regmap, WCOVE_CHRGIRQ1, ++ chrgirq1 & WCOVE_CHRGIRQ1_MASK); ++ ++err_unlock: ++ mutex_unlock(&wcove->lock); ++ ++ return IRQ_HANDLED; ++} ++ ++static int wcove_mains_get_prop(struct power_supply *psy, ++ enum power_supply_property psp, ++ union power_supply_propval *val) ++{ ++ struct wcove_charger *wcove = power_supply_get_drvdata(psy); ++ int ret = 0; ++ u32 pwrsrc; ++ ++ mutex_lock(&wcove->lock); ++ ++ ret = regmap_read(wcove->regmap, WCOVE_PWRSRC, &pwrsrc); ++ if (ret) ++ goto err_unlock; ++ ++ switch (psp) { ++ case POWER_SUPPLY_PROP_ONLINE: ++ val->intval = !!(pwrsrc & WCOVE_PWRSRC_DC); ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++err_unlock: ++ mutex_unlock(&wcove->lock); ++ ++ return ret; ++} ++ ++static enum power_supply_property wcove_mains_props[] = { ++ POWER_SUPPLY_PROP_ONLINE, ++}; ++ ++static const struct power_supply_desc dc_desc = { ++ .name = "wcove_mains", ++ .type = POWER_SUPPLY_TYPE_MAINS, ++ .properties = wcove_mains_props, ++ .num_properties = ARRAY_SIZE(wcove_mains_props), ++ .get_property = wcove_mains_get_prop, ++}; ++ ++static int wcove_usb_get_prop(struct power_supply *psy, ++ enum power_supply_property psp, ++ union power_supply_propval *val) ++{ ++ struct wcove_charger *wcove = power_supply_get_drvdata(psy); ++ int ret = 0; ++ u32 pwrsrc; ++ u32 usbsrc; ++ ++ mutex_lock(&wcove->lock); ++ ++ ret = regmap_read(wcove->regmap, WCOVE_PWRSRC, 
&pwrsrc); ++ if (ret) ++ goto err_unlock; ++ ++ ret = regmap_read(wcove->regmap, WCOVE_USBSRC, &usbsrc); ++ if (ret) ++ goto err_unlock; ++ ++ switch (psp) { ++ case POWER_SUPPLY_PROP_USB_TYPE: ++ switch (WCOVE_USBSRC_RESULT(usbsrc)) { ++ case WCOVE_USBSRC_RESULT_SDP: ++ val->intval = POWER_SUPPLY_USB_TYPE_SDP; ++ break; ++ case WCOVE_USBSRC_RESULT_DCP: ++ case WCOVE_USBSRC_RESULT_DCP_EXT: ++ val->intval = POWER_SUPPLY_USB_TYPE_DCP; ++ break; ++ case WCOVE_USBSRC_RESULT_CDP: ++ val->intval = POWER_SUPPLY_USB_TYPE_CDP; ++ break; ++ case WCOVE_USBSRC_RESULT_ACA: ++ val->intval = POWER_SUPPLY_USB_TYPE_ACA; ++ break; ++ case WCOVE_USBSRC_RESULT_SE1: ++ val->intval = POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID; ++ break; ++ default: ++ val->intval = POWER_SUPPLY_USB_TYPE_UNKNOWN; ++ break; ++ } ++ break; ++ case POWER_SUPPLY_PROP_ONLINE: ++ /* Make sure we are not the one driving VBUS */ ++ if (WCOVE_PWRSRC_USBID(pwrsrc) != WCOVE_PWRSRC_USBID_GND) ++ val->intval = !!(pwrsrc & WCOVE_PWRSRC_VBUS); ++ else ++ val->intval = 0; ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++err_unlock: ++ mutex_unlock(&wcove->lock); ++ ++ return ret; ++} ++ ++static enum power_supply_usb_type wcove_usb_types[] = { ++ POWER_SUPPLY_USB_TYPE_UNKNOWN, ++ POWER_SUPPLY_USB_TYPE_SDP, ++ POWER_SUPPLY_USB_TYPE_DCP, ++ POWER_SUPPLY_USB_TYPE_CDP, ++ POWER_SUPPLY_USB_TYPE_ACA, ++ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, ++}; ++ ++static enum power_supply_property wcove_usb_props[] = { ++ POWER_SUPPLY_PROP_ONLINE, ++ POWER_SUPPLY_PROP_USB_TYPE, ++}; ++ ++static const struct power_supply_desc usb_desc = { ++ .name = "wcove_usb", ++ .type = POWER_SUPPLY_TYPE_USB, ++ .usb_types = wcove_usb_types, ++ .num_usb_types = ARRAY_SIZE(wcove_usb_types), ++ .properties = wcove_usb_props, ++ .num_properties = ARRAY_SIZE(wcove_usb_props), ++ .get_property = wcove_usb_get_prop, ++}; ++ ++static int wcove_charger_probe(struct platform_device *pdev) ++{ ++ struct intel_soc_pmic *pmic = 
dev_get_drvdata(pdev->dev.parent); ++ struct power_supply_config cfg = {}; ++ struct wcove_charger *wcove; ++ int irq1; ++ int irq; ++ int ret; ++ ++ irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, ++ platform_get_irq_byname(pdev, "CHARGER")); ++ if (irq < 0) ++ return irq; ++ ++ irq1 = regmap_irq_get_virq(pmic->irq_chip_data_chgr, ++ platform_get_irq_byname(pdev, "CHARGER1")); ++ if (irq1 < 0) ++ return irq1; ++ ++ wcove = devm_kzalloc(&pdev->dev, sizeof(*wcove), GFP_KERNEL); ++ if (!wcove) ++ return -ENOMEM; ++ ++ mutex_init(&wcove->lock); ++ wcove->regmap = pmic->regmap; ++ wcove->dev = &pdev->dev; ++ ++ wcove->sw = usb_role_switch_get(&pdev->dev); ++ if (IS_ERR(wcove->sw)) ++ return PTR_ERR(wcove->sw); ++ ++ cfg.drv_data = wcove; ++ ++ wcove->mains = devm_power_supply_register(&pdev->dev, &dc_desc, &cfg); ++ if (IS_ERR(wcove->mains)) { ++ ret = PTR_ERR(wcove->mains); ++ goto err_release_switch; ++ } ++ ++ wcove->usb = devm_power_supply_register(&pdev->dev, &usb_desc, &cfg); ++ if (IS_ERR(wcove->usb)) { ++ ret = PTR_ERR(wcove->usb); ++ goto err_release_switch; ++ } ++ ++ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, ++ wcove_chrg_irq, IRQF_ONESHOT, ++ "wcove_charger", wcove); ++ if (ret) ++ goto err_release_switch; ++ ++ ret = devm_request_threaded_irq(&pdev->dev, irq1, NULL, ++ wcove_chrg1_irq, IRQF_ONESHOT, ++ "wcove_charger1", wcove); ++ if (ret) ++ goto err_release_switch; ++ ++ platform_set_drvdata(pdev, wcove); ++ ++ return 0; ++ ++err_release_switch: ++ usb_role_switch_put(wcove->sw); ++ ++ return ret; ++} ++ ++static int wcove_charger_remove(struct platform_device *pdev) ++{ ++ struct wcove_charger *wcove = platform_get_drvdata(pdev); ++ ++ usb_role_switch_put(wcove->sw); ++ ++ return 0; ++} ++ ++static struct platform_driver wcove_charger_driver = { ++ .driver = { ++ .name = "bxt_wcove_ext_charger", ++ }, ++ .probe = wcove_charger_probe, ++ .remove = wcove_charger_remove, ++}; ++module_platform_driver(wcove_charger_driver); ++ 
++MODULE_AUTHOR("Heikki Krogerus "); ++MODULE_DESCRIPTION("Intel WhiskeyCove PMIC Charger Driver"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0004-trusty-Add-interrupt-support.trusty b/patches/0004-trusty-Add-interrupt-support.trusty new file mode 100644 index 0000000000..a3272e6989 --- /dev/null +++ b/patches/0004-trusty-Add-interrupt-support.trusty @@ -0,0 +1,593 @@ +From bddb409d52f7b895fe20d0bf2a711c4f21de159f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Mon, 18 Nov 2013 20:52:55 -0800 +Subject: [PATCH 04/63] trusty: Add interrupt support +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Get list of interrupts from secure mode and register handlers for them. +When an interrupt triggers, disable the interrupt and schedule a work +function. The work functions then masks interrupts at the cpu, reenables +the interrupt and calls into secure mode. + +Edge triggered interrupts are not supported. + +Change-Id: I6df62e791140f0f2a8b5718b30edd86cca3dde5b +Signed-off-by: Arve Hjønnevåg +--- + .../devicetree/bindings/trusty/trusty-irq.txt | 8 + + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-irq.c | 536 ++++++++++++++++++ + 3 files changed, 545 insertions(+) + create mode 100644 Documentation/devicetree/bindings/trusty/trusty-irq.txt + create mode 100644 drivers/trusty/trusty-irq.c + +diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt +new file mode 100644 +index 000000000000..85fe1f1c7458 +--- /dev/null ++++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt +@@ -0,0 +1,8 @@ ++Trusty irq interface ++ ++Trusty requires non-secure irqs to be forwarded to the secure OS. ++ ++Required properties: ++- compatible: "android,trusty-irq-v1" ++ ++Must be a child of the node that provides the trusty std/fast call interface. 
+diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index 1d77805d7dd6..89acb6f7868a 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -3,3 +3,4 @@ + # + + obj-$(CONFIG_TRUSTY) += trusty.o ++obj-$(CONFIG_TRUSTY) += trusty-irq.o +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +new file mode 100644 +index 000000000000..ae9535af77dd +--- /dev/null ++++ b/drivers/trusty/trusty-irq.c +@@ -0,0 +1,536 @@ ++/* ++ * Copyright (C) 2013 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct trusty_irq { ++ struct trusty_irq_state *is; ++ struct hlist_node node; ++ unsigned int irq; ++ bool percpu; ++ bool enable; ++ struct trusty_irq __percpu *percpu_ptr; ++}; ++ ++struct trusty_irq_work { ++ struct trusty_irq_state *is; ++ struct work_struct work; ++}; ++ ++struct trusty_irq_irqset { ++ struct hlist_head pending; ++ struct hlist_head inactive; ++}; ++ ++struct trusty_irq_state { ++ struct device *dev; ++ struct device *trusty_dev; ++ struct trusty_irq_work __percpu *irq_work; ++ struct trusty_irq_irqset normal_irqs; ++ spinlock_t normal_irqs_lock; ++ struct trusty_irq_irqset __percpu *percpu_irqs; ++ struct notifier_block trusty_call_notifier; ++ struct notifier_block cpu_notifier; ++}; ++ ++static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is, ++ struct trusty_irq_irqset *irqset, ++ bool percpu) ++{ ++ struct hlist_node *n; ++ 
struct trusty_irq *trusty_irq; ++ ++ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) { ++ dev_dbg(is->dev, ++ "%s: enable pending irq %d, percpu %d, cpu %d\n", ++ __func__, trusty_irq->irq, percpu, smp_processor_id()); ++ if (percpu) ++ enable_percpu_irq(trusty_irq->irq, 0); ++ else ++ enable_irq(trusty_irq->irq); ++ hlist_del(&trusty_irq->node); ++ hlist_add_head(&trusty_irq->node, &irqset->inactive); ++ } ++} ++ ++static void trusty_irq_enable_irqset(struct trusty_irq_state *is, ++ struct trusty_irq_irqset *irqset) ++{ ++ struct trusty_irq *trusty_irq; ++ ++ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) { ++ if (trusty_irq->enable) { ++ dev_warn(is->dev, ++ "%s: percpu irq %d already enabled, cpu %d\n", ++ __func__, trusty_irq->irq, smp_processor_id()); ++ continue; ++ } ++ dev_dbg(is->dev, "%s: enable percpu irq %d, cpu %d\n", ++ __func__, trusty_irq->irq, smp_processor_id()); ++ enable_percpu_irq(trusty_irq->irq, 0); ++ trusty_irq->enable = true; ++ } ++} ++ ++static void trusty_irq_disable_irqset(struct trusty_irq_state *is, ++ struct trusty_irq_irqset *irqset) ++{ ++ struct hlist_node *n; ++ struct trusty_irq *trusty_irq; ++ ++ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) { ++ if (!trusty_irq->enable) { ++ dev_warn(is->dev, ++ "irq %d already disabled, percpu %d, cpu %d\n", ++ trusty_irq->irq, trusty_irq->percpu, ++ smp_processor_id()); ++ continue; ++ } ++ dev_dbg(is->dev, "%s: disable irq %d, percpu %d, cpu %d\n", ++ __func__, trusty_irq->irq, trusty_irq->percpu, ++ smp_processor_id()); ++ trusty_irq->enable = false; ++ if (trusty_irq->percpu) ++ disable_percpu_irq(trusty_irq->irq); ++ else ++ disable_irq_nosync(trusty_irq->irq); ++ } ++ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) { ++ if (!trusty_irq->enable) { ++ dev_warn(is->dev, ++ "pending irq %d already disabled, percpu %d, cpu %d\n", ++ trusty_irq->irq, trusty_irq->percpu, ++ smp_processor_id()); ++ } ++ dev_dbg(is->dev, ++ "%s: disable 
pending irq %d, percpu %d, cpu %d\n", ++ __func__, trusty_irq->irq, trusty_irq->percpu, ++ smp_processor_id()); ++ trusty_irq->enable = false; ++ hlist_del(&trusty_irq->node); ++ hlist_add_head(&trusty_irq->node, &irqset->inactive); ++ } ++} ++ ++static int trusty_irq_call_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct trusty_irq_state *is; ++ ++ BUG_ON(!irqs_disabled()); ++ ++ if (action != TRUSTY_CALL_PREPARE) ++ return NOTIFY_DONE; ++ ++ is = container_of(nb, struct trusty_irq_state, trusty_call_notifier); ++ ++ spin_lock(&is->normal_irqs_lock); ++ trusty_irq_enable_pending_irqs(is, &is->normal_irqs, false); ++ spin_unlock(&is->normal_irqs_lock); ++ trusty_irq_enable_pending_irqs(is, this_cpu_ptr(is->percpu_irqs), true); ++ ++ return NOTIFY_OK; ++} ++ ++ ++static void trusty_irq_work_func(struct work_struct *work) ++{ ++ int ret; ++ struct trusty_irq_state *is = ++ container_of(work, struct trusty_irq_work, work)->is; ++ ++ dev_dbg(is->dev, "%s\n", __func__); ++ ++ ret = trusty_std_call32(is->trusty_dev, SMC_SC_NOP, 0, 0, 0); ++ if (ret != 0) ++ dev_err(is->dev, "%s: SMC_SC_NOP failed %d", __func__, ret); ++ ++ dev_dbg(is->dev, "%s: done\n", __func__); ++} ++ ++irqreturn_t trusty_irq_handler(int irq, void *data) ++{ ++ struct trusty_irq *trusty_irq = data; ++ struct trusty_irq_state *is = trusty_irq->is; ++ struct trusty_irq_work *trusty_irq_work = this_cpu_ptr(is->irq_work); ++ struct trusty_irq_irqset *irqset; ++ ++ dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n", ++ __func__, irq, trusty_irq->irq, smp_processor_id(), ++ trusty_irq->enable); ++ ++ if (trusty_irq->percpu) { ++ disable_percpu_irq(irq); ++ irqset = this_cpu_ptr(is->percpu_irqs); ++ } else { ++ disable_irq_nosync(irq); ++ irqset = &is->normal_irqs; ++ } ++ ++ spin_lock(&is->normal_irqs_lock); ++ if (trusty_irq->enable) { ++ hlist_del(&trusty_irq->node); ++ hlist_add_head(&trusty_irq->node, &irqset->pending); ++ } ++ 
spin_unlock(&is->normal_irqs_lock); ++ ++ schedule_work_on(raw_smp_processor_id(), &trusty_irq_work->work); ++ ++ dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq); ++ ++ return IRQ_HANDLED; ++} ++ ++static void trusty_irq_cpu_up(void *info) ++{ ++ unsigned long irq_flags; ++ struct trusty_irq_state *is = info; ++ ++ dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id()); ++ ++ local_irq_save(irq_flags); ++ trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs)); ++ local_irq_restore(irq_flags); ++} ++ ++static void trusty_irq_cpu_down(void *info) ++{ ++ unsigned long irq_flags; ++ struct trusty_irq_state *is = info; ++ ++ dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id()); ++ ++ local_irq_save(irq_flags); ++ trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs)); ++ local_irq_restore(irq_flags); ++} ++ ++static int trusty_irq_cpu_notify(struct notifier_block *nb, ++ unsigned long action, void *hcpu) ++{ ++ struct trusty_irq_state *is; ++ ++ is = container_of(nb, struct trusty_irq_state, cpu_notifier); ++ ++ dev_dbg(is->dev, "%s: 0x%lx\n", __func__, action); ++ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_STARTING: ++ trusty_irq_cpu_up(is); ++ break; ++ case CPU_DYING: ++ trusty_irq_cpu_down(is); ++ break; ++ } ++ ++ return NOTIFY_OK; ++} ++ ++static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int irq) ++{ ++ int ret; ++ unsigned long irq_flags; ++ struct trusty_irq *trusty_irq; ++ ++ dev_dbg(is->dev, "%s: irq %d\n", __func__, irq); ++ ++ trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL); ++ if (!trusty_irq) ++ return -ENOMEM; ++ ++ trusty_irq->is = is; ++ trusty_irq->irq = irq; ++ trusty_irq->enable = true; ++ ++ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); ++ hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive); ++ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); ++ ++ ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD, ++ "trusty", trusty_irq); ++ if (ret) { ++ 
dev_err(is->dev, "request_irq failed %d\n", ret); ++ goto err_request_irq; ++ } ++ return 0; ++ ++err_request_irq: ++ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); ++ hlist_del(&trusty_irq->node); ++ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); ++ kfree(trusty_irq); ++ return ret; ++} ++ ++static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int irq) ++{ ++ int ret; ++ unsigned int cpu; ++ struct trusty_irq __percpu *trusty_irq_handler_data; ++ ++ dev_dbg(is->dev, "%s: irq %d\n", __func__, irq); ++ ++ trusty_irq_handler_data = alloc_percpu(struct trusty_irq); ++ if (!trusty_irq_handler_data) ++ return -ENOMEM; ++ ++ for_each_possible_cpu(cpu) { ++ struct trusty_irq *trusty_irq; ++ struct trusty_irq_irqset *irqset; ++ ++ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); ++ irqset = per_cpu_ptr(is->percpu_irqs, cpu); ++ ++ trusty_irq->is = is; ++ hlist_add_head(&trusty_irq->node, &irqset->inactive); ++ trusty_irq->irq = irq; ++ trusty_irq->percpu = true; ++ trusty_irq->percpu_ptr = trusty_irq_handler_data; ++ } ++ ++ ret = request_percpu_irq(irq, trusty_irq_handler, "trusty", ++ trusty_irq_handler_data); ++ if (ret) { ++ dev_err(is->dev, "request_percpu_irq failed %d\n", ret); ++ goto err_request_percpu_irq; ++ } ++ ++ return 0; ++ ++err_request_percpu_irq: ++ for_each_possible_cpu(cpu) { ++ struct trusty_irq *trusty_irq; ++ ++ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); ++ hlist_del(&trusty_irq->node); ++ } ++ ++ free_percpu(trusty_irq_handler_data); ++ return ret; ++} ++ ++static int trusty_smc_get_next_irq(struct trusty_irq_state *is, ++ unsigned long min_irq, bool per_cpu) ++{ ++ return trusty_fast_call32(is->trusty_dev, SMC_FC_GET_NEXT_IRQ, ++ min_irq, per_cpu, 0); ++} ++ ++static int trusty_irq_init_one(struct trusty_irq_state *is, ++ int irq, bool per_cpu) ++{ ++ int ret; ++ ++ irq = trusty_smc_get_next_irq(is, irq, per_cpu); ++ if (irq < 0) ++ return irq; ++ ++ if (per_cpu) ++ ret = 
trusty_irq_init_per_cpu_irq(is, irq); ++ else ++ ret = trusty_irq_init_normal_irq(is, irq); ++ ++ if (ret) { ++ dev_warn(is->dev, ++ "failed to initialize irq %d, irq will be ignored\n", ++ irq); ++ } ++ ++ return irq + 1; ++} ++ ++static void trusty_irq_free_irqs(struct trusty_irq_state *is) ++{ ++ struct trusty_irq *irq; ++ struct hlist_node *n; ++ unsigned int cpu; ++ ++ hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) { ++ dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq); ++ free_irq(irq->irq, irq); ++ hlist_del(&irq->node); ++ kfree(irq); ++ } ++ hlist_for_each_entry_safe(irq, n, ++ &this_cpu_ptr(is->percpu_irqs)->inactive, ++ node) { ++ struct trusty_irq __percpu *trusty_irq_handler_data; ++ ++ dev_dbg(is->dev, "%s: percpu irq %d\n", __func__, irq->irq); ++ trusty_irq_handler_data = irq->percpu_ptr; ++ free_percpu_irq(irq->irq, trusty_irq_handler_data); ++ for_each_possible_cpu(cpu) { ++ struct trusty_irq *irq_tmp; ++ ++ irq_tmp = per_cpu_ptr(trusty_irq_handler_data, cpu); ++ hlist_del(&irq_tmp->node); ++ } ++ free_percpu(trusty_irq_handler_data); ++ } ++} ++ ++static int trusty_irq_probe(struct platform_device *pdev) ++{ ++ int ret; ++ int irq; ++ unsigned int cpu; ++ unsigned long irq_flags; ++ struct trusty_irq_state *is; ++ ++ dev_dbg(&pdev->dev, "%s\n", __func__); ++ ++ is = kzalloc(sizeof(*is), GFP_KERNEL); ++ if (!is) { ++ ret = -ENOMEM; ++ goto err_alloc_is; ++ } ++ ++ is->dev = &pdev->dev; ++ is->trusty_dev = is->dev->parent; ++ is->irq_work = alloc_percpu(struct trusty_irq_work); ++ if (!is->irq_work) { ++ ret = -ENOMEM; ++ goto err_alloc_irq_work; ++ } ++ spin_lock_init(&is->normal_irqs_lock); ++ is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset); ++ if (!is->percpu_irqs) { ++ ret = -ENOMEM; ++ goto err_alloc_pending_percpu_irqs; ++ } ++ ++ platform_set_drvdata(pdev, is); ++ ++ is->trusty_call_notifier.notifier_call = trusty_irq_call_notify; ++ ret = trusty_call_notifier_register(is->trusty_dev, ++ 
&is->trusty_call_notifier); ++ if (ret) { ++ dev_err(&pdev->dev, ++ "failed to register trusty call notifier\n"); ++ goto err_trusty_call_notifier_register; ++ } ++ ++ for_each_possible_cpu(cpu) { ++ struct trusty_irq_work *trusty_irq_work; ++ ++ trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); ++ trusty_irq_work->is = is; ++ INIT_WORK(&trusty_irq_work->work, trusty_irq_work_func); ++ } ++ ++ for (irq = 0; irq >= 0;) ++ irq = trusty_irq_init_one(is, irq, true); ++ for (irq = 0; irq >= 0;) ++ irq = trusty_irq_init_one(is, irq, false); ++ ++ is->cpu_notifier.notifier_call = trusty_irq_cpu_notify; ++ ret = register_hotcpu_notifier(&is->cpu_notifier); ++ if (ret) { ++ dev_err(&pdev->dev, "register_cpu_notifier failed %d\n", ret); ++ goto err_register_hotcpu_notifier; ++ } ++ ret = on_each_cpu(trusty_irq_cpu_up, is, 0); ++ if (ret) { ++ dev_err(&pdev->dev, "register_cpu_notifier failed %d\n", ret); ++ goto err_on_each_cpu; ++ } ++ ++ return 0; ++ ++err_on_each_cpu: ++ unregister_hotcpu_notifier(&is->cpu_notifier); ++ on_each_cpu(trusty_irq_cpu_down, is, 1); ++err_register_hotcpu_notifier: ++ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); ++ trusty_irq_disable_irqset(is, &is->normal_irqs); ++ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); ++ trusty_irq_free_irqs(is); ++ trusty_call_notifier_unregister(is->trusty_dev, ++ &is->trusty_call_notifier); ++err_trusty_call_notifier_register: ++ free_percpu(is->percpu_irqs); ++err_alloc_pending_percpu_irqs: ++ for_each_possible_cpu(cpu) { ++ struct trusty_irq_work *trusty_irq_work; ++ ++ trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); ++ flush_work(&trusty_irq_work->work); ++ } ++ free_percpu(is->irq_work); ++err_alloc_irq_work: ++ kfree(is); ++err_alloc_is: ++ return ret; ++} ++ ++static int trusty_irq_remove(struct platform_device *pdev) ++{ ++ int ret; ++ unsigned int cpu; ++ unsigned long irq_flags; ++ struct trusty_irq_state *is = platform_get_drvdata(pdev); ++ ++ dev_dbg(&pdev->dev, "%s\n", __func__); ++ 
++ unregister_hotcpu_notifier(&is->cpu_notifier); ++ ret = on_each_cpu(trusty_irq_cpu_down, is, 1); ++ if (ret) ++ dev_err(&pdev->dev, "on_each_cpu failed %d\n", ret); ++ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); ++ trusty_irq_disable_irqset(is, &is->normal_irqs); ++ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); ++ ++ trusty_irq_free_irqs(is); ++ ++ trusty_call_notifier_unregister(is->trusty_dev, ++ &is->trusty_call_notifier); ++ free_percpu(is->percpu_irqs); ++ for_each_possible_cpu(cpu) { ++ struct trusty_irq_work *trusty_irq_work; ++ ++ trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); ++ flush_work(&trusty_irq_work->work); ++ } ++ free_percpu(is->irq_work); ++ kfree(is); ++ ++ return 0; ++} ++ ++static const struct of_device_id trusty_test_of_match[] = { ++ { .compatible = "android,trusty-irq-v1", }, ++ {}, ++}; ++ ++static struct platform_driver trusty_irq_driver = { ++ .probe = trusty_irq_probe, ++ .remove = trusty_irq_remove, ++ .driver = { ++ .name = "trusty-irq", ++ .owner = THIS_MODULE, ++ .of_match_table = trusty_test_of_match, ++ }, ++}; ++ ++module_platform_driver(trusty_irq_driver); +-- +2.17.1 + diff --git a/patches/0004-usb-host-xhci-update-event-ring-dequeue-pointer-o.usb-xhci b/patches/0004-usb-host-xhci-update-event-ring-dequeue-pointer-o.usb-xhci new file mode 100644 index 0000000000..f3bc65b317 --- /dev/null +++ b/patches/0004-usb-host-xhci-update-event-ring-dequeue-pointer-o.usb-xhci @@ -0,0 +1,114 @@ +From dd5716856e2f41fece9decfb10d2834b25c84425 Mon Sep 17 00:00:00 2001 +From: Peter Chen +Date: Sun, 29 Sep 2019 06:07:39 +0000 +Subject: [PATCH 4/4] usb: host: xhci: update event ring dequeue pointer on + purpose + +On some situations, the software handles TRB events slower +than adding TRBs, then xhci_handle_event can't return zero +long time, the xHC will consider the event ring is full, +and trigger "Event Ring Full" error, but in fact, the software +has already finished lots of events, just no chance to +update ERDP 
(event ring dequeue pointer). + +In this commit, we force update ERDP if half of TRBS_PER_SEGMENT +events have handled to avoid "Event Ring Full" error. + +Signed-off-by: Peter Chen +Signed-off-by: Mathias Nyman +--- + drivers/usb/host/xhci-ring.c | 60 ++++++++++++++++++++++++++---------- + 1 file changed, 43 insertions(+), 17 deletions(-) + +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 019de58e2d51..a59ccd44bc92 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -2738,6 +2738,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci) + return 1; + } + ++/* ++ * Update Event Ring Dequeue Pointer: ++ * - When all events have finished ++ * - To avoid "Event Ring Full Error" condition ++ */ ++static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, ++ union xhci_trb *event_ring_deq) ++{ ++ u64 temp_64; ++ dma_addr_t deq; ++ ++ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); ++ /* If necessary, update the HW's version of the event ring deq ptr. */ ++ if (event_ring_deq != xhci->event_ring->dequeue) { ++ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, ++ xhci->event_ring->dequeue); ++ if (deq == 0) ++ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n"); ++ /* ++ * Per 4.9.4, Software writes to the ERDP register shall ++ * always advance the Event Ring Dequeue Pointer value. ++ */ ++ if ((temp_64 & (u64) ~ERST_PTR_MASK) == ++ ((u64) deq & (u64) ~ERST_PTR_MASK)) ++ return; ++ ++ /* Update HC event ring dequeue pointer */ ++ temp_64 &= ERST_PTR_MASK; ++ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); ++ } ++ ++ /* Clear the event handler busy flag (RW1C) */ ++ temp_64 |= ERST_EHB; ++ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); ++} ++ + /* + * xHCI spec says we can get an interrupt, and if the HC has an error condition, + * we might get bad data out of the event ring. 
Section 4.10.2.7 has a list of +@@ -2749,9 +2785,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) + union xhci_trb *event_ring_deq; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; +- dma_addr_t deq; + u64 temp_64; + u32 status; ++ int event_loop = 0; + + spin_lock_irqsave(&xhci->lock, flags); + /* Check if the xHC generated the interrupt, or the irq is shared */ +@@ -2805,24 +2841,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) + /* FIXME this should be a delayed service routine + * that clears the EHB. + */ +- while (xhci_handle_event(xhci) > 0) {} +- +- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); +- /* If necessary, update the HW's version of the event ring deq ptr. */ +- if (event_ring_deq != xhci->event_ring->dequeue) { +- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, +- xhci->event_ring->dequeue); +- if (deq == 0) +- xhci_warn(xhci, "WARN something wrong with SW event " +- "ring dequeue ptr.\n"); +- /* Update HC event ring dequeue pointer */ +- temp_64 &= ERST_PTR_MASK; +- temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); ++ while (xhci_handle_event(xhci) > 0) { ++ if (event_loop++ < TRBS_PER_SEGMENT / 2) ++ continue; ++ xhci_update_erst_dequeue(xhci, event_ring_deq); ++ event_loop = 0; + } + +- /* Clear the event handler busy flag (RW1C); event ring is empty. 
*/ +- temp_64 |= ERST_EHB; +- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); ++ xhci_update_erst_dequeue(xhci, event_ring_deq); + ret = IRQ_HANDLED; + + out: +-- +2.17.1 + diff --git a/patches/0004-x86-tsc-add-tsc-to-art-helpers.felipeb-5.4 b/patches/0004-x86-tsc-add-tsc-to-art-helpers.felipeb-5.4 new file mode 100644 index 0000000000..d0f70df35f --- /dev/null +++ b/patches/0004-x86-tsc-add-tsc-to-art-helpers.felipeb-5.4 @@ -0,0 +1,73 @@ +From 88a4671df1c2ef5b76cc54ecbfb0595326da23c1 Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Wed, 20 Feb 2019 11:23:38 +0200 +Subject: [PATCH 04/14] x86: tsc: add tsc to art helpers + +Provide a helper which converts TSC to ART ns. This helper does the +inverse of convert_art_ns_to_tsc(). + +Signed-off-by: Felipe Balbi +--- + arch/x86/include/asm/tsc.h | 2 ++ + arch/x86/kernel/tsc.c | 32 ++++++++++++++++++++++++++++++++ + 2 files changed, 34 insertions(+) + +diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h +index 8a0c25c6bf09..b7a9f4385a82 100644 +--- a/arch/x86/include/asm/tsc.h ++++ b/arch/x86/include/asm/tsc.h +@@ -32,6 +32,8 @@ static inline cycles_t get_cycles(void) + + extern struct system_counterval_t convert_art_to_tsc(u64 art); + extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns); ++extern void get_tsc_ns(struct system_counterval_t *tsc_counterval, u64 *tsc_ns); ++extern u64 get_art_ns_now(void); + + extern void tsc_early_init(void); + extern void tsc_init(void); +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c +index c59454c382fd..9df1faa9e8c7 100644 +--- a/arch/x86/kernel/tsc.c ++++ b/arch/x86/kernel/tsc.c +@@ -1232,6 +1232,38 @@ struct system_counterval_t convert_art_to_tsc(u64 art) + } + EXPORT_SYMBOL(convert_art_to_tsc); + ++void get_tsc_ns(struct system_counterval_t *tsc_counterval, u64 *tsc_ns) ++{ ++ u64 tmp, res, rem; ++ u64 cycles; ++ ++ tsc_counterval->cycles = clocksource_tsc.read(NULL); ++ cycles = tsc_counterval->cycles; ++ tsc_counterval->cs = 
art_related_clocksource; ++ ++ rem = do_div(cycles, tsc_khz); ++ ++ res = cycles * USEC_PER_SEC; ++ tmp = rem * USEC_PER_SEC; ++ ++ do_div(tmp, tsc_khz); ++ res += tmp; ++ ++ *tsc_ns = res; ++} ++EXPORT_SYMBOL(get_tsc_ns); ++ ++u64 get_art_ns_now(void) ++{ ++ struct system_counterval_t tsc_cycles; ++ u64 tsc_ns; ++ ++ get_tsc_ns(&tsc_cycles, &tsc_ns); ++ ++ return tsc_ns; ++} ++EXPORT_SYMBOL(get_art_ns_now); ++ + /** + * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC. + * @art_ns: ART (Always Running Timer) in unit of nanoseconds +-- +2.17.1 + diff --git a/patches/0005-ASoC-Intel-Skylake-Expose-skl_find_module-and-skl_ge.audio b/patches/0005-ASoC-Intel-Skylake-Expose-skl_find_module-and-skl_ge.audio new file mode 100644 index 0000000000..5a1dd0e852 --- /dev/null +++ b/patches/0005-ASoC-Intel-Skylake-Expose-skl_find_module-and-skl_ge.audio @@ -0,0 +1,103 @@ +From fe878ca89ab852ad16e352e2cc5ffd7b88dbcce1 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 14:05:44 +0100 +Subject: [PATCH 005/193] ASoC: Intel: Skylake: Expose skl_find_module and + skl_get_module_id +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Add simple search method for obtaining Skylake modules in form of +skl_find_module. While at it, expose skl_get_module_id so function can +be used freely as module id is one of the most commonly used attributes +in IPCs. 
+ +Change-Id: Id1209bad938a08b2693d8f6040709dae96dc50ea +Signed-off-by: Cezary Rojewski +Signed-off-by: Amadeusz Sławiński +Reviewed-on: +Tested-by: gkblditp +Reviewed-by: Rojewski, Cezary +--- + sound/soc/intel/skylake/skl-sst-dsp.h | 5 ++++- + sound/soc/intel/skylake/skl-sst-utils.c | 24 ++++++++++++++++++++++++ + sound/soc/intel/skylake/skl-topology.c | 12 ------------ + 3 files changed, 28 insertions(+), 13 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index 1295ca1cc5c3..f8878d0bb765 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -245,7 +245,10 @@ void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + + int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, +- unsigned int offset, int index); ++ unsigned int offset, int index); ++struct skl_module_entry *skl_find_module(struct skl_dev *skl, ++ const guid_t *uuid); ++int skl_get_module_id(struct skl_dev *skl, const guid_t *uuid); + int skl_get_pvt_id(struct skl_dev *skl, guid_t *uuid_mod, int instance_id); + int skl_put_pvt_id(struct skl_dev *skl, guid_t *uuid_mod, int *pvt_id); + int skl_get_pvt_instance_id_map(struct skl_dev *skl, +diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index 24004b754e0e..fa1c73077551 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -342,6 +342,30 @@ int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, + } + EXPORT_SYMBOL(snd_skl_parse_manifest); + ++struct skl_module_entry *skl_find_module(struct skl_dev *skl, ++ const guid_t *uuid) ++{ ++ struct skl_module_entry *module_entries; ++ int i; ++ ++ module_entries = skl->fw_modules_info->module_entry; ++ ++ for (i = 0; i < skl->fw_modules_info->count; i++) ++ if 
(guid_equal(&module_entries[i].uuid, uuid)) ++ return &module_entries[i]; ++ ++ return NULL; ++} ++EXPORT_SYMBOL(skl_find_module); ++ ++int skl_get_module_id(struct skl_dev *skl, const guid_t *uuid) ++{ ++ struct skl_module_entry *module = skl_find_module(skl, uuid); ++ ++ return module ? module->module_id : -ENOENT; ++} ++EXPORT_SYMBOL(skl_get_module_id); ++ + /* + * some firmware binary contains some extended manifest. This needs + * to be stripped in that case before we load and use that image. +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index b1f7cd5c5291..3af4bcaacf92 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ b/sound/soc/intel/skylake/skl-topology.c +@@ -797,18 +797,6 @@ static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w, + return 0; + } + +-static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid) +-{ +- struct uuid_module *module; +- +- list_for_each_entry(module, &skl->module_list, list) { +- if (guid_equal(uuid, &module->uuid)) +- return module->id; +- } +- +- return -EINVAL; +-} +- + static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl, + const struct snd_kcontrol_new *k) + { +-- +2.17.1 + diff --git a/patches/0005-EDAC-igen6-Add-Intel-Elkhart-Lake-SoC-support.edac b/patches/0005-EDAC-igen6-Add-Intel-Elkhart-Lake-SoC-support.edac new file mode 100644 index 0000000000..7072fa5b4b --- /dev/null +++ b/patches/0005-EDAC-igen6-Add-Intel-Elkhart-Lake-SoC-support.edac @@ -0,0 +1,79 @@ +From 80dcacb7721a7102f504aea78f07d7c4af1a6131 Mon Sep 17 00:00:00 2001 +From: Qiuxu Zhuo +Date: Tue, 25 Jun 2019 15:56:24 +0800 +Subject: [PATCH 5/5] EDAC, igen6: Add Intel Elkhart Lake SoC support + +The Elkhart Lake SoC shares the same memory controller and In-Band +ECC IP with Ice Lake-NNPI SoC. Add the Elkhart Lake SoC compute die IDs +for EDAC support. 
+ +Signed-off-by: Qiuxu Zhuo +--- + drivers/edac/igen6_edac.c | 34 ++++++++++++++++++++++++++++++++++ + 1 file changed, 34 insertions(+) + +diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c +index ae8c1afe672c..c87af998c733 100644 +--- a/drivers/edac/igen6_edac.c ++++ b/drivers/edac/igen6_edac.c +@@ -173,6 +173,19 @@ static struct work_struct ecclog_work; + #define DID_ICL_SKU11 0x4589 + #define DID_ICL_SKU12 0x458d + ++/* Compute die IDs for Elkhart Lake with IBECC */ ++#define DID_EHL_SKU5 0x4514 ++#define DID_EHL_SKU6 0x4528 ++#define DID_EHL_SKU7 0x452a ++#define DID_EHL_SKU8 0x4516 ++#define DID_EHL_SKU9 0x452c ++#define DID_EHL_SKU10 0x452e ++#define DID_EHL_SKU11 0x4532 ++#define DID_EHL_SKU12 0x4518 ++#define DID_EHL_SKU13 0x451a ++#define DID_EHL_SKU14 0x4534 ++#define DID_EHL_SKU15 0x4536 ++ + static bool icl_ibecc_available(u32 capid) + { + /* Capid IBECC bit for ICL: 0 - available, 1 - unavailable */ +@@ -180,16 +193,37 @@ static bool icl_ibecc_available(u32 capid) + (boot_cpu_data.x86_stepping >= 1); + } + ++static bool ehl_ibecc_available(u32 capid) ++{ ++ return !!(IGEN6_CAPID_C_IBECC & capid); ++} ++ + static struct ibecc_config icl_cfg = { + .ibecc_offset = 0xd800, + .ibecc_available = icl_ibecc_available, + }; + ++static struct ibecc_config ehl_cfg = { ++ .ibecc_offset = 0xdc00, ++ .ibecc_available = ehl_ibecc_available, ++}; ++ + static const struct pci_device_id igen6_pci_tbl[] = { + { PCI_VDEVICE(INTEL, DID_ICL_SKU8), (kernel_ulong_t)&icl_cfg }, + { PCI_VDEVICE(INTEL, DID_ICL_SKU10), (kernel_ulong_t)&icl_cfg }, + { PCI_VDEVICE(INTEL, DID_ICL_SKU11), (kernel_ulong_t)&icl_cfg }, + { PCI_VDEVICE(INTEL, DID_ICL_SKU12), (kernel_ulong_t)&icl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU7), (kernel_ulong_t)&ehl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU8), (kernel_ulong_t)&ehl_cfg }, ++ { 
PCI_VDEVICE(INTEL, DID_EHL_SKU9), (kernel_ulong_t)&ehl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU10), (kernel_ulong_t)&ehl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU11), (kernel_ulong_t)&ehl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU12), (kernel_ulong_t)&ehl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU13), (kernel_ulong_t)&ehl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU14), (kernel_ulong_t)&ehl_cfg }, ++ { PCI_VDEVICE(INTEL, DID_EHL_SKU15), (kernel_ulong_t)&ehl_cfg }, + { }, + }; + MODULE_DEVICE_TABLE(pci, igen6_pci_tbl); +-- +2.17.1 + diff --git a/patches/0005-PTP-add-a-callback-for-counting-timestamp-even.felipeb-5.4 b/patches/0005-PTP-add-a-callback-for-counting-timestamp-even.felipeb-5.4 new file mode 100644 index 0000000000..771fae7c62 --- /dev/null +++ b/patches/0005-PTP-add-a-callback-for-counting-timestamp-even.felipeb-5.4 @@ -0,0 +1,52 @@ +From 676edb1ad4bb80410d5b6d22c0c9a7b2a119651c Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Thu, 7 Mar 2019 10:39:50 +0200 +Subject: [PATCH 05/14] PTP: add a callback for counting timestamp events + +This will be used for frequency discipline adjustments. 
+ +Signed-off-by: Felipe Balbi +--- + include/linux/ptp_clock_kernel.h | 2 ++ + include/uapi/linux/ptp_clock.h | 12 ++++++++++++ + 2 files changed, 14 insertions(+) + +diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h +index 93cc4f1d444a..8223f6f656dd 100644 +--- a/include/linux/ptp_clock_kernel.h ++++ b/include/linux/ptp_clock_kernel.h +@@ -134,6 +134,8 @@ struct ptp_clock_info { + struct ptp_system_timestamp *sts); + int (*getcrosststamp)(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts); ++ int (*counttstamp)(struct ptp_clock_info *ptp, ++ struct ptp_event_count_tstamp *count); + int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); + int (*enable)(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on); +diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h +index 59e89a1bc3bb..35318884fcc3 100644 +--- a/include/uapi/linux/ptp_clock.h ++++ b/include/uapi/linux/ptp_clock.h +@@ -103,6 +103,18 @@ struct ptp_perout_request { + unsigned int rsv[4]; /* Reserved for future use. */ + }; + ++struct ptp_event_count_tstamp { ++ unsigned int index; ++ ++#define PTP_EVENT_COUNT_TSTAMP_POL_HIGH 0 ++#define PTP_EVENT_COUNT_TSTAMP_POL_LOW BIT(0) ++ unsigned int flags; ++ ++ struct ptp_clock_time device_time; ++ unsigned long long event_count; ++ unsigned int rsv[2]; /* Reserved for future use. */ ++}; ++ + #define PTP_MAX_SAMPLES 25 /* Maximum allowed offset measurement samples. 
*/ + + struct ptp_sys_offset { +-- +2.17.1 + diff --git a/patches/0005-TO-BE-FOLDED-pinctrl-tigerlake-Update-pin-list-accord.lpss b/patches/0005-TO-BE-FOLDED-pinctrl-tigerlake-Update-pin-list-accord.lpss new file mode 100644 index 0000000000..5e763c8445 --- /dev/null +++ b/patches/0005-TO-BE-FOLDED-pinctrl-tigerlake-Update-pin-list-accord.lpss @@ -0,0 +1,29 @@ +From 2a179fd7b70ad135927f1282091d707d4cdf8384 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Tue, 9 Jul 2019 19:47:13 +0300 +Subject: [PATCH 05/40] TO BE FOLDED: pinctrl: tigerlake: Update pin list + according to LP v1.0 Z0 + +Signed-off-by: Andy Shevchenko +--- + drivers/pinctrl/intel/pinctrl-tigerlake.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c +index db92db288636..3e208070319a 100644 +--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c ++++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c +@@ -266,8 +266,8 @@ static const struct pinctrl_pin_desc tgllp_pins[] = { + PINCTRL_PIN(209, "GSXDIN"), + PINCTRL_PIN(210, "GSXSRESETB"), + PINCTRL_PIN(211, "GSXCLK"), +- PINCTRL_PIN(212, "GPPC_F_17"), +- PINCTRL_PIN(213, "GPPC_F_18"), ++ PINCTRL_PIN(212, "GMII_MDC"), ++ PINCTRL_PIN(213, "GMII_MDIO"), + PINCTRL_PIN(214, "SRCCLKREQB_6"), + PINCTRL_PIN(215, "EXT_PWR_GATEB"), + PINCTRL_PIN(216, "EXT_PWR_GATE2B"), +-- +2.17.1 + diff --git a/patches/0005-VHM-add-ioreq-service-support.acrn b/patches/0005-VHM-add-ioreq-service-support.acrn new file mode 100644 index 0000000000..80cd5089ba --- /dev/null +++ b/patches/0005-VHM-add-ioreq-service-support.acrn @@ -0,0 +1,1471 @@ +From 8f1d66ca89370f505408a7889a8da0859c02b06b Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:55 +0800 +Subject: [PATCH 005/150] VHM: add ioreq service support + +Once there is an IO request, a virtual irq will be injected into service +OS by ACRN hypervisor. 
+The VHM handles this virtual irq (which is based on an ipi vector), parses +corresponding IO request from shared IOReq buffer then distributes it to +different ioreq client. + +This patch added ioreq service, and defines IOReq APIs like below: + int acrn_ioreq_create_client(unsigned long vmid, ioreq_handler_t handler, + char *name); + void acrn_ioreq_destroy_client(int client_id); + int acrn_ioreq_add_iorange(int client_id, enum request_type type, + long start, long end); + int acrn_ioreq_del_iorange(int client_id, enum request_type type, + long start, long end); + struct vhm_request * acrn_ioreq_get_reqbuf(int client_id); + int acrn_ioreq_attach_client(int client_id); + int acrn_ioreq_distribute_request(struct vhm_vm *vm); + int acrn_ioreq_complete_request(int client_id); + +Change-Id: I828744cb60e1c77543e1fafaa372597173039846 +Tracked-On: 218445 +Signed-off-by: Jason Chen CJ +Signed-off-by: liang ding +Signed-off-by: Xiao Zheng +Signed-off-by: Mingqiang Chi +Reviewed-on: +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/char/vhm/vhm_dev.c | 88 +++ + drivers/vhm/Makefile | 2 +- + drivers/vhm/vhm_hypercall.c | 10 + + drivers/vhm/vhm_ioreq.c | 922 +++++++++++++++++++++++++++++ + drivers/vhm/vhm_vm_mngt.c | 2 + + include/linux/vhm/acrn_common.h | 117 ++++ + include/linux/vhm/acrn_hv_defs.h | 5 + + include/linux/vhm/acrn_vhm_ioreq.h | 86 +++ + include/linux/vhm/vhm_hypercall.h | 3 + + include/linux/vhm/vhm_ioctl_defs.h | 8 + + include/linux/vhm/vhm_vm_mngt.h | 5 + + 11 files changed, 1247 insertions(+), 1 deletion(-) + create mode 100644 drivers/vhm/vhm_ioreq.c + create mode 100644 include/linux/vhm/acrn_vhm_ioreq.h + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 3ea8de27cb3e..454211466e5d 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -78,6 +78,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -88,6 +89,8 @@ + static int major; + static struct class *vhm_class; + 
static struct device *vhm_device; ++static struct tasklet_struct vhm_io_req_tasklet; ++static atomic_t ioreq_retry = ATOMIC_INIT(0); + + static int vhm_dev_open(struct inode *inodep, struct file *filep) + { +@@ -104,6 +107,9 @@ static int vhm_dev_open(struct inode *inodep, struct file *filep) + INIT_LIST_HEAD(&vm->memseg_list); + mutex_init(&vm->seg_lock); + ++ INIT_LIST_HEAD(&vm->ioreq_client_list); ++ spin_lock_init(&vm->ioreq_client_lock); ++ + vm_mutex_lock(&vhm_vm_list_lock); + vm->refcnt = 1; + vm_list_add(&vm->list); +@@ -188,6 +194,50 @@ static long vhm_dev_ioctl(struct file *filep, + break; + } + ++ case IC_SET_IOREQ_BUFFER: { ++ /* init ioreq buffer */ ++ ret = acrn_ioreq_init(vm, (unsigned long)ioctl_param); ++ if (ret < 0) ++ return ret; ++ break; ++ } ++ ++ case IC_CREATE_IOREQ_CLIENT: { ++ int client_id; ++ ++ client_id = acrn_ioreq_create_fallback_client(vm->vmid, "acrndm"); ++ if (client_id < 0) ++ return -EFAULT; ++ return client_id; ++ } ++ ++ case IC_DESTROY_IOREQ_CLIENT: { ++ int client = ioctl_param; ++ ++ acrn_ioreq_destroy_client(client); ++ break; ++ } ++ ++ case IC_ATTACH_IOREQ_CLIENT: { ++ int client = ioctl_param; ++ ++ return acrn_ioreq_attach_client(client, 0); ++ } ++ ++ case IC_NOTIFY_REQUEST_FINISH: { ++ struct acrn_ioreq_notify notify; ++ ++ if (copy_from_user(¬ify, (void *)ioctl_param, ++ sizeof(notify))) ++ return -EFAULT; ++ ++ ret = acrn_ioreq_complete_request(notify.client_id, ++ notify.vcpu_mask); ++ if (ret < 0) ++ return -EFAULT; ++ break; ++ } ++ + default: + pr_warn("Unknown IOCTL 0x%x\n", ioctl_num); + ret = 0; +@@ -197,6 +247,31 @@ static long vhm_dev_ioctl(struct file *filep, + return ret; + } + ++static void io_req_tasklet(unsigned long data) ++{ ++ struct vhm_vm *vm; ++ ++ list_for_each_entry(vm, &vhm_vm_list, list) { ++ if (!vm || !vm->req_buf) ++ continue; ++ ++ acrn_ioreq_distribute_request(vm); ++ } ++ ++ if (atomic_read(&ioreq_retry) > 0) { ++ atomic_dec(&ioreq_retry); ++ tasklet_schedule(&vhm_io_req_tasklet); ++ 
} ++} ++ ++static void vhm_intr_handler(void) ++{ ++ if (test_bit(TASKLET_STATE_SCHED, &(vhm_io_req_tasklet.state))) ++ atomic_inc(&ioreq_retry); ++ else ++ tasklet_schedule(&vhm_io_req_tasklet); ++} ++ + static int vhm_dev_release(struct inode *inodep, struct file *filep) + { + struct vhm_vm *vm = filep->private_data; +@@ -217,10 +292,13 @@ static const struct file_operations fops = { + .mmap = vhm_dev_mmap, + .release = vhm_dev_release, + .unlocked_ioctl = vhm_dev_ioctl, ++ .poll = vhm_dev_poll, + }; + + static int __init vhm_init(void) + { ++ unsigned long flag; ++ + pr_info("vhm: initializing\n"); + + /* Try to dynamically allocate a major number for the device */ +@@ -249,12 +327,22 @@ static int __init vhm_init(void) + pr_warn("vhm: failed to create the device\n"); + return PTR_ERR(vhm_device); + } ++ pr_info("register IPI handler\n"); ++ tasklet_init(&vhm_io_req_tasklet, io_req_tasklet, 0); ++ if (x86_platform_ipi_callback) { ++ pr_warn("vhm: ipi callback was occupied\n"); ++ return -EINVAL; ++ } ++ local_irq_save(flag); ++ x86_platform_ipi_callback = vhm_intr_handler; ++ local_irq_restore(flag); + + pr_info("vhm: Virtio & Hypervisor service module initialized\n"); + return 0; + } + static void __exit vhm_exit(void) + { ++ tasklet_kill(&vhm_io_req_tasklet); + device_destroy(vhm_class, MKDEV(major, 0)); + class_unregister(vhm_class); + class_destroy(vhm_class); +diff --git a/drivers/vhm/Makefile b/drivers/vhm/Makefile +index 7e5ec421fbc7..4bd960d564b3 100644 +--- a/drivers/vhm/Makefile ++++ b/drivers/vhm/Makefile +@@ -1 +1 @@ +-obj-y += vhm_mm.o vhm_vm_mngt.o vhm_hypercall.o ++obj-y += vhm_mm.o vhm_ioreq.o vhm_vm_mngt.o vhm_hypercall.o +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index d80087bcb5fb..1b25f4ec4d06 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -53,6 +53,16 @@ + #include + #include + ++inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) ++{ ++ return 
acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); ++} ++ ++inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu_mask) ++{ ++ return acrn_hypercall2(HC_NOTIFY_REQUEST_FINISH, vmid, vcpu_mask); ++} ++ + inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) + { + return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); +diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c +new file mode 100644 +index 000000000000..6054e3d00eb2 +--- /dev/null ++++ b/drivers/vhm/vhm_ioreq.c +@@ -0,0 +1,922 @@ ++/* ++ * virtio and hyperviosr service module (VHM): ioreq multi client feature ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. 
++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * Jason Chen CJ ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct ioreq_range { ++ struct list_head list; ++ enum request_type type; ++ long start; ++ long end; ++}; ++ ++struct ioreq_client { ++ /* client name */ ++ char name[16]; ++ /* client id */ ++ int id; ++ /* vm this client belongs to */ ++ unsigned long vmid; ++ /* list node for this ioreq_client */ ++ struct list_head list; ++ /* ++ * is this client fallback? 
++ * there is only one fallback client in a vm - dm ++ * a fallback client shares IOReq buffer pages ++ * a fallback client handles all left IOReq not handled by other clients ++ * a fallback client does not need add io ranges ++ * a fallback client handles ioreq in its own context ++ */ ++ bool fallback; ++ ++ bool destroying; ++ bool kthread_exit; ++ ++ /* client covered io ranges - N/A for fallback client */ ++ struct list_head range_list; ++ spinlock_t range_lock; ++ ++ /* ++ * this req records the req number this client need handle ++ */ ++ atomic_t req; ++ ++ /* ++ * client ioreq handler: ++ * if client provides a handler, it means vhm need create a kthread ++ * to call the handler while there is ioreq. ++ * if client doesn't provide a handler, client should handle ioreq ++ * in its own context when calls acrn_ioreq_attach_client. ++ * ++ * NOTE: for fallback client, there is no ioreq handler. ++ */ ++ ioreq_handler_t handler; ++ bool vhm_create_kthread; ++ struct task_struct *thread; ++ wait_queue_head_t wq; ++ ++ /* pci bdf trap */ ++ bool trap_bdf; ++ int pci_bus; ++ int pci_dev; ++ int pci_func; ++}; ++ ++#define MAX_CLIENT 64 ++static struct ioreq_client *clients[MAX_CLIENT]; ++static DECLARE_BITMAP(client_bitmap, MAX_CLIENT); ++ ++static void acrn_ioreq_notify_client(struct ioreq_client *client); ++ ++static inline bool is_range_type(enum request_type type) ++{ ++ return (type == REQ_MMIO || type == REQ_PORTIO || type == REQ_WP); ++} ++ ++static int alloc_client(void) ++{ ++ struct ioreq_client *client; ++ int i; ++ ++ i = find_first_zero_bit(client_bitmap, MAX_CLIENT); ++ if (i >= MAX_CLIENT) ++ return -ENOMEM; ++ set_bit(i, client_bitmap); ++ ++ client = kzalloc(sizeof(struct ioreq_client), GFP_KERNEL); ++ if (!client) ++ return -ENOMEM; ++ client->id = i; ++ clients[i] = client; ++ ++ return i; ++} ++ ++static void free_client(int i) ++{ ++ if (i < MAX_CLIENT && i >= 0) { ++ if (test_and_clear_bit(i, client_bitmap)) { ++ kfree(clients[i]); ++ 
clients[i] = NULL; ++ } ++ } ++} ++ ++int acrn_ioreq_create_client(unsigned long vmid, ioreq_handler_t handler, ++ char *name) ++{ ++ struct vhm_vm *vm; ++ struct ioreq_client *client; ++ unsigned long flags; ++ int client_id; ++ ++ might_sleep(); ++ ++ vm = find_get_vm(vmid); ++ if (unlikely(vm == NULL)) { ++ pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", ++ vmid); ++ return -EINVAL; ++ } ++ if (unlikely(vm->req_buf == NULL)) { ++ pr_err("vhm-ioreq: vm[%ld]'s reqbuf is not ready\n", ++ vmid); ++ put_vm(vm); ++ return -EINVAL; ++ } ++ ++ client_id = alloc_client(); ++ if (unlikely(client_id < 0)) { ++ pr_err("vhm-ioreq: vm[%ld] failed to alloc ioreq " ++ "client id\n", vmid); ++ put_vm(vm); ++ return -EINVAL; ++ } ++ ++ client = clients[client_id]; ++ ++ if (handler) { ++ client->handler = handler; ++ client->vhm_create_kthread = true; ++ } ++ ++ client->vmid = vmid; ++ if (name) ++ strncpy(client->name, name, 16); ++ spin_lock_init(&client->range_lock); ++ INIT_LIST_HEAD(&client->range_list); ++ init_waitqueue_head(&client->wq); ++ ++ spin_lock_irqsave(&vm->ioreq_client_lock, flags); ++ list_add(&client->list, &vm->ioreq_client_list); ++ spin_unlock_irqrestore(&vm->ioreq_client_lock, flags); ++ ++ put_vm(vm); ++ ++ pr_info("vhm-ioreq: created ioreq client %d\n", client_id); ++ ++ return client_id; ++} ++ ++int acrn_ioreq_create_fallback_client(unsigned long vmid, char *name) ++{ ++ struct vhm_vm *vm; ++ int client_id; ++ ++ vm = find_get_vm(vmid); ++ if (unlikely(vm == NULL)) { ++ pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", ++ vmid); ++ return -EINVAL; ++ } ++ ++ if (unlikely(vm->ioreq_fallback_client > 0)) { ++ pr_err("vhm-ioreq: there is already fallback " ++ "client exist for vm %ld\n", ++ vmid); ++ put_vm(vm); ++ return -EINVAL; ++ } ++ ++ client_id = acrn_ioreq_create_client(vmid, NULL, name); ++ if (unlikely(client_id < 0)) { ++ put_vm(vm); ++ return -EINVAL; ++ } ++ ++ clients[client_id]->fallback = true; ++ vm->ioreq_fallback_client = 
client_id; ++ ++ put_vm(vm); ++ ++ return client_id; ++} ++ ++static void acrn_ioreq_destroy_client_pervm(struct ioreq_client *client, ++ struct vhm_vm *vm) ++{ ++ struct list_head *pos, *tmp; ++ unsigned long flags; ++ ++ /* blocking operation: notify client for cleanup ++ * if waitqueue not active, it means client is handling request, ++ * at that time, we need wait client finish its handling. ++ */ ++ while (!waitqueue_active(&client->wq) && !client->kthread_exit) ++ msleep(10); ++ client->destroying = true; ++ acrn_ioreq_notify_client(client); ++ ++ spin_lock_irqsave(&client->range_lock, flags); ++ list_for_each_safe(pos, tmp, &client->range_list) { ++ struct ioreq_range *range = ++ container_of(pos, struct ioreq_range, list); ++ list_del(&range->list); ++ kfree(range); ++ } ++ spin_unlock_irqrestore(&client->range_lock, flags); ++ ++ spin_lock_irqsave(&vm->ioreq_client_lock, flags); ++ list_del(&client->list); ++ spin_unlock_irqrestore(&vm->ioreq_client_lock, flags); ++ free_client(client->id); ++ ++ if (client->id == vm->ioreq_fallback_client) ++ vm->ioreq_fallback_client = -1; ++} ++ ++void acrn_ioreq_destroy_client(int client_id) ++{ ++ struct vhm_vm *vm; ++ struct ioreq_client *client; ++ ++ if (client_id < 0 || client_id >= MAX_CLIENT) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return; ++ } ++ client = clients[client_id]; ++ if (!client) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return; ++ } ++ ++ might_sleep(); ++ ++ vm = find_get_vm(client->vmid); ++ if (unlikely(vm == NULL)) { ++ pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", ++ client->vmid); ++ return; ++ } ++ ++ acrn_ioreq_destroy_client_pervm(client, vm); ++ ++ put_vm(vm); ++} ++ ++static void __attribute__((unused)) dump_iorange(struct ioreq_client *client) ++{ ++ struct list_head *pos; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&client->range_lock, flags); ++ list_for_each(pos, &client->range_list) { ++ struct ioreq_range *range = ++ 
container_of(pos, struct ioreq_range, list); ++ pr_debug("\tio range: type %d, start 0x%lx, " ++ "end 0x%lx\n", range->type, range->start, range->end); ++ } ++ spin_unlock_irqrestore(&client->range_lock, flags); ++} ++ ++/* ++ * NOTE: here just add iorange entry directly, no check for the overlap.. ++ * please client take care of it ++ */ ++int acrn_ioreq_add_iorange(int client_id, enum request_type type, ++ long start, long end) ++{ ++ struct ioreq_client *client; ++ struct ioreq_range *range; ++ unsigned long flags; ++ ++ if (client_id < 0 || client_id >= MAX_CLIENT) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return -EFAULT; ++ } ++ client = clients[client_id]; ++ if (!client) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return -EFAULT; ++ } ++ ++ if (end < start) { ++ pr_err("vhm-ioreq: end < start\n"); ++ return -EFAULT; ++ } ++ ++ might_sleep(); ++ ++ range = kzalloc(sizeof(struct ioreq_range), GFP_KERNEL); ++ if (!range) { ++ pr_err("vhm-ioreq: failed to alloc ioreq range\n"); ++ return -ENOMEM; ++ } ++ range->type = type; ++ range->start = start; ++ range->end = end; ++ ++ spin_lock_irqsave(&client->range_lock, flags); ++ list_add(&range->list, &client->range_list); ++ spin_unlock_irqrestore(&client->range_lock, flags); ++ ++ return 0; ++} ++ ++int acrn_ioreq_del_iorange(int client_id, enum request_type type, ++ long start, long end) ++{ ++ struct ioreq_client *client; ++ struct ioreq_range *range; ++ struct list_head *pos, *tmp; ++ unsigned long flags; ++ ++ if (client_id < 0 || client_id >= MAX_CLIENT) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return -EFAULT; ++ } ++ client = clients[client_id]; ++ if (!client) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return -EFAULT; ++ } ++ ++ if (end < start) { ++ pr_err("vhm-ioreq: end < start\n"); ++ return -EFAULT; ++ } ++ ++ might_sleep(); ++ ++ spin_lock_irqsave(&client->range_lock, flags); ++ list_for_each_safe(pos, tmp, 
&client->range_list) { ++ range = container_of(pos, struct ioreq_range, list); ++ if (range->type == type) { ++ if (is_range_type(type)) { ++ if (start == range->start && ++ end == range->end) { ++ list_del(&range->list); ++ kfree(range); ++ break; ++ } ++ } else { ++ list_del(&range->list); ++ kfree(range); ++ break; ++ } ++ } ++ } ++ spin_unlock_irqrestore(&client->range_lock, flags); ++ ++ return 0; ++} ++ ++static inline bool is_destroying(struct ioreq_client *client) ++{ ++ if (client) ++ return client->destroying; ++ else ++ return true; ++} ++ ++static inline bool has_pending_request(struct ioreq_client *client) ++{ ++ if (client) ++ return (atomic_read(&client->req) > 0); ++ else ++ return false; ++} ++ ++struct vhm_request *acrn_ioreq_get_reqbuf(int client_id) ++{ ++ struct ioreq_client *client; ++ struct vhm_vm *vm; ++ ++ if (client_id < 0 || client_id >= MAX_CLIENT) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return NULL; ++ } ++ client = clients[client_id]; ++ if (!client) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return NULL; ++ } ++ vm = find_get_vm(client->vmid); ++ if (unlikely(vm == NULL)) { ++ pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", ++ client->vmid); ++ return NULL; ++ } ++ ++ if (vm->req_buf == NULL) { ++ pr_warn("vhm-ioreq: the req buf page not ready yet " ++ "for vmid %ld\n", client->vmid); ++ } ++ put_vm(vm); ++ return (struct vhm_request *)vm->req_buf; ++} ++ ++static int ioreq_client_thread(void *data) ++{ ++ struct ioreq_client *client; ++ int ret, client_id = (unsigned long)data; ++ ++ while (1) { ++ client = clients[client_id]; ++ if (is_destroying(client)) { ++ pr_info("vhm-ioreq: client destroying->stop thread\n"); ++ break; ++ } ++ if (has_pending_request(client)) { ++ if (client->handler) { ++ ret = client->handler(client->id, ++ client->req.counter); ++ if (ret < 0) ++ BUG(); ++ } else { ++ pr_err("vhm-ioreq: no ioreq handler\n"); ++ break; ++ } ++ } else ++ 
wait_event_freezable(client->wq, ++ (has_pending_request(client) || ++ is_destroying(client))); ++ } ++ ++ return 0; ++} ++ ++int acrn_ioreq_attach_client(int client_id, bool check_kthread_stop) ++{ ++ struct ioreq_client *client; ++ ++ if (client_id < 0 || client_id >= MAX_CLIENT) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return -EFAULT; ++ } ++ client = clients[client_id]; ++ if (!client) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return -EFAULT; ++ } ++ ++ if (client->vhm_create_kthread) { ++ if (client->thread) { ++ pr_warn("vhm-ioreq: kthread already exist" ++ " for client %s\n", client->name); ++ return 0; ++ } ++ client->thread = kthread_run(ioreq_client_thread, ++ (void *)(unsigned long)client_id, ++ "ioreq_client[%ld]:%s", ++ client->vmid, client->name); ++ if (IS_ERR(client->thread)) { ++ pr_err("vhm-ioreq: failed to run kthread " ++ "for client %s\n", client->name); ++ return -ENOMEM; ++ } ++ } else { ++ might_sleep(); ++ ++ if (check_kthread_stop) { ++ wait_event_freezable(client->wq, ++ (kthread_should_stop() || ++ has_pending_request(client) || ++ is_destroying(client))); ++ if (kthread_should_stop()) ++ client->kthread_exit = true; ++ } else { ++ wait_event_freezable(client->wq, ++ (has_pending_request(client) || ++ is_destroying(client))); ++ } ++ ++ if (is_destroying(client)) ++ return 1; ++ } ++ ++ return 0; ++} ++ ++void acrn_ioreq_intercept_bdf(int client_id, int bus, int dev, int func) ++{ ++ struct ioreq_client *client; ++ ++ if (client_id < 0 || client_id >= MAX_CLIENT) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return; ++ } ++ client = clients[client_id]; ++ if (!client) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return; ++ } ++ client->trap_bdf = true; ++ client->pci_bus = bus; ++ client->pci_dev = dev; ++ client->pci_func = func; ++} ++ ++void acrn_ioreq_unintercept_bdf(int client_id) ++{ ++ struct ioreq_client *client; ++ ++ if (client_id < 0 || client_id >= 
MAX_CLIENT) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return; ++ } ++ client = clients[client_id]; ++ if (!client) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return; ++ } ++ client->trap_bdf = false; ++ client->pci_bus = -1; ++ client->pci_dev = -1; ++ client->pci_func = -1; ++} ++ ++static void acrn_ioreq_notify_client(struct ioreq_client *client) ++{ ++ /* if client thread is in waitqueue, wake up it */ ++ if (waitqueue_active(&client->wq)) ++ wake_up_interruptible(&client->wq); ++} ++ ++static bool req_in_range(struct ioreq_range *range, struct vhm_request *req) ++{ ++ bool ret = false; ++ ++ if (range->type == req->type) { ++ switch (req->type) { ++ case REQ_MMIO: ++ case REQ_WP: ++ { ++ if (req->reqs.mmio_request.address >= range->start && ++ (req->reqs.mmio_request.address + ++ req->reqs.mmio_request.size - 1) <= range->end) ++ ret = true; ++ break; ++ } ++ case REQ_PORTIO: { ++ if (req->reqs.pio_request.address >= range->start && ++ (req->reqs.pio_request.address + ++ req->reqs.pio_request.size - 1) <= range->end) ++ ret = true; ++ break; ++ } ++ case REQ_MSR: /*TODO: add bitmap for MSR range */ ++ case REQ_CPUID: ++ case REQ_EXIT: ++ { ++ ret = true; ++ break; ++ } ++ ++ default: ++ ret = false; ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++static bool is_cfg_addr(struct vhm_request *req) ++{ ++ return (req->type == REQ_PORTIO && ++ (req->reqs.pio_request.address >= 0xcf8 && ++ req->reqs.pio_request.address < 0xcf8+4)); ++} ++ ++static bool is_cfg_data(struct vhm_request *req) ++{ ++ return (req->type == REQ_PORTIO && ++ (req->reqs.pio_request.address >= 0xcfc && ++ req->reqs.pio_request.address < 0xcfc+4)); ++} ++ ++static int cached_bus; ++static int cached_dev; ++static int cached_func; ++static int cached_reg; ++static int cached_enable; ++#define PCI_REGMAX 255 /* highest supported config register addr.*/ ++#define PCI_FUNCMAX 7 /* highest supported function number */ ++#define PCI_SLOTMAX 31 /* highest 
supported slot number */ ++#define PCI_BUSMAX 255 /* highest supported bus number */ ++#define CONF1_ENABLE 0x80000000ul ++static int handle_cf8cfc(struct vhm_vm *vm, struct vhm_request *req, int vcpu) ++{ ++ int req_handled = 0; ++ ++ /*XXX: like DM, assume cfg address write is size 4 */ ++ if (is_cfg_addr(req)) { ++ if (req->reqs.pio_request.direction == REQUEST_WRITE) { ++ if (req->reqs.pio_request.size == 4) { ++ int value = req->reqs.pio_request.value; ++ ++ cached_bus = (value >> 16) & PCI_BUSMAX; ++ cached_dev = (value >> 11) & PCI_SLOTMAX; ++ cached_func = (value >> 8) & PCI_FUNCMAX; ++ cached_reg = value & PCI_REGMAX; ++ cached_enable = ++ (value & CONF1_ENABLE) == CONF1_ENABLE; ++ req_handled = 1; ++ } ++ } else { ++ if (req->reqs.pio_request.size == 4) { ++ req->reqs.pio_request.value = ++ (cached_bus << 16) | ++ (cached_dev << 11) | (cached_func << 8) ++ | cached_reg; ++ if (cached_enable) ++ req->reqs.pio_request.value |= ++ CONF1_ENABLE; ++ req_handled = 1; ++ } ++ } ++ } else if (is_cfg_data(req)) { ++ if (!cached_enable) { ++ if (req->reqs.pio_request.direction == REQUEST_READ) ++ req->reqs.pio_request.value = 0xffffffff; ++ req_handled = 1; ++ } else { ++ /* pci request is same as io request at top */ ++ int offset = req->reqs.pio_request.address - 0xcfc; ++ ++ req->type = REQ_PCICFG; ++ req->reqs.pci_request.bus = cached_bus; ++ req->reqs.pci_request.dev = cached_dev; ++ req->reqs.pci_request.func = cached_func; ++ req->reqs.pci_request.reg = cached_reg + offset; ++ } ++ } ++ ++ if (req_handled) { ++ req->processed = REQ_STATE_SUCCESS; ++ if (hcall_notify_req_finish(vm->vmid, 1 << vcpu) < 0) { ++ pr_err("vhm-ioreq: failed to " ++ "notify request finished !\n"); ++ return -EFAULT; ++ } ++ } ++ ++ return req_handled; ++} ++ ++static bool bdf_match(struct ioreq_client *client) ++{ ++ return (client->trap_bdf && ++ client->pci_bus == cached_bus && ++ client->pci_dev == cached_dev && ++ client->pci_func == cached_func); ++} ++ ++static struct 
ioreq_client *acrn_ioreq_find_client_by_request(struct vhm_vm *vm, ++ struct vhm_request *req) ++{ ++ struct list_head *pos, *range_pos; ++ struct ioreq_client *client; ++ struct ioreq_client *target_client = NULL, *fallback_client = NULL; ++ struct ioreq_range *range; ++ bool found = false; ++ ++ spin_lock(&vm->ioreq_client_lock); ++ list_for_each(pos, &vm->ioreq_client_list) { ++ client = container_of(pos, struct ioreq_client, list); ++ ++ if (client->fallback) { ++ fallback_client = client; ++ continue; ++ } ++ ++ if (req->type == REQ_PCICFG) { ++ if (bdf_match(client)) { /* bdf match client */ ++ target_client = client; ++ break; ++ } else /* other or fallback client */ ++ continue; ++ } ++ ++ spin_lock(&client->range_lock); ++ list_for_each(range_pos, &client->range_list) { ++ range = ++ container_of(range_pos, struct ioreq_range, list); ++ if (req_in_range(range, req)) { ++ found = true; ++ target_client = client; ++ break; ++ } ++ } ++ spin_unlock(&client->range_lock); ++ ++ if (found) ++ break; ++ } ++ spin_unlock(&vm->ioreq_client_lock); ++ ++ if (target_client) ++ return target_client; ++ ++ if (fallback_client) ++ return fallback_client; ++ ++ return NULL; ++} ++ ++int acrn_ioreq_distribute_request(struct vhm_vm *vm) ++{ ++ struct vhm_request *req; ++ struct list_head *pos; ++ struct ioreq_client *client; ++ int i; ++ ++ /* TODO: replace VHM_REQUEST_MAX with vcpu num get at runtime */ ++ for (i = 0; i < VHM_REQUEST_MAX; i++) { ++ req = vm->req_buf->req_queue + i; ++ if (req->valid && (req->processed == REQ_STATE_PENDING)) { ++ if (handle_cf8cfc(vm, req, i)) ++ continue; ++ client = acrn_ioreq_find_client_by_request(vm, req); ++ if (client == NULL) { ++ pr_err("vhm-ioreq: failed to " ++ "find ioreq client -> " ++ "BUG\n"); ++ BUG(); ++ } else { ++ req->processed = REQ_STATE_PROCESSING; ++ req->client = client->id; ++ atomic_inc(&client->req); ++ } ++ } ++ } ++ ++ spin_lock(&vm->ioreq_client_lock); ++ list_for_each(pos, &vm->ioreq_client_list) { ++ client 
= container_of(pos, struct ioreq_client, list); ++ if (has_pending_request(client)) ++ acrn_ioreq_notify_client(client); ++ } ++ spin_unlock(&vm->ioreq_client_lock); ++ ++ return 0; ++} ++ ++int acrn_ioreq_complete_request(int client_id, uint64_t vcpu_mask) ++{ ++ struct ioreq_client *client; ++ int ret; ++ ++ if (client_id < 0 || client_id >= MAX_CLIENT) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return -EINVAL; ++ } ++ client = clients[client_id]; ++ if (!client) { ++ pr_err("vhm-ioreq: no client for id %d\n", client_id); ++ return -EINVAL; ++ } ++ ++ atomic_sub(bitmap_weight((unsigned long *)&vcpu_mask, ++ VHM_REQUEST_MAX), &client->req); ++ ret = hcall_notify_req_finish(client->vmid, vcpu_mask); ++ if (ret < 0) { ++ pr_err("vhm-ioreq: failed to notify request finished !\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++unsigned int vhm_dev_poll(struct file *filep, poll_table *wait) ++{ ++ struct vhm_vm *vm = filep->private_data; ++ struct ioreq_client *fallback_client; ++ unsigned int ret = 0; ++ ++ if (vm == NULL || vm->req_buf == NULL || ++ vm->ioreq_fallback_client <= 0) { ++ pr_err("vhm: invalid VM !\n"); ++ ret = POLLERR; ++ return ret; ++ } ++ ++ fallback_client = clients[vm->ioreq_fallback_client]; ++ if (!fallback_client) { ++ pr_err("vhm-ioreq: no client for id %d\n", ++ vm->ioreq_fallback_client); ++ return -EINVAL; ++ } ++ ++ poll_wait(filep, &fallback_client->wq, wait); ++ if (has_pending_request(fallback_client) || ++ is_destroying(fallback_client)) ++ ret = POLLIN | POLLRDNORM; ++ ++ return ret; ++} ++ ++int acrn_ioreq_init(struct vhm_vm *vm, unsigned long vma) ++{ ++ struct acrn_set_ioreq_buffer set_buffer; ++ struct page *page; ++ int ret; ++ ++ if (vm->req_buf) ++ BUG(); ++ ++ ret = get_user_pages_fast(vma, 1, 1, &page); ++ if (unlikely(ret != 1) || (page == NULL)) { ++ pr_err("vhm-ioreq: failed to pin request buffer!\n"); ++ return -ENOMEM; ++ } ++ ++ vm->req_buf = page_address(page); ++ vm->pg = page; ++ ++ 
set_buffer.req_buf = (long) page_to_phys(page); ++ ++ ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(&set_buffer)); ++ if (ret < 0) { ++ pr_err("vhm-ioreq: failed to set request buffer !\n"); ++ return -EFAULT; ++ } ++ ++ /* reserve 0, let client_id start from 1 */ ++ set_bit(0, client_bitmap); ++ ++ pr_info("vhm-ioreq: init request buffer @ %p!\n", ++ vm->req_buf); ++ ++ return 0; ++} ++ ++void acrn_ioreq_free(struct vhm_vm *vm) ++{ ++ struct list_head *pos, *tmp; ++ ++ list_for_each_safe(pos, tmp, &vm->ioreq_client_list) { ++ struct ioreq_client *client = ++ container_of(pos, struct ioreq_client, list); ++ acrn_ioreq_destroy_client_pervm(client, vm); ++ } ++ ++ if (vm->req_buf && vm->pg) { ++ put_page(vm->pg); ++ vm->pg = NULL; ++ vm->req_buf = NULL; ++ } ++} +diff --git a/drivers/vhm/vhm_vm_mngt.c b/drivers/vhm/vhm_vm_mngt.c +index 3c4e6d2b2f23..564435f2bb40 100644 +--- a/drivers/vhm/vhm_vm_mngt.c ++++ b/drivers/vhm/vhm_vm_mngt.c +@@ -58,6 +58,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -87,6 +88,7 @@ void put_vm(struct vhm_vm *vm) + if (vm->refcnt == 0) { + list_del(&vm->list); + free_guest_mem(vm); ++ acrn_ioreq_free(vm); + kfree(vm); + pr_info("vhm: freed vm\n"); + } +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 965978aa5610..47c3542c9cc6 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -62,10 +62,127 @@ + * Commmon structures for ACRN/VHM/DM + */ + ++/* ++ * IO request ++ */ ++#define VHM_REQUEST_MAX 16 ++ ++enum request_state { ++ REQ_STATE_SUCCESS = 1, ++ REQ_STATE_PENDING = 0, ++ REQ_STATE_PROCESSING = 2, ++ REQ_STATE_FAILED = -1, ++} __attribute__((aligned(4))); ++ ++enum request_type { ++ REQ_MSR, ++ REQ_CPUID, ++ REQ_PORTIO, ++ REQ_MMIO, ++ REQ_PCICFG, ++ REQ_WP, ++ REQ_EXIT, ++ REQ_MAX, ++} __attribute__((aligned(4))); ++ ++enum request_direction { ++ REQUEST_READ, ++ REQUEST_WRITE, ++ DIRECTION_MAX, ++} __attribute__((aligned(4))); 
++ ++struct msr_request { ++ enum request_direction direction; ++ long index; ++ long value; ++} __attribute__((aligned(8))); ++ ++struct cpuid_request { ++ long eax_in; ++ long ecx_in; ++ long eax_out; ++ long ebx_out; ++ long ecx_out; ++ long edx_out; ++} __attribute__((aligned(8))); ++ ++struct mmio_request { ++ enum request_direction direction; ++ long address; ++ long size; ++ long value; ++} __attribute__((aligned(8))); ++ ++struct io_request { ++ enum request_direction direction; ++ long address; ++ long size; ++ int value; ++} __attribute__((aligned(8))); ++ ++struct pci_request { ++ enum request_direction direction; ++ long reserve; /*io_request address*/ ++ long size; ++ int value; ++ int bus; ++ int dev; ++ int func; ++ int reg; ++} __attribute__((aligned(8))); ++ ++/* vhm_request are 256Bytes aligned */ ++struct vhm_request { ++ /* offset: 0bytes - 63bytes */ ++ enum request_type type; ++ int reserved0[15]; ++ ++ /* offset: 64bytes-127bytes */ ++ union { ++ struct msr_request msr_request; ++ struct cpuid_request cpuid_request; ++ struct io_request pio_request; ++ struct pci_request pci_request; ++ struct mmio_request mmio_request; ++ long reserved1[8]; ++ } reqs; ++ ++ /* True: valid req which need VHM to process. ++ * ACRN write, VHM read only ++ **/ ++ int valid; ++ ++ /* the client which is distributed to handle this request */ ++ int client; ++ ++ /* 1: VHM had processed and success ++ * 0: VHM had not yet processed ++ * -1: VHM failed to process. 
Invalid request ++ * VHM write, ACRN read only ++ **/ ++ enum request_state processed; ++} __attribute__((aligned(256))); ++ ++struct vhm_request_buffer { ++ union { ++ struct vhm_request req_queue[VHM_REQUEST_MAX]; ++ char reserved[4096]; ++ }; ++} __attribute__((aligned(4096))); ++ + /* Common API params */ + struct acrn_create_vm { + unsigned long vmid; /* OUT: HV return vmid to VHM */ + unsigned long vcpu_num; /* IN: VM vcpu number */ + } __attribute__((aligned(8))); + ++struct acrn_set_ioreq_buffer { ++ long req_buf; /* IN: gpa of per VM request_buffer*/ ++} __attribute__((aligned(8))); ++ ++struct acrn_ioreq_notify { ++ int client_id; ++ unsigned long vcpu_mask; ++} __attribute__((aligned(8))); ++ + #endif /* ACRN_COMMON_H */ +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index ab6554d017cb..f57f2b62e972 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -74,6 +74,11 @@ + #define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04) + #define HC_QUERY_VMSTATE _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05) + ++/* DM ioreq management */ ++#define HC_ID_IOREQ_BASE 0x200UL ++#define HC_SET_IOREQ_BUFFER _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00) ++#define HC_NOTIFY_REQUEST_FINISH _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x01) ++ + /* Guest memory management */ + #define HC_ID_MEM_BASE 0x300UL + #define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00) +diff --git a/include/linux/vhm/acrn_vhm_ioreq.h b/include/linux/vhm/acrn_vhm_ioreq.h +new file mode 100644 +index 000000000000..0daf46dcf9f7 +--- /dev/null ++++ b/include/linux/vhm/acrn_vhm_ioreq.h +@@ -0,0 +1,86 @@ ++/* ++ * virtio and hyperviosr service module (VHM): ioreq multi client feature ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * Jason Chen CJ ++ * ++ */ ++ ++#ifndef __ACRN_VHM_IOREQ_H__ ++#define __ACRN_VHM_IOREQ_H__ ++ ++#include ++#include ++ ++typedef int (*ioreq_handler_t)(int client_id, int req); ++ ++int acrn_ioreq_create_client(unsigned long vmid, ioreq_handler_t handler, ++ char *name); ++void acrn_ioreq_destroy_client(int client_id); ++ ++int acrn_ioreq_add_iorange(int client_id, enum request_type type, ++ long start, long end); ++int acrn_ioreq_del_iorange(int client_id, enum request_type type, ++ long start, long end); ++ ++struct vhm_request *acrn_ioreq_get_reqbuf(int client_id); ++int acrn_ioreq_attach_client(int client_id, bool check_kthread_stop); ++ ++int acrn_ioreq_distribute_request(struct vhm_vm *vm); ++int acrn_ioreq_complete_request(int client_id, uint64_t vcpu_mask); ++ ++void acrn_ioreq_intercept_bdf(int client_id, int bus, int dev, int func); ++void acrn_ioreq_unintercept_bdf(int client_id); ++ ++/* IOReq APIs */ ++int acrn_ioreq_init(struct vhm_vm *vm, unsigned long vma); ++void acrn_ioreq_free(struct vhm_vm *vm); ++int acrn_ioreq_create_fallback_client(unsigned long vmid, char *name); ++unsigned int vhm_dev_poll(struct file *filep, poll_table *wait); ++ ++#endif +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index e098a1f959bf..86b5f579687a 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -139,6 +139,9 @@ static inline long 
acrn_hypercall4(unsigned long hyp_id, unsigned long param1, + return result; + } + ++inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer); ++inline long hcall_notify_req_finish(unsigned long vmid, ++ unsigned long vcpu_mask); + inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap); + inline long vhm_create_vm(struct vhm_vm *vm, unsigned long ioctl_param); + inline long vhm_resume_vm(struct vhm_vm *vm); +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 872092490259..01adcfade99c 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -64,6 +64,14 @@ + #define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) + #define IC_QUERY_VMSTATE _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) + ++/* DM ioreq management */ ++#define IC_ID_IOREQ_BASE 0x200UL ++#define IC_SET_IOREQ_BUFFER _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x00) ++#define IC_NOTIFY_REQUEST_FINISH _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x01) ++#define IC_CREATE_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x02) ++#define IC_ATTACH_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x03) ++#define IC_DESTROY_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x04) ++ + /* Guest memory management */ + #define IC_ID_MEM_BASE 0x300UL + #define IC_ALLOC_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x00) +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index 4f1a0db2c54d..eb410024157f 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -65,9 +65,14 @@ struct vhm_vm { + struct device *dev; + struct list_head list; + unsigned long vmid; ++ int ioreq_fallback_client; + long refcnt; + struct mutex seg_lock; + struct list_head memseg_list; ++ spinlock_t ioreq_client_lock; ++ struct list_head ioreq_client_list; ++ struct vhm_request_buffer *req_buf; ++ struct page *pg; + }; + + struct vhm_vm *find_get_vm(unsigned long vmid); +-- +2.17.1 + diff --git 
a/patches/0005-fbdev-drop-res_id-parameter-from-remove_conflicting_pc.drm b/patches/0005-fbdev-drop-res_id-parameter-from-remove_conflicting_pc.drm new file mode 100644 index 0000000000..fef9f30748 --- /dev/null +++ b/patches/0005-fbdev-drop-res_id-parameter-from-remove_conflicting_pc.drm @@ -0,0 +1,101 @@ +From a9b699f2e396dc4fc5ff0d1687e716c08def61ef Mon Sep 17 00:00:00 2001 +From: Gerd Hoffmann +Date: Thu, 22 Aug 2019 11:06:43 +0200 +Subject: [PATCH 005/690] fbdev: drop res_id parameter from + remove_conflicting_pci_framebuffers + +Since commit b0e999c95581 ("fbdev: list all pci memory bars as +conflicting apertures") the parameter was used for some sanity checks +only, to make sure we detect any issues with the new approach to just +list all memory bars as apertures. + +No issues turned up so far, so continue to cleanup: Drop the res_id +parameter, drop the sanity checks. Also downgrade the logging from +"info" level to "debug" level and update documentation. + +Signed-off-by: Gerd Hoffmann +Reviewed-by: Daniel Vetter +Link: http://patchwork.freedesktop.org/patch/msgid/20190822090645.25410-2-kraxel@redhat.com +--- + drivers/video/fbdev/core/fbmem.c | 17 +++++------------ + include/drm/drm_fb_helper.h | 2 +- + include/linux/fb.h | 2 +- + 3 files changed, 7 insertions(+), 14 deletions(-) + +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index e6a1c805064f..95c32952fa8a 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -1758,21 +1758,19 @@ EXPORT_SYMBOL(remove_conflicting_framebuffers); + /** + * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices + * @pdev: PCI device +- * @res_id: index of PCI BAR configuring framebuffer memory + * @name: requesting driver name + * + * This function removes framebuffer devices (eg. initialized by firmware) +- * using memory range configured for @pdev's BAR @res_id. 
++ * using memory range configured for any of @pdev's memory bars. + * + * The function assumes that PCI device with shadowed ROM drives a primary + * display and so kicks out vga16fb. + */ +-int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const char *name) ++int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name) + { + struct apertures_struct *ap; + bool primary = false; + int err, idx, bar; +- bool res_id_found = false; + + for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) { + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) +@@ -1789,16 +1787,11 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const + continue; + ap->ranges[idx].base = pci_resource_start(pdev, bar); + ap->ranges[idx].size = pci_resource_len(pdev, bar); +- pci_info(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar, +- (unsigned long)pci_resource_start(pdev, bar), +- (unsigned long)pci_resource_end(pdev, bar)); ++ pci_dbg(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar, ++ (unsigned long)pci_resource_start(pdev, bar), ++ (unsigned long)pci_resource_end(pdev, bar)); + idx++; +- if (res_id == bar) +- res_id_found = true; + } +- if (!res_id_found) +- pci_warn(pdev, "%s: passed res_id (%d) is not a memory bar\n", +- __func__, res_id); + + #ifdef CONFIG_X86 + primary = pdev->resource[PCI_ROM_RESOURCE].flags & +diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h +index c8a8ae2a678a..5a5f4b1d8241 100644 +--- a/include/drm/drm_fb_helper.h ++++ b/include/drm/drm_fb_helper.h +@@ -560,7 +560,7 @@ drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, + * otherwise the vga fbdev driver falls over. 
+ */ + #if IS_REACHABLE(CONFIG_FB) +- ret = remove_conflicting_pci_framebuffers(pdev, resource_id, name); ++ ret = remove_conflicting_pci_framebuffers(pdev, name); + #endif + if (ret == 0) + ret = vga_remove_vgacon(pdev); +diff --git a/include/linux/fb.h b/include/linux/fb.h +index 756706b666a1..41e0069eca0a 100644 +--- a/include/linux/fb.h ++++ b/include/linux/fb.h +@@ -607,7 +607,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, + extern int register_framebuffer(struct fb_info *fb_info); + extern void unregister_framebuffer(struct fb_info *fb_info); + extern void unlink_framebuffer(struct fb_info *fb_info); +-extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, ++extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, + const char *name); + extern int remove_conflicting_framebuffers(struct apertures_struct *a, + const char *name, bool primary); +-- +2.17.1 + diff --git a/patches/0005-net-stmmac-Fix-priority-steering-for-tx-rx-qu.connectivity b/patches/0005-net-stmmac-Fix-priority-steering-for-tx-rx-qu.connectivity new file mode 100644 index 0000000000..4b799ec457 --- /dev/null +++ b/patches/0005-net-stmmac-Fix-priority-steering-for-tx-rx-qu.connectivity @@ -0,0 +1,41 @@ +From 65798228e504ed8def72b29d4c8e35e88d103615 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Tue, 6 Aug 2019 17:29:27 +0800 +Subject: [PATCH 005/108] net: stmmac: Fix priority steering for tx/rx queue >3 + +Fix MACRO function define for TX and RX user priority queue steering for +register masking and shifting. 
+ +Fixes: a8f5102af2a7 net: stmmac: TX and RX queue priority configuration + +Signed-off-by: Voon Weifeng +--- + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +index 89a3420eba42..bd3e75b47613 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -91,12 +91,14 @@ + #define GMAC_RX_FLOW_CTRL_RFE BIT(0) + + /* RX Queues Priorities */ +-#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) +-#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8) ++#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + (((x) % 4) * 8), \ ++ 0 + (((x) % 4) * 8)) ++#define GMAC_RXQCTRL_PSRQX_SHIFT(x) (((x) % 4) * 8) + + /* TX Queues Priorities */ +-#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) +-#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8) ++#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + (((x) % 4) * 8), \ ++ 0 + (((x) % 4) * 8)) ++#define GMAC_TXQCTRL_PSTQX_SHIFT(x) (((x) % 4) * 8) + + /* MAC Flow Control TX */ + #define GMAC_TX_FLOW_CTRL_TFE BIT(1) +-- +2.17.1 + diff --git a/patches/0005-platform-x86-sep-socwatchhv-driver-and-code-f.sep-socwatch b/patches/0005-platform-x86-sep-socwatchhv-driver-and-code-f.sep-socwatch new file mode 100644 index 0000000000..87f3ccaea0 --- /dev/null +++ b/patches/0005-platform-x86-sep-socwatchhv-driver-and-code-f.sep-socwatch @@ -0,0 +1,3819 @@ +From 04978401e039cf5815aa857d23b41e22b02f6c20 Mon Sep 17 00:00:00 2001 +From: Jon Moeller +Date: Sat, 15 Dec 2018 09:40:13 -0600 +Subject: [PATCH 05/27] platform/x86: sep, socwatchhv driver and + code-formatting fixes + +Fixed some critical bugs in the sep driver for ACRN and remove the +SEP_PRIVATE_BUILD Kconfig flag. + +Fixed bug in socwatchhv ACRN driver with dropped samples for longer +collections by making the file read non-blocking. 
+ +Update swhv_structs.h vm_switch structs to match latest definition. + +Changed C++ comments to C-style and fixed several minor checkpatch items. + +Signed-off-by: Jon Moeller +--- + drivers/platform/x86/sepdk/Kconfig | 9 - + drivers/platform/x86/sepdk/inc/control.h | 4 + + drivers/platform/x86/sepdk/inc/pmi.h | 2 +- + drivers/platform/x86/sepdk/sep/Makefile | 17 +- + drivers/platform/x86/sepdk/sep/linuxos.c | 63 ------- + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 162 ++++++++++++++---- + drivers/platform/x86/sepdk/sep/pmi.c | 58 +++---- + .../platform/x86/socwatch/inc/sw_collector.h | 4 +- + .../platform/x86/socwatch/inc/sw_defines.h | 4 +- + .../platform/x86/socwatch/inc/sw_file_ops.h | 2 +- + .../x86/socwatch/inc/sw_hardware_io.h | 8 +- + .../platform/x86/socwatch/inc/sw_internal.h | 32 ++-- + drivers/platform/x86/socwatch/inc/sw_ioctl.h | 54 +++--- + .../x86/socwatch/inc/sw_kernel_defines.h | 16 +- + drivers/platform/x86/socwatch/inc/sw_list.h | 2 +- + .../platform/x86/socwatch/inc/sw_lock_defs.h | 2 +- + drivers/platform/x86/socwatch/inc/sw_mem.h | 2 +- + .../x86/socwatch/inc/sw_ops_provider.h | 2 +- + .../x86/socwatch/inc/sw_output_buffer.h | 4 +- + .../socwatch/inc/sw_overhead_measurements.h | 22 +-- + .../platform/x86/socwatch/inc/sw_structs.h | 142 +++++++++------ + drivers/platform/x86/socwatch/inc/sw_telem.h | 4 +- + .../socwatch/inc/sw_trace_notifier_provider.h | 2 +- + .../x86/socwatch/inc/sw_tracepoint_handlers.h | 41 +++-- + drivers/platform/x86/socwatch/inc/sw_types.h | 22 +-- + .../platform/x86/socwatch/inc/sw_version.h | 2 +- + drivers/platform/x86/socwatch/sw_collector.c | 5 +- + drivers/platform/x86/socwatch/sw_driver.c | 54 +++--- + drivers/platform/x86/socwatch/sw_file_ops.c | 32 ++-- + drivers/platform/x86/socwatch/sw_internal.c | 10 +- + drivers/platform/x86/socwatch/sw_mem.c | 6 +- + .../platform/x86/socwatch/sw_ops_provider.c | 88 +++++----- + .../platform/x86/socwatch/sw_output_buffer.c | 14 +- + 
drivers/platform/x86/socwatch/sw_reader.c | 4 +- + drivers/platform/x86/socwatch/sw_telem.c | 15 +- + .../x86/socwatch/sw_trace_notifier_provider.c | 33 ++-- + .../x86/socwatch/sw_tracepoint_handlers.c | 11 +- + drivers/platform/x86/socwatchhv/control.c | 11 +- + .../platform/x86/socwatchhv/inc/asm_helper.h | 2 +- + .../platform/x86/socwatchhv/inc/pw_types.h | 10 +- + .../platform/x86/socwatchhv/inc/pw_version.h | 2 +- + .../platform/x86/socwatchhv/inc/sw_defines.h | 8 +- + .../platform/x86/socwatchhv/inc/sw_ioctl.h | 54 +++--- + .../x86/socwatchhv/inc/sw_kernel_defines.h | 16 +- + .../platform/x86/socwatchhv/inc/sw_structs.h | 138 +++++++++------ + .../platform/x86/socwatchhv/inc/sw_types.h | 22 +-- + .../platform/x86/socwatchhv/inc/sw_version.h | 2 +- + .../platform/x86/socwatchhv/inc/swhv_acrn.h | 8 +- + .../x86/socwatchhv/inc/swhv_acrn_sbuf.h | 8 +- + .../x86/socwatchhv/inc/swhv_defines.h | 36 ++-- + .../platform/x86/socwatchhv/inc/swhv_driver.h | 18 +- + .../platform/x86/socwatchhv/inc/swhv_ioctl.h | 12 +- + .../x86/socwatchhv/inc/swhv_structs.h | 64 ++++--- + drivers/platform/x86/socwatchhv/swhv_acrn.c | 107 +++++------- + drivers/platform/x86/socwatchhv/swhv_driver.c | 15 +- + 55 files changed, 800 insertions(+), 687 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/Kconfig b/drivers/platform/x86/sepdk/Kconfig +index ed9c94ec3d07..884c5055d304 100755 +--- a/drivers/platform/x86/sepdk/Kconfig ++++ b/drivers/platform/x86/sepdk/Kconfig +@@ -52,12 +52,3 @@ config SEP_MAXLOG_MODE + This may increase overhead + + endchoice +- +-config SEP_PRIVATE_BUILD +- bool "Is this build an Internal and Private build" +- depends on INTEL_SEP +- default y +- help +- Select Yes if this is an Intel Internal Build +- +- +diff --git a/drivers/platform/x86/sepdk/inc/control.h b/drivers/platform/x86/sepdk/inc/control.h +index ecc93ceab221..73ecc2efeaa5 100644 +--- a/drivers/platform/x86/sepdk/inc/control.h ++++ b/drivers/platform/x86/sepdk/inc/control.h +@@ -300,7 +300,11 @@ 
extern U32 *occupied_core_ids; + * CPU number of the processor being executed on + * + */ ++#if !defined(DRV_SEP_ACRN_ON) + #define CONTROL_THIS_CPU() smp_processor_id() ++#else ++#define CONTROL_THIS_CPU() raw_smp_processor_id() ++#endif + + /* + * CONTROL_THIS_RAW_CPU() +diff --git a/drivers/platform/x86/sepdk/inc/pmi.h b/drivers/platform/x86/sepdk/inc/pmi.h +index 4fd71f74ceb9..88b02b1a04b4 100644 +--- a/drivers/platform/x86/sepdk/inc/pmi.h ++++ b/drivers/platform/x86/sepdk/inc/pmi.h +@@ -57,7 +57,7 @@ + asmlinkage VOID PMI_Interrupt_Handler(struct pt_regs *regs); + + #if defined(DRV_SEP_ACRN_ON) +-S32 PMI_Buffer_Handler(PVOID data); ++extern VOID PMI_Buffer_Handler(PVOID); + #endif + + extern U32 pmi_Get_CSD(U32, U32 *, U32 *); +diff --git a/drivers/platform/x86/sepdk/sep/Makefile b/drivers/platform/x86/sepdk/sep/Makefile +index 405e55d53c97..c616fc1f7ce8 100755 +--- a/drivers/platform/x86/sepdk/sep/Makefile ++++ b/drivers/platform/x86/sepdk/sep/Makefile +@@ -2,7 +2,7 @@ ccflags-y := -I$(src)/../include -I$(src)/../inc -I$(src)/.. + ccflags-y += -DSEP_CONFIG_MODULE_LAYOUT + # TODO: verify kaiser.h + #ccflags-y += -DKAISER_HEADER_PRESENT +-ccflags-y += -DDRV_CPU_HOTPLUG -DDRV_USE_TASKLET_WORKAROUND ++ccflags-y += -DDRV_CPU_HOTPLUG -DDRV_USE_TASKLET_WORKAROUND -DENABLE_CPUS -DBUILD_CHIPSET -DBUILD_GFX + + asflags-y := -I$(src)/.. 
+ +@@ -18,10 +18,6 @@ ifdef CONFIG_SEP_MAXLOG_MODE + ccflags-y += -DDRV_MAXIMAL_LOGGING + endif + +-ifdef CONFIG_SEP_PRIVATE_BUILD +- ccflags-y += -DENABLE_CPUS -DBUILD_CHIPSET -DBUILD_GFX +-endif +- + ifdef CONFIG_SEP_ACRN + ccflags-y += -DDRV_SEP_ACRN_ON + endif +@@ -50,7 +46,11 @@ sep5-y := lwpmudrv.o \ + unc_msr.o \ + unc_common.o \ + unc_pci.o \ +- sepdrv_p_state.o ++ sepdrv_p_state.o \ ++ chap.o \ ++ gmch.o \ ++ gfx.o \ ++ unc_sa.o + + + ifdef CONFIG_X86_64 +@@ -60,8 +60,3 @@ endif + ifdef CONFIG_X86_32 + sep5-y += sys32.o + endif +- +-sep5-$(CONFIG_SEP_PRIVATE_BUILD) += chap.o \ +- gmch.o \ +- gfx.o \ +- unc_sa.o +diff --git a/drivers/platform/x86/sepdk/sep/linuxos.c b/drivers/platform/x86/sepdk/sep/linuxos.c +index fa29d7f0a3a8..1f877e6e4bc8 100755 +--- a/drivers/platform/x86/sepdk/sep/linuxos.c ++++ b/drivers/platform/x86/sepdk/sep/linuxos.c +@@ -81,8 +81,6 @@ static enum cpuhp_state cpuhp_sepdrv_state; + #endif + extern wait_queue_head_t wait_exit; + +-static PVOID local_tasklist_lock; +- + #define MY_TASK PROFILE_TASK_EXIT + #define MY_UNMAP PROFILE_MUNMAP + #ifdef CONFIG_X86_64 +@@ -869,25 +867,6 @@ OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end) + return OS_SUCCESS; + } + +- if (!local_tasklist_lock) { +- local_tasklist_lock = +- (PVOID)(UIOP)UTILITY_Find_Symbol("tasklist_lock"); +- if (!local_tasklist_lock) { +- SEP_DRV_LOG_WARNING("Could not find tasklist_lock."); +- } +- } +- +- // In some machines the tasklist_lock symbol does not exist. 
+- // For temporary solution we skip the lock if there is no tasklist_lock +- if (local_tasklist_lock) { +-#if defined( \ +- DEFINE_QRWLOCK) // assuming that if DEFINE_QRWLOCK is defined, then tasklist_lock was defined using it +- qread_lock(local_tasklist_lock); +-#else +- read_lock(local_tasklist_lock); +-#endif +- } +- + FOR_EACH_TASK(p) + { + struct mm_struct *mm; +@@ -922,14 +901,6 @@ OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end) + n++; + } + +- if (local_tasklist_lock) { +-#if defined(DEFINE_QRWLOCK) +- qread_unlock(local_tasklist_lock); +-#else +- read_unlock(local_tasklist_lock); +-#endif +- } +- + SEP_DRV_LOG_TRACE("Enum_Process_Modules done with %d tasks.", n); + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); +@@ -1424,24 +1395,6 @@ DRV_BOOL LINUXOS_Check_KVM_Guest_Process(void) + + SEP_DRV_LOG_TRACE_IN(""); + +- if (!local_tasklist_lock) { +- local_tasklist_lock = +- (PVOID)(UIOP)UTILITY_Find_Symbol("tasklist_lock"); +- if (!local_tasklist_lock) { +- SEP_DRV_LOG_WARNING("Could not find tasklist_lock."); +- } +- } +- +- // In some machines the tasklist_lock symbol does not exist. 
+- // For temporary solution we skip the lock if there is no tasklist_lock +- if (local_tasklist_lock) { +-#if defined(DEFINE_QRWLOCK) +- qread_lock(local_tasklist_lock); +-#else +- read_lock(local_tasklist_lock); +-#endif +- } +- + FOR_EACH_TASK(p) + { + // if (p == NULL) { +@@ -1452,27 +1405,11 @@ DRV_BOOL LINUXOS_Check_KVM_Guest_Process(void) + 0; // making sure there is a trailing 0 + + if (!strncmp(p->comm, "qemu-kvm", 8)) { +- if (local_tasklist_lock) { +-#if defined(DEFINE_QRWLOCK) +- qread_unlock(local_tasklist_lock); +-#else +- read_unlock(local_tasklist_lock); +-#endif +- } +- + SEP_DRV_LOG_INIT_TRACE_OUT("TRUE (found qemu-kvm!)."); + return TRUE; + } + } + +- if (local_tasklist_lock) { +-#if defined(DEFINE_QRWLOCK) +- qread_unlock(local_tasklist_lock); +-#else +- read_unlock(local_tasklist_lock); +-#endif +- } +- + SEP_DRV_LOG_TRACE_OUT("FALSE"); + return FALSE; + } +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +index f13552c20774..bb53962d2695 100755 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -100,9 +100,6 @@ MODULE_VERSION(SEP_NAME "_" SEP_VERSION_STR); + MODULE_LICENSE("Dual BSD/GPL"); + + static struct task_struct *abnormal_handler; +-#if defined(DRV_SEP_ACRN_ON) +-static struct task_struct *acrn_buffer_handler[MAX_NR_PCPUS] = { NULL }; +-#endif + + typedef struct LWPMU_DEV_NODE_S LWPMU_DEV_NODE; + typedef LWPMU_DEV_NODE * LWPMU_DEV; +@@ -159,8 +156,10 @@ U32 osid = OS_ID_NATIVE; + DRV_BOOL sched_switch_enabled = FALSE; + + #if defined(DRV_SEP_ACRN_ON) +-struct profiling_vm_info_list *vm_info_list; +-shared_buf_t **samp_buf_per_cpu; ++struct profiling_vm_info_list *vm_info_list; ++static struct timer_list *buffer_read_timer; ++static unsigned long buffer_timer_interval; ++shared_buf_t **samp_buf_per_cpu; + #endif + + #define UNCORE_EM_GROUP_SWAP_FACTOR 100 +@@ -1875,14 +1874,17 @@ static VOID lwpmudrv_Write_Uncore(PVOID param) + */ + 
static VOID lwpmudrv_Write_Op(PVOID param) + { +- U32 this_cpu = CONTROL_THIS_CPU(); +- +- U32 dev_idx = core_to_dev_map[this_cpu]; +- DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ U32 this_cpu; ++ U32 dev_idx; ++ DISPATCH dispatch; + U32 switch_grp = 0; + + SEP_DRV_LOG_TRACE_IN(""); + ++ this_cpu = CONTROL_THIS_CPU(); ++ dev_idx = core_to_dev_map[this_cpu]; ++ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); ++ + if (dispatch != NULL && dispatch->write != NULL) { + dispatch->write((VOID *)(size_t)0); + } +@@ -2217,6 +2219,49 @@ static VOID lwpmudrv_Trigger_Read( + SEP_DRV_LOG_TRACE_OUT("Success."); + } + ++ ++#if defined(DRV_SEP_ACRN_ON) ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static VOID lwpmudrv_ACRN_Buffer_Read(void) ++ * ++ * @param - none ++ * ++ * @return - OS_STATUS ++ * ++ * @brief Read the ACRN Buffer Data. ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_ACRN_Buffer_Read( ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ struct timer_list *tl ++#else ++ unsigned long arg ++#endif ++) ++{ ++ SEP_DRV_LOG_TRACE_IN(""); ++ ++ if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { ++ SEP_DRV_LOG_TRACE_OUT("Success: driver state is not RUNNING"); ++ return; ++ } ++ ++ CONTROL_Invoke_Parallel(PMI_Buffer_Handler, NULL); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ mod_timer(buffer_read_timer, jiffies + buffer_timer_interval); ++#else ++ buffer_read_timer->expires = jiffies + buffer_timer_interval; ++ add_timer(buffer_read_timer); ++#endif ++ ++ SEP_DRV_LOG_TRACE_OUT("Success."); ++} ++#endif ++ ++ + /* ------------------------------------------------------------------------- */ + /*! + * @fn static void lwmudrv_Read_Specific_TSC (PVOID param) +@@ -2310,6 +2355,73 @@ static VOID lwpmudrv_Uncore_Start_Timer(void) + SEP_DRV_LOG_FLOW_OUT(""); + } + ++ ++#if defined(DRV_SEP_ACRN_ON) ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID lwpmudrv_ACRN_Flush_Stop_Timer (void) ++ * ++ * @brief Stop the ACRN buffer read timer ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * Special Notes: ++ */ ++static VOID lwpmudrv_ACRN_Flush_Stop_Timer(void) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ if (buffer_read_timer == NULL) { ++ return; ++ } ++ ++ del_timer_sync(buffer_read_timer); ++ buffer_read_timer = CONTROL_Free_Memory(buffer_read_timer); ++ ++ SEP_DRV_LOG_FLOW_OUT(""); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn OS_STATUS lwpmudrv_ACRN_Flush_Start_Timer (void) ++ * ++ * @brief Start the ACRN buffer read timer ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * Special Notes: ++ */ ++static VOID lwpmudrv_ACRN_Flush_Start_Timer(void) ++{ ++ SEP_DRV_LOG_FLOW_IN(""); ++ ++ buffer_timer_interval = msecs_to_jiffies(10); ++ buffer_read_timer = CONTROL_Allocate_Memory(sizeof(struct timer_list)); ++ if (buffer_read_timer == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "Memory allocation failure for buffer_read_timer!"); ++ return; ++ } ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ++ timer_setup(buffer_read_timer, lwpmudrv_ACRN_Buffer_Read, 0); ++ mod_timer(buffer_read_timer, jiffies + buffer_timer_interval); ++#else ++ init_timer(buffer_read_timer); ++ buffer_read_timer->function = lwpmudrv_ACRN_Buffer_Read; ++ buffer_read_timer->expires = jiffies + buffer_timer_interval; ++ add_timer(buffer_read_timer); ++#endif ++ ++ SEP_DRV_LOG_FLOW_OUT(""); ++} ++#endif ++ ++ + /* ------------------------------------------------------------------------- */ + /*! 
+ * @fn static OS_STATUS lwpmudrv_Init_Op(void) +@@ -3918,7 +4030,6 @@ static OS_STATUS lwpmudrv_Start(void) + #endif + #if defined(DRV_SEP_ACRN_ON) + struct profiling_control *control = NULL; +- S32 i; + #endif + + SEP_DRV_LOG_FLOW_IN(""); +@@ -3988,12 +4099,11 @@ static OS_STATUS lwpmudrv_Start(void) + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_CONTROL_SWITCH, + virt_to_phys(control)); + +- SEP_PRINT_DEBUG("ACRN profiling collection running 0x%llx\n", ++ SEP_DRV_LOG_TRACE("ACRN profiling collection running 0x%llx\n", + control->switches); + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { +- control->switches |= (1 << CORE_PMU_SAMPLING) | +- (1 << VM_SWITCH_TRACING); ++ control->switches |= (1 << CORE_PMU_SAMPLING); + if (DEV_CONFIG_collect_lbrs(cur_pcfg)) { + control->switches |= (1 << LBR_PMU_SAMPLING); + } +@@ -4005,23 +4115,7 @@ static OS_STATUS lwpmudrv_Start(void) + virt_to_phys(control)); + control = CONTROL_Free_Memory(control); + +- if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { +- char kthread_name[MAXNAMELEN]; +- for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); +- i++) { +- snprintf(kthread_name, MAXNAMELEN, "%s_%d", +- "SEPDRV_BUFFER_HANDLER", i); +- acrn_buffer_handler[i] = +- kthread_create(PMI_Buffer_Handler, +- (VOID *)(size_t)i, +- kthread_name); +- if (acrn_buffer_handler[i]) { +- wake_up_process(acrn_buffer_handler[i]); +- } +- } +- SEP_PRINT_DEBUG( +- "lwpmudrv_Prepare_Stop: flushed all the remaining buffer\n"); +- } ++ lwpmudrv_ACRN_Flush_Start_Timer(); + #endif + + #if defined(BUILD_CHIPSET) +@@ -4151,12 +4245,12 @@ static OS_STATUS lwpmudrv_Prepare_Stop(void) + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_CONTROL_SWITCH, + virt_to_phys(control)); + +- SEP_PRINT_DEBUG("ACRN profiling collection running 0x%llx\n", ++ SEP_DRV_LOG_TRACE("ACRN profiling collection running 0x%llx\n", + control->switches); + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + control->switches &= +- ~((1 << CORE_PMU_SAMPLING) | (1 << 
VM_SWITCH_TRACING)); ++ ~(1 << CORE_PMU_SAMPLING); + } else { + control->switches &= ~(1 << CORE_PMU_COUNTING); + } +@@ -4164,6 +4258,10 @@ static OS_STATUS lwpmudrv_Prepare_Stop(void) + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, + virt_to_phys(control)); + control = CONTROL_Free_Memory(control); ++ ++ lwpmudrv_ACRN_Flush_Stop_Timer(); ++ SEP_DRV_LOG_TRACE("Calling final PMI_Buffer_Handler\n"); ++ CONTROL_Invoke_Parallel(PMI_Buffer_Handler, NULL); + #endif + + SEP_DRV_LOG_TRACE("Outside of all interrupts."); +diff --git a/drivers/platform/x86/sepdk/sep/pmi.c b/drivers/platform/x86/sepdk/sep/pmi.c +index 44f335dbc885..934473f57efa 100755 +--- a/drivers/platform/x86/sepdk/sep/pmi.c ++++ b/drivers/platform/x86/sepdk/sep/pmi.c +@@ -422,7 +422,7 @@ asmlinkage VOID PMI_Interrupt_Handler(struct pt_regs *regs) + * + * Special Notes + */ +-S32 PMI_Buffer_Handler(PVOID data) ++VOID PMI_Buffer_Handler(PVOID data) + { + SampleRecordPC *psamp; + CPU_STATE pcpu; +@@ -442,10 +442,10 @@ S32 PMI_Buffer_Handler(PVOID data) + U64 overflow_status = 0; + + if (!pcb || !cpu_buf || !devices) { +- return 0; ++ return; + } +- cpu_id = (S32)(size_t)data; + ++ cpu_id = (S32)CONTROL_THIS_CPU(); + pcpu = &pcb[cpu_id]; + bd = &cpu_buf[cpu_id]; + dev_idx = core_to_dev_map[cpu_id]; +@@ -456,8 +456,6 @@ S32 PMI_Buffer_Handler(PVOID data) + + while (1) { + if ((GLOBAL_STATE_current_phase(driver_state) == +- DRV_STATE_PREPARE_STOP) || +- (GLOBAL_STATE_current_phase(driver_state) == + DRV_STATE_TERMINATING) || + (GLOBAL_STATE_current_phase(driver_state) == + DRV_STATE_STOPPED)) { +@@ -467,8 +465,8 @@ S32 PMI_Buffer_Handler(PVOID data) + data_size = + sbuf_get(samp_buf_per_cpu[cpu_id], (uint8_t *)&header); + if (data_size <= 0) { +- continue; +- } ++ goto handler_cleanup; ++ } + payload_size = 0; + if ((header.data_type == (1 << CORE_PMU_SAMPLING)) || + (header.data_type == (1 << LBR_PMU_SAMPLING))) { +@@ -482,51 +480,35 @@ S32 PMI_Buffer_Handler(PVOID data) + 
expected_payload_size = 0; + } + for (j = 0; j < (expected_payload_size - 1) / +- TRACE_ELEMENT_SIZE + +- 1; +- j++) { +- while (1) { ++ TRACE_ELEMENT_SIZE + 1; j++) { + data_size = sbuf_get( + samp_buf_per_cpu[cpu_id], + (uint8_t *)&psample + + j * TRACE_ELEMENT_SIZE); + if (data_size <= 0) { +- if ((GLOBAL_STATE_current_phase( +- driver_state) == +- DRV_STATE_PREPARE_STOP) || +- (GLOBAL_STATE_current_phase( +- driver_state) == +- DRV_STATE_TERMINATING) || +- (GLOBAL_STATE_current_phase( +- driver_state) == +- DRV_STATE_STOPPED)) { +- goto handler_cleanup; +- } +- } else { + break; + } +- } +- + payload_size += data_size; + } ++ SEP_DRV_LOG_TRACE("payload_size = %x\n", payload_size); + if (header.payload_size > payload_size) { + // Mismatch in payload size in header info + SEP_PRINT_ERROR( + "Mismatch in data size: header=%llu, payload_size=%d\n", + header.payload_size, payload_size); +- break; ++ goto handler_cleanup; + } + if (header.cpu_id != cpu_id) { + // Mismatch in cpu index in header info + SEP_PRINT_ERROR( + "Mismatch in cpu idx: header=%u, buffer=%d\n", + header.cpu_id, cpu_id); +- break; ++ goto handler_cleanup; + } + + // Now, handle the sample data in buffer + overflow_status = psample.csample.overflow_status; +- SEP_PRINT_DEBUG("overflow_status cpu%d, value=0x%llx\n", ++ SEP_DRV_LOG_TRACE("overflow_status cpu%d, value=0x%llx\n", + cpu_id, overflow_status); + + FOR_EACH_DATA_REG_CPU(pecb, i, cpu_id) +@@ -546,7 +528,7 @@ S32 PMI_Buffer_Handler(PVOID data) + ECB_entries_event_id_index(pecb, + i)); + evt_desc = desc_data[desc_id]; +- SEP_PRINT_DEBUG( ++ SEP_DRV_LOG_TRACE( + "In Interrupt handler: event_id_index=%u, desc_id=%u\n", + ECB_entries_event_id_index(pecb, + i), +@@ -561,7 +543,7 @@ S32 PMI_Buffer_Handler(PVOID data) + !SEP_IN_NOTIFICATION, + cpu_id); + if (!psamp) { +- SEP_PRINT_DEBUG( ++ SEP_DRV_LOG_TRACE( + "In Interrupt handler: psamp is NULL. 
No output buffer allocated\n"); + continue; + } +@@ -611,30 +593,34 @@ S32 PMI_Buffer_Handler(PVOID data) + &psample.lsample); + } + +- SEP_PRINT_DEBUG( ++ SEP_DRV_LOG_TRACE( + "SAMPLE_RECORD_cpu_num(psamp) %x\n", + SAMPLE_RECORD_cpu_num(psamp)); +- SEP_PRINT_DEBUG( ++ SEP_DRV_LOG_TRACE( + "SAMPLE_RECORD_iip(psamp) %x\n", + SAMPLE_RECORD_iip(psamp)); +- SEP_PRINT_DEBUG( ++ SEP_DRV_LOG_TRACE( + "SAMPLE_RECORD_cs(psamp) %x\n", + SAMPLE_RECORD_cs(psamp)); +- SEP_PRINT_DEBUG( ++ SEP_DRV_LOG_TRACE( + "SAMPLE_RECORD_csd(psamp).lowWord %x\n", + SAMPLE_RECORD_csd(psamp) + .u1.lowWord); +- SEP_PRINT_DEBUG( ++ SEP_DRV_LOG_TRACE( + "SAMPLE_RECORD_csd(psamp).highWord %x\n", + SAMPLE_RECORD_csd(psamp) + .u2.highWord); + } + } + END_FOR_EACH_DATA_REG_CPU; ++ } else if (header.data_type == (1 << VM_SWITCH_TRACING)) { ++ SEP_DRV_LOG_TRACE("Ignoring VM switch trace data\n"); ++ } else { ++ SEP_DRV_LOG_TRACE("Unknown data_type %x\n", header.data_type); + } + } + + handler_cleanup: +- return 0; ++ return; + } + #endif +diff --git a/drivers/platform/x86/socwatch/inc/sw_collector.h b/drivers/platform/x86/socwatch/inc/sw_collector.h +index b771ab936b26..41430cbeddef 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_collector.h ++++ b/drivers/platform/x86/socwatch/inc/sw_collector.h +@@ -62,7 +62,7 @@ + */ + struct sw_hw_ops; + +-// TODO: convert from 'list_head' to 'hlist_head' ++/* TODO: convert from 'list_head' to 'hlist_head' */ + /** + * struct - sw_collector_data + * Information about the collector to be invoked at collection time. 
+@@ -133,4 +133,4 @@ void sw_set_collector_ops(const struct sw_hw_ops *hw_ops); + */ + extern int sw_process_snapshot(enum sw_when_type when); + extern int sw_process_snapshot_on_cpu(enum sw_when_type when, int cpu); +-#endif // __SW_COLLECTOR_H__ ++#endif /* __SW_COLLECTOR_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_defines.h b/drivers/platform/x86/socwatch/inc/sw_defines.h +index 15ccca1efed6..ab0f4911332d 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_defines.h ++++ b/drivers/platform/x86/socwatch/inc/sw_defines.h +@@ -146,11 +146,11 @@ typedef enum { + SW_COUNTER_HOTKEY_EVENT, + SW_MAX_COLLECTION_EVENT + } collector_stop_event_t; +-#endif // SWW_MERGE ++#endif /* SWW_MERGE */ + + #define MAX_UNSIGNED_16_BIT_VALUE 0xFFFF + #define MAX_UNSIGNED_24_BIT_VALUE 0xFFFFFF + #define MAX_UNSIGNED_32_BIT_VALUE 0xFFFFFFFF + #define MAX_UNSIGNED_64_BIT_VALUE 0xFFFFFFFFFFFFFFFF + +-#endif // _PW_DEFINES_H_ ++#endif /* _PW_DEFINES_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_file_ops.h b/drivers/platform/x86/socwatch/inc/sw_file_ops.h +index 7c5705cf942c..c3a30a17a7b0 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_file_ops.h ++++ b/drivers/platform/x86/socwatch/inc/sw_file_ops.h +@@ -67,4 +67,4 @@ struct sw_file_ops { + int sw_register_dev(struct sw_file_ops *ops); + void sw_unregister_dev(void); + +-#endif // __SW_FILE_OPS_H__ ++#endif /* __SW_FILE_OPS_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_hardware_io.h b/drivers/platform/x86/socwatch/inc/sw_hardware_io.h +index f93fa6b10d7a..5cc9ebe18cf1 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_hardware_io.h ++++ b/drivers/platform/x86/socwatch/inc/sw_hardware_io.h +@@ -74,13 +74,15 @@ typedef bool (*sw_hw_op_post_config_func_t)(void); + /** + * struct sw_hw_ops - Operations for each of the HW collection mechanisms + * in swkernelcollector. +- * @name: A descriptive name used to identify this particular operation. 
++ * @name: A descriptive name used to identify this particular ++ * operation. + * @init: Initialize a metric's collection. + * @read: Read a metric's data. + * @write: Write to the HW for the metric(?). + * @print: Print out the data. + * @reset: Opposite of init--called after we're done collecting. +- * @available: Decide whether this H/W op is available on the current platform. ++ * @available: Decide whether this H/W op is available on the current ++ * platform. + * @post_config: Perform any post-configuration steps. + */ + struct sw_hw_ops { +@@ -115,4 +117,4 @@ int sw_register_hw_ops(void); + */ + void sw_free_hw_ops(void); + +-#endif // __SW_HARDWARE_IO_H__ ++#endif /* __SW_HARDWARE_IO_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_internal.h b/drivers/platform/x86/socwatch/inc/sw_internal.h +index 8e88d5d5ea54..c8d9da330756 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_internal.h ++++ b/drivers/platform/x86/socwatch/inc/sw_internal.h +@@ -66,28 +66,28 @@ + #include + #include + #include +-#include // inode +-#include // class_create +-#include // cdev_alloc +-#include // vmalloc +-#include // TASK_INTERRUPTIBLE +-#include // wait_event_interruptible +-#include // pci_get_bus_and_slot +-#include // LINUX_VERSION_CODE +-#include // For SFI F/W version ++#include /* inode */ ++#include /* class_create */ ++#include /* cdev_alloc */ ++#include /* vmalloc */ ++#include /* TASK_INTERRUPTIBLE */ ++#include /* wait_event_interruptible */ ++#include /* pci_get_bus_and_slot */ ++#include /* LINUX_VERSION_CODE */ ++#include /* For SFI F/W version */ + #include + #include +-#include // local_t +-#include // "in_atomic" ++#include /* local_t */ ++#include /* "in_atomic" */ + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +-#include // copy_to_user ++#include /* copy_to_user */ + #else +-#include // copy_to_user +-#endif // LINUX_VERSION_CODE ++#include /* copy_to_user */ ++#endif /* LINUX_VERSION_CODE */ + + #ifdef CONFIG_X86_WANT_INTEL_MID + 
#include +-#endif // CONFIG_X86_WANT_INTEL_MID ++#endif /* CONFIG_X86_WANT_INTEL_MID */ + /* + * End taken from sw_driver + */ +@@ -135,4 +135,4 @@ int sw_set_module_scope_for_cpus(void); + */ + int sw_reset_module_scope_for_cpus(void); + +-#endif // __SW_DATA_STRUCTS_H__ ++#endif /* __SW_DATA_STRUCTS_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_ioctl.h b/drivers/platform/x86/socwatch/inc/sw_ioctl.h +index baf93058c5c5..1f8e903a0e1c 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_ioctl.h ++++ b/drivers/platform/x86/socwatch/inc/sw_ioctl.h +@@ -62,11 +62,11 @@ + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) + #include + #include +-#endif // COMPAT && x64 +-#else // !__KERNEL__ ++#endif /* COMPAT && x64 */ ++#else /* !__KERNEL__ */ + #include +-#endif // __KERNEL__ +-#endif // __linux__ ++#endif /* __KERNEL__ */ ++#endif /* __linux__ */ + /* + * Ensure we pull in definition of 'DO_COUNT_DROPPED_SAMPLES'! + */ +@@ -75,8 +75,8 @@ + #ifdef ONECORE + #ifndef __KERNEL__ + #include +-#endif //__KERNEL__ +-#endif // ONECORE ++#endif /* __KERNEL__ */ ++#endif /* ONECORE */ + + /* + * The APWR-specific IOCTL magic +@@ -84,13 +84,13 @@ + * are delivered to the correct + * driver. + */ +-// #define APWR_IOCTL_MAGIC_NUM 0xdead ++/* #define APWR_IOCTL_MAGIC_NUM 0xdead */ + #define APWR_IOCTL_MAGIC_NUM 100 + + /* + * The name of the device file + */ +-// #define DEVICE_FILE_NAME "/dev/pw_driver_char_dev" ++/* #define DEVICE_FILE_NAME "/dev/pw_driver_char_dev" */ + #define PW_DEVICE_FILE_NAME "/dev/apwr_driver_char_dev" + #define PW_DEVICE_NAME "apwr_driver_char_dev" + +@@ -119,13 +119,13 @@ enum sw_ioctl_cmd { + * Where "Read" and "Write" are from the user's perspective + * (similar to the file "read" and "write" calls). + */ +-#ifdef SWW_MERGE // Windows +-// +-// Device type -- in the "User Defined" range." +-// ++#ifdef SWW_MERGE /* Windows */ ++/* ++ * Device type -- in the "User Defined" range." 
++ */ + #define POWER_I_CONF_TYPE 40000 + +-// List assigned tracepoint id ++/* List assigned tracepoint id */ + #define CSIR_TRACEPOINT_ID_MASK 1 + #define DEVICE_STATE_TRACEPOINT_ID_MASK 2 + #define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 +@@ -133,18 +133,18 @@ enum sw_ioctl_cmd { + #define DISPLAY_ON_TRACEPOINT_ID_MASK 5 + + #ifdef SWW_MERGE +-// +-// TELEM BAR CONFIG +-// ++/* ++ * TELEM BAR CONFIG ++ */ + #define MAX_TELEM_BAR_CFG 3 + #define TELEM_MCHBAR_CFG 0 + #define TELEM_IPC1BAR_CFG 1 + #define TELEM_SSRAMBAR_CFG 2 + #endif + +-// +-// The IOCTL function codes from 0x800 to 0xFFF are for customer use. +-// ++/* ++ * The IOCTL function codes from 0x800 to 0xFFF are for customer use. ++ */ + #define PW_IOCTL_CONFIG \ + CTL_CODE(POWER_I_CONF_TYPE, 0x900, METHOD_BUFFERED, FILE_ANY_ACCESS) + #define PW_IOCTL_START_COLLECTION \ +@@ -152,7 +152,7 @@ enum sw_ioctl_cmd { + #define PW_IOCTL_STOP_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x902, METHOD_BUFFERED, FILE_ANY_ACCESS) + +-// TODO: pause, resume, cancel not supported yet ++/* TODO: pause, resume, cancel not supported yet */ + #define PW_IOCTL_PAUSE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS) + #define PW_IOCTL_RESUME_COLLECTION \ +@@ -201,7 +201,7 @@ enum sw_ioctl_cmd { + #define PW_IOCTL_CMD \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ + struct sw_driver_ioctl_arg *) +-#endif // DO_COUNT_DROPPED_SAMPLES ++#endif /* DO_COUNT_DROPPED_SAMPLES */ + #define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) + #define PW_IOCTL_IMMEDIATE_IO \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ +@@ -227,7 +227,7 @@ enum sw_ioctl_cmd { + #define PW_IOCTL_GET_TOPOLOGY_CHANGES \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ + struct sw_driver_ioctl_arg *) +-#else // __APPLE__ ++#else /* __APPLE__ */ + #define PW_IOCTL_CONFIG \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ + struct sw_driver_ioctl_arg) +@@ -238,7 +238,7 @@ enum 
sw_ioctl_cmd { + #else + #define PW_IOCTL_CMD \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) +-#endif // DO_COUNT_DROPPED_SAMPLES ++#endif /* DO_COUNT_DROPPED_SAMPLES */ + #define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) + #define PW_IOCTL_IMMEDIATE_IO \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ +@@ -264,7 +264,7 @@ enum sw_ioctl_cmd { + #define PW_IOCTL_GET_TOPOLOGY_CHANGES \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ + struct sw_driver_ioctl_arg) +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + + /* + * 32b-compatible version of the above +@@ -281,7 +281,7 @@ enum sw_ioctl_cmd { + #else + #define PW_IOCTL_CMD32 \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) +-#endif // DO_COUNT_DROPPED_SAMPLES ++#endif /* DO_COUNT_DROPPED_SAMPLES */ + #define PW_IOCTL_POLL32 _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) + #define PW_IOCTL_IMMEDIATE_IO32 \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) +@@ -299,5 +299,5 @@ enum sw_ioctl_cmd { + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) + #define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) +-#endif // defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +-#endif // __SW_IOCTL_H__ ++#endif /* defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) */ ++#endif /* __SW_IOCTL_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h b/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h +index 275b1bdfc25e..26328645b232 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h ++++ b/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h +@@ -61,7 +61,7 @@ + #if defined(__APPLE__) + #define likely(x) (x) + #define unlikely(x) (x) +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + + #if !defined(__APPLE__) + #define CPU() (raw_smp_processor_id()) +@@ -69,7 +69,7 @@ + #else + #define CPU() (cpu_number()) + 
#define RAW_CPU() (cpu_number()) +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + + #define TID() (current->pid) + #define PID() (current->tgid) +@@ -101,12 +101,12 @@ + #if !DO_OVERHEAD_MEASUREMENTS + #undef DO_OVERHEAD_MEASUREMENTS + #define DO_OVERHEAD_MEASUREMENTS 1 +-#endif // DO_OVERHEAD_MEASUREMENTS ++#endif /* DO_OVERHEAD_MEASUREMENTS */ + #if !DO_TRACK_MEMORY_USAGE + #undef DO_TRACK_MEMORY_USAGE + #define DO_TRACK_MEMORY_USAGE 1 +-#endif // DO_TRACK_MEMORY_USAGE +-#endif // DO_DRIVER_PROFILING ++#endif /* DO_TRACK_MEMORY_USAGE */ ++#endif /* DO_DRIVER_PROFILING */ + /* + * Should we allow debug output. + * Set to: "1" ==> 'OUTPUT' is enabled. +@@ -142,7 +142,7 @@ + #define pw_pr_warn(...) + #endif + #define pw_pr_force(...) IOLog(__VA_ARGS__) +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + + /* + * Macro for driver error messages. +@@ -159,6 +159,6 @@ + #else + #define pw_pr_error(...) + #endif +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + +-#endif // _SW_KERNEL_DEFINES_H_ ++#endif /* _SW_KERNEL_DEFINES_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_list.h b/drivers/platform/x86/socwatch/inc/sw_list.h +index ecc646a99caa..9c17e50ac5bf 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_list.h ++++ b/drivers/platform/x86/socwatch/inc/sw_list.h +@@ -73,4 +73,4 @@ + #define SW_LIST_EMPTY(head) list_empty(head) + #define SW_LIST_HEAD_INITIALIZER(head) LIST_HEAD_INIT(head) + +-#endif // __SW_LIST_H__ ++#endif /* __SW_LIST_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_lock_defs.h b/drivers/platform/x86/socwatch/inc/sw_lock_defs.h +index 7c9d68c02f58..be44bfab01a7 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_lock_defs.h ++++ b/drivers/platform/x86/socwatch/inc/sw_lock_defs.h +@@ -95,4 +95,4 @@ + write_unlock_irqrestore(&(l), _tmp_l_flags); \ + } + +-#endif // __SW_LOCK_DEFS_H__ ++#endif /* __SW_LOCK_DEFS_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_mem.h b/drivers/platform/x86/socwatch/inc/sw_mem.h +index 
600b8881262c..0d6de7f3a21b 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_mem.h ++++ b/drivers/platform/x86/socwatch/inc/sw_mem.h +@@ -79,4 +79,4 @@ void sw_release_pages(unsigned long addr, unsigned int alloc_size_in_bytes); + u64 sw_get_total_bytes_alloced(void); + u64 sw_get_max_bytes_alloced(void); + u64 sw_get_curr_bytes_alloced(void); +-#endif // _SW_MEM_H_ ++#endif /* _SW_MEM_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_ops_provider.h b/drivers/platform/x86/socwatch/inc/sw_ops_provider.h +index 43bd73fd3445..bb841bf65cb6 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_ops_provider.h ++++ b/drivers/platform/x86/socwatch/inc/sw_ops_provider.h +@@ -59,4 +59,4 @@ + int sw_register_ops_providers(void); + void sw_free_ops_providers(void); + +-#endif // __SW_OPS_PROVIDER_H__ ++#endif /* __SW_OPS_PROVIDER_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_output_buffer.h b/drivers/platform/x86/socwatch/inc/sw_output_buffer.h +index 17e59445ce85..8d6518222ce3 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_output_buffer.h ++++ b/drivers/platform/x86/socwatch/inc/sw_output_buffer.h +@@ -59,7 +59,7 @@ + /* + * Special mask for the case where all buffers have been flushed. + */ +-// #define sw_ALL_WRITES_DONE_MASK 0xffffffff ++/* #define sw_ALL_WRITES_DONE_MASK 0xffffffff */ + #define SW_ALL_WRITES_DONE_MASK ((u32)-1) + /* + * Special mask for the case where no data is available to be read. 
+@@ -133,4 +133,4 @@ void sw_cancel_reader(void); + */ + void sw_print_reader_stats(void); + +-#endif // _SW_OUTPUT_BUFFER_H_ ++#endif /* _SW_OUTPUT_BUFFER_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h b/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h +index 7d9dc683119b..4052555419a8 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h ++++ b/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h +@@ -82,18 +82,18 @@ + + #ifndef __get_cpu_var + /* +- * Kernels >= 3.19 don't include a definition +- * of '__get_cpu_var'. Create one now. +- */ ++ * Kernels >= 3.19 don't include a definition ++ * of '__get_cpu_var'. Create one now. ++ */ + #define __get_cpu_var(var) (*this_cpu_ptr(&var)) +-#endif // __get_cpu_var ++#endif /* __get_cpu_var */ + #ifndef __raw_get_cpu_var + /* +- * Kernels >= 3.19 don't include a definition +- * of '__raw_get_cpu_var'. Create one now. +- */ ++ * Kernels >= 3.19 don't include a definition ++ * of '__raw_get_cpu_var'. Create one now. ++ */ + #define __raw_get_cpu_var(var) (*raw_cpu_ptr(&var)) +-#endif // __get_cpu_var ++#endif /* __get_cpu_var */ + + extern u64 sw_timestamp(void); + +@@ -171,7 +171,7 @@ extern u64 sw_timestamp(void); + __ret; \ + }) + +-#else // !DO_OVERHEAD_MEASUREMENTS ++#else /* !DO_OVERHEAD_MEASUREMENTS */ + #define DECLARE_OVERHEAD_VARS(name) \ + static inline void name##_print_cumulative_overhead_params( \ + const char *str) \ +@@ -181,9 +181,9 @@ extern u64 sw_timestamp(void); + #define DO_PER_CPU_OVERHEAD_FUNC(func, ...) func(__VA_ARGS__) + #define DO_PER_CPU_OVERHEAD_FUNC_RET(type, func, ...) 
func(__VA_ARGS__) + +-#endif // DO_OVERHEAD_MEASUREMENTS ++#endif /* DO_OVERHEAD_MEASUREMENTS */ + + #define PRINT_CUMULATIVE_OVERHEAD_PARAMS(name, str) \ + name##_print_cumulative_overhead_params(str) + +-#endif // _PW_OVERHEAD_MEASUREMENTS_H_ ++#endif /* _PW_OVERHEAD_MEASUREMENTS_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_structs.h b/drivers/platform/x86/socwatch/inc/sw_structs.h +index 7f53a9e2984c..de5ad2b6eb70 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_structs.h ++++ b/drivers/platform/x86/socwatch/inc/sw_structs.h +@@ -95,14 +95,17 @@ struct sw_key_value_payload { + (sizeof(struct sw_key_value_payload) - sizeof(char[1])) + + typedef enum sw_kernel_wakelock_type { +- SW_WAKE_LOCK = 0, // A kernel wakelock was acquired +- SW_WAKE_UNLOCK = 1, // A kernel wakelock was released ++ SW_WAKE_LOCK = 0, /* A kernel wakelock was acquired */ ++ SW_WAKE_UNLOCK = 1, /* A kernel wakelock was released */ + SW_WAKE_LOCK_TIMEOUT = +- 2, // A kernel wakelock was acquired with a timeout +- SW_WAKE_LOCK_INITIAL = 3, // A kernel wakelock was acquired before the +- // collection started +- SW_WAKE_UNLOCK_ALL = 4, // All previously held kernel wakelocks were +- // released -- used in ACPI S3 notifications ++ 2, /* A kernel wakelock was acquired with a timeout */ ++ SW_WAKE_LOCK_INITIAL = 3, /* A kernel wakelock was acquired ++ * before the ++ * collection started ++ */ ++ SW_WAKE_UNLOCK_ALL = 4, /* All previously held kernel wakelocks were ++ * released -- used in ACPI S3 notifications ++ */ + } sw_kernel_wakelock_type_t; + + typedef enum sw_when_type { +@@ -115,7 +118,8 @@ typedef enum sw_when_type { + } sw_when_type_t; + + /** +- * trigger_bits is defined to use type pw_u8_t that makes only upto 8 types possible ++ * trigger_bits is defined to use type pw_u8_t ++ * that makes only upto 8 types possible + */ + #define SW_TRIGGER_BEGIN_MASK() (1U << SW_WHEN_TYPE_BEGIN) + #define SW_TRIGGER_END_MASK() (1U << SW_WHEN_TYPE_END) +@@ -140,23 +144,27 @@ struct 
sw_driver_ipc_mmio_io_descriptor { + #ifdef SWW_MERGE + #pragma warning(push) + #pragma warning( \ +- disable : 4201) // disable C4201: nonstandard extension used: nameless struct/union ++ disable : 4201) /* disable C4201: nonstandard extension used: ++ * nameless struct/union ++ */ + #endif + struct { + pw_u16_t command; + pw_u16_t sub_command; + }; + #ifdef SWW_MERGE +-#pragma warning(pop) // enable C4201 ++#pragma warning(pop) /* enable C4201 */ + #endif + union { +- pw_u32_t ipc_command; // (sub_command << 12) | (command) +- pw_u8_t is_gbe; // Used only for GBE MMIO ++ pw_u32_t ipc_command; /* (sub_command << 12) ++ * | (command) ++ */ ++ pw_u8_t is_gbe; /* Used only for GBE MMIO */ + }; + }; +- // TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' ++ /* TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' */ + union { +- pw_u64_t data_address; // Will be "io_remapped" ++ pw_u64_t data_address; /* Will be "io_remapped" */ + pw_u64_t data_remapped_address; + }; + }; +@@ -180,16 +188,18 @@ struct sw_driver_pci_io_descriptor { + + #pragma pack(push, 1) + struct sw_driver_configdb_io_descriptor { +- // pw_u32_t port; +- // pw_u32_t offset; ++ /* pw_u32_t port; */ ++ /* pw_u32_t offset; */ + pw_u32_t address; + }; + #pragma pack(pop) + + #pragma pack(push, 1) + struct sw_driver_trace_args_io_descriptor { +- pw_u8_t num_args; // Number of valid entries in the 'args' array, below; 1 <= num_args <= 7 +- pw_u8_t args[7]; // Max of 7 args can be recorded ++ pw_u8_t num_args; /* Number of valid entries in the 'args' array, ++ * below; 1 <= num_args <= 7 ++ */ ++ pw_u8_t args[7]; /* Max of 7 args can be recorded */ + }; + #pragma pack(pop) + +@@ -302,9 +312,11 @@ struct sw_driver_pch_mailbox_io_descriptor { + #pragma pack(push, 1) + typedef struct sw_driver_io_descriptor { + pw_u16_t collection_type; +- // TODO: specify READ/WRITE +- pw_s16_t collection_command; // One of 'enum sw_io_cmd' +- pw_u16_t counter_size_in_bytes; // The number of 
bytes to READ or WRITE ++ /* TODO: specify READ/WRITE */ ++ pw_s16_t collection_command; /* One of 'enum sw_io_cmd' */ ++ pw_u16_t counter_size_in_bytes; /* The number of bytes to ++ * READ or WRITE ++ */ + union { + struct sw_driver_msr_io_descriptor msr_descriptor; + struct sw_driver_ipc_mmio_io_descriptor ipc_descriptor; +@@ -317,7 +329,7 @@ typedef struct sw_driver_io_descriptor { + pch_mailbox_descriptor; + struct sw_driver_mailbox_io_descriptor mailbox_descriptor; + }; +- pw_u64_t write_value; // The value to WRITE ++ pw_u64_t write_value; /* The value to WRITE */ + } sw_driver_io_descriptor_t; + #pragma pack(pop) + +@@ -333,17 +345,23 @@ typedef struct sw_driver_io_descriptor { + struct sw_driver_interface_info { + pw_u64_t tracepoint_id_mask; + pw_u64_t notifier_id_mask; +- pw_s16_t cpu_mask; // On which CPU(s) should the driver read the data? +- // Currently: -2 ==> read on ALL CPUs, +- // -1 ==> read on ANY CPU, +- // >= 0 ==> the specific CPU to read on +- pw_s16_t plugin_id; // Metric Plugin SID +- pw_s16_t metric_id; // Domain-specific ID assigned by each Metric Plugin +- pw_s16_t msg_id; // Msg ID retrieved from the SoC Watch config file +- pw_u16_t num_io_descriptors; // Number of descriptors in the array, below. +- pw_u8_t trigger_bits; // Mask of 'when bits' to fire this collector. +- pw_u16_t sampling_interval_msec; // Sampling interval, in msecs +- pw_u8_t descriptors[1]; // Array of sw_driver_io_descriptor structs. ++ pw_s16_t cpu_mask; /* On which CPU(s) should the driver ++ * read the data? ++ * Currently: -2 ==> read on ALL CPUs, ++ * -1 ==> read on ANY CPU, ++ * >= 0 ==> the specific CPU to read on ++ */ ++ pw_s16_t plugin_id; /* Metric Plugin SID */ ++ pw_s16_t metric_id; /* Domain-specific ID assigned by ++ * each Metric Plugin ++ */ ++ pw_s16_t msg_id; /* Msg ID retrieved from the SoC Watch config file */ ++ pw_u16_t num_io_descriptors; /* Number of descriptors in the array, ++ * below. 
++ */ ++ pw_u8_t trigger_bits; /* Mask of 'when bits' to fire this collector. */ ++ pw_u16_t sampling_interval_msec; /* Sampling interval, in msecs */ ++ pw_u8_t descriptors[1]; /* Array of sw_driver_io_descriptor structs. */ + }; + #pragma pack(pop) + +@@ -352,11 +370,19 @@ struct sw_driver_interface_info { + + #pragma pack(push, 1) + struct sw_driver_interface_msg { +- pw_u16_t num_infos; // Number of 'sw_driver_interface_info' structs contained within the 'infos' variable, below +- pw_u16_t min_polling_interval_msecs; // Min time to wait before polling; used exclusively +- // with the low overhead, context-switch based +- // polling mode +- // pw_u16_t infos_size_bytes; // Size of data inlined within the 'infos' variable, below ++ pw_u16_t num_infos; /* Number of 'sw_driver_interface_info' ++ * structs contained within the 'infos' variable, ++ * below ++ */ ++ pw_u16_t min_polling_interval_msecs; /* Min time to wait before ++ * polling; used exclusively ++ * with the low overhead, ++ * context-switch based ++ * polling mode ++ */ ++ /* pw_u16_t infos_size_bytes; Size of data inlined ++ * within the 'infos' variable, below ++ */ + pw_u8_t infos[1]; + }; + #pragma pack(pop) +@@ -373,7 +399,7 @@ typedef enum sw_name_id_type { + #pragma pack(push, 1) + struct sw_name_id_pair { + pw_u16_t id; +- pw_u16_t type; // One of 'sw_name_id_type' ++ pw_u16_t type; /* One of 'sw_name_id_type' */ + struct sw_string_type name; + }; + #pragma pack(pop) +@@ -398,14 +424,16 @@ struct sw_name_info_msg { + typedef struct sw_driver_msg { + pw_u64_t tsc; + pw_u16_t cpuidx; +- pw_u8_t plugin_id; // Cannot have more than 256 plugins +- pw_u8_t metric_id; // Each plugin cannot handle more than 256 metrics +- pw_u8_t msg_id; // Each metric cannot have more than 256 components ++ pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ ++ pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ ++ pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ + 
pw_u16_t payload_len; +- // pw_u64_t p_payload; // Ptr to payload ++ /* pw_u64_t p_payload; Ptr to payload */ + union { +- pw_u64_t __dummy; // Ensure size of struct is consistent on x86, x64 +- char *p_payload; // Ptr to payload (collected data values). ++ pw_u64_t __dummy; /* Ensure size of struct is consistent ++ * on x86, x64 ++ */ ++ char *p_payload; /* Ptr to payload (collected data values). */ + }; + } sw_driver_msg_t; + #pragma pack(pop) +@@ -437,11 +465,11 @@ enum cpu_action { + }; + #pragma pack(push, 1) + struct sw_driver_topology_change { +- pw_u64_t timestamp; // timestamp +- enum cpu_action type; // One of 'enum cpu_action' +- pw_u16_t cpu; // logical cpu +- pw_u16_t core; // core id +- pw_u16_t pkg; // pkg/physical id ++ pw_u64_t timestamp; /* timestamp */ ++ enum cpu_action type; /* One of 'enum cpu_action' */ ++ pw_u16_t cpu; /* logical cpu */ ++ pw_u16_t core; /* core id */ ++ pw_u16_t pkg; /* pkg/physical id */ + }; + struct sw_driver_topology_msg { + pw_u16_t num_entries; +@@ -481,8 +509,8 @@ enum sw_pm_mode { + struct sw_driver_ioctl_arg { + pw_s32_t in_len; + pw_s32_t out_len; +- // pw_u64_t p_in_arg; // Pointer to input arg +- // pw_u64_t p_out_arg; // Pointer to output arg ++ /* pw_u64_t p_in_arg; Pointer to input arg */ ++ /* pw_u64_t p_out_arg; Pointer to output arg */ + char *in_arg; + char *out_arg; + }; +@@ -490,11 +518,11 @@ struct sw_driver_ioctl_arg { + + #pragma pack(push, 1) + typedef struct sw_driver_msg_interval { +- pw_u8_t plugin_id; // Cannot have more than 256 plugins +- pw_u8_t metric_id; // Each plugin cannot handle more than 256 metrics +- pw_u8_t msg_id; // Each metric cannot have more than 256 components +- pw_u16_t interval; // collection interval ++ pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ ++ pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ ++ pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ ++ pw_u16_t interval; /* collection interval */ + } 
sw_driver_msg_interval_t; + #pragma pack(pop) + +-#endif // __SW_STRUCTS_H__ ++#endif /* __SW_STRUCTS_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_telem.h b/drivers/platform/x86/socwatch/inc/sw_telem.h +index 52e5119b557e..e324ff681b2e 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_telem.h ++++ b/drivers/platform/x86/socwatch/inc/sw_telem.h +@@ -57,8 +57,8 @@ + #ifndef _SW_TELEM_H_ + #define _SW_TELEM_H_ 1 + +-#include "sw_structs.h" // sw_driver_io_descriptor +-#include "sw_types.h" // u8 and other types ++#include "sw_structs.h" /* sw_driver_io_descriptor */ ++#include "sw_types.h" /* u8 and other types */ + + int sw_telem_init_func(struct sw_driver_io_descriptor *descriptor); + void sw_read_telem_info(char *dst_vals, int cpu, +diff --git a/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h b/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h +index 3834a16d7ae8..3ec4930c9010 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h ++++ b/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h +@@ -79,4 +79,4 @@ int sw_add_trace_notifier_providers(void); + * Remove previously added providers. 
+ */ + void sw_remove_trace_notifier_providers(void); +-#endif // __SW_TRACE_NOTIFIER_PROVIDER_H__ ++#endif /* __SW_TRACE_NOTIFIER_PROVIDER_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h b/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h +index db8294a9a137..d8a54c099d36 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h ++++ b/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h +@@ -67,9 +67,14 @@ enum sw_trace_data_type { + + struct sw_trace_notifier_name { + const char * +- kernel_name; // The tracepoint name; used by the kernel to identify tracepoints ++ kernel_name; /* The tracepoint name; used by the kernel ++ * to identify tracepoints ++ */ + const char * +- abstract_name; // An abstract name used by plugins to specify tracepoints-of-interest; shared with Ring-3 ++ abstract_name; /* An abstract name used by plugins to ++ * specify tracepoints-of-interest; ++ * shared with Ring-3 ++ */ + }; + + typedef struct sw_trace_notifier_data sw_trace_notifier_data_t; +@@ -79,17 +84,27 @@ typedef int (*sw_trace_notifier_unregister_func)( + struct sw_trace_notifier_data *node); + + struct sw_trace_notifier_data { +- enum sw_trace_data_type type; // Tracepoint or Notifier +- const struct sw_trace_notifier_name *name; // Tracepoint name(s) +- sw_trace_notifier_register_func probe_register; // probe register function +- sw_trace_notifier_unregister_func probe_unregister; // probe unregister function ++ enum sw_trace_data_type type; /* Tracepoint or Notifier */ ++ const struct sw_trace_notifier_name *name; /* Tracepoint name(s) */ ++ sw_trace_notifier_register_func probe_register; /* probe register ++ * function ++ */ ++ sw_trace_notifier_unregister_func probe_unregister; /* probe unregister ++ * function ++ */ + struct tracepoint *tp; +- bool always_register; // Set to TRUE if this tracepoint/notifier must ALWAYS be registered, regardless +- // of whether the user has specified anything to collect ++ 
bool always_register; /* Set to TRUE if this tracepoint/notifier ++ * must ALWAYS be registered, regardless ++ * of whether the user has specified ++ * anything to collect ++ */ + bool was_registered; + SW_DEFINE_LIST_HEAD( + list, +- sw_collector_data); // List of 'sw_collector_data' instances for this tracepoint or notifier ++ sw_collector_data); /* List of 'sw_collector_data' ++ * instances for this tracepoint ++ * or notifier ++ */ + }; + + struct sw_topology_node { +@@ -99,8 +114,10 @@ struct sw_topology_node { + }; + SW_DECLARE_LIST_HEAD( + sw_topology_list, +- sw_topology_node); // List of entries tracking changes in CPU topology +-extern size_t sw_num_topology_entries; // Size of the 'sw_topology_list' ++ sw_topology_node); /* List of entries tracking ++ * changes in CPU topology ++ */ ++extern size_t sw_num_topology_entries; /* Size of the 'sw_topology_list' */ + + int sw_extract_tracepoints(void); + int sw_register_trace_notifiers(void); +@@ -139,4 +156,4 @@ sw_get_trace_notifier_abstract_name(struct sw_trace_notifier_data *node); + */ + void sw_clear_topology_list(void); + +-#endif // __SW_TRACEPOINT_HANDLERS_H__ ++#endif /* __SW_TRACEPOINT_HANDLERS_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_types.h b/drivers/platform/x86/socwatch/inc/sw_types.h +index 914ce9806965..156c92c8349a 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_types.h ++++ b/drivers/platform/x86/socwatch/inc/sw_types.h +@@ -63,8 +63,8 @@ + /* + * Called from Ring-3. + */ +-#include // Grab 'uint64_t' etc. +-#include // Grab 'pid_t' ++#include /* Grab 'uint64_t' etc. */ ++#include /* Grab 'pid_t' */ + /* + * UNSIGNED types... + */ +@@ -80,26 +80,26 @@ typedef int16_t s16; + typedef int32_t s32; + typedef int64_t s64; + +-#else // __KERNEL__ ++#else /* __KERNEL__ */ + #if !defined(__APPLE__) + #include +-#else // __APPLE__ ++#else /* __APPLE__ */ + #include +-#include // Grab 'uint64_t' etc. ++#include /* Grab 'uint64_t' etc. 
*/ + + typedef uint8_t u8; + typedef uint16_t u16; + typedef uint32_t u32; + typedef uint64_t u64; + /* +-* SIGNED types... +-*/ ++ * SIGNED types... ++ */ + typedef int8_t s8; + typedef int16_t s16; + typedef int32_t s32; + typedef int64_t s64; +-#endif // __APPLE__ +-#endif // __KERNEL__ ++#endif /* __APPLE__ */ ++#endif /* __KERNEL__ */ + + #elif defined(_WIN32) + typedef __int32 int32_t; +@@ -125,7 +125,7 @@ typedef signed long long s64; + typedef s32 pid_t; + typedef s32 ssize_t; + +-#endif // _WIN32 ++#endif /* _WIN32 */ + + /* ************************************ + * Common to both operating systems. +@@ -149,4 +149,4 @@ typedef s64 pw_s64_t; + + typedef pid_t pw_pid_t; + +-#endif // _PW_TYPES_H_ ++#endif /* _PW_TYPES_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_version.h b/drivers/platform/x86/socwatch/inc/sw_version.h +index 5797edffa64d..5476b0d79ac5 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_version.h ++++ b/drivers/platform/x86/socwatch/inc/sw_version.h +@@ -71,4 +71,4 @@ + #define SOCWATCH_VERSION_MINOR 8 + #define SOCWATCH_VERSION_OTHER 0 + +-#endif // __SW_VERSION_H__ ++#endif /* __SW_VERSION_H__ */ +diff --git a/drivers/platform/x86/socwatch/sw_collector.c b/drivers/platform/x86/socwatch/sw_collector.c +index a6c8a9cec48b..db855bab4fd8 100644 +--- a/drivers/platform/x86/socwatch/sw_collector.c ++++ b/drivers/platform/x86/socwatch/sw_collector.c +@@ -498,7 +498,7 @@ void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action) + const struct sw_hw_ops **ops = node->ops; + bool wasAnyWrite = false; + +- // msg TSC assigned when msg is written to buffer ++ /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + + for (i = 0; i < num_descriptors; ++i, +@@ -696,7 +696,8 @@ void sw_handle_per_cpu_msg_on_cpu(int cpu, void *info) + smp_call_function_single( + cpu, &sw_handle_per_cpu_msg_no_sched, info, + false /* false ==> do NOT wait for function +- completion */); ++ * completion ++ */); + } + } + +diff 
--git a/drivers/platform/x86/socwatch/sw_driver.c b/drivers/platform/x86/socwatch/sw_driver.c +index 35b516cfb26a..661a42555baa 100644 +--- a/drivers/platform/x86/socwatch/sw_driver.c ++++ b/drivers/platform/x86/socwatch/sw_driver.c +@@ -133,8 +133,8 @@ static bool sw_should_flush_buffer_i(void); + */ + struct swa_internal_state { + sw_driver_collection_cmd_t +- cmd; // indicates which command was specified +- // last e.g. START, STOP etc. ++ cmd; /* indicates which command was specified */ ++ /* last e.g. START, STOP etc. */ + /* + * Should we write to our per-cpu output buffers? + * YES if we're actively collecting. +@@ -146,7 +146,7 @@ struct swa_internal_state { + * (See "device_read" for an explanation) + */ + bool drain_buffers; +- // Others... ++ /* Others... */ + }; + + /* ------------------------------------------------- +@@ -196,7 +196,7 @@ static struct sw_file_ops s_ops = { + * that you declare here *MUST* match the function name! + */ + +-DECLARE_OVERHEAD_VARS(sw_collection_poll_i); // for POLL ++DECLARE_OVERHEAD_VARS(sw_collection_poll_i); /* for POLL */ + DECLARE_OVERHEAD_VARS(sw_any_seg_full); + + /* +@@ -206,7 +206,7 @@ DECLARE_OVERHEAD_VARS(sw_any_seg_full); + #if DO_DEBUG_OUTPUT + static const char *s_when_type_names[] = { "BEGIN", "POLL", "NOTIFIER", + "TRACEPOINT", "END" }; +-#endif // DO_DEBUG_OUTPUT ++#endif /* DO_DEBUG_OUTPUT */ + + /* ------------------------------------------------- + * Function definitions. +@@ -332,7 +332,8 @@ void sw_iterate_driver_info_lists_i(void) + pw_pr_debug("ITERATING list %s\n", s_when_type_names[which]); + if (sw_handle_collector_list( + &sw_collector_lists[which], +- &sw_print_collector_node_i)) { // Should NEVER happen! ++ &sw_print_collector_node_i)) { ++ /* Should NEVER happen! 
*/ + pw_pr_error( + "WARNING: error occurred while printing values!\n"); + } +@@ -578,7 +579,7 @@ int sw_collection_stop_i(void) + pw_pr_force( + "DEBUG: there were %llu samples produced and %llu samples dropped in buffer v5!\n", + sw_num_samples_produced, sw_num_samples_dropped); +-#endif // DO_OVERHEAD_MEASUREMENTS ++#endif /* DO_OVERHEAD_MEASUREMENTS */ + /* + * DEBUG: iterate over collection lists. + */ +@@ -594,7 +595,7 @@ int sw_collection_stop_i(void) + pw_pr_debug("OK, STOPPED collection!\n"); + #if DO_OVERHEAD_MEASUREMENTS + pw_pr_force("There were %d poll ticks!\n", num_times_polled); +-#endif // DO_OVERHEAD_MEASUREMENTS ++#endif /* DO_OVERHEAD_MEASUREMENTS */ + return PW_SUCCESS; + } + +@@ -723,7 +724,7 @@ sw_set_driver_infos_i(struct sw_driver_interface_msg __user *remote_msg, + (unsigned int)dst_idx); + for (i = SW_WHEN_TYPE_BEGIN; i <= SW_WHEN_TYPE_END; + ++i, read_triggers >>= 1) { +- if (read_triggers & 0x1) { // Bit 'i' is set ++ if (read_triggers & 0x1) { /* Bit 'i' is set */ + pw_pr_debug("BIT %d is SET!\n", i); + if (i == SW_WHEN_TYPE_TRACEPOINT) { + struct tn_data tn_data = { +@@ -734,9 +735,9 @@ sw_set_driver_infos_i(struct sw_driver_interface_msg __user *remote_msg, + "TRACEPOINT, MASK = 0x%llx\n", + local_info->tracepoint_id_mask); + sw_for_each_tracepoint_node( +- &sw_add_trace_notifier_driver_info_i, +- &tn_data, +- false /*return-on-error*/); ++ &sw_add_trace_notifier_driver_info_i, ++ &tn_data, ++ false /*return-on-error*/); + } else if (i == SW_WHEN_TYPE_NOTIFIER) { + struct tn_data tn_data = { + local_info, +@@ -746,9 +747,9 @@ sw_set_driver_infos_i(struct sw_driver_interface_msg __user *remote_msg, + "NOTIFIER, MASK = 0x%llx\n", + local_info->notifier_id_mask); + sw_for_each_notifier_node( +- &sw_add_trace_notifier_driver_info_i, +- &tn_data, +- false /*return-on-error*/); ++ &sw_add_trace_notifier_driver_info_i, ++ &tn_data, ++ false /*return-on-error*/); + } else { + if (sw_add_driver_info( + &sw_collector_lists[i], +@@ -814,7 
+815,7 @@ static long sw_handle_cmd_i(sw_driver_collection_cmd_t cmd, + return -PW_ERROR; + } + } +-#endif // DO_COUNT_DROPPED_SAMPLES ++#endif /* DO_COUNT_DROPPED_SAMPLES */ + return PW_SUCCESS; + } + +@@ -823,7 +824,7 @@ static int sw_do_parse_sfi_oemb_table(struct sfi_table_header *header) + { + #ifdef CONFIG_X86_WANT_INTEL_MID + struct sfi_table_oemb *oemb = (struct sfi_table_oemb *) +- header; // 'struct sfi_table_oemb' defined in 'intel-mid.h' ++ header; /* 'struct sfi_table_oemb' defined in 'intel-mid.h' */ + if (!oemb) { + pw_pr_error("ERROR: NULL sfi table header!\n"); + return -PW_ERROR; +@@ -833,10 +834,10 @@ static int sw_do_parse_sfi_oemb_table(struct sfi_table_header *header) + pw_pr_debug("DEBUG: major = %u, minor = %u\n", + oemb->scu_runtime_major_version, + oemb->scu_runtime_minor_version); +-#endif // CONFIG_X86_WANT_INTEL_MID ++#endif /* CONFIG_X86_WANT_INTEL_MID */ + return PW_SUCCESS; + } +-#endif // SFI_SIG_OEMB ++#endif /* SFI_SIG_OEMB */ + + static void sw_do_extract_scu_fw_version(void) + { +@@ -846,7 +847,7 @@ static void sw_do_extract_scu_fw_version(void) + &sw_do_parse_sfi_oemb_table)) { + pw_pr_force("WARNING: NO SFI information!\n"); + } +-#endif // SFI_SIG_OEMB ++#endif /* SFI_SIG_OEMB */ + } + + static int sw_gather_trace_notifier_i(struct sw_trace_notifier_data *node, +@@ -864,7 +865,8 @@ static int sw_gather_trace_notifier_i(struct sw_trace_notifier_data *node, + ++msg->num_name_id_pairs; + pair->type = type; + pair->id = (u16)id; +- str->len = strlen(abstract_name) + 1; // "+1" for trailing '\0' ++ /* "+1" for trailing '\0' */ ++ str->len = strlen(abstract_name) + 1; + memcpy(&str->data[0], abstract_name, str->len); + + pw_pr_debug("TP[%d] = %s (%u)\n", +@@ -937,7 +939,7 @@ static int sw_gather_hw_op_i(const struct sw_hw_ops *op, void *priv) + pair->type = SW_NAME_TYPE_COLLECTOR; + pair->id = (u16)id; + str->len = strlen(abstract_name) + +- 1; // "+1" for trailing '\0' ++ 1; /* "+1" for trailing '\0' */ + memcpy(&str->data[0], 
abstract_name, str->len); + + *idx += SW_NAME_ID_HEADER_SIZE() + +@@ -1206,11 +1208,11 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + + cpumask_clear(&cpumask); + switch (local_info->cpu_mask) { +- case -1: // IO on ANY CPU (assume current CPU) ++ case -1: /* IO on ANY CPU (assume current CPU) */ + cpumask_set_cpu(RAW_CPU(), &cpumask); + pw_pr_debug("ANY CPU\n"); + break; +- default: // IO on a particular CPU ++ default: /* IO on a particular CPU */ + cpumask_set_cpu(local_info->cpu_mask, &cpumask); + pw_pr_debug("[%d] setting for %d\n", RAW_CPU(), + local_info->cpu_mask); +@@ -1398,7 +1400,7 @@ int sw_load_driver_i(void) + #ifdef CONFIG_X86_WANT_INTEL_MID + pw_pr_force("SOC Identifier = %u, Stepping = %u\n", + intel_mid_identify_cpu(), intel_mid_soc_stepping()); +-#endif // CONFIG_X86_WANT_INTEL_MID ++#endif /* CONFIG_X86_WANT_INTEL_MID */ + pw_pr_force("-----------------------------------------\n"); + return PW_SUCCESS; + +@@ -1460,7 +1462,7 @@ void sw_unload_driver_i(void) + "***********************************************************************\n"); + } + } +-#endif // DO_TRACK_MEMORY_USAGE ++#endif /* DO_TRACK_MEMORY_USAGE */ + pw_pr_force("-----------------------------------------\n"); + } + +diff --git a/drivers/platform/x86/socwatch/sw_file_ops.c b/drivers/platform/x86/socwatch/sw_file_ops.c +index 06c88801a9ec..ea84d252a4d3 100644 +--- a/drivers/platform/x86/socwatch/sw_file_ops.c ++++ b/drivers/platform/x86/socwatch/sw_file_ops.c +@@ -53,18 +53,18 @@ + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + */ +-#include // try_module_get +-#include // inode +-#include // class_create +-#include // cdev_alloc +-#include // LINUX_VERSION_CODE ++#include /* try_module_get */ ++#include /* inode */ ++#include /* class_create */ ++#include /* cdev_alloc */ ++#include /* LINUX_VERSION_CODE */ + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +-#include // copy_to_user ++#include /* copy_to_user */ + #else +-#include // copy_to_user +-#endif // LINUX_VERSION_CODE +-#include // wait_event_interruptible +-#include // TASK_INTERRUPTIBLE ++#include /* copy_to_user */ ++#endif /* LINUX_VERSION_CODE */ ++#include /* wait_event_interruptible */ ++#include /* TASK_INTERRUPTIBLE */ + + #include "sw_kernel_defines.h" + #include "sw_types.h" +@@ -131,7 +131,7 @@ static struct file_operations s_fops = { + .unlocked_ioctl = &sw_device_unlocked_ioctl_i, + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) + .compat_ioctl = &sw_device_compat_ioctl_i, +-#endif // COMPAT && x64 ++#endif /* COMPAT && x64 */ + .release = &sw_device_release_i, + }; + /* +@@ -154,7 +154,7 @@ static struct sw_file_ops *s_file_ops; + * Is the device open right now? Used to prevent + * concurent access into the same device. + */ +-#define DEV_IS_OPEN 0 // see if device is in use ++#define DEV_IS_OPEN 0 /* see if device is in use */ + static volatile sw_bits_t dev_status; + + /* +@@ -169,12 +169,12 @@ static int sw_device_open_i(struct inode *inode, struct file *file) + * We don't want to talk to two processes at the same time + */ + if (test_and_set_bit(DEV_IS_OPEN, &dev_status)) { +- // Device is busy ++ /* Device is busy */ + return -EBUSY; + } + + if (!try_module_get(THIS_MODULE)) { +- // No such device ++ /* No such device */ + return -ENODEV; + } + pw_pr_debug("OK, allowed client open!\n"); +@@ -231,14 +231,14 @@ static ssize_t sw_device_read_i(struct file *file, char __user *user_buffer, + * Are we done producing/consuming? 
+ */ + if (val == SW_ALL_WRITES_DONE_MASK) { +- return 0; // "0" ==> EOF ++ return 0; /* "0" ==> EOF */ + } + /* + * Copy the buffer contents into userspace. + */ + bytes_read = sw_consume_data( + val, user_buffer, +- length); // 'read' returns # of bytes actually read ++ length); /* 'read' returns # of bytes actually read */ + if (unlikely(bytes_read == 0)) { + /* Cannot be EOF since that has already been checked above */ + return -EIO; +diff --git a/drivers/platform/x86/socwatch/sw_internal.c b/drivers/platform/x86/socwatch/sw_internal.c +index a4a4dca9dc53..04544b8fecb3 100644 +--- a/drivers/platform/x86/socwatch/sw_internal.c ++++ b/drivers/platform/x86/socwatch/sw_internal.c +@@ -85,7 +85,7 @@ void sw_schedule_work(const struct cpumask *mask, void (*work)(void *), + * Did the user ask us to run on 'ANY' CPU? + */ + if (cpumask_empty(mask)) { +- (*work)(data); // Call on current CPU ++ (*work)(data); /* Call on current CPU */ + } else { + preempt_disable(); + { +@@ -93,7 +93,7 @@ void sw_schedule_work(const struct cpumask *mask, void (*work)(void *), + * Did the user ask to run on this CPU? + */ + if (cpumask_test_cpu(RAW_CPU(), mask)) { +- (*work)(data); // Call on current CPU ++ (*work)(data); /* Call on current CPU */ + } + /* + * OK, now check other CPUs. +@@ -120,7 +120,7 @@ void sw_put_cpu(unsigned long flags) + + #ifndef CONFIG_NR_CPUS_PER_MODULE + #define CONFIG_NR_CPUS_PER_MODULE 2 +-#endif // CONFIG_NR_CPUS_PER_MODULE ++#endif /* CONFIG_NR_CPUS_PER_MODULE */ + + static void sw_get_cpu_sibling_mask(int cpu, struct cpumask *sibling_mask) + { +@@ -168,8 +168,8 @@ int sw_set_module_scope_for_cpus(void) + */ + if (cpumask_subset(&sibling_mask, policy->cpus)) { + /* +- * 'sibling_mask' is already a subset of affected_cpus -- nothing +- * to do on this CPU. ++ * 'sibling_mask' is already a subset of ++ * affected_cpus -- nothing to do on this CPU. 
+ */ + cpufreq_cpu_put(policy); + continue; +diff --git a/drivers/platform/x86/socwatch/sw_mem.c b/drivers/platform/x86/socwatch/sw_mem.c +index 0d1231c2e3a8..ac7725387c78 100644 +--- a/drivers/platform/x86/socwatch/sw_mem.c ++++ b/drivers/platform/x86/socwatch/sw_mem.c +@@ -92,7 +92,7 @@ static atomic_t pw_mem_should_panic = ATOMIC_INIT(0); + __tmp; \ + }) + +-#else // if !DO_MEM_PANIC_ON_ALLOC_ERROR ++#else /* if !DO_MEM_PANIC_ON_ALLOC_ERROR */ + + #define MEM_PANIC() + #define SHOULD_TRACE() (true) +@@ -305,7 +305,7 @@ void sw_kfree(const void *obj) + kfree(tmp); + }; + +-#else // !DO_TRACK_MEMORY_USAGE ++#else /* !DO_TRACK_MEMORY_USAGE */ + + void *sw_kmalloc(size_t size, gfp_t flags) + { +@@ -328,4 +328,4 @@ void sw_kfree(const void *mem) + kfree(mem); + }; + +-#endif // DO_TRACK_MEMORY_USAGE ++#endif /* DO_TRACK_MEMORY_USAGE */ +diff --git a/drivers/platform/x86/socwatch/sw_ops_provider.c b/drivers/platform/x86/socwatch/sw_ops_provider.c +index 1eb60d12b701..6e0c77204657 100644 +--- a/drivers/platform/x86/socwatch/sw_ops_provider.c ++++ b/drivers/platform/x86/socwatch/sw_ops_provider.c +@@ -55,12 +55,12 @@ + */ + #include + #include +-#include // "pci_get_domain_bus_and_slot" +-#include // "udelay" ++#include /* "pci_get_domain_bus_and_slot" */ ++#include /* "udelay" */ + #include + #ifdef CONFIG_RPMSG_IPC + #include +-#endif // CONFIG_RPMSG_IPC ++#endif /* CONFIG_RPMSG_IPC */ + + #include "sw_types.h" + #include "sw_kernel_defines.h" +@@ -74,7 +74,8 @@ + /* + * Should we be doing 'direct' PCI reads and writes? 
+ * '1' ==> YES, call "pci_{read,write}_config_dword()" directly +- * '0' ==> NO, Use the "intel_mid_msgbus_{read32,write32}_raw()" API (defined in 'intel_mid_pcihelpers.c') ++ * '0' ==> NO, Use the "intel_mid_msgbus_{read32,write32}_raw()" ++ * API (defined in 'intel_mid_pcihelpers.c') + */ + #define DO_DIRECT_PCI_READ_WRITE 0 + #if !IS_ENABLED(CONFIG_ANDROID) || !defined(CONFIG_X86_WANT_INTEL_MID) +@@ -99,9 +100,9 @@ + #define NUM_RETRY 100 + #define USEC_DELAY 100 + +-#define EXTCNF_CTRL 0xF00 // offset for hw semaphore. +-#define FWSM_CTRL 0x5B54 // offset for fw semaphore +-#define GBE_CTRL_OFFSET 0x34 // GBE LPM offset ++#define EXTCNF_CTRL 0xF00 /* offset for hw semaphore. */ ++#define FWSM_CTRL 0x5B54 /* offset for fw semaphore */ ++#define GBE_CTRL_OFFSET 0x34 /* GBE LPM offset */ + + #define IS_HW_SEMAPHORE_SET(data) (data & (pw_u64_t)(0x1 << 6)) + #define IS_FW_SEMAPHORE_SET(data) (data & (pw_u64_t)0x1) +@@ -346,11 +347,11 @@ static const struct sw_hw_ops s_hw_ops[] = { + int sw_ipc_mmio_descriptor_init_func_i( + struct sw_driver_io_descriptor *descriptor) + { +- // Perform any required 'io_remap' calls here ++ /* Perform any required 'io_remap' calls here */ + struct sw_driver_ipc_mmio_io_descriptor *__ipc_mmio = NULL; + u64 data_address = 0; + +- if (!descriptor) { // Should NEVER happen ++ if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + if (descriptor->collection_type == SW_IO_IPC) { +@@ -363,12 +364,12 @@ int sw_ipc_mmio_descriptor_init_func_i( + __ipc_mmio->data_address); + data_address = __ipc_mmio->data_address; + /* +- if (__ipc_mmio->command || __ipc_mmio->sub_command) { +- __ipc_mmio->ipc_command = +- ((pw_u32_t)__ipc_mmio->sub_command << 12) +- | (pw_u32_t)__ipc_mmio->command; +- } +- */ ++ * if (__ipc_mmio->command || __ipc_mmio->sub_command) { ++ * __ipc_mmio->ipc_command = ++ * ((pw_u32_t)__ipc_mmio->sub_command << 12) ++ * | (pw_u32_t)__ipc_mmio->command; ++ * } ++ */ + if (data_address) { + 
__ipc_mmio->data_remapped_address = + (pw_u64_t)(unsigned long)ioremap_nocache( +@@ -393,14 +394,14 @@ int sw_ipc_mmio_descriptor_init_func_i( + FWSM_CTRL; + s_gbe_semaphore.hw_semaphore = + ioremap_nocache( +- (unsigned long)hw_addr, +- descriptor +- ->counter_size_in_bytes); ++ (unsigned long)hw_addr, ++ descriptor ++ ->counter_size_in_bytes); + s_gbe_semaphore.fw_semaphore = + ioremap_nocache( +- (unsigned long)fw_addr, +- descriptor +- ->counter_size_in_bytes); ++ (unsigned long)fw_addr, ++ descriptor ++ ->counter_size_in_bytes); + if (s_gbe_semaphore.hw_semaphore == + NULL || + s_gbe_semaphore.fw_semaphore == +@@ -418,7 +419,7 @@ int sw_ipc_mmio_descriptor_init_func_i( + s_gbe_semaphore + .fw_semaphore, + descriptor +- ->counter_size_in_bytes); ++ ->counter_size_in_bytes); + } + } + } +@@ -429,10 +430,10 @@ int sw_ipc_mmio_descriptor_init_func_i( + int sw_pch_mailbox_descriptor_init_func_i( + struct sw_driver_io_descriptor *descriptor) + { +- // Perform any required 'io_remap' calls here ++ /* Perform any required 'io_remap' calls here */ + struct sw_driver_pch_mailbox_io_descriptor *__pch_mailbox = NULL; + +- if (!descriptor) { // Should NEVER happen ++ if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + __pch_mailbox = &descriptor->pch_mailbox_descriptor; +@@ -478,12 +479,13 @@ int sw_pch_mailbox_descriptor_init_func_i( + return PW_SUCCESS; + } + +-int sw_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor *descriptor) ++int sw_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor ++ *descriptor) + { +- // Perform any required 'io_remap' calls here ++ /* Perform any required 'io_remap' calls here */ + struct sw_driver_mailbox_io_descriptor *__mailbox = NULL; + +- if (!descriptor) { // Should NEVER happen ++ if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + __mailbox = &descriptor->mailbox_descriptor; +@@ -561,7 +563,7 @@ void sw_read_msr_info_i(char *dst_vals, int cpu, + #define SW_DO_IPC(cmd, 
sub_cmd) rpmsg_send_generic_simple_command(cmd, sub_cmd) + #else + #define SW_DO_IPC(cmd, sub_cmd) (-ENODEV) +-#endif // CONFIG_RPMSG_IPC ++#endif /* CONFIG_RPMSG_IPC */ + + void sw_read_ipc_info_i(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptors, +@@ -581,7 +583,7 @@ void sw_read_ipc_info_i(char *dst_vals, int cpu, + } + + if (remapped_address) { +- // memcpy(&value, (void *)remapped_address, counter_size_in_bytes); ++ /* memcpy(&value, (void *)remapped_address, counter_size_in_bytes); */ + pw_pr_debug("COPYING MMIO size %u\n", counter_size_in_bytes); + memcpy(dst_vals, (void *)remapped_address, + counter_size_in_bytes); +@@ -648,8 +650,8 @@ void sw_read_mmio_info_i(char *dst_vals, int cpu, + } + + void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor ++ *descriptor, u16 counter_size_in_bytes) + { + /* + * TODO: spinlock? +@@ -870,7 +872,7 @@ void sw_read_socperf_info_i(char *dst_vals, int cpu, + + memset(socperf_buffer, 0, counter_size_in_bytes); + SOCPERF_Read_Data2(socperf_buffer); +-#endif // IS_ENABLED(CONFIG_INTEL_SOCPERF) ++#endif /* IS_ENABLED(CONFIG_INTEL_SOCPERF) */ + return; + } + +@@ -883,7 +885,7 @@ bool sw_socperf_available_i(void) + bool retVal = false; + #if IS_ENABLED(CONFIG_INTEL_SOCPERF) + retVal = true; +-#endif // IS_ENABLED(CONFIG_INTEL_SOCPERF) ++#endif /* IS_ENABLED(CONFIG_INTEL_SOCPERF) */ + return retVal; + } + +@@ -905,9 +907,9 @@ u32 sw_platform_configdb_read32(u32 address) + SW_PCI_MSG_CTRL_REG /*ctrl-offset*/, + address /*ctrl-value*/, + SW_PCI_MSG_DATA_REG /*data-offset*/); +-#else // !DO_DIRECT_PCI_READ_WRITE ++#else /* !DO_DIRECT_PCI_READ_WRITE */ + read_value = intel_mid_msgbus_read32_raw(address); +-#endif // if DO_DIRECT_PCI_READ_WRITE ++#endif /* if DO_DIRECT_PCI_READ_WRITE */ + pw_pr_debug("address = %u, value = %u\n", address, read_value); + return read_value; + } +@@ 
-917,18 +919,18 @@ u32 sw_platform_pci_read32(u32 bus, u32 device, u32 function, u32 write_offset, + { + u32 read_value = 0; + struct pci_dev *pci_root = pci_get_domain_bus_and_slot( +- 0, bus, PCI_DEVFN(device, function)); // 0, PCI_DEVFN(0, 0)); ++ 0, bus, PCI_DEVFN(device, function)); /* 0, PCI_DEVFN(0, 0)); */ + if (!pci_root) { + return 0; /* Application will verify the data */ + } + if (write_offset) { + pci_write_config_dword( + pci_root, write_offset, +- write_value); // SW_PCI_MSG_CTRL_REG, address); ++ write_value); /* SW_PCI_MSG_CTRL_REG, address); */ + } + pci_read_config_dword( + pci_root, read_offset, +- &read_value); // SW_PCI_MSG_DATA_REG, &read_value); ++ &read_value); /* SW_PCI_MSG_DATA_REG, &read_value); */ + return read_value; + } + +@@ -1074,7 +1076,7 @@ bool sw_platform_pci_write32(u32 bus, u32 device, u32 function, + u32 write_offset, u32 data_value) + { + struct pci_dev *pci_root = pci_get_domain_bus_and_slot( +- 0, bus, PCI_DEVFN(device, function)); // 0, PCI_DEVFN(0, 0)); ++ 0, bus, PCI_DEVFN(device, function)); /* 0, PCI_DEVFN(0, 0)); */ + if (!pci_root) { + return false; + } +@@ -1100,7 +1102,7 @@ int sw_ipc_mmio_descriptor_reset_func_i( + /* Unmap previously mapped memory here */ + struct sw_driver_ipc_mmio_io_descriptor *__ipc_mmio = NULL; + +- if (!descriptor) { // Should NEVER happen ++ if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + if (descriptor->collection_type == SW_IO_IPC) { +@@ -1139,7 +1141,7 @@ int sw_pch_mailbox_descriptor_reset_func_i( + /* Unmap previously mapped memory here */ + struct sw_driver_pch_mailbox_io_descriptor *__pch_mailbox = NULL; + +- if (!descriptor) { // Should NEVER happen ++ if (!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + __pch_mailbox = +@@ -1175,7 +1177,7 @@ int sw_mailbox_descriptor_reset_func_i( + /* Unmap previously mapped memory here */ + struct sw_driver_mailbox_io_descriptor *__mailbox = NULL; + +- if (!descriptor) { // Should NEVER happen ++ if 
(!descriptor) { /* Should NEVER happen */ + return -PW_ERROR; + } + __mailbox = (struct sw_driver_mailbox_io_descriptor *)&descriptor +@@ -1221,5 +1223,5 @@ int sw_register_ops_providers(void) + + void sw_free_ops_providers(void) + { +- // NOP ++ /* NOP */ + } +diff --git a/drivers/platform/x86/socwatch/sw_output_buffer.c b/drivers/platform/x86/socwatch/sw_output_buffer.c +index d3b8e585595d..a0c1c5fedd05 100644 +--- a/drivers/platform/x86/socwatch/sw_output_buffer.c ++++ b/drivers/platform/x86/socwatch/sw_output_buffer.c +@@ -163,7 +163,7 @@ DECLARE_OVERHEAD_VARS(sw_produce_generic_msg_i); + /* + * Per-cpu output buffers. + */ +-static sw_output_buffer_t *per_cpu_output_buffers = NULL; ++static sw_output_buffer_t *per_cpu_output_buffers; + /* + * Variables for book keeping. + */ +@@ -176,7 +176,7 @@ SW_DECLARE_SPINLOCK(sw_polled_lock); + /* + * Buffer allocation size. + */ +-unsigned long sw_buffer_alloc_size = (1 << 16); // 64 KB ++unsigned long sw_buffer_alloc_size = (1 << 16); /* 64 KB */ + + /* ------------------------------------------------- + * Function definitions. +@@ -242,8 +242,8 @@ static int sw_produce_polled_msg_i(struct sw_driver_msg *msg, + * are always CPU agnostic (otherwise they would + * be invoked from within a preempt_disable()d context + * in 'sw_handle_collector_node_i()', which ensures they +- * will be enqueued within the 'sw_produce_generic_msg_on_cpu()' +- * function). ++ * will be enqueued within the ++ * 'sw_produce_generic_msg_on_cpu()' function). 
+ */ + msg->cpuidx = cpu; + memcpy(dst, msg, SW_DRIVER_MSG_HEADER_SIZE()); +@@ -553,10 +553,10 @@ size_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read) + seg = &buff->buffers[which_seg]; + + bytes_not_copied = sw_copy_to_user(buffer, seg->buffer, +- seg->bytes_written); // dst, src ++ seg->bytes_written); /* dst, src */ + +- // bytes_not_copied = +- // copy_to_user(buffer, seg->buffer, seg->bytes_written); // dst,src ++ /* bytes_not_copied = */ ++ /* copy_to_user(buffer, seg->buffer, seg->bytes_written); dst,src */ + if (likely(bytes_not_copied == 0)) { + bytes_read = seg->bytes_written; + } else { +diff --git a/drivers/platform/x86/socwatch/sw_reader.c b/drivers/platform/x86/socwatch/sw_reader.c +index c94e7e8983db..2e55ae1a54cc 100644 +--- a/drivers/platform/x86/socwatch/sw_reader.c ++++ b/drivers/platform/x86/socwatch/sw_reader.c +@@ -123,7 +123,7 @@ void sw_wakeup_reader(enum sw_wakeup_action action) + if (!hrtimer_active(&s_reader_wakeup_timer)) { + ktime_t ktime = + ns_to_ktime(SW_BUFFER_CLEANUP_TIMER_DELAY_NSEC); +- // TODO: possible race here -- introduce locks? ++ /* TODO: possible race here -- introduce locks? */ + hrtimer_start(&s_reader_wakeup_timer, ktime, + HRTIMER_MODE_REL); + } +@@ -159,5 +159,5 @@ void sw_print_reader_stats(void) + #if DO_OVERHEAD_MEASUREMENTS + printk(KERN_INFO "# reader queue timer fires = %d\n", + s_num_timer_fires); +-#endif // OVERHEAD ++#endif /* OVERHEAD */ + } +diff --git a/drivers/platform/x86/socwatch/sw_telem.c b/drivers/platform/x86/socwatch/sw_telem.c +index 9f8beb57da68..eccb37df44d5 100644 +--- a/drivers/platform/x86/socwatch/sw_telem.c ++++ b/drivers/platform/x86/socwatch/sw_telem.c +@@ -299,7 +299,9 @@ int sw_telem_init_func(struct sw_driver_io_descriptor *descriptor) + /* Check if we've already added this ID */ + for (idx = 0; idx < *unit_idx && idx < MAX_TELEM_EVENTS; ++idx) { + if (s_event_map[unit][idx] == id) { +- /* Invariant: idx contains the index of the new data item. 
*/ ++ /* Invariant: idx contains the ++ * index of the new data item. ++ */ + /* Save the index for later fast lookup. */ + td->idx = (u16)idx; + return 0; +@@ -330,7 +332,8 @@ int sw_telem_init_func(struct sw_driver_io_descriptor *descriptor) + * @descriptor: The descriptor containing the data ID to read + * @data_size_in_bytes: The # of bytes in the result (always 8) + * +- * Returns: Nothing, but stores SW_TELEM_READ_FAIL_VALUE to dest if the read fails. ++ * Returns: Nothing, but stores SW_TELEM_READ_FAIL_VALUE to dest ++ * if the read fails. + */ + void sw_read_telem_info(char *dest, int cpu, + const sw_driver_io_descriptor_t *descriptor, +@@ -348,7 +351,7 @@ void sw_read_telem_info(char *dest, int cpu, + #define TELEM_PKT_SIZE 16 /* sizeof(struct telemetry_evtlog) + padding */ + static struct telemetry_evtlog events[MAX_TELEM_EVENTS]; + +- // Get the event index ++ /* Get the event index */ + if (IS_SCALED_ID(td)) { + unsigned char *scaled_ids; + +@@ -401,8 +404,10 @@ void sw_read_telem_info(char *dest, int cpu, + } + + if (retry_count) { +- // TODO: Resolve if we should return something other than +- // SW_TELEM_READ_FAIL_VALUE, if the actual data happens to be that. ++ /* TODO: Resolve if we should return something other than ++ * SW_TELEM_READ_FAIL_VALUE, if the actual data ++ * happens to be that. ++ */ + *data_dest = events[idx].telem_evtlog; + } else { + *data_dest = SW_TELEM_READ_FAIL_VALUE; +diff --git a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +index d6ff34b65638..153fa7010295 100644 +--- a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c ++++ b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +@@ -71,7 +71,8 @@ + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + #include /* for the various APIC vector tracepoints + * (e.g. "thermal_apic", +- * "local_timer" etc.) */ ++ * "local_timer" etc.) 
++ */ + #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ + struct pool_workqueue; + struct cpu_workqueue_struct; +@@ -118,7 +119,8 @@ struct cpu_workqueue_struct; + #ifdef CONFIG_TIMER_STATS + #define GET_TIMER_THREAD_ID(t) \ + ((t)->start_pid) /* 'start_pid' is actually the thread ID +- * of the thread that initialized the timer */ ++ * of the thread that initialized the timer ++ */ + #else + #define GET_TIMER_THREAD_ID(t) (-1) + #endif /* CONFIG_TIMER_STATS */ +@@ -408,7 +410,8 @@ void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node); + */ + DECLARE_OVERHEAD_VARS( + sw_handle_timer_wakeup_helper_i); /* for the "timer_expire" +- family of probes */ ++ * family of probes ++ */ + DECLARE_OVERHEAD_VARS(sw_handle_irq_wakeup_i); /* for IRQ wakeups */ + DECLARE_OVERHEAD_VARS(sw_handle_sched_wakeup_i); /* for SCHED */ + DECLARE_OVERHEAD_VARS(sw_tps_i); /* for TPS */ +@@ -642,13 +645,15 @@ static struct sw_trace_notifier_data s_notifier_collector_lists[] = { + &s_notifier_names[SW_NOTIFIER_ID_SUSPEND], &sw_register_pm_notifier_i, + &sw_unregister_pm_notifier_i, NULL, true /* always register */ }, + /* Placeholder for suspend enter/exit -- these will be called +- from within the pm notifier */ ++ * from within the pm notifier ++ */ + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_SUSPEND_ENTER], NULL, NULL, NULL }, + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_SUSPEND_EXIT], NULL, NULL, NULL }, + /* Placeholder for hibernate enter/exit -- these will be called +- from within the pm notifier */ ++ * from within the pm notifier ++ */ + { SW_TRACE_COLLECTOR_NOTIFIER, + &s_notifier_names[SW_NOTIFIER_ID_HIBERNATE], NULL, NULL, NULL }, + { SW_TRACE_COLLECTOR_NOTIFIER, +@@ -979,7 +984,8 @@ void sw_handle_sched_switch_helper_i(void) + timeout = sw_min_polling_interval_msecs; + } + /* Has there been enough time since the last +- collection point? */ ++ * collection point? 
++ */ + if (delta_msecs < timeout) { + continue; + } +@@ -987,11 +993,13 @@ void sw_handle_sched_switch_helper_i(void) + if (cpumask_test_cpu( + RAW_CPU(), + mask) /* This msg must be handled on +- the current CPU */ ++ * the current CPU ++ */ + || + cpumask_empty( + mask) /* This msg may be handled by +- any CPU */) { ++ * any CPU ++ */) { + if (!CAS64(&curr->last_update_jiffies, + prev_jiff, curr_jiff)) { + /* +@@ -1679,7 +1687,8 @@ int sw_probe_hotplug_notifier_i(struct notifier_block *block, + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + /* CPU is online -- first store top change +- then take BEGIN snapshot */ ++ * then take BEGIN snapshot ++ */ + sw_store_topology_change_i(SW_CPU_ACTION_ONLINE, cpu, core_id, + pkg_id); + sw_process_snapshot_on_cpu(SW_WHEN_TYPE_BEGIN, cpu); +@@ -1722,13 +1731,15 @@ static void sw_probe_cpuhp_helper_i(unsigned int cpu, enum cpu_action action) + break; + case SW_CPU_ACTION_ONLINE: + /* CPU is online -- first store top change +- then take BEGIN snapshot */ ++ * then take BEGIN snapshot ++ */ + sw_store_topology_change_i(action, cpu, core_id, pkg_id); + sw_process_snapshot_on_cpu(SW_WHEN_TYPE_BEGIN, cpu); + break; + case SW_CPU_ACTION_OFFLINE: + /* CPU is preparing to go offline -- take +- END snapshot then store top change */ ++ * END snapshot then store top change ++ */ + sw_process_snapshot_on_cpu(SW_WHEN_TYPE_END, cpu); + sw_store_topology_change_i(action, cpu, core_id, pkg_id); + break; +diff --git a/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c b/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c +index bc335ce9a65e..8154f6b516c8 100644 +--- a/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c ++++ b/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c +@@ -78,7 +78,7 @@ static int s_trace_idx = -1, s_notifier_idx = -1; + + SW_DEFINE_LIST_HEAD(sw_topology_list, sw_topology_node) = + SW_LIST_HEAD_INITIALIZER(sw_topology_list); +-size_t sw_num_topology_entries = 0; ++size_t sw_num_topology_entries; + + /* 
------------------------------------------------- + * Function definitions. +@@ -102,7 +102,8 @@ void sw_print_trace_notifier_overheads(void) + static int sw_for_each_node_i(void *list_head, + int (*func)(struct sw_trace_notifier_data *node, + void *priv), +- void *priv, bool return_on_error) { ++ void *priv, bool return_on_error) ++{ + SW_LIST_HEAD_VAR(sw_trace_list_node) * head = list_head; + int retval = PW_SUCCESS; + struct sw_trace_list_node *lnode = NULL; +@@ -121,7 +122,8 @@ static int sw_for_each_node_i(void *list_head, + + int sw_for_each_tracepoint_node(int (*func)(struct sw_trace_notifier_data *node, + void *priv), +- void *priv, bool return_on_error) { ++ void *priv, bool return_on_error) ++{ + if (func) { + return sw_for_each_node_i(&s_trace_list, func, priv, + return_on_error); +@@ -131,7 +133,8 @@ int sw_for_each_tracepoint_node(int (*func)(struct sw_trace_notifier_data *node, + + int sw_for_each_notifier_node(int (*func)(struct sw_trace_notifier_data *node, + void *priv), +- void *priv, bool return_on_error) { ++ void *priv, bool return_on_error) ++{ + if (func) { + return sw_for_each_node_i(&s_notifier_list, func, priv, + return_on_error); +diff --git a/drivers/platform/x86/socwatchhv/control.c b/drivers/platform/x86/socwatchhv/control.c +index 4d1c384b1fe8..120705e562d9 100644 +--- a/drivers/platform/x86/socwatchhv/control.c ++++ b/drivers/platform/x86/socwatchhv/control.c +@@ -103,21 +103,22 @@ extern void CONTROL_Invoke_Cpu(int cpu_idx, void (*func)(pvoid), pvoid ctx) + * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) + * + * @param func - function to be invoked by each core in the system +- * @param ctx - pointer to the parameter block for each function invocation ++ * @param ctx - pointer to the parameter block for each function ++ * invocation + * @param blocking - Wait for invoked function to complete + * @param exclude - exclude the current core from executing the code + * + * @returns None + * +- * @brief Service 
routine to handle all kinds of parallel invoke on all CPU calls ++ * @brief Service routine to handle all kinds of parallel invoke on ++ * all CPU calls + * + * Special Notes: + * Invoke the function provided in parallel in either a blocking or + * non-blocking mode. The current core may be excluded if desired. + * NOTE - Do not call this function directly from source code. +- * Use the aliases CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), +- * or CONTROL_Invoke_Parallel_XS(). +- * ++ * Use the aliases CONTROL_Invoke_Parallel(), ++ * CONTROL_Invoke_Parallel_NB(), or CONTROL_Invoke_Parallel_XS(). + */ + extern void CONTROL_Invoke_Parallel_Service(void (*func)(pvoid), pvoid ctx, + int blocking, int exclude) +diff --git a/drivers/platform/x86/socwatchhv/inc/asm_helper.h b/drivers/platform/x86/socwatchhv/inc/asm_helper.h +index d09a3bbd19cb..10e95190e4f0 100644 +--- a/drivers/platform/x86/socwatchhv/inc/asm_helper.h ++++ b/drivers/platform/x86/socwatchhv/inc/asm_helper.h +@@ -152,7 +152,7 @@ + + .macro RESTORE_ALL RESTORE_EXTRA_REGS RESTORE_C_REGS + REMOVE_PT_GPREGS_FROM_STACK.endm +-#endif //CONFIG_X86_64 ++#endif /*CONFIG_X86_64 */ + #endif + + #endif +diff --git a/drivers/platform/x86/socwatchhv/inc/pw_types.h b/drivers/platform/x86/socwatchhv/inc/pw_types.h +index b8a3ac855e53..8b56e5c265dc 100644 +--- a/drivers/platform/x86/socwatchhv/inc/pw_types.h ++++ b/drivers/platform/x86/socwatchhv/inc/pw_types.h +@@ -63,8 +63,8 @@ + /* + * Called from Ring-3. + */ +-#include // Grab 'uint64_t' etc. +-#include // Grab 'pid_t' ++#include /* Grab 'uint64_t' etc. */ ++#include /* Grab 'pid_t' */ + /* + * UNSIGNED types... 
+ */ +@@ -80,7 +80,7 @@ typedef int16_t s16; + typedef int32_t s32; + typedef int64_t s64; + +-#endif // __KERNEL__ ++#endif /* __KERNEL__ */ + + #elif defined(_WIN32) + /* +@@ -100,7 +100,7 @@ typedef signed long long s64; + typedef s32 pid_t; + typedef s32 ssize_t; + +-#endif // _WIN32 ++#endif /* _WIN32 */ + + /* ************************************ + * Common to both operating systems. +@@ -129,4 +129,4 @@ typedef void *pvoid; + #define TRUE 1 + #define FALSE 0 + +-#endif // _PW_TYPES_H_ ++#endif /* _PW_TYPES_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/pw_version.h b/drivers/platform/x86/socwatchhv/inc/pw_version.h +index 8e1cf1cc4d62..7f1a40d82d71 100644 +--- a/drivers/platform/x86/socwatchhv/inc/pw_version.h ++++ b/drivers/platform/x86/socwatchhv/inc/pw_version.h +@@ -64,4 +64,4 @@ + #define SWHVDRV_VERSION_MINOR 0 + #define SWHVDRV_VERSION_OTHER 0 + +-#endif // _PW_VERSION_H_ ++#endif /* _PW_VERSION_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_defines.h b/drivers/platform/x86/socwatchhv/inc/sw_defines.h +index 9c8995805464..f0ef6baceb3f 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_defines.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_defines.h +@@ -72,8 +72,8 @@ + */ + #define TO_ULL(x) (unsigned long long)(x) + /* +-* Convert an arg to 'long long' +-*/ ++ * Convert an arg to 'long long' ++ */ + #define TO_LL(x) (long long)(x) + /* + * Convert an arg to 'unsigned long' +@@ -146,11 +146,11 @@ typedef enum { + SW_COUNTER_HOTKEY_EVENT, + SW_MAX_COLLECTION_EVENT + } collector_stop_event_t; +-#endif // SWW_MERGE ++#endif /* SWW_MERGE */ + + #define MAX_UNSIGNED_16_BIT_VALUE 0xFFFF + #define MAX_UNSIGNED_24_BIT_VALUE 0xFFFFFF + #define MAX_UNSIGNED_32_BIT_VALUE 0xFFFFFFFF + #define MAX_UNSIGNED_64_BIT_VALUE 0xFFFFFFFFFFFFFFFF + +-#endif // _PW_DEFINES_H_ ++#endif /* _PW_DEFINES_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h b/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h +index baf93058c5c5..1f8e903a0e1c 
100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h +@@ -62,11 +62,11 @@ + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) + #include + #include +-#endif // COMPAT && x64 +-#else // !__KERNEL__ ++#endif /* COMPAT && x64 */ ++#else /* !__KERNEL__ */ + #include +-#endif // __KERNEL__ +-#endif // __linux__ ++#endif /* __KERNEL__ */ ++#endif /* __linux__ */ + /* + * Ensure we pull in definition of 'DO_COUNT_DROPPED_SAMPLES'! + */ +@@ -75,8 +75,8 @@ + #ifdef ONECORE + #ifndef __KERNEL__ + #include +-#endif //__KERNEL__ +-#endif // ONECORE ++#endif /* __KERNEL__ */ ++#endif /* ONECORE */ + + /* + * The APWR-specific IOCTL magic +@@ -84,13 +84,13 @@ + * are delivered to the correct + * driver. + */ +-// #define APWR_IOCTL_MAGIC_NUM 0xdead ++/* #define APWR_IOCTL_MAGIC_NUM 0xdead */ + #define APWR_IOCTL_MAGIC_NUM 100 + + /* + * The name of the device file + */ +-// #define DEVICE_FILE_NAME "/dev/pw_driver_char_dev" ++/* #define DEVICE_FILE_NAME "/dev/pw_driver_char_dev" */ + #define PW_DEVICE_FILE_NAME "/dev/apwr_driver_char_dev" + #define PW_DEVICE_NAME "apwr_driver_char_dev" + +@@ -119,13 +119,13 @@ enum sw_ioctl_cmd { + * Where "Read" and "Write" are from the user's perspective + * (similar to the file "read" and "write" calls). + */ +-#ifdef SWW_MERGE // Windows +-// +-// Device type -- in the "User Defined" range." +-// ++#ifdef SWW_MERGE /* Windows */ ++/* ++ * Device type -- in the "User Defined" range." 
++ */ + #define POWER_I_CONF_TYPE 40000 + +-// List assigned tracepoint id ++/* List assigned tracepoint id */ + #define CSIR_TRACEPOINT_ID_MASK 1 + #define DEVICE_STATE_TRACEPOINT_ID_MASK 2 + #define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 +@@ -133,18 +133,18 @@ enum sw_ioctl_cmd { + #define DISPLAY_ON_TRACEPOINT_ID_MASK 5 + + #ifdef SWW_MERGE +-// +-// TELEM BAR CONFIG +-// ++/* ++ * TELEM BAR CONFIG ++ */ + #define MAX_TELEM_BAR_CFG 3 + #define TELEM_MCHBAR_CFG 0 + #define TELEM_IPC1BAR_CFG 1 + #define TELEM_SSRAMBAR_CFG 2 + #endif + +-// +-// The IOCTL function codes from 0x800 to 0xFFF are for customer use. +-// ++/* ++ * The IOCTL function codes from 0x800 to 0xFFF are for customer use. ++ */ + #define PW_IOCTL_CONFIG \ + CTL_CODE(POWER_I_CONF_TYPE, 0x900, METHOD_BUFFERED, FILE_ANY_ACCESS) + #define PW_IOCTL_START_COLLECTION \ +@@ -152,7 +152,7 @@ enum sw_ioctl_cmd { + #define PW_IOCTL_STOP_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x902, METHOD_BUFFERED, FILE_ANY_ACCESS) + +-// TODO: pause, resume, cancel not supported yet ++/* TODO: pause, resume, cancel not supported yet */ + #define PW_IOCTL_PAUSE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS) + #define PW_IOCTL_RESUME_COLLECTION \ +@@ -201,7 +201,7 @@ enum sw_ioctl_cmd { + #define PW_IOCTL_CMD \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ + struct sw_driver_ioctl_arg *) +-#endif // DO_COUNT_DROPPED_SAMPLES ++#endif /* DO_COUNT_DROPPED_SAMPLES */ + #define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) + #define PW_IOCTL_IMMEDIATE_IO \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ +@@ -227,7 +227,7 @@ enum sw_ioctl_cmd { + #define PW_IOCTL_GET_TOPOLOGY_CHANGES \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ + struct sw_driver_ioctl_arg *) +-#else // __APPLE__ ++#else /* __APPLE__ */ + #define PW_IOCTL_CONFIG \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ + struct sw_driver_ioctl_arg) +@@ -238,7 +238,7 @@ enum 
sw_ioctl_cmd { + #else + #define PW_IOCTL_CMD \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) +-#endif // DO_COUNT_DROPPED_SAMPLES ++#endif /* DO_COUNT_DROPPED_SAMPLES */ + #define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) + #define PW_IOCTL_IMMEDIATE_IO \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ +@@ -264,7 +264,7 @@ enum sw_ioctl_cmd { + #define PW_IOCTL_GET_TOPOLOGY_CHANGES \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ + struct sw_driver_ioctl_arg) +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + + /* + * 32b-compatible version of the above +@@ -281,7 +281,7 @@ enum sw_ioctl_cmd { + #else + #define PW_IOCTL_CMD32 \ + _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) +-#endif // DO_COUNT_DROPPED_SAMPLES ++#endif /* DO_COUNT_DROPPED_SAMPLES */ + #define PW_IOCTL_POLL32 _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) + #define PW_IOCTL_IMMEDIATE_IO32 \ + _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) +@@ -299,5 +299,5 @@ enum sw_ioctl_cmd { + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) + #define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ + _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) +-#endif // defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +-#endif // __SW_IOCTL_H__ ++#endif /* defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) */ ++#endif /* __SW_IOCTL_H__ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h b/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h +index 23e939a732c7..d970236df8c7 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_kernel_defines.h +@@ -61,7 +61,7 @@ + #if defined(__APPLE__) + #define likely(x) (x) + #define unlikely(x) (x) +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + + #if !defined(__APPLE__) + #define CPU() (raw_smp_processor_id()) +@@ -69,7 +69,7 @@ + #else + #define CPU() 
(cpu_number()) + #define RAW_CPU() (cpu_number()) +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + + #define TID() (current->pid) + #define PID() (current->tgid) +@@ -101,12 +101,12 @@ + #if !DO_OVERHEAD_MEASUREMENTS + #undef DO_OVERHEAD_MEASUREMENTS + #define DO_OVERHEAD_MEASUREMENTS 1 +-#endif // DO_OVERHEAD_MEASUREMENTS ++#endif /* DO_OVERHEAD_MEASUREMENTS */ + #if !DO_TRACK_MEMORY_USAGE + #undef DO_TRACK_MEMORY_USAGE + #define DO_TRACK_MEMORY_USAGE 1 +-#endif // DO_TRACK_MEMORY_USAGE +-#endif // CONFIG_SOCWATCH_DRIVER_PROFILING ++#endif /* DO_TRACK_MEMORY_USAGE */ ++#endif /* CONFIG_SOCWATCH_DRIVER_PROFILING */ + /* + * Should we allow debug output. + * Set to: "1" ==> 'OUTPUT' is enabled. +@@ -142,7 +142,7 @@ + #define pw_pr_warn(...) + #endif + #define pw_pr_force(...) IOLog(__VA_ARGS__) +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + + /* + * Macro for driver error messages. +@@ -159,6 +159,6 @@ + #else + #define pw_pr_error(...) + #endif +-#endif // __APPLE__ ++#endif /* __APPLE__ */ + +-#endif // _SW_KERNEL_DEFINES_H_ ++#endif /* _SW_KERNEL_DEFINES_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_structs.h b/drivers/platform/x86/socwatchhv/inc/sw_structs.h +index 94e58b5244f4..baac8520e7fd 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_structs.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_structs.h +@@ -95,14 +95,15 @@ struct sw_key_value_payload { + (sizeof(struct sw_key_value_payload) - sizeof(char[1])) + + typedef enum sw_kernel_wakelock_type { +- SW_WAKE_LOCK = 0, // A kernel wakelock was acquired +- SW_WAKE_UNLOCK = 1, // A kernel wakelock was released ++ SW_WAKE_LOCK = 0, /* A kernel wakelock was acquired */ ++ SW_WAKE_UNLOCK = 1, /* A kernel wakelock was released */ + SW_WAKE_LOCK_TIMEOUT = +- 2, // A kernel wakelock was acquired with a timeout +- SW_WAKE_LOCK_INITIAL = 3, // A kernel wakelock was acquired before the +- // collection started +- SW_WAKE_UNLOCK_ALL = 4, // All previously held kernel wakelocks were +- // released -- 
used in ACPI S3 notifications ++ 2, /* A kernel wakelock was acquired with a timeout */ ++ SW_WAKE_LOCK_INITIAL = 3, /* A kernel wakelock was acquired ++ * before the collection started ++ */ ++ SW_WAKE_UNLOCK_ALL = 4, /* All previously held kernel wakelocks were */ ++ /* released -- used in ACPI S3 notifications */ + } sw_kernel_wakelock_type_t; + + typedef enum sw_when_type { +@@ -141,23 +142,27 @@ struct sw_driver_ipc_mmio_io_descriptor { + #ifdef SWW_MERGE + #pragma warning(push) + #pragma warning( \ +- disable : 4201) // disable C4201: nonstandard extension used: nameless struct/union ++ disable : 4201) /* disable C4201: nonstandard extension used: ++ * nameless struct/union ++ */ + #endif + struct { + pw_u16_t command; + pw_u16_t sub_command; + }; + #ifdef SWW_MERGE +-#pragma warning(pop) // enable C4201 ++#pragma warning(pop) /* enable C4201 */ + #endif + union { +- pw_u32_t ipc_command; // (sub_command << 12) | (command) +- pw_u8_t is_gbe; // Used only for GBE MMIO ++ pw_u32_t ipc_command; /* (sub_command << 12) ++ * | (command) ++ */ ++ pw_u8_t is_gbe; /* Used only for GBE MMIO */ + }; + }; +- // TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' ++ /* TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' */ + union { +- pw_u64_t data_address; // Will be "io_remapped" ++ pw_u64_t data_address; /* Will be "io_remapped" */ + pw_u64_t data_remapped_address; + }; + }; +@@ -181,16 +186,18 @@ struct sw_driver_pci_io_descriptor { + + #pragma pack(push, 1) + struct sw_driver_configdb_io_descriptor { +- // pw_u32_t port; +- // pw_u32_t offset; ++ /* pw_u32_t port; */ ++ /* pw_u32_t offset; */ + pw_u32_t address; + }; + #pragma pack(pop) + + #pragma pack(push, 1) + struct sw_driver_trace_args_io_descriptor { +- pw_u8_t num_args; // Number of valid entries in the 'args' array, below; 1 <= num_args <= 7 +- pw_u8_t args[7]; // Max of 7 args can be recorded ++ pw_u8_t num_args; /* Number of valid entries in the 'args' array, ++ * below; 1 <= 
num_args <= 7 ++ */ ++ pw_u8_t args[7]; /* Max of 7 args can be recorded */ + }; + #pragma pack(pop) + +@@ -303,9 +310,11 @@ struct sw_driver_pch_mailbox_io_descriptor { + #pragma pack(push, 1) + typedef struct sw_driver_io_descriptor { + pw_u16_t collection_type; +- // TODO: specify READ/WRITE +- pw_s16_t collection_command; // One of 'enum sw_io_cmd' +- pw_u16_t counter_size_in_bytes; // The number of bytes to READ or WRITE ++ /* TODO: specify READ/WRITE */ ++ pw_s16_t collection_command; /* One of 'enum sw_io_cmd' */ ++ pw_u16_t counter_size_in_bytes; /* The number of bytes to ++ * READ or WRITE ++ */ + union { + struct sw_driver_msr_io_descriptor msr_descriptor; + struct sw_driver_ipc_mmio_io_descriptor ipc_descriptor; +@@ -318,7 +327,7 @@ typedef struct sw_driver_io_descriptor { + pch_mailbox_descriptor; + struct sw_driver_mailbox_io_descriptor mailbox_descriptor; + }; +- pw_u64_t write_value; // The value to WRITE ++ pw_u64_t write_value; /* The value to WRITE */ + } sw_driver_io_descriptor_t; + #pragma pack(pop) + +@@ -334,17 +343,23 @@ typedef struct sw_driver_io_descriptor { + struct sw_driver_interface_info { + pw_u64_t tracepoint_id_mask; + pw_u64_t notifier_id_mask; +- pw_s16_t cpu_mask; // On which CPU(s) should the driver read the data? +- // Currently: -2 ==> read on ALL CPUs, +- // -1 ==> read on ANY CPU, +- // >= 0 ==> the specific CPU to read on +- pw_s16_t plugin_id; // Metric Plugin SID +- pw_s16_t metric_id; // Domain-specific ID assigned by each Metric Plugin +- pw_s16_t msg_id; // Msg ID retrieved from the SoC Watch config file +- pw_u16_t num_io_descriptors; // Number of descriptors in the array, below. +- pw_u8_t trigger_bits; // Mask of 'when bits' to fire this collector. +- pw_u16_t sampling_interval_msec; // Sampling interval, in msecs +- pw_u8_t descriptors[1]; // Array of sw_driver_io_descriptor structs. ++ pw_s16_t cpu_mask; /* On which CPU(s) should the driver ++ * read the data? 
++ * Currently: -2 ==> read on ALL CPUs, ++ * -1 ==> read on ANY CPU, ++ * >= 0 ==> the specific CPU to read on ++ */ ++ pw_s16_t plugin_id; /* Metric Plugin SID */ ++ pw_s16_t metric_id; /* Domain-specific ID assigned by each ++ * Metric Plugin ++ */ ++ pw_s16_t msg_id; /* Msg ID retrieved from the SoC Watch config file */ ++ pw_u16_t num_io_descriptors; /* Number of descriptors in the array, ++ * below. ++ */ ++ pw_u8_t trigger_bits; /* Mask of 'when bits' to fire this collector. */ ++ pw_u16_t sampling_interval_msec; /* Sampling interval, in msecs */ ++ pw_u8_t descriptors[1]; /* Array of sw_driver_io_descriptor structs. */ + }; + #pragma pack(pop) + +@@ -353,11 +368,20 @@ struct sw_driver_interface_info { + + #pragma pack(push, 1) + struct sw_driver_interface_msg { +- pw_u16_t num_infos; // Number of 'sw_driver_interface_info' structs contained within the 'infos' variable, below +- pw_u16_t min_polling_interval_msecs; // Min time to wait before polling; used exclusively +- // with the low overhead, context-switch based +- // polling mode +- // pw_u16_t infos_size_bytes; // Size of data inlined within the 'infos' variable, below ++ pw_u16_t num_infos; /* Number of 'sw_driver_interface_info' ++ * structs contained within the 'infos' variable, ++ * below ++ */ ++ pw_u16_t min_polling_interval_msecs; /* Min time to wait before ++ * polling; used exclusively ++ * with the low overhead, ++ * context-switch based ++ * polling mode ++ */ ++ /* pw_u16_t infos_size_bytes; ++ * Size of data inlined within the ++ * 'infos' variable, below ++ */ + pw_u8_t infos[1]; + }; + #pragma pack(pop) +@@ -374,7 +398,7 @@ typedef enum sw_name_id_type { + #pragma pack(push, 1) + struct sw_name_id_pair { + pw_u16_t id; +- pw_u16_t type; // One of 'sw_name_id_type' ++ pw_u16_t type; /* One of 'sw_name_id_type' */ + struct sw_string_type name; + }; + #pragma pack(pop) +@@ -399,14 +423,16 @@ struct sw_name_info_msg { + typedef struct sw_driver_msg { + pw_u64_t tsc; + pw_u16_t cpuidx; +- 
pw_u8_t plugin_id; // Cannot have more than 256 plugins +- pw_u8_t metric_id; // Each plugin cannot handle more than 256 metrics +- pw_u8_t msg_id; // Each metric cannot have more than 256 components ++ pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ ++ pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ ++ pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ + pw_u16_t payload_len; +- // pw_u64_t p_payload; // Ptr to payload ++ /* pw_u64_t p_payload; Ptr to payload */ + union { +- pw_u64_t __dummy; // Ensure size of struct is consistent on x86, x64 +- char *p_payload; // Ptr to payload (collected data values). ++ pw_u64_t __dummy; /* Ensure size of struct is ++ * consistent on x86, x64 ++ */ ++ char *p_payload; /* Ptr to payload (collected data values). */ + }; + } sw_driver_msg_t; + #pragma pack(pop) +@@ -438,11 +464,11 @@ enum cpu_action { + }; + #pragma pack(push, 1) + struct sw_driver_topology_change { +- pw_u64_t timestamp; // timestamp +- enum cpu_action type; // One of 'enum cpu_action' +- pw_u16_t cpu; // logical cpu +- pw_u16_t core; // core id +- pw_u16_t pkg; // pkg/physical id ++ pw_u64_t timestamp; /* timestamp */ ++ enum cpu_action type; /* One of 'enum cpu_action' */ ++ pw_u16_t cpu; /* logical cpu */ ++ pw_u16_t core; /* core id */ ++ pw_u16_t pkg; /* pkg/physical id */ + }; + struct sw_driver_topology_msg { + pw_u16_t num_entries; +@@ -482,8 +508,8 @@ enum sw_pm_mode { + struct sw_driver_ioctl_arg { + pw_s32_t in_len; + pw_s32_t out_len; +- // pw_u64_t p_in_arg; // Pointer to input arg +- // pw_u64_t p_out_arg; // Pointer to output arg ++ /* pw_u64_t p_in_arg; Pointer to input arg */ ++ /* pw_u64_t p_out_arg; Pointer to output arg */ + char *in_arg; + char *out_arg; + }; +@@ -491,11 +517,11 @@ struct sw_driver_ioctl_arg { + + #pragma pack(push, 1) + typedef struct sw_driver_msg_interval { +- pw_u8_t plugin_id; // Cannot have more than 256 plugins +- pw_u8_t metric_id; // Each plugin cannot handle 
more than 256 metrics +- pw_u8_t msg_id; // Each metric cannot have more than 256 components +- pw_u16_t interval; // collection interval ++ pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ ++ pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ ++ pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ ++ pw_u16_t interval; /* collection interval */ + } sw_driver_msg_interval_t; + #pragma pack(pop) + +-#endif // __SW_STRUCTS_H__ ++#endif /* __SW_STRUCTS_H__ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_types.h b/drivers/platform/x86/socwatchhv/inc/sw_types.h +index 914ce9806965..156c92c8349a 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_types.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_types.h +@@ -63,8 +63,8 @@ + /* + * Called from Ring-3. + */ +-#include // Grab 'uint64_t' etc. +-#include // Grab 'pid_t' ++#include /* Grab 'uint64_t' etc. */ ++#include /* Grab 'pid_t' */ + /* + * UNSIGNED types... + */ +@@ -80,26 +80,26 @@ typedef int16_t s16; + typedef int32_t s32; + typedef int64_t s64; + +-#else // __KERNEL__ ++#else /* __KERNEL__ */ + #if !defined(__APPLE__) + #include +-#else // __APPLE__ ++#else /* __APPLE__ */ + #include +-#include // Grab 'uint64_t' etc. ++#include /* Grab 'uint64_t' etc. */ + + typedef uint8_t u8; + typedef uint16_t u16; + typedef uint32_t u32; + typedef uint64_t u64; + /* +-* SIGNED types... +-*/ ++ * SIGNED types... ++ */ + typedef int8_t s8; + typedef int16_t s16; + typedef int32_t s32; + typedef int64_t s64; +-#endif // __APPLE__ +-#endif // __KERNEL__ ++#endif /* __APPLE__ */ ++#endif /* __KERNEL__ */ + + #elif defined(_WIN32) + typedef __int32 int32_t; +@@ -125,7 +125,7 @@ typedef signed long long s64; + typedef s32 pid_t; + typedef s32 ssize_t; + +-#endif // _WIN32 ++#endif /* _WIN32 */ + + /* ************************************ + * Common to both operating systems. 
+@@ -149,4 +149,4 @@ typedef s64 pw_s64_t; + + typedef pid_t pw_pid_t; + +-#endif // _PW_TYPES_H_ ++#endif /* _PW_TYPES_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_version.h b/drivers/platform/x86/socwatchhv/inc/sw_version.h +index 5797edffa64d..5476b0d79ac5 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_version.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_version.h +@@ -71,4 +71,4 @@ + #define SOCWATCH_VERSION_MINOR 8 + #define SOCWATCH_VERSION_OTHER 0 + +-#endif // __SW_VERSION_H__ ++#endif /* __SW_VERSION_H__ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h b/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h +index 06a9e090932b..2bcc97a84bbc 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h +@@ -12,8 +12,8 @@ + #include + #include + #include +-#include // LINUX_VERSION_CODE +-#include // for struct list_head ++#include /* LINUX_VERSION_CODE */ ++#include /* for struct list_head */ + + #include "swhv_defines.h" + #include "pw_version.h" +@@ -78,7 +78,7 @@ struct profiling_msr_op { + struct profiling_msr_ops_list { + int32_t collector_id; + uint32_t num_entries; +- int32_t msr_op_state; // enum value from 'MSR_CMD_STATUS' ++ int32_t msr_op_state; /* enum value from 'MSR_CMD_STATUS' */ + struct profiling_msr_op entries[MAX_MSR_LIST_NUM]; + }; + +@@ -114,4 +114,4 @@ typedef struct swhv_acrn_msr_collector_data { + struct profiling_msr_ops_list *msr_ops_list; + size_t per_msg_payload_size; + } swhv_acrn_msr_collector_data_t; +-#endif // _SWHV_ACRN_H_ ++#endif /* _SWHV_ACRN_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h b/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h +index c5a08d1025ae..5f62c2d43e11 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h +@@ -84,7 +84,7 @@ int sbuf_get_variable(struct shared_buf *sbuf, void **data, uint32_t size) + + from = (void 
*)sbuf + SBUF_HEAD_SIZE + sbuf->head; + +- if (next_head < sbuf->head) { // wrap-around ++ if (next_head < sbuf->head) { /* wrap-around */ + /* copy first part */ + offset = sbuf->size - sbuf->head; + memcpy(*data, from, offset); +@@ -140,7 +140,7 @@ int sbuf_get_wrapper(struct shared_buf *sbuf, uint8_t **data) + + header = vmalloc(sizeof(ACRN_MSG_HEADER_SIZE)); + memset(header, 0, sizeof(ACRN_MSG_HEADER_SIZE)); +- //read header ++ /*read header */ + sbuf_get(sbuf, (uint8_t *)header); + + payload_size = header->payload_size; +@@ -149,7 +149,7 @@ int sbuf_get_wrapper(struct shared_buf *sbuf, uint8_t **data) + + sample = vmalloc(sample_size); + +- //copy header ++ /*copy header */ + memcpy((void *)sample, (void *)header, ACRN_MSG_HEADER_SIZE); + + sample_offset += ACRN_MSG_HEADER_SIZE; +@@ -183,4 +183,4 @@ int sbuf_get_wrapper(struct shared_buf *sbuf, uint8_t **data) + vfree(header); + return sample_size; + } +-#endif // _SWHV_ACRN_SBUF_H_ ++#endif /* _SWHV_ACRN_SBUF_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_defines.h b/drivers/platform/x86/socwatchhv/inc/swhv_defines.h +index 65239d566ae1..2f51a5d760f6 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_defines.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_defines.h +@@ -66,26 +66,26 @@ + #define PW_ERROR 1 + #define PW_SUCCESS_NO_COLLECT 2 + +-// +-// Start off with none of the OS'es are defined +-// ++/* ++ * Start off with none of the OS'es are defined ++ */ + #undef SWDRV_OS_LINUX + #undef SWDRV_OS_ANDROID + #undef SWDRV_OS_UNIX + +-// +-// Make sure none of the architectures is defined here +-// ++/* ++ * Make sure none of the architectures is defined here ++ */ + #undef SWDRV_IA32 + #undef SWDRV_EM64T + +-// +-// Make sure one (and only one) of the OS'es gets defined here +-// +-// Unfortunately entirex defines _WIN32 so we need to check for linux +-// first. The definition of these flags is one and only one +-// _OS_xxx is allowed to be defined. 
+-// ++/* ++ * Make sure one (and only one) of the OS'es gets defined here ++ * ++ * Unfortunately entirex defines _WIN32 so we need to check for linux ++ * first. The definition of these flags is one and only one ++ * _OS_xxx is allowed to be defined. ++ */ + #if defined(__ANDROID__) + #define SWDRV_OS_ANDROID + #define SWDRV_OS_UNIX +@@ -96,10 +96,10 @@ + #error "Compiling for an unknown OS" + #endif + +-// +-// Make sure one (and only one) architecture is defined here +-// as well as one (and only one) pointer__ size +-// ++/* ++ * Make sure one (and only one) architecture is defined here ++ * as well as one (and only one) pointer__ size ++ */ + #if defined(_M_IX86) || defined(__i386__) + #define SWDRV_IA32 + #elif defined(_M_AMD64) || defined(__x86_64__) +@@ -108,4 +108,4 @@ + #error "Unknown architecture for compilation" + #endif + +-#endif // _SWHV_DEFINES_H_ ++#endif /* _SWHV_DEFINES_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_driver.h b/drivers/platform/x86/socwatchhv/inc/swhv_driver.h +index 8ad0d672f095..f2f9f662b311 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_driver.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_driver.h +@@ -57,8 +57,8 @@ + #ifndef _SWHV_DRIVER_H_ + #define _SWHV_DRIVER_H_ 1 + +-#include // LINUX_VERSION_CODE +-#include // vmalloc ++#include /* LINUX_VERSION_CODE */ ++#include /* vmalloc */ + #include "swhv_defines.h" + #include "sw_kernel_defines.h" + #include "pw_version.h" +@@ -68,17 +68,17 @@ + #define MOBILEVISOR 1 + #define ACRN 2 + +-// define this flag to have IDT entry programmed for SoCWatch IRQ handler ++/* define this flag to have IDT entry programmed for SoCWatch IRQ handler */ + #define SOCWATCH_IDT_IRQ 1 + + extern void SYS_Perfvec_Handler(void); + extern short SYS_Get_cs(void); + + #if defined(SWDRV_IA32) && (SOCWATCH_IDT_IRQ) +-extern void *SYS_Get_IDT_Base_HWR(void); /// IDT base from hardware IDTR ++extern void *SYS_Get_IDT_Base_HWR(void); /* IDT base from hardware IDTR */ + + #define 
SYS_Get_IDT_Base SYS_Get_IDT_Base_HWR +-#endif // defined(SWDRV_IA32) && (SOCWATCH_IDT_IRQ) ++#endif /* defined(SWDRV_IA32) && (SOCWATCH_IDT_IRQ) */ + + #if defined(SWDRV_EM64T) && (SOCWATCH_IDT_IRQ) + extern void SYS_Get_IDT_Base(void **); +@@ -87,10 +87,10 @@ extern void SYS_Get_IDT_Base(void **); + typedef struct gate_struct gate_struct_t; + #else + typedef struct gate_struct64 gate_struct_t; +-#endif // LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) +-#endif // defined(SWDRV_EM64T) && (SOCWATCH_IDT_IRQ) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) */ ++#endif /* defined(SWDRV_EM64T) && (SOCWATCH_IDT_IRQ) */ + +-// miscellaneous defines ++/* miscellaneous defines */ + #define CPU() (raw_smp_processor_id()) + #define GET_BOOL_STRING(b) ((b) ? "TRUE" : "FALSE") + +@@ -106,4 +106,4 @@ typedef struct gate_struct64 gate_struct_t; + + typedef struct PWCollector_msg PWCollector_msg_t; + +-#endif // _SWHV_DRIVER_H_ ++#endif /* _SWHV_DRIVER_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h b/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h +index 690bbcd5ccba..0d2a368c12ca 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h +@@ -63,11 +63,11 @@ + #include + #if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) + #include +-#endif // COMPAT && x64 +-#else // !__KERNEL__ ++#endif /* COMPAT && x64 */ ++#else /* !__KERNEL__ */ + #include +-#endif // __KERNEL__ +-#endif // __linux__ ++#endif /* __KERNEL__ */ ++#endif /* __linux__ */ + /* + * Path to the Hypervisor driver device file. 
+ */ +@@ -159,6 +159,6 @@ enum swhv_ioctl_cmd { + _IOWR(SP_IOC_MAGIC, SWHVDRV_OPERATION_MSR_READ, compat_uptr_t) + #define SWHVDRV_IOCTL_POLL32 \ + _IO(SP_IOC_MAGIC, SWHVDRV_OPERATION_POLL, compat_uptr_t) +-#endif // COMPAT && x64 ++#endif /* COMPAT && x64 */ + +-#endif // __SWHV_IOCTL_H__ ++#endif /* __SWHV_IOCTL_H__ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_structs.h b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h +index 67bac8e36ad4..d5fd717511ba 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_structs.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h +@@ -117,26 +117,37 @@ struct swhv_driver_switch_io_descriptor { + + #pragma pack(push, 1) + typedef struct swhv_driver_io_descriptor { +- pw_u16_t collection_type; // One of 'enum swhv_collector_type' +- pw_s16_t collection_command; // One of 'enum swhv_io_cmd' +- pw_u16_t counter_size_in_bytes; // The number of bytes to READ or WRITE ++ pw_u16_t collection_type; /* One of 'enum swhv_collector_type' */ ++ pw_s16_t collection_command; /* One of 'enum swhv_io_cmd' */ ++ pw_u16_t counter_size_in_bytes; /* The number of bytes to ++ * READ or WRITE ++ */ + union { + struct swhv_driver_msr_io_descriptor msr_descriptor; + struct swhv_driver_switch_io_descriptor switch_descriptor; + }; +- pw_u64_t write_value; // The value to WRITE ++ pw_u64_t write_value; /* The value to WRITE */ + } swhv_driver_io_descriptor_t; + #pragma pack(pop) + + #pragma pack(push, 1) + struct swhv_driver_interface_info { +- pw_s16_t cpu_mask; // On which CPU(s) should the driver read the data? +- // Currently: -2 ==> read on ALL CPUs, +- // -1 ==> read on ANY CPU, +- // >= 0 ==> the specific CPU to read on +- pw_s16_t sample_id; // Sample ID, used to map it back to Metric Plugin, Metric and Msg ID combo +- pw_u16_t num_io_descriptors; // Number of descriptors in the array, below. +- pw_u8_t descriptors[1]; // Array of swhv_driver_io_descriptor structs. 
++ pw_s16_t cpu_mask; /* On which CPU(s) should the driver ++ * read the data? ++ */ ++ /* Currently: -2 ==> read on ALL CPUs, ++ * -1 ==> read on ANY CPU, ++ * >= 0 ==> the specific CPU to read on ++ */ ++ pw_s16_t sample_id; /* Sample ID, used to map it back ++ * to Metric Plugin, Metric and Msg ID combo ++ */ ++ pw_u16_t num_io_descriptors; /* Number of descriptors in the array, ++ * below. ++ */ ++ pw_u8_t descriptors[1]; /* Array of swhv_driver_io_descriptor ++ * structs. ++ */ + }; + #pragma pack(pop) + #define SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ +@@ -144,8 +155,13 @@ struct swhv_driver_interface_info { + + #pragma pack(push, 1) + struct swhv_driver_interface_msg { +- pw_u16_t num_infos; // Number of 'swhv_driver_interface_info' structs contained within the 'infos' variable, below +- // pw_u16_t infos_size_bytes; // Size of data inlined within the 'infos' variable, below ++ pw_u16_t num_infos; /* Number of 'swhv_driver_interface_info' ++ * structs contained within the 'infos' variable, ++ * below ++ */ ++ /* pw_u16_t infos_size_bytes; Size of data inlined within ++ * the 'infos' variable, below ++ */ + pw_u8_t infos[1]; + }; + #pragma pack(pop) +@@ -161,7 +177,7 @@ struct swhv_driver_interface_msg { + typedef enum PROFILING_SOCWATCH_FEATURE { + SOCWATCH_COMMAND = 0, + SOCWATCH_VM_SWITCH_TRACING, +- MAX_SOCWATCH_FEATURE_ID, ++ MAX_SOCWATCH_FEATURE_ID + } profiling_socwatch_feature; + + typedef enum PROFILING_SOCWATCH_FEATURE acrn_type; +@@ -170,22 +186,22 @@ typedef enum PROFILING_SOCWATCH_FEATURE acrn_type; + * current default ACRN header + */ + struct data_header { +- uint32_t collector_id; ++ int32_t collector_id; + uint16_t cpu_id; + uint16_t data_type; + uint64_t tsc; + uint64_t payload_size; + uint64_t reserved; + } __attribute__((aligned(32))); +-#define ACRN_MSG_HEADER_SIZE (sizeof(struct data_header)) ++#define ACRN_MSG_HEADER_SIZE ((uint64_t)sizeof(struct data_header)) + + struct vm_switch_trace { ++ uint64_t vm_enter_tsc; ++ uint64_t 
vm_exit_tsc; ++ uint64_t vm_exit_reason; + int32_t os_id; +- uint64_t vmenter_tsc; +- uint64_t vmexit_tsc; +- uint64_t vmexit_reason; + } __attribute__((aligned(32))); +-#define VM_SWITCH_TRACE_SIZE (sizeof(struct vm_switch_trace)) ++#define VM_SWITCH_TRACE_SIZE ((uint64_t)sizeof(struct vm_switch_trace)) + + #define MAX_NR_VCPUS 8 + #define MAX_NR_VMS 6 +@@ -197,7 +213,7 @@ struct profiling_vcpu_pcpu_map { + } __attribute__((aligned(8))); + + struct profiling_vm_info { +- int32_t vm_id; ++ int32_t vm_id_num; + unsigned char guid[16]; + char vm_name[16]; + int32_t num_vcpus; +@@ -218,9 +234,9 @@ typedef struct vm_switch_trace vmswitch_trace_t; + /* + * ACRN specific constants shared between the driver and user-mode + */ +-// Per CPU buffer size ++/* Per CPU buffer size */ + #define ACRN_BUF_SIZE ((4 * 1024 * 1024) - SBUF_HEAD_SIZE /* 64 bytes */) +-// Size of buffer at which data should be transferred to user-mode ++/* Size of buffer at which data should be transferred to user-mode */ + #define ACRN_BUF_TRANSFER_SIZE (ACRN_BUF_SIZE / 2) + /* + * The ACRN 'sbuf' buffers consist of fixed size elements. +@@ -231,4 +247,4 @@ typedef struct vm_switch_trace vmswitch_trace_t; + #define ACRN_BUF_ELEMENT_NUM (ACRN_BUF_SIZE / ACRN_BUF_ELEMENT_SIZE) + #define ACRN_BUF_FILLED_SIZE(sbuf) (sbuf->size - sbuf_available_space(sbuf)) + +-#endif // _SWHV_STRUCTS_H_ ++#endif /* _SWHV_STRUCTS_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/swhv_acrn.c b/drivers/platform/x86/socwatchhv/swhv_acrn.c +index 926ff09819a7..962db47cec45 100644 +--- a/drivers/platform/x86/socwatchhv/swhv_acrn.c ++++ b/drivers/platform/x86/socwatchhv/swhv_acrn.c +@@ -34,10 +34,9 @@ + static int pcpu_num; + bool flush_mode; + +-wait_queue_head_t read_queue; +- +-//TODO is this needed? +-//module_param(nr_cpus, int, S_IRUSR | S_IWUSR); ++/* TODO is this needed? 
++ * module_param(nr_cpus, int, S_IRUSR | S_IWUSR); ++ */ + + static struct shared_buf **sbuf_per_cpu; + +@@ -101,7 +100,7 @@ int swhv_add_driver_msr_io_desc(struct swhv_acrn_msr_collector_data *node, + pw_u16_t num_entries; + struct profiling_msr_op *msr_op = NULL; + +- // Confirm this is an MSR IO descriptor ++ /* Confirm this is an MSR IO descriptor */ + if (info->collection_type != SWHV_COLLECTOR_TYPE_MSR) { + pw_pr_error( + "ERROR trying to configure MSR collector with other data!\n"); +@@ -165,7 +164,7 @@ int swhv_init_per_cpu_buffers(void) + } + } + +- //TODO understand the use of this API ++ /* TODO understand the use of this API */ + foreach_cpu(cpu, pcpu_num) + { + ret = sbuf_share_setup(cpu, ACRN_SOCWATCH, sbuf_per_cpu[cpu]); +@@ -199,8 +198,9 @@ void swhv_destroy_per_cpu_buffers(void) + + foreach_cpu(cpu, pcpu_num) + { +- //TODO anything else to de-register? +- /* deregister devices */ ++ /* TODO anything else to de-register? ++ * deregister devices ++ */ + + /* set sbuf pointer to NULL in HV */ + sbuf_share_setup(cpu, ACRN_SOCWATCH, NULL); +@@ -254,18 +254,21 @@ void swhv_handle_hypervisor_collector(uint32_t control_cmd) + + acrn_profiling_control->collector_id = COLLECTOR_SOCWATCH; + +- if (control_cmd == 1) { // start collection + send switch bitmask ++ if (control_cmd == 1) { /* start collection + send switch bitmask */ + pw_pr_debug("STARTING ACRN PROFILING SERVICE\n"); + global_collection_switch |= +- control_cmd; // first bit controls start/stop +- // of collection +- } else if (control_cmd == 0) { // stop collection + reset switch bitmask ++ control_cmd; /* first bit controls start/stop ++ * of collection ++ */ ++ } else if (control_cmd == 0) { /* stop collection ++ * + reset switch bitmask ++ */ + pw_pr_debug("STOPPING ACRN PROFILING SERVICE\n"); + global_collection_switch = control_cmd; + } + acrn_profiling_control->switches = global_collection_switch; + +- // send collection command + switch bitmask ++ /* send collection command + switch 
bitmask */ + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, + virt_to_phys(acrn_profiling_control)); + kfree(acrn_profiling_control); +@@ -277,7 +280,6 @@ int swhv_handle_msr_collector_list(void) + + SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; + int retVal = PW_SUCCESS; +- int dummy_cpu = 0; + struct swhv_acrn_msr_collector_data *curr = NULL; + + if (SW_LIST_EMPTY(&swhv_msr_collector)) { +@@ -292,18 +294,10 @@ int swhv_handle_msr_collector_list(void) + { + pw_pr_debug("HANDLING MSR NODE\n"); + +- //hypervisor call to do immediate MSR read ++ /*hypervisor call to do immediate MSR read */ + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, + virt_to_phys(curr->msr_ops_list)); + } +- if (buffer_not_ready(&dummy_cpu) == false) { +- /* +- * force the device_read function to check if any buffers are +- * filled with data above 'ACRN_BUF_TRANSFER_SIZE' size and +- * if yes, copy to userspace +- */ +- wake_up_interruptible(&read_queue); +- } + return retVal; + } + +@@ -341,7 +335,7 @@ long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, + */ + swhv_destroy_msr_collector_list(); + +- // clear the collection bitmask ++ /* clear the collection bitmask */ + global_collection_switch = 0; + + num_infos = local_msg->num_infos; +@@ -394,8 +388,9 @@ long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, + + global_collection_switch = local_config_bitmap; + +- // only one set of collection switches are +- // expected, we are done configuring ++ /* only one set of collection switches are ++ * expected, we are done configuring ++ */ + done = 1; + break; + } else { +@@ -412,23 +407,19 @@ long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, + + long swhv_stop(void) + { +- uint32_t control = 0; // stop collection command ++ uint32_t control = 0; /* stop collection command */ + + pw_pr_debug("socwatch: stop called\n"); + +- //If MSR ops are present, perform them to get begin snapshot data. 
++ /*If MSR ops are present, perform them to get begin snapshot data. */ + swhv_handle_msr_collector_list(); + +- // stop collection + reset switch bitmask ++ /* stop collection + reset switch bitmask */ + swhv_handle_hypervisor_collector(control); + +- // flush partially filled hypervisor buffers ++ /* flush partially filled hypervisor buffers */ + flush_mode = true; + +- // force the device_read function to check if any +- // buffers are partially filled with data +- wake_up_interruptible(&read_queue); +- + /* + * Clear out the MSR collector list. + */ +@@ -439,7 +430,7 @@ long swhv_stop(void) + + long swhv_start(void) + { +- uint32_t control = 1; // start collection command ++ uint32_t control = 1; /* start collection command */ + #if 0 + struct profiling_vm_info_list *vm_info_list = NULL; + int i; +@@ -448,15 +439,16 @@ long swhv_start(void) + + flush_mode = false; + +- // start collection + send switch bitmask ++ /* start collection + send switch bitmask */ + swhv_handle_hypervisor_collector(control); + +- //If MSR ops are present, perform them to get begin snapshot data. ++ /* If MSR ops are present, perform them to get begin snapshot data. */ + swhv_handle_msr_collector_list(); + + #if 0 +- // Expand this eventually to retrieve VM-realted info from the hypervisor +- // Leaving it here for now. ++ /* Expand this eventually to retrieve VM-related info ++ * from the hypervisor. Leaving it here for now. 
++ */ + vm_info_list = kmalloc(sizeof(struct profiling_vm_info_list), + GFP_KERNEL); + memset(vm_info_list, 0, sizeof(struct profiling_vm_info_list)); +@@ -541,16 +533,16 @@ long swhv_msr_read(u32 __user *remote_in_args, u64 __user *remote_args) + msr_read_ops_list[cpu].entries[0].msr_id = msr_addr; + msr_read_ops_list[cpu].entries[0].msr_op_type = MSR_OP_READ; + msr_read_ops_list[cpu].entries[1].msr_id = +- -1; // the next entry is expected to be set to -1 ++ -1; /* the next entry is expected to be set to -1 */ + msr_read_ops_list[cpu].entries[1].param = +- 0; // set to 0 to not generate sample in hypervisor ++ 0; /* set to 0 to not generate sample in hypervisor */ + } + +- //hypervisor call to do immediate MSR read ++ /* hypervisor call to do immediate MSR read */ + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, + virt_to_phys(msr_read_ops_list)); + +- // copy value to remote args, pick from any CPU ++ /* copy value to remote args, pick from any CPU */ + value = msr_read_ops_list[0].entries[0].value; + + if (copy_to_user(remote_args, &value, sizeof(value))) { +@@ -617,7 +609,7 @@ ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, + goto ret_free; + } + if (bytes_read) { +- // copy data to device file ++ /* copy data to device file */ + if (bytes_read > bytes_to_read) { + pw_pr_error("user buffer is too small\n"); + ret = -PW_ERROR; +@@ -626,14 +618,14 @@ ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, + + bytes_not_copied = copy_to_user(user_buffer, data_read, + bytes_read); +- //TODO check if this is meaningful enough to have +- //*offset += bytes_read - bytes_not_copied; ++ /* TODO check if this is meaningful enough to have */ ++ /* *offset += bytes_read - bytes_not_copied; */ + + if (bytes_not_copied) { + pw_pr_error( + "transferring data to user mode failed, bytes %ld\n", + bytes_not_copied); +- // copy_to_user returns an unsigned ++ /* copy_to_user returns an unsigned */ + ret = -EIO; + goto 
ret_free; + } +@@ -652,9 +644,10 @@ ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, + + bool buffer_not_ready(int *cpu) + { +- // cycle through and confirm buffers on all CPUs +- // are less than ACRN_BUF_TRANSFER_SIZE +- // as well as flush mode has not been requested ++ /* cycle through and confirm buffers on all CPUs ++ * are less than ACRN_BUF_TRANSFER_SIZE ++ * as well as flush mode has not been requested ++ */ + int i = 0; + bool not_enough_data = true; + +@@ -688,12 +681,10 @@ ssize_t device_read_i(struct file *file, char __user *user_buffer, + int cpu = 0; + + pw_pr_debug("%s - usermode attempting to read device file\n", __func__); +- +- if (wait_event_interruptible(read_queue, !buffer_not_ready(&cpu))) { +- pw_pr_error("%s - wait_event_interruptible failed\n", __func__); +- return -ERESTARTSYS; ++ if (buffer_not_ready(&cpu)) { ++ pw_pr_debug("%s - no buffer ready to be read\n", __func__); ++ return bytes_read; + } +- pw_pr_debug("%s - wait_event cleared\n", __func__); + + if (flush_mode) { + pw_pr_debug("flush mode on, ready to flush a buffer\n"); +@@ -708,7 +699,7 @@ ssize_t device_read_i(struct file *file, char __user *user_buffer, + + void cleanup_error_i(void) + { +- // NOP for acrn ++ /* NOP for acrn */ + } + + int swhv_load_driver_i(void) +@@ -729,10 +720,6 @@ int swhv_load_driver_i(void) + return ret; + } + +- // initialize a work queue to be used for signalling when +- // data is ready to copy to usermode +- init_waitqueue_head(&read_queue); +- + swhv_init_msr_collector_list(); + + return ret; +diff --git a/drivers/platform/x86/socwatchhv/swhv_driver.c b/drivers/platform/x86/socwatchhv/swhv_driver.c +index 369d8a69158f..7a4e6c57ab45 100644 +--- a/drivers/platform/x86/socwatchhv/swhv_driver.c ++++ b/drivers/platform/x86/socwatchhv/swhv_driver.c +@@ -85,6 +85,7 @@ + * Compile-time constants + * ******************************************* + */ ++ + /* ******************************************* + * Local data structures. 
+ * ******************************************* +@@ -101,7 +102,7 @@ struct spdrv_ioctl_arg32 { + compat_caddr_t in_arg; + compat_caddr_t out_arg; + }; +-#endif // COMPAT && x64 ++#endif /* COMPAT && x64 */ + + static int sp_dev_major_num = -1; + static dev_t sp_dev; +@@ -114,7 +115,7 @@ static struct class *sp_class; + */ + + /* Per-CPU variable containing the currently running vcpu. */ +-//static DEFINE_PER_CPU(int, curr_vcpu) = 0; ++/*static DEFINE_PER_CPU(int, curr_vcpu) = 0; */ + + /* ******************************************* + * Function definitions. +@@ -273,7 +274,7 @@ static long device_compat_ioctl(struct file *file, unsigned int ioctl_num, + } + return handle_ioctl(_IOC_NR(ioctl_num), remote_args); + }; +-#endif // COMPAT && x64 ++#endif /* COMPAT && x64 */ + + static int device_open(struct inode *inode, struct file *file) + { +@@ -295,7 +296,7 @@ static struct file_operations s_fops = { + .unlocked_ioctl = &device_unlocked_ioctl, + #if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) + .compat_ioctl = &device_compat_ioctl, +-#endif // COMPAT && x64 ++#endif /* COMPAT && x64 */ + }; + + static void cleanup_error(void) +@@ -312,7 +313,7 @@ int __init swhv_load_driver(void) + int error; + struct device *dev; + +- // create the char device "sp" ++ /* create the char device "sp" */ + alloc_chrdev_region(&sp_dev, 0, 1, SWHV_DEVICE_NAME); + sp_dev_major_num = MAJOR(sp_dev); + sp_class = class_create(THIS_MODULE, SWHV_DEVICE_NAME); +@@ -354,7 +355,7 @@ int __init swhv_load_driver(void) + cleanup_return_error: + cleanup_error_i(); + +- // release char device ++ /* release char device */ + cleanup_error(); + return error; + } +@@ -363,7 +364,7 @@ static void __exit swhv_unload_driver(void) + { + swhv_unload_driver_i(); + +- // release char device ++ /* release char device */ + cleanup_error(); + } + +-- +2.17.1 + diff --git a/patches/0005-powercap-intel_rapl-add-support-for-ElkhartLake.core-ehl 
b/patches/0005-powercap-intel_rapl-add-support-for-ElkhartLake.core-ehl new file mode 100644 index 0000000000..8727864ef0 --- /dev/null +++ b/patches/0005-powercap-intel_rapl-add-support-for-ElkhartLake.core-ehl @@ -0,0 +1,30 @@ +From ac762ce78ea1cb7f2d138b7bb0d2b5657b3ccbc0 Mon Sep 17 00:00:00 2001 +From: Jacob Pan +Date: Wed, 12 Sep 2018 16:36:54 +0530 +Subject: [PATCH 05/12] powercap/intel_rapl: add support for ElkhartLake + +This is initial data for pre-si environment. Not for upstream. + +Change-Id: I1a1502d99be954a0ec836bf61be8170e40ac0d74 +Signed-off-by: Jacob Pan +Signed-off-by: Rajneesh Bhardwaj +Signed-off-by: Zhang Rui +--- + drivers/powercap/intel_rapl_common.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c +index 94ddd7d659c8..50d41262d850 100644 +--- a/drivers/powercap/intel_rapl_common.c ++++ b/drivers/powercap/intel_rapl_common.c +@@ -986,6 +986,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { + INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core), ++ INTEL_CPU_FAM6(ATOM_TREMONT, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core), + + INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server), +-- +2.17.1 + diff --git a/patches/0005-rpmb-add-Replay-Protected-Memory-Block-RPMB-subsy.security b/patches/0005-rpmb-add-Replay-Protected-Memory-Block-RPMB-subsy.security new file mode 100644 index 0000000000..4eddb3e89f --- /dev/null +++ b/patches/0005-rpmb-add-Replay-Protected-Memory-Block-RPMB-subsy.security @@ -0,0 +1,746 @@ +From 6da799c9ff1ca792f9ef8b3d8ed5e88bbb536c43 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Sun, 1 Feb 2015 10:17:24 +0200 +Subject: [PATCH 05/65] rpmb: add Replay Protected Memory Block (RPMB) + subsystem + +Few storage technologies such is EMMC, UFS, and NVMe support RPMB +a hardware partition with 
common protocol and frame layout. +The RPMB partition cannot be accessed via standard block layer, +but by a set of specific commands: WRITE, READ, GET_WRITE_COUNTER, +and PROGRAM_KEY. +Such a partition provides authenticated and replay protected access, +hence suitable as a secure storage. + +The RPMB layer aims to provide in-kernel API for Trusted Execution +Environment (TEE) devices that are capable to securely compute block +frame signature. In case a TEE device wishes to store a replay protected +data, it creates an RPMB frame with requested data and computes HMAC of +the frame, then it requests the storage device via RPMB layer to store +the data. +A TEE device driver can claim the RPMB interface, for example, via +class_interface_register(). +The RPMB layer provides an API for issuing a sequence of RPMB protocol +frames via rpmb_cmd_seq() call. + +A storage device registers its RPMB (eMMC) partition, RPMB +W-LUN (UFS), or RPMB target NVMe with the RPMB layer providing an +implementation for rpmb_cmd_seq() handler, that enables +sending sequence of RPMB standard frames and set of attributes. + +V2: added short workflow description in the commit message +V3: commit message fix +V4: resend +V5: add rpmb sequence interface. +V6: 1. More info in the commit message + 2. Define simulation device type +V7: resend +V8: 1. Add rpmb_cmd_req_write/read helper functions. + 2. Fix minor checkpatch warning. + 3. Change the license to Dual BSD/GPL +V9: 1. Drop rpmb_cmd_req interface. + 2. Add NVME type + 3. Support for multiple RPMB partition on same device. + 4. Add additional information about partition. + 5. Add driver data access functions. + 6. Add SPDX identifiers. + 7. Unexport rpmb_dev_find_device() +V10:1. Adjust kdoc + 1. 
Use GPL v2 license + +Change-Id: I830751859c2aed519c41a8123bd96c7a7243262a +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +Tested-by: Avri Altman +--- + MAINTAINERS | 7 + + drivers/char/Kconfig | 2 + + drivers/char/Makefile | 1 + + drivers/char/rpmb/Kconfig | 11 ++ + drivers/char/rpmb/Makefile | 7 + + drivers/char/rpmb/core.c | 343 +++++++++++++++++++++++++++++++++++++ + include/linux/rpmb.h | 250 +++++++++++++++++++++++++++ + 7 files changed, 621 insertions(+) + create mode 100644 drivers/char/rpmb/Kconfig + create mode 100644 drivers/char/rpmb/Makefile + create mode 100644 drivers/char/rpmb/core.c + create mode 100644 include/linux/rpmb.h + +diff --git a/MAINTAINERS b/MAINTAINERS +index e51a68bf8ca8..a9bb0d4fe7ff 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -13976,6 +13976,13 @@ F: include/net/rose.h + F: include/uapi/linux/rose.h + F: net/rose/ + ++RPMB SUBSYSTEM ++M: Tomas Winkler ++L: linux-kernel@vger.kernel.org ++S: Supported ++F: drivers/char/rpmb/* ++F: include/linux/rpmb.h ++ + RTL2830 MEDIA DRIVER + M: Antti Palosaari + L: linux-media@vger.kernel.org +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index df0fc997dc3e..cce9f7da3446 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -535,6 +535,8 @@ config ADI + and SSM (Silicon Secured Memory). Intended consumers of this + driver include crash and makedumpfile. 
+ ++source "drivers/char/rpmb/Kconfig" ++ + endmenu + + config RANDOM_TRUST_CPU +diff --git a/drivers/char/Makefile b/drivers/char/Makefile +index 7c5ea6f9df14..f9f7b5de84a3 100644 +--- a/drivers/char/Makefile ++++ b/drivers/char/Makefile +@@ -52,3 +52,4 @@ js-rtc-y = rtc.o + obj-$(CONFIG_XILLYBUS) += xillybus/ + obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o + obj-$(CONFIG_ADI) += adi.o ++obj-$(CONFIG_RPMB) += rpmb/ +diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig +new file mode 100644 +index 000000000000..431c2823cf70 +--- /dev/null ++++ b/drivers/char/rpmb/Kconfig +@@ -0,0 +1,11 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# Copyright (c) 2015-2019, Intel Corporation. ++ ++config RPMB ++ tristate "RPMB partition interface" ++ help ++ Unified RPMB partition interface for eMMC and UFS. ++ Provides interface for in kernel security controllers to ++ access RPMB partition. ++ ++ If unsure, select N. +diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile +new file mode 100644 +index 000000000000..24d4752a9a53 +--- /dev/null ++++ b/drivers/char/rpmb/Makefile +@@ -0,0 +1,7 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# Copyright (c) 2015-2019, Intel Corporation. ++ ++obj-$(CONFIG_RPMB) += rpmb.o ++rpmb-objs += core.o ++ ++ccflags-y += -D__CHECK_ENDIAN__ +diff --git a/drivers/char/rpmb/core.c b/drivers/char/rpmb/core.c +new file mode 100644 +index 000000000000..aa0ea4c3f1ce +--- /dev/null ++++ b/drivers/char/rpmb/core.c +@@ -0,0 +1,343 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2015 - 2019 Intel Corporation. All rights reserved. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++static DEFINE_IDA(rpmb_ida); ++ ++/** ++ * rpmb_dev_get() - increase rpmb device ref counter ++ * @rdev: rpmb device ++ */ ++struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev) ++{ ++ return get_device(&rdev->dev) ? 
rdev : NULL; ++} ++EXPORT_SYMBOL_GPL(rpmb_dev_get); ++ ++/** ++ * rpmb_dev_put() - decrease rpmb device ref counter ++ * @rdev: rpmb device ++ */ ++void rpmb_dev_put(struct rpmb_dev *rdev) ++{ ++ put_device(&rdev->dev); ++} ++EXPORT_SYMBOL_GPL(rpmb_dev_put); ++ ++/** ++ * rpmb_cmd_seq() - send RPMB command sequence ++ * @rdev: rpmb device ++ * @cmds: rpmb command list ++ * @ncmds: number of commands ++ * ++ * Return: ++ * * 0 on success ++ * * -EINVAL on wrong parameters ++ * * -EOPNOTSUPP if device doesn't support the requested operation ++ * * < 0 if the operation fails ++ */ ++int rpmb_cmd_seq(struct rpmb_dev *rdev, struct rpmb_cmd *cmds, u32 ncmds) ++{ ++ int err; ++ ++ if (!rdev || !cmds || !ncmds) ++ return -EINVAL; ++ ++ mutex_lock(&rdev->lock); ++ err = -EOPNOTSUPP; ++ if (rdev->ops && rdev->ops->cmd_seq) { ++ err = rdev->ops->cmd_seq(rdev->dev.parent, rdev->target, ++ cmds, ncmds); ++ } ++ mutex_unlock(&rdev->lock); ++ ++ return err; ++} ++EXPORT_SYMBOL_GPL(rpmb_cmd_seq); ++ ++/** ++ * rpmb_get_capacity() - returns the capacity of the rpmb device ++ * @rdev: rpmb device ++ * ++ * Return: ++ * * capacity of the device in units of 128K, on success ++ * * -EINVAL on wrong parameters ++ * * -EOPNOTSUPP if device doesn't support the requested operation ++ * * < 0 if the operation fails ++ */ ++int rpmb_get_capacity(struct rpmb_dev *rdev) ++{ ++ int err; ++ ++ if (!rdev) ++ return -EINVAL; ++ ++ mutex_lock(&rdev->lock); ++ err = -EOPNOTSUPP; ++ if (rdev->ops && rdev->ops->get_capacity) ++ err = rdev->ops->get_capacity(rdev->dev.parent, rdev->target); ++ mutex_unlock(&rdev->lock); ++ ++ return err; ++} ++EXPORT_SYMBOL_GPL(rpmb_get_capacity); ++ ++static void rpmb_dev_release(struct device *dev) ++{ ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ ++ ida_simple_remove(&rpmb_ida, rdev->id); ++ kfree(rdev); ++} ++ ++struct class rpmb_class = { ++ .name = "rpmb", ++ .owner = THIS_MODULE, ++ .dev_release = rpmb_dev_release, ++}; ++EXPORT_SYMBOL(rpmb_class); ++ ++/** ++ 
* rpmb_dev_find_device() - return first matching rpmb device ++ * @data: data for the match function ++ * @match: the matching function ++ * ++ * Return: matching rpmb device or NULL on failure ++ */ ++static ++struct rpmb_dev *rpmb_dev_find_device(const void *data, ++ int (*match)(struct device *dev, ++ const void *data)) ++{ ++ struct device *dev; ++ ++ dev = class_find_device(&rpmb_class, NULL, data, match); ++ ++ return dev ? to_rpmb_dev(dev) : NULL; ++} ++ ++static int match_by_type(struct device *dev, const void *data) ++{ ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ const u32 *type = data; ++ ++ return (*type == RPMB_TYPE_ANY || rdev->ops->type == *type); ++} ++ ++/** ++ * rpmb_dev_get_by_type() - return first registered rpmb device ++ * with matching type. ++ * @type: rpbm underlying device type ++ * ++ * If run with RPMB_TYPE_ANY the first an probably only ++ * device is returned ++ * ++ * Return: matching rpmb device or NULL/ERR_PTR on failure ++ */ ++struct rpmb_dev *rpmb_dev_get_by_type(u32 type) ++{ ++ if (type > RPMB_TYPE_MAX) ++ return ERR_PTR(-EINVAL); ++ ++ return rpmb_dev_find_device(&type, match_by_type); ++} ++EXPORT_SYMBOL_GPL(rpmb_dev_get_by_type); ++ ++struct device_with_target { ++ const struct device *dev; ++ u8 target; ++}; ++ ++static int match_by_parent(struct device *dev, const void *data) ++{ ++ const struct device_with_target *d = data; ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ ++ return (d->dev && dev->parent == d->dev && rdev->target == d->target); ++} ++ ++/** ++ * rpmb_dev_find_by_device() - retrieve rpmb device from the parent device ++ * @parent: parent device of the rpmb device ++ * @target: RPMB target/region within the physical device ++ * ++ * Return: NULL if there is no rpmb device associated with the parent device ++ */ ++struct rpmb_dev *rpmb_dev_find_by_device(struct device *parent, u8 target) ++{ ++ struct device_with_target t; ++ ++ if (!parent) ++ return NULL; ++ ++ t.dev = parent; ++ t.target = target; ++ ++ 
return rpmb_dev_find_device(&t, match_by_parent); ++} ++EXPORT_SYMBOL_GPL(rpmb_dev_find_by_device); ++ ++/** ++ * rpmb_dev_unregister() - unregister RPMB partition from the RPMB subsystem ++ * @rdev: the rpmb device to unregister ++ * Return: ++ * * 0 on success ++ * * -EINVAL on wrong parameters ++ */ ++int rpmb_dev_unregister(struct rpmb_dev *rdev) ++{ ++ if (!rdev) ++ return -EINVAL; ++ ++ mutex_lock(&rdev->lock); ++ device_del(&rdev->dev); ++ mutex_unlock(&rdev->lock); ++ ++ rpmb_dev_put(rdev); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(rpmb_dev_unregister); ++ ++/** ++ * rpmb_dev_unregister_by_device() - unregister RPMB partition ++ * from the RPMB subsystem ++ * @dev: the parent device of the rpmb device ++ * @target: RPMB target/region within the physical device ++ * Return: ++ * * 0 on success ++ * * -EINVAL on wrong parameters ++ * * -ENODEV if a device cannot be find. ++ */ ++int rpmb_dev_unregister_by_device(struct device *dev, u8 target) ++{ ++ struct rpmb_dev *rdev; ++ ++ if (!dev) ++ return -EINVAL; ++ ++ rdev = rpmb_dev_find_by_device(dev, target); ++ if (!rdev) { ++ dev_warn(dev, "no disk found %s\n", dev_name(dev->parent)); ++ return -ENODEV; ++ } ++ ++ rpmb_dev_put(rdev); ++ ++ return rpmb_dev_unregister(rdev); ++} ++EXPORT_SYMBOL_GPL(rpmb_dev_unregister_by_device); ++ ++/** ++ * rpmb_dev_get_drvdata() - driver data getter ++ * @rdev: rpmb device ++ * ++ * Return: driver private data ++ */ ++void *rpmb_dev_get_drvdata(const struct rpmb_dev *rdev) ++{ ++ return dev_get_drvdata(&rdev->dev); ++} ++EXPORT_SYMBOL_GPL(rpmb_dev_get_drvdata); ++ ++/** ++ * rpmb_dev_set_drvdata() - driver data setter ++ * @rdev: rpmb device ++ * @data: data to store ++ */ ++void rpmb_dev_set_drvdata(struct rpmb_dev *rdev, void *data) ++{ ++ dev_set_drvdata(&rdev->dev, data); ++} ++EXPORT_SYMBOL_GPL(rpmb_dev_set_drvdata); ++ ++/** ++ * rpmb_dev_register - register RPMB partition with the RPMB subsystem ++ * @dev: storage device of the rpmb device ++ * @target: RPMB 
target/region within the physical device ++ * @ops: device specific operations ++ * ++ * Return: a pointer to rpmb device ++ */ ++struct rpmb_dev *rpmb_dev_register(struct device *dev, u8 target, ++ const struct rpmb_ops *ops) ++{ ++ struct rpmb_dev *rdev; ++ int id; ++ int ret; ++ ++ if (!dev || !ops) ++ return ERR_PTR(-EINVAL); ++ ++ if (!ops->cmd_seq) ++ return ERR_PTR(-EINVAL); ++ ++ if (!ops->get_capacity) ++ return ERR_PTR(-EINVAL); ++ ++ if (ops->type == RPMB_TYPE_ANY || ops->type > RPMB_TYPE_MAX) ++ return ERR_PTR(-EINVAL); ++ ++ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); ++ if (!rdev) ++ return ERR_PTR(-ENOMEM); ++ ++ id = ida_simple_get(&rpmb_ida, 0, 0, GFP_KERNEL); ++ if (id < 0) { ++ ret = id; ++ goto exit; ++ } ++ ++ mutex_init(&rdev->lock); ++ rdev->ops = ops; ++ rdev->id = id; ++ rdev->target = target; ++ ++ dev_set_name(&rdev->dev, "rpmb%d", id); ++ rdev->dev.class = &rpmb_class; ++ rdev->dev.parent = dev; ++ ret = device_register(&rdev->dev); ++ if (ret) ++ goto exit; ++ ++ dev_dbg(&rdev->dev, "registered device\n"); ++ ++ return rdev; ++ ++exit: ++ if (id >= 0) ++ ida_simple_remove(&rpmb_ida, id); ++ kfree(rdev); ++ return ERR_PTR(ret); ++} ++EXPORT_SYMBOL_GPL(rpmb_dev_register); ++ ++static int __init rpmb_init(void) ++{ ++ ida_init(&rpmb_ida); ++ class_register(&rpmb_class); ++ return 0; ++} ++ ++static void __exit rpmb_exit(void) ++{ ++ class_unregister(&rpmb_class); ++ ida_destroy(&rpmb_ida); ++} ++ ++subsys_initcall(rpmb_init); ++module_exit(rpmb_exit); ++ ++MODULE_AUTHOR("Intel Corporation"); ++MODULE_DESCRIPTION("RPMB class"); ++MODULE_LICENSE("GPL v2"); +diff --git a/include/linux/rpmb.h b/include/linux/rpmb.h +new file mode 100644 +index 000000000000..ab8d95b39a69 +--- /dev/null ++++ b/include/linux/rpmb.h +@@ -0,0 +1,250 @@ ++/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Intel Corp. 
All rights reserved ++ */ ++#ifndef __RPMB_H__ ++#define __RPMB_H__ ++ ++#include ++#include ++#include ++ ++/** ++ * struct rpmb_frame_jdec - rpmb frame as defined by JDEC specs ++ * ++ * @stuff : stuff bytes ++ * @key_mac : The authentication key or the message authentication ++ * code (MAC) depending on the request/response type. ++ * The MAC will be delivered in the last (or the only) ++ * block of data. ++ * @data : Data to be written or read by signed access. ++ * @nonce : Random number generated by the host for the requests ++ * and copied to the response by the RPMB engine. ++ * @write_counter: Counter value for the total amount of the successful ++ * authenticated data write requests made by the host. ++ * @addr : Address of the data to be programmed to or read ++ * from the RPMB. Address is the serial number of ++ * the accessed block (half sector 256B). ++ * @block_count : Number of blocks (half sectors, 256B) requested to be ++ * read/programmed. ++ * @result : Includes information about the status of the write counter ++ * (valid, expired) and result of the access made to the RPMB. ++ * @req_resp : Defines the type of request and response to/from the memory. 
++ */ ++struct rpmb_frame_jdec { ++ u8 stuff[196]; ++ u8 key_mac[32]; ++ u8 data[256]; ++ u8 nonce[16]; ++ __be32 write_counter; ++ __be16 addr; ++ __be16 block_count; ++ __be16 result; ++ __be16 req_resp; ++} __packed; ++ ++#define RPMB_PROGRAM_KEY 0x0001 /* Program RPMB Authentication Key */ ++#define RPMB_GET_WRITE_COUNTER 0x0002 /* Read RPMB write counter */ ++#define RPMB_WRITE_DATA 0x0003 /* Write data to RPMB partition */ ++#define RPMB_READ_DATA 0x0004 /* Read data from RPMB partition */ ++#define RPMB_RESULT_READ 0x0005 /* Read result request (Internal) */ ++ ++#define RPMB_REQ2RESP(_OP) ((_OP) << 8) ++#define RPMB_RESP2REQ(_OP) ((_OP) >> 8) ++ ++/** ++ * enum rpmb_op_result - rpmb operation results ++ * ++ * @RPMB_ERR_OK : operation successful ++ * @RPMB_ERR_GENERAL : general failure ++ * @RPMB_ERR_AUTH : mac doesn't match or ac calculation failure ++ * @RPMB_ERR_COUNTER : counter doesn't match or counter increment failure ++ * @RPMB_ERR_ADDRESS : address out of range or wrong address alignment ++ * @RPMB_ERR_WRITE : data, counter, or result write failure ++ * @RPMB_ERR_READ : data, counter, or result read failure ++ * @RPMB_ERR_NO_KEY : authentication key not yet programmed ++ * ++ * @RPMB_ERR_COUNTER_EXPIRED: counter expired ++ */ ++enum rpmb_op_result { ++ RPMB_ERR_OK = 0x0000, ++ RPMB_ERR_GENERAL = 0x0001, ++ RPMB_ERR_AUTH = 0x0002, ++ RPMB_ERR_COUNTER = 0x0003, ++ RPMB_ERR_ADDRESS = 0x0004, ++ RPMB_ERR_WRITE = 0x0005, ++ RPMB_ERR_READ = 0x0006, ++ RPMB_ERR_NO_KEY = 0x0007, ++ ++ RPMB_ERR_COUNTER_EXPIRED = 0x0080 ++}; ++ ++/** ++ * enum rpmb_type - type of underlying storage technology ++ * ++ * @RPMB_TYPE_ANY : any type, used for search only ++ * @RPMB_TYPE_EMMC : eMMC (JESD84-B50.1) ++ * @RPMB_TYPE_UFS : UFS (JESD220) ++ * @RPMB_TYPE_NVME : NVM Express Revision 1.3a ++ * @RPMB_TYPE_SIM : Simulation device. 
++ * @RPMB_TYPE_MAX : upper sentinel ++ */ ++enum rpmb_type { ++ RPMB_TYPE_ANY = 0, ++ RPMB_TYPE_EMMC, ++ RPMB_TYPE_UFS, ++ RPMB_TYPE_NVME, ++ ++ RPMB_TYPE_SIM = 0x0100, ++ RPMB_TYPE_MAX = RPMB_TYPE_SIM | RPMB_TYPE_NVME, ++}; ++ ++#define RPMB_TYPE_HW(_type) ((_type) & 0xFF) ++ ++extern struct class rpmb_class; ++ ++#define RPMB_F_WRITE BIT(0) ++#define RPMB_F_REL_WRITE BIT(1) ++ ++/** ++ * struct rpmb_cmd: rpmb access command ++ * ++ * @flags: command flags ++ * 0 - read command ++ * 1 - write command RPMB_F_WRITE ++ * 2 - reliable write RPMB_F_REL_WRITE ++ * @nframes: number of rpmb frames in the command ++ * @frames: list of rpmb frames ++ */ ++struct rpmb_cmd { ++ u32 flags; ++ u32 nframes; ++ void *frames; ++}; ++ ++enum rpmb_auth_method { ++ RPMB_HMAC_ALGO_SHA_256 = 0, ++}; ++ ++/** ++ * struct rpmb_ops - RPMB ops to be implemented by underlying block device ++ * ++ * @cmd_seq : send RPMB command sequence to the RPBM partition ++ * backed by the storage device to specific ++ * region(UFS)/target(NVMe) ++ * @get_capacity : rpmb size in 128K units in for region/target. ++ * @type : block device type eMMC, UFS, NVMe. ++ * @block_size : block size in half sectors (1 == 256B) ++ * @wr_cnt_max : maximal number of blocks that can be ++ * written in one access. ++ * @rd_cnt_max : maximal number of blocks that can be ++ * read in one access. 
++ * @auth_method : rpmb_auth_method ++ * @dev_id : unique device identifier ++ * @dev_id_len : unique device identifier length ++ */ ++struct rpmb_ops { ++ int (*cmd_seq)(struct device *dev, u8 target, ++ struct rpmb_cmd *cmds, u32 ncmds); ++ int (*get_capacity)(struct device *dev, u8 target); ++ u32 type; ++ u16 block_size; ++ u16 wr_cnt_max; ++ u16 rd_cnt_max; ++ u16 auth_method; ++ const u8 *dev_id; ++ size_t dev_id_len; ++}; ++ ++/** ++ * struct rpmb_dev - device which can support RPMB partition ++ * ++ * @lock : the device lock ++ * @dev : device ++ * @id : device id ++ * @target : RPMB target/region within the physical device ++ * @ops : operation exported by block layer ++ */ ++struct rpmb_dev { ++ struct mutex lock; /* device serialization lock */ ++ struct device dev; ++ int id; ++ u8 target; ++ const struct rpmb_ops *ops; ++}; ++ ++#define to_rpmb_dev(x) container_of((x), struct rpmb_dev, dev) ++ ++#if IS_ENABLED(CONFIG_RPMB) ++struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev); ++void rpmb_dev_put(struct rpmb_dev *rdev); ++struct rpmb_dev *rpmb_dev_find_by_device(struct device *parent, u8 target); ++struct rpmb_dev *rpmb_dev_get_by_type(u32 type); ++struct rpmb_dev *rpmb_dev_register(struct device *dev, u8 target, ++ const struct rpmb_ops *ops); ++void *rpmb_dev_get_drvdata(const struct rpmb_dev *rdev); ++void rpmb_dev_set_drvdata(struct rpmb_dev *rdev, void *data); ++int rpmb_dev_unregister(struct rpmb_dev *rdev); ++int rpmb_dev_unregister_by_device(struct device *dev, u8 target); ++int rpmb_cmd_seq(struct rpmb_dev *rdev, struct rpmb_cmd *cmds, u32 ncmds); ++int rpmb_get_capacity(struct rpmb_dev *rdev); ++ ++#else ++static inline struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev) ++{ ++ return NULL; ++} ++ ++static inline void rpmb_dev_put(struct rpmb_dev *rdev) { } ++ ++static inline struct rpmb_dev *rpmb_dev_find_by_device(struct device *parent, ++ u8 target) ++{ ++ return NULL; ++} ++ ++static inline ++struct rpmb_dev *rpmb_dev_get_by_type(enum 
rpmb_type type) ++{ ++ return NULL; ++} ++ ++static inline void *rpmb_dev_get_drvdata(const struct rpmb_dev *rdev) ++{ ++ return NULL; ++} ++ ++static inline void rpmb_dev_set_drvdata(struct rpmb_dev *rdev, void *data) ++{ ++} ++ ++static inline struct rpmb_dev * ++rpmb_dev_register(struct device *dev, u8 target, const struct rpmb_ops *ops) ++{ ++ return NULL; ++} ++ ++static inline int rpmb_dev_unregister(struct rpmb_dev *dev) ++{ ++ return 0; ++} ++ ++static inline int rpmb_dev_unregister_by_device(struct device *dev, u8 target) ++{ ++ return 0; ++} ++ ++static inline int rpmb_cmd_seq(struct rpmb_dev *rdev, ++ struct rpmb_cmd *cmds, u32 ncmds) ++{ ++ return 0; ++} ++ ++static inline int rpmb_get_capacity(struct rpmb_dev *rdev) ++{ ++ return 0; ++} ++ ++#endif /* CONFIG_RPMB */ ++ ++#endif /* __RPMB_H__ */ +-- +2.17.1 + diff --git a/patches/0005-trusty-Add-fiq-support.trusty b/patches/0005-trusty-Add-fiq-support.trusty new file mode 100644 index 0000000000..4ae0cd5927 --- /dev/null +++ b/patches/0005-trusty-Add-fiq-support.trusty @@ -0,0 +1,252 @@ +From 339c52411de8a215f234d63aff90f3a05e6ac229 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Tue, 26 Nov 2013 20:18:35 -0800 +Subject: [PATCH 05/63] trusty: Add fiq support +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Register a custom fiq-return handler with the fiq-glue so the +monitor mode can atomically re-enable the fiq and return to the +last mode. 
+ +Change-Id: I0016d67edccea096d7f189e223ac73cc20f79ac9 +Signed-off-by: Arve Hjønnevåg +--- + .../bindings/trusty/trusty-fiq-debugger.txt | 8 ++ + .../devicetree/bindings/trusty/trusty-fiq.txt | 8 ++ + drivers/trusty/Kconfig | 12 +++ + drivers/trusty/Makefile | 2 + + drivers/trusty/trusty-fiq-arm.c | 42 +++++++++ + drivers/trusty/trusty-fiq.c | 85 +++++++++++++++++++ + drivers/trusty/trusty-fiq.h | 16 ++++ + 7 files changed, 173 insertions(+) + create mode 100644 Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt + create mode 100644 Documentation/devicetree/bindings/trusty/trusty-fiq.txt + create mode 100644 drivers/trusty/trusty-fiq-arm.c + create mode 100644 drivers/trusty/trusty-fiq.c + create mode 100644 drivers/trusty/trusty-fiq.h + +diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt +new file mode 100644 +index 000000000000..18329d39487e +--- /dev/null ++++ b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt +@@ -0,0 +1,8 @@ ++Trusty fiq debugger interface ++ ++Provides a single fiq for the fiq debugger. ++ ++Required properties: ++- compatible: compatible = "android,trusty-fiq-v1-*"; where * is a serial port. ++ ++Must be a child of the node that provides fiq support ("android,trusty-fiq-v1"). +diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt +new file mode 100644 +index 000000000000..de810b955bc9 +--- /dev/null ++++ b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt +@@ -0,0 +1,8 @@ ++Trusty fiq interface ++ ++Trusty provides fiq emulation. ++ ++Required properties: ++- compatible: "android,trusty-fiq-v1" ++ ++Must be a child of the node that provides the trusty std/fast call interface. 
+diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index f577ae8acad3..3c725e29b399 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -8,4 +8,16 @@ config TRUSTY + tristate "Trusty" + default n + ++config TRUSTY_FIQ ++ tristate ++ depends on TRUSTY ++ ++config TRUSTY_FIQ_ARM ++ tristate ++ depends on TRUSTY ++ depends on ARM ++ select FIQ_GLUE ++ select TRUSTY_FIQ ++ default y ++ + endmenu +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index 89acb6f7868a..a01c82485eb6 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -4,3 +4,5 @@ + + obj-$(CONFIG_TRUSTY) += trusty.o + obj-$(CONFIG_TRUSTY) += trusty-irq.o ++obj-$(CONFIG_TRUSTY_FIQ) += trusty-fiq.o ++obj-$(CONFIG_TRUSTY_FIQ_ARM) += trusty-fiq-arm.o +diff --git a/drivers/trusty/trusty-fiq-arm.c b/drivers/trusty/trusty-fiq-arm.c +new file mode 100644 +index 000000000000..8c62a00bbc44 +--- /dev/null ++++ b/drivers/trusty/trusty-fiq-arm.c +@@ -0,0 +1,42 @@ ++/* ++ * Copyright (C) 2013 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include "trusty-fiq.h" ++ ++#define _STRINGIFY(x) #x ++#define STRINGIFY(x) _STRINGIFY(x) ++ ++static void __naked trusty_fiq_return(void) ++{ ++ asm volatile( ++ ".arch_extension sec\n" ++ "mov r12, r0\n" ++ "ldr r0, =" STRINGIFY(SMC_FC_FIQ_EXIT) "\n" ++ "smc #0"); ++} ++ ++int trusty_fiq_arch_probe(struct platform_device *pdev) ++{ ++ return fiq_glue_set_return_handler(trusty_fiq_return); ++} ++ ++void trusty_fiq_arch_remove(struct platform_device *pdev) ++{ ++ fiq_glue_clear_return_handler(trusty_fiq_return); ++} +diff --git a/drivers/trusty/trusty-fiq.c b/drivers/trusty/trusty-fiq.c +new file mode 100644 +index 000000000000..1a031c67ea72 +--- /dev/null ++++ b/drivers/trusty/trusty-fiq.c +@@ -0,0 +1,85 @@ ++/* ++ * Copyright (C) 2013 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "trusty-fiq.h" ++ ++static int trusty_fiq_remove_child(struct device *dev, void *data) ++{ ++ platform_device_unregister(to_platform_device(dev)); ++ return 0; ++} ++ ++static int trusty_fiq_probe(struct platform_device *pdev) ++{ ++ int ret; ++ ++ ret = trusty_fiq_arch_probe(pdev); ++ if (ret) ++ goto err_set_fiq_return; ++ ++ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Failed to add children: %d\n", ret); ++ goto err_add_children; ++ } ++ ++ return 0; ++ ++err_add_children: ++ device_for_each_child(&pdev->dev, NULL, trusty_fiq_remove_child); ++ trusty_fiq_arch_remove(pdev); ++err_set_fiq_return: ++ return ret; ++} ++ ++static int trusty_fiq_remove(struct platform_device *pdev) ++{ ++ device_for_each_child(&pdev->dev, NULL, trusty_fiq_remove_child); ++ trusty_fiq_arch_remove(pdev); ++ return 0; ++} ++ ++static const struct of_device_id trusty_fiq_of_match[] = { ++ { .compatible = "android,trusty-fiq-v1", }, ++ {}, ++}; ++ ++static struct platform_driver trusty_fiq_driver = { ++ .probe = trusty_fiq_probe, ++ .remove = trusty_fiq_remove, ++ .driver = { ++ .name = "trusty-fiq", ++ .owner = THIS_MODULE, ++ .of_match_table = trusty_fiq_of_match, ++ }, ++}; ++ ++static int __init trusty_fiq_driver_init(void) ++{ ++ return platform_driver_register(&trusty_fiq_driver); ++} ++ ++static void __exit trusty_fiq_driver_exit(void) ++{ ++ platform_driver_unregister(&trusty_fiq_driver); ++} ++ ++subsys_initcall(trusty_fiq_driver_init); ++module_exit(trusty_fiq_driver_exit); +diff --git a/drivers/trusty/trusty-fiq.h b/drivers/trusty/trusty-fiq.h +new file mode 100644 +index 000000000000..d4ae9a9635f3 +--- /dev/null ++++ b/drivers/trusty/trusty-fiq.h +@@ -0,0 +1,16 @@ ++/* ++ * Copyright (C) 2014 Google, Inc. 
++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++int trusty_fiq_arch_probe(struct platform_device *pdev); ++void trusty_fiq_arch_remove(struct platform_device *pdev); +-- +2.17.1 + diff --git a/patches/0005-usb-typec-Copy-everything-from-struct-typec_capa.usb-typec b/patches/0005-usb-typec-Copy-everything-from-struct-typec_capa.usb-typec new file mode 100644 index 0000000000..d074f1d63e --- /dev/null +++ b/patches/0005-usb-typec-Copy-everything-from-struct-typec_capa.usb-typec @@ -0,0 +1,210 @@ +From 75f503d1819575fb828f94c6c219ef03dc211c5a Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Tue, 1 Oct 2019 12:21:38 +0300 +Subject: [PATCH 05/18] usb: typec: Copy everything from struct + typec_capability during registration + +Copying everything from struct typec_capability to struct +typec_port during port registration. 
+ +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/class.c | 55 +++++++++++++++++++++++++-------------- + 1 file changed, 35 insertions(+), 20 deletions(-) + +diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c +index 94a3eda62add..3835e2d9fba6 100644 +--- a/drivers/usb/typec/class.c ++++ b/drivers/usb/typec/class.c +@@ -46,8 +46,14 @@ struct typec_port { + enum typec_role vconn_role; + enum typec_pwr_opmode pwr_opmode; + enum typec_port_type port_type; ++ enum typec_port_type fixed_role; ++ enum typec_port_data port_roles; ++ enum typec_accessory accessory[TYPEC_MAX_ACCESSORY]; + struct mutex port_type_lock; + ++ u16 revision; ++ u16 pd_revision; ++ + enum typec_orientation orientation; + struct typec_switch *sw; + struct typec_mux *mux; +@@ -950,7 +956,7 @@ preferred_role_store(struct device *dev, struct device_attribute *attr, + int role; + int ret; + +- if (port->cap->type != TYPEC_PORT_DRP) { ++ if (port->fixed_role != TYPEC_PORT_DRP) { + dev_dbg(dev, "Preferred role only supported with DRP ports\n"); + return -EOPNOTSUPP; + } +@@ -982,7 +988,7 @@ preferred_role_show(struct device *dev, struct device_attribute *attr, + { + struct typec_port *port = to_typec_port(dev); + +- if (port->cap->type != TYPEC_PORT_DRP) ++ if (port->fixed_role != TYPEC_PORT_DRP) + return 0; + + if (port->prefer_role < 0) +@@ -1009,7 +1015,7 @@ static ssize_t data_role_store(struct device *dev, + return ret; + + mutex_lock(&port->port_type_lock); +- if (port->cap->data != TYPEC_PORT_DRD) { ++ if (port->port_roles != TYPEC_PORT_DRD) { + ret = -EOPNOTSUPP; + goto unlock_and_ret; + } +@@ -1029,7 +1035,7 @@ static ssize_t data_role_show(struct device *dev, + { + struct typec_port *port = to_typec_port(dev); + +- if (port->cap->data == TYPEC_PORT_DRD) ++ if (port->port_roles == TYPEC_PORT_DRD) + return sprintf(buf, "%s\n", port->data_role == TYPEC_HOST ? 
+ "[host] device" : "host [device]"); + +@@ -1044,7 +1050,7 @@ static ssize_t power_role_store(struct device *dev, + struct typec_port *port = to_typec_port(dev); + int ret; + +- if (!port->cap->pd_revision) { ++ if (!port->pd_revision) { + dev_dbg(dev, "USB Power Delivery not supported\n"); + return -EOPNOTSUPP; + } +@@ -1064,9 +1070,9 @@ static ssize_t power_role_store(struct device *dev, + return ret; + + mutex_lock(&port->port_type_lock); +- if (port->port_type != TYPEC_PORT_DRP) { ++ if (port->fixed_role != TYPEC_PORT_DRP) { + dev_dbg(dev, "port type fixed at \"%s\"", +- typec_port_power_roles[port->port_type]); ++ typec_port_power_roles[port->fixed_role]); + ret = -EOPNOTSUPP; + goto unlock_and_ret; + } +@@ -1086,7 +1092,7 @@ static ssize_t power_role_show(struct device *dev, + { + struct typec_port *port = to_typec_port(dev); + +- if (port->cap->type == TYPEC_PORT_DRP) ++ if (port->fixed_role == TYPEC_PORT_DRP) + return sprintf(buf, "%s\n", port->pwr_role == TYPEC_SOURCE ? + "[source] sink" : "source [sink]"); + +@@ -1102,7 +1108,7 @@ port_type_store(struct device *dev, struct device_attribute *attr, + int ret; + enum typec_port_type type; + +- if (!port->cap->port_type_set || port->cap->type != TYPEC_PORT_DRP) { ++ if (!port->cap->port_type_set || port->fixed_role != TYPEC_PORT_DRP) { + dev_dbg(dev, "changing port type not supported\n"); + return -EOPNOTSUPP; + } +@@ -1114,7 +1120,7 @@ port_type_store(struct device *dev, struct device_attribute *attr, + type = ret; + mutex_lock(&port->port_type_lock); + +- if (port->port_type == type) { ++ if (port->fixed_role == type) { + ret = size; + goto unlock_and_ret; + } +@@ -1123,7 +1129,7 @@ port_type_store(struct device *dev, struct device_attribute *attr, + if (ret) + goto unlock_and_ret; + +- port->port_type = type; ++ port->fixed_role = type; + ret = size; + + unlock_and_ret: +@@ -1137,11 +1143,11 @@ port_type_show(struct device *dev, struct device_attribute *attr, + { + struct typec_port *port = 
to_typec_port(dev); + +- if (port->cap->type == TYPEC_PORT_DRP) ++ if (port->fixed_role == TYPEC_PORT_DRP) + return sprintf(buf, "%s\n", +- typec_port_types_drp[port->port_type]); ++ typec_port_types_drp[port->fixed_role]); + +- return sprintf(buf, "[%s]\n", typec_port_power_roles[port->cap->type]); ++ return sprintf(buf, "[%s]\n", typec_port_power_roles[port->fixed_role]); + } + static DEVICE_ATTR_RW(port_type); + +@@ -1170,7 +1176,7 @@ static ssize_t vconn_source_store(struct device *dev, + bool source; + int ret; + +- if (!port->cap->pd_revision) { ++ if (!port->pd_revision) { + dev_dbg(dev, "VCONN swap depends on USB Power Delivery\n"); + return -EOPNOTSUPP; + } +@@ -1209,10 +1215,10 @@ static ssize_t supported_accessory_modes_show(struct device *dev, + ssize_t ret = 0; + int i; + +- for (i = 0; i < ARRAY_SIZE(port->cap->accessory); i++) { +- if (port->cap->accessory[i]) ++ for (i = 0; i < ARRAY_SIZE(port->accessory); i++) { ++ if (port->accessory[i]) + ret += sprintf(buf + ret, "%s ", +- typec_accessory_modes[port->cap->accessory[i]]); ++ typec_accessory_modes[port->accessory[i]]); + } + + if (!ret) +@@ -1229,7 +1235,7 @@ static ssize_t usb_typec_revision_show(struct device *dev, + char *buf) + { + struct typec_port *port = to_typec_port(dev); +- u16 rev = port->cap->revision; ++ u16 rev = port->revision; + + return sprintf(buf, "%d.%d\n", (rev >> 8) & 0xff, (rev >> 4) & 0xf); + } +@@ -1241,7 +1247,7 @@ static ssize_t usb_power_delivery_revision_show(struct device *dev, + { + struct typec_port *p = to_typec_port(dev); + +- return sprintf(buf, "%d\n", (p->cap->pd_revision >> 8) & 0xff); ++ return sprintf(buf, "%d\n", (p->pd_revision >> 8) & 0xff); + } + static DEVICE_ATTR_RO(usb_power_delivery_revision); + +@@ -1532,6 +1538,7 @@ struct typec_port *typec_register_port(struct device *parent, + struct typec_port *port; + int ret; + int id; ++ int i; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) +@@ -1581,8 +1588,16 @@ struct typec_port 
*typec_register_port(struct device *parent, + port->id = id; + port->cap = cap; + port->port_type = cap->type; ++ port->fixed_role = cap->type; ++ port->port_roles = cap->data; + port->prefer_role = cap->prefer_role; + ++ port->revision = cap->revision; ++ port->pd_revision = cap->pd_revision; ++ ++ for (i = 0; i < TYPEC_MAX_ACCESSORY; i++) ++ port->accessory[i] = cap->accessory[i]; ++ + device_initialize(&port->dev); + port->dev.class = typec_class; + port->dev.parent = parent; +-- +2.17.1 + diff --git a/patches/0006-ASoC-Intel-Skylake-Put-FW-runtime-params-defs-in-one.audio b/patches/0006-ASoC-Intel-Skylake-Put-FW-runtime-params-defs-in-one.audio new file mode 100644 index 0000000000..73c8c3aac8 --- /dev/null +++ b/patches/0006-ASoC-Intel-Skylake-Put-FW-runtime-params-defs-in-one.audio @@ -0,0 +1,74 @@ +From a4c1136c1bcd0efa46a13b5ef9697edcdf0da126 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Amadeusz=20S=C5=82awi=C5=84ski?= + +Date: Wed, 7 Aug 2019 15:24:58 +0200 +Subject: [PATCH 006/193] ASoC: Intel: Skylake: Put FW runtime params defs in + one place +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Change Runtime Parameters enum to have its own prefix and then reuse +those defines in all places where we actually use those parameters. 
+ +Signed-off-by: Amadeusz Sławiński +--- + sound/soc/intel/skylake/skl-messages.c | 7 ++----- + sound/soc/intel/skylake/skl-sst-ipc.h | 5 +++++ + 2 files changed, 7 insertions(+), 5 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index 476ef1897961..bb0b843ed187 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -34,13 +34,11 @@ static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab) + return 0; + } + +-#define SKL_ASTATE_PARAM_ID 4 +- + void skl_dsp_set_astate_cfg(struct skl_dev *skl, u32 cnt, void *data) + { + struct skl_ipc_large_config_msg msg = {0}; + +- msg.large_param_id = SKL_ASTATE_PARAM_ID; ++ msg.large_param_id = SKL_BASEFW_ASTATE_TABLE; + msg.param_data_size = (cnt * sizeof(struct skl_astate_param) + + sizeof(cnt)); + +@@ -614,7 +612,6 @@ static void skl_setup_cpr_gateway_cfg(struct skl_dev *skl, + skl_copy_copier_caps(mconfig, cpr_mconfig); + } + +-#define DMA_CONTROL_ID 5 + #define DMA_I2S_BLOB_SIZE 21 + + int skl_dsp_set_dma_control(struct skl_dev *skl, u32 *caps, +@@ -631,7 +628,7 @@ int skl_dsp_set_dma_control(struct skl_dev *skl, u32 *caps, + if (caps_size == 0) + return 0; + +- msg.large_param_id = DMA_CONTROL_ID; ++ msg.large_param_id = SKL_BASEFW_DMA_CONTROL; + msg.param_data_size = sizeof(struct skl_dma_control) + caps_size; + + dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL); +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h +index f48898fc1436..8c4fa5e5f7de 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.h ++++ b/sound/soc/intel/skylake/skl-sst-ipc.h +@@ -184,6 +184,11 @@ struct skl_lib_info { + const struct firmware *fw; + }; + ++enum skl_basefw_runtime_param { ++ SKL_BASEFW_ASTATE_TABLE = 4, ++ SKL_BASEFW_DMA_CONTROL = 5, ++}; ++ + struct skl_ipc_init_instance_msg { + u32 module_id; + u32 instance_id; +-- +2.17.1 + diff --git 
a/patches/0006-PTP-implement-PTP_EVENT_COUNT_TSTAMP-ioctl.felipeb-5.4 b/patches/0006-PTP-implement-PTP_EVENT_COUNT_TSTAMP-ioctl.felipeb-5.4 new file mode 100644 index 0000000000..14993d997a --- /dev/null +++ b/patches/0006-PTP-implement-PTP_EVENT_COUNT_TSTAMP-ioctl.felipeb-5.4 @@ -0,0 +1,69 @@ +From 2b8eb9c867c9c6f5b2ff20fcedd53fa6322fc04b Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Thu, 7 Mar 2019 10:40:52 +0200 +Subject: [PATCH 06/14] PTP: implement PTP_EVENT_COUNT_TSTAMP ioctl + +With this, we can request the underlying driver to count the number of +events that have been captured. + +Signed-off-by: Felipe Balbi +--- + drivers/ptp/ptp_chardev.c | 21 +++++++++++++++++++++ + include/uapi/linux/ptp_clock.h | 2 ++ + 2 files changed, 23 insertions(+) + +diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c +index 67d0199840fd..d16be5314cb3 100644 +--- a/drivers/ptp/ptp_chardev.c ++++ b/drivers/ptp/ptp_chardev.c +@@ -111,6 +111,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + struct ptp_sys_offset_extended *extoff = NULL; + struct ptp_sys_offset_precise precise_offset; ++ struct ptp_event_count_tstamp counttstamp; + struct system_device_crosststamp xtstamp; + struct ptp_clock_info *ops = ptp->info; + struct ptp_sys_offset *sysoff = NULL; +@@ -199,6 +200,26 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) + err = ops->enable(ops, &req, enable); + break; + ++ case PTP_EVENT_COUNT_TSTAMP2: ++ if (!ops->counttstamp) ++ return -ENOTSUPP; ++ if (copy_from_user(&counttstamp, (void __user *)arg, ++ sizeof(counttstamp))) { ++ err = -EFAULT; ++ break; ++ } ++ if (counttstamp.flags & ~PTP_EVENT_COUNT_TSTAMP_POL_LOW) ++ counttstamp.flags &= PTP_EVENT_COUNT_TSTAMP_POL_LOW; ++ if (counttstamp.rsv[0] || counttstamp.rsv[1]) { ++ err = -EINVAL; ++ break; ++ } ++ err = ops->counttstamp(ops, &counttstamp); ++ if (!err && 
copy_to_user((void __user *)arg, &counttstamp, ++ sizeof(counttstamp))) ++ err = -EFAULT; ++ break; ++ + case PTP_ENABLE_PPS: + case PTP_ENABLE_PPS2: + memset(&req, 0, sizeof(req)); +diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h +index 35318884fcc3..91721dbf1e4c 100644 +--- a/include/uapi/linux/ptp_clock.h ++++ b/include/uapi/linux/ptp_clock.h +@@ -204,6 +204,8 @@ struct ptp_pin_desc { + _IOWR(PTP_CLK_MAGIC, 17, struct ptp_sys_offset_precise) + #define PTP_SYS_OFFSET_EXTENDED2 \ + _IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended) ++#define PTP_EVENT_COUNT_TSTAMP2 \ ++ _IOWR(PTP_CLK_MAGIC, 19, struct ptp_event_count_tstamp) + + struct ptp_extts_event { + struct ptp_clock_time t; /* Time event occured. */ +-- +2.17.1 + diff --git a/patches/0006-TO-BE-FOLDED-pinctrl-tigerlake-Update-pin-list-accord.lpss b/patches/0006-TO-BE-FOLDED-pinctrl-tigerlake-Update-pin-list-accord.lpss new file mode 100644 index 0000000000..41dd95e827 --- /dev/null +++ b/patches/0006-TO-BE-FOLDED-pinctrl-tigerlake-Update-pin-list-accord.lpss @@ -0,0 +1,692 @@ +From f9be28d4682ee4bafe0f3760b83ec375cb15fccd Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Tue, 9 Jul 2019 19:47:13 +0300 +Subject: [PATCH 06/40] TO BE FOLDED: pinctrl: tigerlake: Update pin list + according to H v1.0 HIP_PIC14 + +Signed-off-by: Andy Shevchenko +--- + drivers/pinctrl/intel/pinctrl-tigerlake.c | 624 +++++++++++----------- + 1 file changed, 319 insertions(+), 305 deletions(-) + +diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c +index 3e208070319a..93796395a41d 100644 +--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c ++++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c +@@ -381,7 +381,7 @@ static const struct intel_pinctrl_soc_data tgllp_soc_data = { + + /* Tiger Lake-H */ + static const struct pinctrl_pin_desc tglh_pins[] = { +- /* SPI */ ++ /* GPP_A */ + PINCTRL_PIN(0, "SPI0_IO_2"), + PINCTRL_PIN(1, "SPI0_IO_3"), + PINCTRL_PIN(2, 
"SPI0_MOSI_IO_0"), +@@ -390,346 +390,360 @@ static const struct pinctrl_pin_desc tglh_pins[] = { + PINCTRL_PIN(5, "SPI0_FLASH_0_CSB"), + PINCTRL_PIN(6, "SPI0_FLASH_1_CSB"), + PINCTRL_PIN(7, "SPI0_CLK"), +- PINCTRL_PIN(8, "SPI0_CLK_LOOPBK"), +- /* GPP_A */ +- PINCTRL_PIN(9, "ESPI_ALERT1B"), +- PINCTRL_PIN(10, "ESPI_IO_0"), +- PINCTRL_PIN(11, "ESPI_IO_1"), +- PINCTRL_PIN(12, "ESPI_IO_2"), +- PINCTRL_PIN(13, "ESPI_IO_3"), +- PINCTRL_PIN(14, "ESPI_CS0B"), ++ PINCTRL_PIN(8, "ESPI_IO_0"), ++ PINCTRL_PIN(9, "ESPI_IO_1"), ++ PINCTRL_PIN(10, "ESPI_IO_2"), ++ PINCTRL_PIN(11, "ESPI_IO_3"), ++ PINCTRL_PIN(12, "ESPI_CS0B"), ++ PINCTRL_PIN(13, "ESPI_CLK"), ++ PINCTRL_PIN(14, "ESPI_RESETB"), + PINCTRL_PIN(15, "ESPI_CS1B"), +- PINCTRL_PIN(16, "ESPI_ALERT0B"), +- PINCTRL_PIN(17, "PCIE_LNK_DOWN"), +- PINCTRL_PIN(18, "ESPI_CLK"), +- PINCTRL_PIN(19, "GPP_A_10"), +- PINCTRL_PIN(20, "ISH_UART0_RTSB"), +- PINCTRL_PIN(21, "SX_EXIT_HOLDOFFB"), +- PINCTRL_PIN(22, "SUSWARNB_SUSPWRDNACK"), +- PINCTRL_PIN(23, "ESPI_RESETB"), +- PINCTRL_PIN(24, "SUSACKB"), +- PINCTRL_PIN(25, "CLKOUT_48"), +- PINCTRL_PIN(26, "ISH_GP_7"), +- PINCTRL_PIN(27, "ISH_GP_0"), +- PINCTRL_PIN(28, "ISH_GP_1"), +- PINCTRL_PIN(29, "ISH_GP_2"), +- PINCTRL_PIN(30, "ISH_GP_3"), +- PINCTRL_PIN(31, "ISH_GP_4"), +- PINCTRL_PIN(32, "ISH_GP_5"), +- PINCTRL_PIN(33, "ESPI_CLK_LOOPBK"), ++ PINCTRL_PIN(16, "ESPI_CS2B"), ++ PINCTRL_PIN(17, "ESPI_CS3B"), ++ PINCTRL_PIN(18, "ESPI_ALERT0B"), ++ PINCTRL_PIN(19, "ESPI_ALERT1B"), ++ PINCTRL_PIN(20, "ESPI_ALERT2B"), ++ PINCTRL_PIN(21, "ESPI_ALERT3B"), ++ PINCTRL_PIN(22, "GPPC_A_14"), ++ PINCTRL_PIN(23, "SPI0_CLK_LOOPBK"), ++ PINCTRL_PIN(24, "ESPI_CLK_LOOPBK"), ++ /* GPP_R */ ++ PINCTRL_PIN(25, "HDA_BCLK"), ++ PINCTRL_PIN(26, "HDA_SYNC"), ++ PINCTRL_PIN(27, "HDA_SDO"), ++ PINCTRL_PIN(28, "HDA_SDI_0"), ++ PINCTRL_PIN(29, "HDA_RSTB"), ++ PINCTRL_PIN(30, "HDA_SDI_1"), ++ PINCTRL_PIN(31, "GPP_R_6"), ++ PINCTRL_PIN(32, "GPP_R_7"), ++ PINCTRL_PIN(33, "GPP_R_8"), ++ PINCTRL_PIN(34, "PCIE_LNK_DOWN"), 
++ PINCTRL_PIN(35, "ISH_UART0_RTSB"), ++ PINCTRL_PIN(36, "SX_EXIT_HOLDOFFB"), ++ PINCTRL_PIN(37, "CLKOUT_48"), ++ PINCTRL_PIN(38, "ISH_GP_7"), ++ PINCTRL_PIN(39, "ISH_GP_0"), ++ PINCTRL_PIN(40, "ISH_GP_1"), ++ PINCTRL_PIN(41, "ISH_GP_2"), ++ PINCTRL_PIN(42, "ISH_GP_3"), ++ PINCTRL_PIN(43, "ISH_GP_4"), ++ PINCTRL_PIN(44, "ISH_GP_5"), + /* GPP_B */ +- PINCTRL_PIN(34, "GSPI0_CS1B"), +- PINCTRL_PIN(35, "GSPI1_CS1B"), +- PINCTRL_PIN(36, "VRALERTB"), +- PINCTRL_PIN(37, "CPU_GP_2"), +- PINCTRL_PIN(38, "CPU_GP_3"), +- PINCTRL_PIN(39, "SRCCLKREQB_0"), +- PINCTRL_PIN(40, "SRCCLKREQB_1"), +- PINCTRL_PIN(41, "SRCCLKREQB_2"), +- PINCTRL_PIN(42, "SRCCLKREQB_3"), +- PINCTRL_PIN(43, "SRCCLKREQB_4"), +- PINCTRL_PIN(44, "SRCCLKREQB_5"), +- PINCTRL_PIN(45, "I2S_MCLK"), +- PINCTRL_PIN(46, "SLP_S0B"), +- PINCTRL_PIN(47, "PLTRSTB"), +- PINCTRL_PIN(48, "SPKR"), +- PINCTRL_PIN(49, "GSPI0_CS0B"), +- PINCTRL_PIN(50, "GSPI0_CLK"), +- PINCTRL_PIN(51, "GSPI0_MISO"), +- PINCTRL_PIN(52, "GSPI0_MOSI"), +- PINCTRL_PIN(53, "GSPI1_CS0B"), +- PINCTRL_PIN(54, "GSPI1_CLK"), +- PINCTRL_PIN(55, "GSPI1_MISO"), +- PINCTRL_PIN(56, "GSPI1_MOSI"), +- PINCTRL_PIN(57, "SML1ALERTB"), +- PINCTRL_PIN(58, "GSPI0_CLK_LOOPBK"), +- PINCTRL_PIN(59, "GSPI1_CLK_LOOPBK"), ++ PINCTRL_PIN(45, "GSPI0_CS1B"), ++ PINCTRL_PIN(46, "GSPI1_CS1B"), ++ PINCTRL_PIN(47, "VRALERTB"), ++ PINCTRL_PIN(48, "CPU_GP_2"), ++ PINCTRL_PIN(49, "CPU_GP_3"), ++ PINCTRL_PIN(50, "SRCCLKREQB_0"), ++ PINCTRL_PIN(51, "SRCCLKREQB_1"), ++ PINCTRL_PIN(52, "SRCCLKREQB_2"), ++ PINCTRL_PIN(53, "SRCCLKREQB_3"), ++ PINCTRL_PIN(54, "SRCCLKREQB_4"), ++ PINCTRL_PIN(55, "SRCCLKREQB_5"), ++ PINCTRL_PIN(56, "I2S_MCLK"), ++ PINCTRL_PIN(57, "SLP_S0B"), ++ PINCTRL_PIN(58, "PLTRSTB"), ++ PINCTRL_PIN(59, "SPKR"), ++ PINCTRL_PIN(60, "GSPI0_CS0B"), ++ PINCTRL_PIN(61, "GSPI0_CLK"), ++ PINCTRL_PIN(62, "GSPI0_MISO"), ++ PINCTRL_PIN(63, "GSPI0_MOSI"), ++ PINCTRL_PIN(64, "GSPI1_CS0B"), ++ PINCTRL_PIN(65, "GSPI1_CLK"), ++ PINCTRL_PIN(66, "GSPI1_MISO"), ++ PINCTRL_PIN(67, 
"GSPI1_MOSI"), ++ PINCTRL_PIN(68, "SML1ALERTB"), ++ PINCTRL_PIN(69, "GSPI0_CLK_LOOPBK"), ++ PINCTRL_PIN(70, "GSPI1_CLK_LOOPBK"), + /* vGPIO_0 */ +- PINCTRL_PIN(60, "ESPI_USB_OCB_0"), +- PINCTRL_PIN(61, "ESPI_USB_OCB_1"), +- PINCTRL_PIN(62, "ESPI_USB_OCB_2"), +- PINCTRL_PIN(63, "ESPI_USB_OCB_3"), +- PINCTRL_PIN(64, "USB_CPU_OCB_0"), +- PINCTRL_PIN(65, "USB_CPU_OCB_1"), +- PINCTRL_PIN(66, "USB_CPU_OCB_2"), +- PINCTRL_PIN(67, "USB_CPU_OCB_3"), +- /* GPP_C */ +- PINCTRL_PIN(68, "SMBCLK"), +- PINCTRL_PIN(69, "SMBDATA"), +- PINCTRL_PIN(70, "SMBALERTB"), +- PINCTRL_PIN(71, "ISH_UART0_RXD"), +- PINCTRL_PIN(72, "ISH_UART0_TXD"), +- PINCTRL_PIN(73, "SML0ALERTB"), +- PINCTRL_PIN(74, "ISH_I2C2_SDA"), +- PINCTRL_PIN(75, "ISH_I2C2_SCL"), +- PINCTRL_PIN(76, "UART0_RXD"), +- PINCTRL_PIN(77, "UART0_TXD"), +- PINCTRL_PIN(78, "UART0_RTSB"), +- PINCTRL_PIN(79, "UART0_CTSB"), +- PINCTRL_PIN(80, "UART1_RXD"), +- PINCTRL_PIN(81, "UART1_TXD"), +- PINCTRL_PIN(82, "UART1_RTSB"), +- PINCTRL_PIN(83, "UART1_CTSB"), +- PINCTRL_PIN(84, "I2C0_SDA"), +- PINCTRL_PIN(85, "I2C0_SCL"), +- PINCTRL_PIN(86, "I2C1_SDA"), +- PINCTRL_PIN(87, "I2C1_SCL"), +- PINCTRL_PIN(88, "UART2_RXD"), +- PINCTRL_PIN(89, "UART2_TXD"), +- PINCTRL_PIN(90, "UART2_RTSB"), +- PINCTRL_PIN(91, "UART2_CTSB"), ++ PINCTRL_PIN(71, "ESPI_USB_OCB_0"), ++ PINCTRL_PIN(72, "ESPI_USB_OCB_1"), ++ PINCTRL_PIN(73, "ESPI_USB_OCB_2"), ++ PINCTRL_PIN(74, "ESPI_USB_OCB_3"), ++ PINCTRL_PIN(75, "USB_CPU_OCB_0"), ++ PINCTRL_PIN(76, "USB_CPU_OCB_1"), ++ PINCTRL_PIN(77, "USB_CPU_OCB_2"), ++ PINCTRL_PIN(78, "USB_CPU_OCB_3"), + /* GPP_D */ +- PINCTRL_PIN(92, "SPI1_CSB"), +- PINCTRL_PIN(93, "SPI1_CLK"), +- PINCTRL_PIN(94, "SPI1_MISO_IO_1"), +- PINCTRL_PIN(95, "SPI1_MOSI_IO_0"), +- PINCTRL_PIN(96, "SML1CLK"), +- PINCTRL_PIN(97, "I2S2_SFRM"), +- PINCTRL_PIN(98, "I2S2_TXD"), +- PINCTRL_PIN(99, "I2S2_RXD"), +- PINCTRL_PIN(100, "I2S2_SCLK"), +- PINCTRL_PIN(101, "SML0CLK"), +- PINCTRL_PIN(102, "SML0DATA"), +- PINCTRL_PIN(103, "I2S1_SCLK"), +- PINCTRL_PIN(104, 
"ISH_UART0_CTSB"), +- PINCTRL_PIN(105, "SPI1_IO_2"), +- PINCTRL_PIN(106, "SPI1_IO_3"), +- PINCTRL_PIN(107, "SML1DATA"), +- PINCTRL_PIN(108, "GSPI3_CS0B"), +- PINCTRL_PIN(109, "GSPI3_CLK"), +- PINCTRL_PIN(110, "GSPI3_MISO"), +- PINCTRL_PIN(111, "GSPI3_MOSI"), +- PINCTRL_PIN(112, "UART3_RXD"), +- PINCTRL_PIN(113, "UART3_TXD"), +- PINCTRL_PIN(114, "UART3_RTSB"), +- PINCTRL_PIN(115, "UART3_CTSB"), +- PINCTRL_PIN(116, "GSPI2_CLK_LOOPBK"), +- PINCTRL_PIN(117, "SPI1_CLK_LOOPBK"), +- /* GPP_R */ +- PINCTRL_PIN(118, "HDA_BCLK"), +- PINCTRL_PIN(119, "HDA_SYNC"), +- PINCTRL_PIN(120, "HDA_SDO"), +- PINCTRL_PIN(121, "HDA_SDI_0"), +- PINCTRL_PIN(122, "HDA_RSTB"), +- PINCTRL_PIN(123, "HDA_SDI_1"), +- PINCTRL_PIN(124, "GPP_R_6"), +- PINCTRL_PIN(125, "GPP_R_7"), ++ PINCTRL_PIN(79, "SPI1_CSB"), ++ PINCTRL_PIN(80, "SPI1_CLK"), ++ PINCTRL_PIN(81, "SPI1_MISO_IO_1"), ++ PINCTRL_PIN(82, "SPI1_MOSI_IO_0"), ++ PINCTRL_PIN(83, "SML1CLK"), ++ PINCTRL_PIN(84, "I2S2_SFRM"), ++ PINCTRL_PIN(85, "I2S2_TXD"), ++ PINCTRL_PIN(86, "I2S2_RXD"), ++ PINCTRL_PIN(87, "I2S2_SCLK"), ++ PINCTRL_PIN(88, "SML0CLK"), ++ PINCTRL_PIN(89, "SML0DATA"), ++ PINCTRL_PIN(90, "GPP_D_11"), ++ PINCTRL_PIN(91, "ISH_UART0_CTSB"), ++ PINCTRL_PIN(92, "SPI1_IO_2"), ++ PINCTRL_PIN(93, "SPI1_IO_3"), ++ PINCTRL_PIN(94, "SML1DATA"), ++ PINCTRL_PIN(95, "GSPI3_CS0B"), ++ PINCTRL_PIN(96, "GSPI3_CLK"), ++ PINCTRL_PIN(97, "GSPI3_MISO"), ++ PINCTRL_PIN(98, "GSPI3_MOSI"), ++ PINCTRL_PIN(99, "UART3_RXD"), ++ PINCTRL_PIN(100, "UART3_TXD"), ++ PINCTRL_PIN(101, "UART3_RTSB"), ++ PINCTRL_PIN(102, "UART3_CTSB"), ++ PINCTRL_PIN(103, "SPI1_CLK_LOOPBK"), ++ PINCTRL_PIN(104, "GSPI3_CLK_LOOPBK"), ++ /* GPP_C */ ++ PINCTRL_PIN(105, "SMBCLK"), ++ PINCTRL_PIN(106, "SMBDATA"), ++ PINCTRL_PIN(107, "SMBALERTB"), ++ PINCTRL_PIN(108, "ISH_UART0_RXD"), ++ PINCTRL_PIN(109, "ISH_UART0_TXD"), ++ PINCTRL_PIN(110, "SML0ALERTB"), ++ PINCTRL_PIN(111, "ISH_I2C2_SDA"), ++ PINCTRL_PIN(112, "ISH_I2C2_SCL"), ++ PINCTRL_PIN(113, "UART0_RXD"), ++ PINCTRL_PIN(114, 
"UART0_TXD"), ++ PINCTRL_PIN(115, "UART0_RTSB"), ++ PINCTRL_PIN(116, "UART0_CTSB"), ++ PINCTRL_PIN(117, "UART1_RXD"), ++ PINCTRL_PIN(118, "UART1_TXD"), ++ PINCTRL_PIN(119, "UART1_RTSB"), ++ PINCTRL_PIN(120, "UART1_CTSB"), ++ PINCTRL_PIN(121, "I2C0_SDA"), ++ PINCTRL_PIN(122, "I2C0_SCL"), ++ PINCTRL_PIN(123, "I2C1_SDA"), ++ PINCTRL_PIN(124, "I2C1_SCL"), ++ PINCTRL_PIN(125, "UART2_RXD"), ++ PINCTRL_PIN(126, "UART2_TXD"), ++ PINCTRL_PIN(127, "UART2_RTSB"), ++ PINCTRL_PIN(128, "UART2_CTSB"), + /* GPP_S */ +- PINCTRL_PIN(126, "SNDW1_CLK"), +- PINCTRL_PIN(127, "SNDW1_DATA"), +- PINCTRL_PIN(128, "SNDW2_CLK"), +- PINCTRL_PIN(129, "SNDW2_DATA"), +- PINCTRL_PIN(130, "SNDW3_CLK"), +- PINCTRL_PIN(131, "SNDW3_DATA"), +- PINCTRL_PIN(132, "SNDW4_CLK"), +- PINCTRL_PIN(133, "SNDW4_DATA"), ++ PINCTRL_PIN(129, "SNDW1_CLK"), ++ PINCTRL_PIN(130, "SNDW1_DATA"), ++ PINCTRL_PIN(131, "SNDW2_CLK"), ++ PINCTRL_PIN(132, "SNDW2_DATA"), ++ PINCTRL_PIN(133, "SNDW3_CLK"), ++ PINCTRL_PIN(134, "SNDW3_DATA"), ++ PINCTRL_PIN(135, "SNDW4_CLK"), ++ PINCTRL_PIN(136, "SNDW4_DATA"), + /* GPP_G */ +- PINCTRL_PIN(134, "DDPA_CTRLCLK"), +- PINCTRL_PIN(135, "DDPA_CTRLDATA"), +- PINCTRL_PIN(136, "DNX_FORCE_RELOAD"), +- PINCTRL_PIN(137, "GPPC_G_3"), +- PINCTRL_PIN(138, "GPPC_G_4"), +- PINCTRL_PIN(139, "GPPC_G_5"), +- PINCTRL_PIN(140, "GPPC_G_6"), +- PINCTRL_PIN(141, "GPPC_G_7"), +- PINCTRL_PIN(142, "ISH_SPI_CSB"), +- PINCTRL_PIN(143, "ISH_SPI_CLK"), +- PINCTRL_PIN(144, "ISH_SPI_MISO"), +- PINCTRL_PIN(145, "ISH_SPI_MOSI"), +- PINCTRL_PIN(146, "DDP1_CTRLCLK"), +- PINCTRL_PIN(147, "DDP1_CTRLDATA"), +- PINCTRL_PIN(148, "DDP2_CTRLCLK"), +- PINCTRL_PIN(149, "DDP2_CTRLDATA"), ++ PINCTRL_PIN(137, "DDPA_CTRLCLK"), ++ PINCTRL_PIN(138, "DDPA_CTRLDATA"), ++ PINCTRL_PIN(139, "DNX_FORCE_RELOAD"), ++ PINCTRL_PIN(140, "GMII_MDC_0"), ++ PINCTRL_PIN(141, "GMII_MDIO_0"), ++ PINCTRL_PIN(142, "SLP_DRAMB"), ++ PINCTRL_PIN(143, "GPPC_G_6"), ++ PINCTRL_PIN(144, "GPPC_G_7"), ++ PINCTRL_PIN(145, "ISH_SPI_CSB"), ++ PINCTRL_PIN(146, 
"ISH_SPI_CLK"), ++ PINCTRL_PIN(147, "ISH_SPI_MISO"), ++ PINCTRL_PIN(148, "ISH_SPI_MOSI"), ++ PINCTRL_PIN(149, "DDP1_CTRLCLK"), ++ PINCTRL_PIN(150, "DDP1_CTRLDATA"), ++ PINCTRL_PIN(151, "DDP2_CTRLCLK"), ++ PINCTRL_PIN(152, "DDP2_CTRLDATA"), ++ PINCTRL_PIN(153, "GSPI2_CLK_LOOPBK"), + /* vGPIO */ +- PINCTRL_PIN(150, "CNV_BTEN"), +- PINCTRL_PIN(151, "CNV_BT_HOST_WAKEB"), +- PINCTRL_PIN(152, "CNV_BT_IF_SELECT"), +- PINCTRL_PIN(153, "vCNV_BT_UART_TXD"), +- PINCTRL_PIN(154, "vCNV_BT_UART_RXD"), +- PINCTRL_PIN(155, "vCNV_BT_UART_CTS_B"), +- PINCTRL_PIN(156, "vCNV_BT_UART_RTS_B"), +- PINCTRL_PIN(157, "vCNV_MFUART1_TXD"), +- PINCTRL_PIN(158, "vCNV_MFUART1_RXD"), +- PINCTRL_PIN(159, "vCNV_MFUART1_CTS_B"), +- PINCTRL_PIN(160, "vCNV_MFUART1_RTS_B"), +- PINCTRL_PIN(161, "vUART0_TXD"), +- PINCTRL_PIN(162, "vUART0_RXD"), +- PINCTRL_PIN(163, "vUART0_CTS_B"), +- PINCTRL_PIN(164, "vUART0_RTS_B"), +- PINCTRL_PIN(165, "vISH_UART0_TXD"), +- PINCTRL_PIN(166, "vISH_UART0_RXD"), +- PINCTRL_PIN(167, "vISH_UART0_CTS_B"), +- PINCTRL_PIN(168, "vISH_UART0_RTS_B"), +- PINCTRL_PIN(169, "vCNV_BT_I2S_BCLK"), +- PINCTRL_PIN(170, "vCNV_BT_I2S_WS_SYNC"), +- PINCTRL_PIN(171, "vCNV_BT_I2S_SDO"), +- PINCTRL_PIN(172, "vCNV_BT_I2S_SDI"), +- PINCTRL_PIN(173, "vI2S2_SCLK"), +- PINCTRL_PIN(174, "vI2S2_SFRM"), +- PINCTRL_PIN(175, "vI2S2_TXD"), +- PINCTRL_PIN(176, "vI2S2_RXD"), ++ PINCTRL_PIN(154, "CNV_BTEN"), ++ PINCTRL_PIN(155, "CNV_BT_HOST_WAKEB"), ++ PINCTRL_PIN(156, "CNV_BT_IF_SELECT"), ++ PINCTRL_PIN(157, "vCNV_BT_UART_TXD"), ++ PINCTRL_PIN(158, "vCNV_BT_UART_RXD"), ++ PINCTRL_PIN(159, "vCNV_BT_UART_CTS_B"), ++ PINCTRL_PIN(160, "vCNV_BT_UART_RTS_B"), ++ PINCTRL_PIN(161, "vCNV_MFUART1_TXD"), ++ PINCTRL_PIN(162, "vCNV_MFUART1_RXD"), ++ PINCTRL_PIN(163, "vCNV_MFUART1_CTS_B"), ++ PINCTRL_PIN(164, "vCNV_MFUART1_RTS_B"), ++ PINCTRL_PIN(165, "vUART0_TXD"), ++ PINCTRL_PIN(166, "vUART0_RXD"), ++ PINCTRL_PIN(167, "vUART0_CTS_B"), ++ PINCTRL_PIN(168, "vUART0_RTS_B"), ++ PINCTRL_PIN(169, "vISH_UART0_TXD"), ++ 
PINCTRL_PIN(170, "vISH_UART0_RXD"), ++ PINCTRL_PIN(171, "vISH_UART0_CTS_B"), ++ PINCTRL_PIN(172, "vISH_UART0_RTS_B"), ++ PINCTRL_PIN(173, "vCNV_BT_I2S_BCLK"), ++ PINCTRL_PIN(174, "vCNV_BT_I2S_WS_SYNC"), ++ PINCTRL_PIN(175, "vCNV_BT_I2S_SDO"), ++ PINCTRL_PIN(176, "vCNV_BT_I2S_SDI"), ++ PINCTRL_PIN(177, "vI2S2_SCLK"), ++ PINCTRL_PIN(178, "vI2S2_SFRM"), ++ PINCTRL_PIN(179, "vI2S2_TXD"), ++ PINCTRL_PIN(180, "vI2S2_RXD"), + /* GPP_E */ +- PINCTRL_PIN(177, "SATAXPCIE_0"), +- PINCTRL_PIN(178, "SATAXPCIE_1"), +- PINCTRL_PIN(179, "SATAXPCIE_2"), +- PINCTRL_PIN(180, "CPU_GP_0"), +- PINCTRL_PIN(181, "SATA_DEVSLP_0"), +- PINCTRL_PIN(182, "SATA_DEVSLP_1"), +- PINCTRL_PIN(183, "SATA_DEVSLP_2"), +- PINCTRL_PIN(184, "CPU_GP_1"), +- PINCTRL_PIN(185, "SATA_LEDB"), +- PINCTRL_PIN(186, "USB2_OCB_0"), +- PINCTRL_PIN(187, "USB2_OCB_1"), +- PINCTRL_PIN(188, "USB2_OCB_2"), +- PINCTRL_PIN(189, "USB2_OCB_3"), ++ PINCTRL_PIN(181, "SATAXPCIE_0"), ++ PINCTRL_PIN(182, "SATAXPCIE_1"), ++ PINCTRL_PIN(183, "SATAXPCIE_2"), ++ PINCTRL_PIN(184, "CPU_GP_0"), ++ PINCTRL_PIN(185, "SATA_DEVSLP_0"), ++ PINCTRL_PIN(186, "SATA_DEVSLP_1"), ++ PINCTRL_PIN(187, "SATA_DEVSLP_2"), ++ PINCTRL_PIN(188, "CPU_GP_1"), ++ PINCTRL_PIN(189, "SATA_LEDB"), ++ PINCTRL_PIN(190, "USB2_OCB_0"), ++ PINCTRL_PIN(191, "USB2_OCB_1"), ++ PINCTRL_PIN(192, "USB2_OCB_2"), ++ PINCTRL_PIN(193, "USB2_OCB_3"), + /* GPP_F */ +- PINCTRL_PIN(190, "SATAXPCIE_3"), +- PINCTRL_PIN(191, "SATAXPCIE_4"), +- PINCTRL_PIN(192, "SATAXPCIE_5"), +- PINCTRL_PIN(193, "SATAXPCIE_6"), +- PINCTRL_PIN(194, "SATAXPCIE_7"), +- PINCTRL_PIN(195, "SATA_DEVSLP_3"), +- PINCTRL_PIN(196, "SATA_DEVSLP_4"), +- PINCTRL_PIN(197, "SATA_DEVSLP_5"), +- PINCTRL_PIN(198, "SATA_DEVSLP_6"), +- PINCTRL_PIN(199, "SATA_DEVSLP_7"), +- PINCTRL_PIN(200, "SATA_SCLOCK"), +- PINCTRL_PIN(201, "SATA_SLOAD"), +- PINCTRL_PIN(202, "SATA_SDATAOUT1"), +- PINCTRL_PIN(203, "SATA_SDATAOUT0"), +- PINCTRL_PIN(204, "PS_ONB"), +- PINCTRL_PIN(205, "M2_SKT2_CFG_0"), +- PINCTRL_PIN(206, "M2_SKT2_CFG_1"), 
+- PINCTRL_PIN(207, "M2_SKT2_CFG_2"), +- PINCTRL_PIN(208, "M2_SKT2_CFG_3"), +- PINCTRL_PIN(209, "L_VDDEN"), +- PINCTRL_PIN(210, "L_BKLTEN"), +- PINCTRL_PIN(211, "L_BKLTCTL"), +- PINCTRL_PIN(212, "VNN_CTRL"), +- PINCTRL_PIN(213, "GPP_F_23"), ++ PINCTRL_PIN(194, "SATAXPCIE_3"), ++ PINCTRL_PIN(195, "SATAXPCIE_4"), ++ PINCTRL_PIN(196, "SATAXPCIE_5"), ++ PINCTRL_PIN(197, "SATAXPCIE_6"), ++ PINCTRL_PIN(198, "SATAXPCIE_7"), ++ PINCTRL_PIN(199, "SATA_DEVSLP_3"), ++ PINCTRL_PIN(200, "SATA_DEVSLP_4"), ++ PINCTRL_PIN(201, "SATA_DEVSLP_5"), ++ PINCTRL_PIN(202, "SATA_DEVSLP_6"), ++ PINCTRL_PIN(203, "SATA_DEVSLP_7"), ++ PINCTRL_PIN(204, "SATA_SCLOCK"), ++ PINCTRL_PIN(205, "SATA_SLOAD"), ++ PINCTRL_PIN(206, "SATA_SDATAOUT1"), ++ PINCTRL_PIN(207, "SATA_SDATAOUT0"), ++ PINCTRL_PIN(208, "PS_ONB"), ++ PINCTRL_PIN(209, "M2_SKT2_CFG_0"), ++ PINCTRL_PIN(210, "M2_SKT2_CFG_1"), ++ PINCTRL_PIN(211, "M2_SKT2_CFG_2"), ++ PINCTRL_PIN(212, "M2_SKT2_CFG_3"), ++ PINCTRL_PIN(213, "L_VDDEN"), ++ PINCTRL_PIN(214, "L_BKLTEN"), ++ PINCTRL_PIN(215, "L_BKLTCTL"), ++ PINCTRL_PIN(216, "VNN_CTRL"), ++ PINCTRL_PIN(217, "GPP_F_23"), + /* GPP_H */ +- PINCTRL_PIN(214, "SRCCLKREQB_6"), +- PINCTRL_PIN(215, "SRCCLKREQB_7"), +- PINCTRL_PIN(216, "SRCCLKREQB_8"), +- PINCTRL_PIN(217, "SRCCLKREQB_9"), +- PINCTRL_PIN(218, "SRCCLKREQB_10"), +- PINCTRL_PIN(219, "SRCCLKREQB_11"), +- PINCTRL_PIN(220, "SRCCLKREQB_12"), +- PINCTRL_PIN(221, "SRCCLKREQB_13"), +- PINCTRL_PIN(222, "SRCCLKREQB_14"), +- PINCTRL_PIN(223, "SRCCLKREQB_15"), +- PINCTRL_PIN(224, "SML2CLK"), +- PINCTRL_PIN(225, "SML2DATA"), +- PINCTRL_PIN(226, "SML2ALERTB"), +- PINCTRL_PIN(227, "SML3CLK"), +- PINCTRL_PIN(228, "SML3DATA"), +- PINCTRL_PIN(229, "SML3ALERTB"), +- PINCTRL_PIN(230, "SML4CLK"), +- PINCTRL_PIN(231, "SML4DATA"), +- PINCTRL_PIN(232, "SML4ALERTB"), +- PINCTRL_PIN(233, "ISH_I2C0_SDA"), +- PINCTRL_PIN(234, "ISH_I2C0_SCL"), +- PINCTRL_PIN(235, "ISH_I2C1_SDA"), +- PINCTRL_PIN(236, "ISH_I2C1_SCL"), +- PINCTRL_PIN(237, "TIME_SYNC_0"), +- /* GPP_K */ +- 
PINCTRL_PIN(238, "GSXDOUT"), +- PINCTRL_PIN(239, "GSXSLOAD"), +- PINCTRL_PIN(240, "GSXDIN"), +- PINCTRL_PIN(241, "GSXSRESETB"), +- PINCTRL_PIN(242, "GSXCLK"), +- PINCTRL_PIN(243, "ADR_COMPLETE"), +- PINCTRL_PIN(244, "DDSP_HPD_A"), +- PINCTRL_PIN(245, "DDSP_HPD_B"), +- PINCTRL_PIN(246, "CORE_VID_0"), +- PINCTRL_PIN(247, "CORE_VID_1"), +- PINCTRL_PIN(248, "DDSP_HPD_C"), +- PINCTRL_PIN(249, "GPP_K_11"), ++ PINCTRL_PIN(218, "SRCCLKREQB_6"), ++ PINCTRL_PIN(219, "SRCCLKREQB_7"), ++ PINCTRL_PIN(220, "SRCCLKREQB_8"), ++ PINCTRL_PIN(221, "SRCCLKREQB_9"), ++ PINCTRL_PIN(222, "SRCCLKREQB_10"), ++ PINCTRL_PIN(223, "SRCCLKREQB_11"), ++ PINCTRL_PIN(224, "SRCCLKREQB_12"), ++ PINCTRL_PIN(225, "SRCCLKREQB_13"), ++ PINCTRL_PIN(226, "SRCCLKREQB_14"), ++ PINCTRL_PIN(227, "SRCCLKREQB_15"), ++ PINCTRL_PIN(228, "SML2CLK"), ++ PINCTRL_PIN(229, "SML2DATA"), ++ PINCTRL_PIN(230, "SML2ALERTB"), ++ PINCTRL_PIN(231, "SML3CLK"), ++ PINCTRL_PIN(232, "SML3DATA"), ++ PINCTRL_PIN(233, "SML3ALERTB"), ++ PINCTRL_PIN(234, "SML4CLK"), ++ PINCTRL_PIN(235, "SML4DATA"), ++ PINCTRL_PIN(236, "SML4ALERTB"), ++ PINCTRL_PIN(237, "ISH_I2C0_SDA"), ++ PINCTRL_PIN(238, "ISH_I2C0_SCL"), ++ PINCTRL_PIN(239, "ISH_I2C1_SDA"), ++ PINCTRL_PIN(240, "ISH_I2C1_SCL"), ++ PINCTRL_PIN(241, "TIME_SYNC_0"), + /* GPP_J */ +- PINCTRL_PIN(250, "CNV_PA_BLANKING"), +- PINCTRL_PIN(251, "CPU_C10_GATEB"), +- PINCTRL_PIN(252, "CNV_BRI_DT"), +- PINCTRL_PIN(253, "CNV_BRI_RSP"), +- PINCTRL_PIN(254, "CNV_RGI_DT"), +- PINCTRL_PIN(255, "CNV_RGI_RSP"), +- PINCTRL_PIN(256, "CNV_MFUART2_RXD"), +- PINCTRL_PIN(257, "CNV_MFUART2_TXD"), +- PINCTRL_PIN(258, "GPP_J_8"), +- PINCTRL_PIN(259, "GPP_J_9"), ++ PINCTRL_PIN(242, "CNV_PA_BLANKING"), ++ PINCTRL_PIN(243, "CPU_C10_GATEB"), ++ PINCTRL_PIN(244, "CNV_BRI_DT"), ++ PINCTRL_PIN(245, "CNV_BRI_RSP"), ++ PINCTRL_PIN(246, "CNV_RGI_DT"), ++ PINCTRL_PIN(247, "CNV_RGI_RSP"), ++ PINCTRL_PIN(248, "CNV_MFUART2_RXD"), ++ PINCTRL_PIN(249, "CNV_MFUART2_TXD"), ++ PINCTRL_PIN(250, "GPP_J_8"), ++ PINCTRL_PIN(251, 
"GPP_J_9"), ++ /* GPP_K */ ++ PINCTRL_PIN(252, "GSXDOUT"), ++ PINCTRL_PIN(253, "GSXSLOAD"), ++ PINCTRL_PIN(254, "GSXDIN"), ++ PINCTRL_PIN(255, "GSXSRESETB"), ++ PINCTRL_PIN(256, "GSXCLK"), ++ PINCTRL_PIN(257, "ADR_COMPLETE"), ++ PINCTRL_PIN(258, "DDSP_HPD_A"), ++ PINCTRL_PIN(259, "DDSP_HPD_B"), ++ PINCTRL_PIN(260, "CORE_VID_0"), ++ PINCTRL_PIN(261, "CORE_VID_1"), ++ PINCTRL_PIN(262, "DDSP_HPD_C"), ++ PINCTRL_PIN(263, "GPP_K_11"), ++ PINCTRL_PIN(264, "SYS_PWROK"), ++ PINCTRL_PIN(265, "SYS_RESETB"), ++ PINCTRL_PIN(266, "MLK_RSTB"), + /* GPP_I */ +- PINCTRL_PIN(260, "PMCALERTB"), +- PINCTRL_PIN(261, "DDSP_HPD_1"), +- PINCTRL_PIN(262, "DDSP_HPD_2"), +- PINCTRL_PIN(263, "DDSP_HPD_3"), +- PINCTRL_PIN(264, "DDSP_HPD_4"), +- PINCTRL_PIN(265, "DDPB_CTRLCLK"), +- PINCTRL_PIN(266, "DDPB_CTRLDATA"), +- PINCTRL_PIN(267, "DDPC_CTRLCLK"), +- PINCTRL_PIN(268, "DDPC_CTRLDATA"), +- PINCTRL_PIN(269, "FUSA_DIAGTEST_EN"), +- PINCTRL_PIN(270, "FUSA_DIAGTEST_MODE"), +- PINCTRL_PIN(271, "USB2_OCB_4"), +- PINCTRL_PIN(272, "USB2_OCB_5"), +- PINCTRL_PIN(273, "USB2_OCB_6"), +- PINCTRL_PIN(274, "USB2_OCB_7"), +- PINCTRL_PIN(275, "SYS_PWROK"), +- PINCTRL_PIN(276, "SYS_RESETB"), +- PINCTRL_PIN(277, "MLK_RSTB"), ++ PINCTRL_PIN(267, "PMCALERTB"), ++ PINCTRL_PIN(268, "DDSP_HPD_1"), ++ PINCTRL_PIN(269, "DDSP_HPD_2"), ++ PINCTRL_PIN(270, "DDSP_HPD_3"), ++ PINCTRL_PIN(271, "DDSP_HPD_4"), ++ PINCTRL_PIN(272, "DDPB_CTRLCLK"), ++ PINCTRL_PIN(273, "DDPB_CTRLDATA"), ++ PINCTRL_PIN(274, "DDPC_CTRLCLK"), ++ PINCTRL_PIN(275, "DDPC_CTRLDATA"), ++ PINCTRL_PIN(276, "FUSA_DIAGTEST_EN"), ++ PINCTRL_PIN(277, "FUSA_DIAGTEST_MODE"), ++ PINCTRL_PIN(278, "USB2_OCB_4"), ++ PINCTRL_PIN(279, "USB2_OCB_5"), ++ PINCTRL_PIN(280, "USB2_OCB_6"), ++ PINCTRL_PIN(281, "USB2_OCB_7"), + /* JTAG */ +- PINCTRL_PIN(278, "JTAG_TDO"), +- PINCTRL_PIN(279, "JTAGX"), +- PINCTRL_PIN(280, "PRDYB"), +- PINCTRL_PIN(281, "PREQB"), +- PINCTRL_PIN(282, "CPU_TRSTB"), +- PINCTRL_PIN(283, "JTAG_TDI"), +- PINCTRL_PIN(284, "JTAG_TMS"), +- 
PINCTRL_PIN(285, "JTAG_TCK"), +- PINCTRL_PIN(286, "DBG_PMODE"), ++ PINCTRL_PIN(282, "JTAG_TDO"), ++ PINCTRL_PIN(283, "JTAGX"), ++ PINCTRL_PIN(284, "PRDYB"), ++ PINCTRL_PIN(285, "PREQB"), ++ PINCTRL_PIN(286, "JTAG_TDI"), ++ PINCTRL_PIN(287, "JTAG_TMS"), ++ PINCTRL_PIN(288, "JTAG_TCK"), ++ PINCTRL_PIN(289, "DBG_PMODE"), ++ PINCTRL_PIN(290, "CPU_TRSTB"), + }; + + static const struct intel_padgroup tglh_community0_gpps[] = { +- TGL_GPP(0, 0, 8), /* SPI */ +- TGL_GPP(1, 9, 33), /* GPP_A */ +- TGL_GPP(2, 34, 59), /* GPP_B */ +- TGL_GPP(3, 60, 67), /* vGPIO_0 */ ++ TGL_GPP(0, 0, 24), /* GPP_A */ ++ TGL_GPP(1, 25, 44), /* GPP_R */ ++ TGL_GPP(2, 45, 70), /* GPP_B */ ++ TGL_GPP(3, 71, 78), /* vGPIO_0 */ + }; + + static const struct intel_padgroup tglh_community1_gpps[] = { +- TGL_GPP(0, 68, 91), /* GPP_C */ +- TGL_GPP(1, 92, 117), /* GPP_D */ +- TGL_GPP(2, 118, 125), /* GPP_R */ +- TGL_GPP(3, 126, 133), /* GPP_S */ +- TGL_GPP(4, 134, 149), /* GPP_G */ +- TGL_GPP(5, 150, 176), /* vGPIO */ ++ TGL_GPP(0, 79, 104), /* GPP_D */ ++ TGL_GPP(1, 105, 128), /* GPP_C */ ++ TGL_GPP(2, 129, 136), /* GPP_S */ ++ TGL_GPP(3, 137, 153), /* GPP_G */ ++ TGL_GPP(4, 154, 180), /* vGPIO */ + }; + + static const struct intel_padgroup tglh_community3_gpps[] = { +- TGL_GPP(0, 177, 189), /* GPP_E */ +- TGL_GPP(1, 190, 213), /* GPP_F */ ++ TGL_GPP(0, 181, 193), /* GPP_E */ ++ TGL_GPP(1, 194, 217), /* GPP_F */ + }; + + static const struct intel_padgroup tglh_community4_gpps[] = { +- TGL_GPP(0, 214, 237), /* GPP_H */ +- TGL_GPP(1, 238, 249), /* GPP_K */ +- TGL_GPP(2, 250, 259), /* GPP_J */ ++ TGL_GPP(0, 218, 241), /* GPP_H */ ++ TGL_GPP(1, 242, 251), /* GPP_J */ ++ TGL_GPP(2, 252, 266), /* GPP_K */ + }; + + static const struct intel_padgroup tglh_community5_gpps[] = { +- TGL_GPP(0, 260, 277), /* GPP_I */ +- TGL_GPP(1, 278, 286), /* JTAG */ ++ TGL_GPP(0, 267, 281), /* GPP_I */ ++ TGL_GPP(1, 282, 290), /* JTAG */ + }; + + static const struct intel_community tglh_communities[] = { +- TGL_COMMUNITY(0, 0, 
67, tglh_community0_gpps), +- TGL_COMMUNITY(1, 68, 176, tglh_community1_gpps), +- TGL_COMMUNITY(2, 177, 213, tglh_community3_gpps), +- TGL_COMMUNITY(3, 214, 259, tglh_community4_gpps), +- TGL_COMMUNITY(4, 260, 286, tglh_community5_gpps), ++ TGL_COMMUNITY(0, 0, 78, tglh_community0_gpps), ++ TGL_COMMUNITY(1, 79, 180, tglh_community1_gpps), ++ TGL_COMMUNITY(2, 181, 217, tglh_community3_gpps), ++ TGL_COMMUNITY(3, 218, 266, tglh_community4_gpps), ++ TGL_COMMUNITY(4, 267, 290, tglh_community5_gpps), ++}; ++ ++static const struct intel_pingroup tglh_groups[] = { ++ /* PLACE HOLDER */ ++}; ++ ++static const struct intel_function tglh_functions[] = { ++ /* PLACE HOLDER */ + }; + + static const struct intel_pinctrl_soc_data tglh_soc_data = { + .pins = tglh_pins, + .npins = ARRAY_SIZE(tglh_pins), ++ .groups = tglh_groups, ++ .ngroups = ARRAY_SIZE(tglh_groups), ++ .functions = tglh_functions, ++ .nfunctions = ARRAY_SIZE(tglh_functions), + .communities = tglh_communities, + .ncommunities = ARRAY_SIZE(tglh_communities), + }; +-- +2.17.1 + diff --git a/patches/0006-VHM-add-interrupt-injection-support.acrn b/patches/0006-VHM-add-interrupt-injection-support.acrn new file mode 100644 index 0000000000..b3b3176f07 --- /dev/null +++ b/patches/0006-VHM-add-interrupt-injection-support.acrn @@ -0,0 +1,292 @@ +From c05786421449f17b62942afc66f6232ade257977 Mon Sep 17 00:00:00 2001 +From: liang ding +Date: Fri, 29 Dec 2017 16:38:20 +0800 +Subject: [PATCH 006/150] VHM: add interrupt injection support + +VHM provides interrupt injection service for emulated devices. +this patch added interrupt injection support APIs. 
+ +Change-Id: I10385318877aa52026d6d2fc56f5fdbc8106bbd9 +Tracked-On: 218445 +Signed-off-by: liang ding +Signed-off-by: Xiao Zheng +Signed-off-by: Jason Chen CJ +Signed-off-by: Mingqiang Chi +Reviewed-on: +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/char/vhm/vhm_dev.c | 29 +++++++++++++++ + drivers/vhm/vhm_hypercall.c | 59 ++++++++++++++++++++++++++++++ + drivers/vhm/vhm_vm_mngt.c | 18 +++++++++ + include/linux/vhm/acrn_common.h | 32 ++++++++++++++++ + include/linux/vhm/acrn_hv_defs.h | 7 ++++ + include/linux/vhm/vhm_hypercall.h | 4 ++ + include/linux/vhm/vhm_ioctl_defs.h | 7 ++++ + include/linux/vhm/vhm_vm_mngt.h | 2 + + 8 files changed, 158 insertions(+) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 454211466e5d..4bee160998bc 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -238,6 +238,35 @@ static long vhm_dev_ioctl(struct file *filep, + break; + } + ++ case IC_ASSERT_IRQLINE: { ++ ret = vhm_assert_irqline(vm, ioctl_param); ++ break; ++ } ++ ++ case IC_DEASSERT_IRQLINE: { ++ ret = vhm_deassert_irqline(vm, ioctl_param); ++ break; ++ } ++ ++ case IC_PULSE_IRQLINE: { ++ ret = vhm_pulse_irqline(vm, ioctl_param); ++ break; ++ } ++ ++ case IC_INJECT_MSI: { ++ struct acrn_msi_entry msi; ++ ++ if (copy_from_user(&msi, (void *)ioctl_param, sizeof(msi))) ++ return -EFAULT; ++ ++ ret = hcall_inject_msi(vm->vmid, virt_to_phys(&msi)); ++ if (ret < 0) { ++ pr_err("vhm: failed to inject!\n"); ++ return -EFAULT; ++ } ++ break; ++ } ++ + default: + pr_warn("Unknown IOCTL 0x%x\n", ioctl_num); + ret = 0; +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index 1b25f4ec4d06..dc87d30151d5 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -53,6 +53,11 @@ + #include + #include + ++inline long hcall_inject_msi(unsigned long vmid, unsigned long msi) ++{ ++ return acrn_hypercall2(HC_INJECT_MSI, vmid, msi); ++} ++ + inline long 
hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) + { + return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); +@@ -147,3 +152,57 @@ inline long vhm_query_vm_state(struct vhm_vm *vm) + + return ret; + } ++ ++inline long vhm_assert_irqline(struct vhm_vm *vm, unsigned long ioctl_param) ++{ ++ long ret = 0; ++ struct acrn_irqline irq; ++ ++ if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_ASSERT_IRQLINE, vm->vmid, ++ virt_to_phys(&irq)); ++ if (ret < 0) { ++ pr_err("vhm: failed to assert irq!\n"); ++ return -EFAULT; ++ } ++ ++ return ret; ++} ++ ++inline long vhm_deassert_irqline(struct vhm_vm *vm, unsigned long ioctl_param) ++{ ++ long ret = 0; ++ struct acrn_irqline irq; ++ ++ if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_DEASSERT_IRQLINE, vm->vmid, ++ virt_to_phys(&irq)); ++ if (ret < 0) { ++ pr_err("vhm: failed to deassert irq!\n"); ++ return -EFAULT; ++ } ++ ++ return ret; ++} ++ ++inline long vhm_pulse_irqline(struct vhm_vm *vm, unsigned long ioctl_param) ++{ ++ long ret = 0; ++ struct acrn_irqline irq; ++ ++ if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_PULSE_IRQLINE, vm->vmid, ++ virt_to_phys(&irq)); ++ if (ret < 0) { ++ pr_err("vhm: failed to assert irq!\n"); ++ return -EFAULT; ++ } ++ ++ return ret; ++} +diff --git a/drivers/vhm/vhm_vm_mngt.c b/drivers/vhm/vhm_vm_mngt.c +index 564435f2bb40..048ab41f4f9c 100644 +--- a/drivers/vhm/vhm_vm_mngt.c ++++ b/drivers/vhm/vhm_vm_mngt.c +@@ -95,6 +95,24 @@ void put_vm(struct vhm_vm *vm) + mutex_unlock(&vhm_vm_list_lock); + } + ++int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, ++ unsigned long msi_data) ++{ ++ struct acrn_msi_entry msi; ++ int ret; ++ ++ /* msi_addr: addr[19:12] with dest vcpu id */ ++ /* msi_data: data[7:0] with vector */ ++ msi.msi_addr = msi_addr; ++ msi.msi_data = msi_data; ++ ret 
= hcall_inject_msi(vmid, virt_to_phys(&msi)); ++ if (ret < 0) { ++ pr_err("vhm: failed to inject!\n"); ++ return -EFAULT; ++ } ++ return 0; ++} ++ + void vm_list_add(struct list_head *list) + { + list_add(list, &vhm_vm_list); +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 47c3542c9cc6..6345f4ec6a47 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -62,6 +62,20 @@ + * Commmon structures for ACRN/VHM/DM + */ + ++enum irq_mode { ++ IRQ_PULSE, ++ IRQ_ASSERT, ++ IRQ_DEASSERT, ++} __attribute__((aligned(4))); ++ ++/* ISA type ++ * inject interrut to both PIC and IOAPIC ++ */ ++enum interrupt_type { ++ ACRN_INTR_TYPE_ISA, ++ ACRN_INTR_TYPE_IOAPIC, ++} __attribute__((aligned(4))); ++ + /* + * IO request + */ +@@ -185,4 +199,22 @@ struct acrn_ioreq_notify { + unsigned long vcpu_mask; + } __attribute__((aligned(8))); + ++/* For ISA, PIC, IOAPIC etc */ ++struct acrn_irqline { ++ enum interrupt_type intr_type; ++ unsigned long pic_irq; /* IN: for ISA type */ ++ unsigned long ioapic_irq; /* IN: for IOAPIC type, -1 don't inject */ ++} __attribute__((aligned(8))); ++ ++/* For MSI type inject */ ++struct acrn_msi_entry { ++ unsigned long msi_addr; /* IN: addr[19:12] with dest vcpu id */ ++ unsigned long msi_data; /* IN: data[7:0] with vector */ ++} __attribute__((aligned(8))); ++ ++/* For NMI inject */ ++struct acrn_nmi_entry { ++ unsigned long vcpuid; /* IN: -1 means vcpu0 */ ++} __attribute__((aligned(8))); ++ + #endif /* ACRN_COMMON_H */ +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index f57f2b62e972..7b438cc01b48 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -74,6 +74,13 @@ + #define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04) + #define HC_QUERY_VMSTATE _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05) + ++/* IRQ and Interrupts */ ++#define HC_ID_IRQ_BASE 0x100UL ++#define HC_ASSERT_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x00) 
++#define HC_DEASSERT_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x01) ++#define HC_PULSE_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x02) ++#define HC_INJECT_MSI _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x03) ++ + /* DM ioreq management */ + #define HC_ID_IOREQ_BASE 0x200UL + #define HC_SET_IOREQ_BUFFER _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00) +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index 86b5f579687a..e372ea48fa81 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -139,6 +139,7 @@ static inline long acrn_hypercall4(unsigned long hyp_id, unsigned long param1, + return result; + } + ++inline long hcall_inject_msi(unsigned long vmid, unsigned long msi); + inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer); + inline long hcall_notify_req_finish(unsigned long vmid, + unsigned long vcpu_mask); +@@ -148,5 +149,8 @@ inline long vhm_resume_vm(struct vhm_vm *vm); + inline long vhm_pause_vm(struct vhm_vm *vm); + inline long vhm_destroy_vm(struct vhm_vm *vm); + inline long vhm_query_vm_state(struct vhm_vm *vm); ++inline long vhm_assert_irqline(struct vhm_vm *vm, unsigned long ioctl_param); ++inline long vhm_deassert_irqline(struct vhm_vm *vm, unsigned long ioctl_param); ++inline long vhm_pulse_irqline(struct vhm_vm *vm, unsigned long ioctl_param); + + #endif /* VHM_HYPERCALL_H */ +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 01adcfade99c..3be6aca40844 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -64,6 +64,13 @@ + #define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) + #define IC_QUERY_VMSTATE _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) + ++/* IRQ and Interrupts */ ++#define IC_ID_IRQ_BASE 0x100UL ++#define IC_ASSERT_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x00) ++#define IC_DEASSERT_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x01) ++#define IC_PULSE_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x02) 
++#define IC_INJECT_MSI _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x03) ++ + /* DM ioreq management */ + #define IC_ID_IOREQ_BASE 0x200UL + #define IC_SET_IOREQ_BUFFER _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x00) +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index eb410024157f..fb02c00ec5e2 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -77,6 +77,8 @@ struct vhm_vm { + + struct vhm_vm *find_get_vm(unsigned long vmid); + void put_vm(struct vhm_vm *vm); ++int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, ++ unsigned long msi_data); + + void vm_list_add(struct list_head *list); + void vm_mutex_lock(struct mutex *mlock); +-- +2.17.1 + diff --git a/patches/0006-drm-drop-resource_id-parameter-from-drm_fb_helper_remo.drm b/patches/0006-drm-drop-resource_id-parameter-from-drm_fb_helper_remo.drm new file mode 100644 index 0000000000..062b98c77b --- /dev/null +++ b/patches/0006-drm-drop-resource_id-parameter-from-drm_fb_helper_remo.drm @@ -0,0 +1,139 @@ +From 799092424f9eef7d797e65403cb7d523717fdc79 Mon Sep 17 00:00:00 2001 +From: Gerd Hoffmann +Date: Thu, 22 Aug 2019 11:06:44 +0200 +Subject: [PATCH 006/690] drm: drop resource_id parameter from + drm_fb_helper_remove_conflicting_pci_framebuffers + +Not needed any more for remove_conflicting_pci_framebuffers calls. 
+ +Signed-off-by: Gerd Hoffmann +Reviewed-by: Daniel Vetter +Link: http://patchwork.freedesktop.org/patch/msgid/20190822090645.25410-3-kraxel@redhat.com +--- + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- + drivers/gpu/drm/bochs/bochs_drv.c | 2 +- + drivers/gpu/drm/cirrus/cirrus.c | 2 +- + drivers/gpu/drm/mgag200/mgag200_drv.c | 2 +- + drivers/gpu/drm/qxl/qxl_drv.c | 2 +- + drivers/gpu/drm/radeon/radeon_drv.c | 2 +- + drivers/gpu/drm/virtio/virtgpu_drv.c | 1 - + include/drm/drm_fb_helper.h | 4 +--- + 8 files changed, 7 insertions(+), 10 deletions(-) + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index 2a00a36106b2..4da1d7fb10f4 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -1084,7 +1084,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, + #endif + + /* Get rid of things like offb */ +- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb"); ++ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb"); + if (ret) + return ret; + +diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c +index 770e1625d05e..3b9b0d9bbc14 100644 +--- a/drivers/gpu/drm/bochs/bochs_drv.c ++++ b/drivers/gpu/drm/bochs/bochs_drv.c +@@ -114,7 +114,7 @@ static int bochs_pci_probe(struct pci_dev *pdev, + return -ENOMEM; + } + +- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "bochsdrmfb"); ++ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "bochsdrmfb"); + if (ret) + return ret; + +diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c +index 36a69aec8a4b..89d9e6fdeb8c 100644 +--- a/drivers/gpu/drm/cirrus/cirrus.c ++++ b/drivers/gpu/drm/cirrus/cirrus.c +@@ -532,7 +532,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev, + struct cirrus_device *cirrus; + int ret; + +- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb"); ++ ret 
= drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb"); + if (ret) + return ret; + +diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c +index afd9119b6cf1..4f9df3b93598 100644 +--- a/drivers/gpu/drm/mgag200/mgag200_drv.c ++++ b/drivers/gpu/drm/mgag200/mgag200_drv.c +@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist); + + static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + { +- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "mgag200drmfb"); ++ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb"); + + return drm_get_pci_dev(pdev, ent, &driver); + } +diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c +index 265bfe9f8016..59818e5930af 100644 +--- a/drivers/gpu/drm/qxl/qxl_drv.c ++++ b/drivers/gpu/drm/qxl/qxl_drv.c +@@ -88,7 +88,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (ret) + goto free_dev; + +- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); ++ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl"); + if (ret) + goto disable_pci; + +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c +index 9e55076578c6..888e0f384c61 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.c ++++ b/drivers/gpu/drm/radeon/radeon_drv.c +@@ -361,7 +361,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, + return -EPROBE_DEFER; + + /* Get rid of things like offb */ +- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "radeondrmfb"); ++ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "radeondrmfb"); + if (ret) + return ret; + +diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c +index 0fc32fa0b3c0..3d24181636e1 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_drv.c ++++ b/drivers/gpu/drm/virtio/virtgpu_drv.c +@@ -56,7 +56,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, 
struct virtio_device *vd + dev->pdev = pdev; + if (vga) + drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, +- 0, + "virtiodrmfb"); + + /* +diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h +index 5a5f4b1d8241..8dcc012ccbc8 100644 +--- a/include/drm/drm_fb_helper.h ++++ b/include/drm/drm_fb_helper.h +@@ -539,18 +539,16 @@ drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a, + /** + * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices + * @pdev: PCI device +- * @resource_id: index of PCI BAR configuring framebuffer memory + * @name: requesting driver name + * + * This function removes framebuffer devices (eg. initialized by firmware) +- * using memory range configured for @pdev's BAR @resource_id. ++ * using memory range configured for any of @pdev's memory bars. + * + * The function assumes that PCI device with shadowed ROM drives a primary + * display and so kicks out vga16fb. + */ + static inline int + drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, +- int resource_id, + const char *name) + { + int ret = 0; +-- +2.17.1 + diff --git a/patches/0006-net-stmmac-update-pci-platform-data-to-phy_in.connectivity b/patches/0006-net-stmmac-update-pci-platform-data-to-phy_in.connectivity new file mode 100644 index 0000000000..26a76e0f67 --- /dev/null +++ b/patches/0006-net-stmmac-update-pci-platform-data-to-phy_in.connectivity @@ -0,0 +1,81 @@ +From a44bbe19da015910181e865cf89e62a1c4e9a51e Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Mon, 21 Oct 2019 17:33:06 +0800 +Subject: [PATCH 006/108] net: stmmac: update pci platform data to + phy_interface + +The recent patch to support passive mode converter did not take care the +phy interface configuration in PCI platform data. Hence, converting all +the PCI platform data from plat->interface to plat->phy_interface as the +default mode is meant for PHY. 
+ +Fixes: 0060c8783330 ("net: stmmac: implement support for passive mode converters via dt") + +Signed-off-by: Voon Weifeng +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 292045f4581f..03e999b77aab 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -96,7 +96,7 @@ static int stmmac_default_data(struct pci_dev *pdev, + + plat->bus_id = 1; + plat->phy_addr = 0; +- plat->interface = PHY_INTERFACE_MODE_GMII; ++ plat->phy_interface = PHY_INTERFACE_MODE_GMII; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; +@@ -220,7 +220,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev, + { + plat->bus_id = 1; + plat->phy_addr = 0; +- plat->interface = PHY_INTERFACE_MODE_SGMII; ++ plat->phy_interface = PHY_INTERFACE_MODE_SGMII; ++ + return ehl_common_data(pdev, plat); + } + +@@ -233,7 +234,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev, + { + plat->bus_id = 1; + plat->phy_addr = 0; +- plat->interface = PHY_INTERFACE_MODE_RGMII; ++ plat->phy_interface = PHY_INTERFACE_MODE_RGMII; ++ + return ehl_common_data(pdev, plat); + } + +@@ -261,7 +263,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev, + { + plat->bus_id = 1; + plat->phy_addr = 0; +- plat->interface = PHY_INTERFACE_MODE_SGMII; ++ plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + return tgl_common_data(pdev, plat); + } + +@@ -361,7 +363,7 @@ static int quark_default_data(struct pci_dev *pdev, + + plat->bus_id = pci_dev_id(pdev); + plat->phy_addr = ret; +- plat->interface = PHY_INTERFACE_MODE_RMII; ++ plat->phy_interface = PHY_INTERFACE_MODE_RMII; + + plat->dma_cfg->pbl = 16; + plat->dma_cfg->pblx8 = true; +@@ -418,7 +420,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev, + + plat->bus_id = 1; + plat->phy_addr = -1; +- plat->interface 
= PHY_INTERFACE_MODE_GMII; ++ plat->phy_interface = PHY_INTERFACE_MODE_GMII; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; +-- +2.17.1 + diff --git a/patches/0006-platform-x86-Change-struct-fields-to-16-bit-i.sep-socwatch b/patches/0006-platform-x86-Change-struct-fields-to-16-bit-i.sep-socwatch new file mode 100644 index 0000000000..de58f9e41f --- /dev/null +++ b/patches/0006-platform-x86-Change-struct-fields-to-16-bit-i.sep-socwatch @@ -0,0 +1,169 @@ +From 226c0906405836e6d7eb1d3728e90e93a5d27e38 Mon Sep 17 00:00:00 2001 +From: Jon Moeller +Date: Tue, 8 Jan 2019 20:29:09 -0600 +Subject: [PATCH 06/27] platform/x86: Change struct fields to 16-bit in sep, + socwatchhv drivers + +Changing some fields in structs used by sep and socwatch drivers to match +16-bit fields in hypervisor to avoid casting. + +Signed-off-by: Jon Moeller +--- + drivers/platform/x86/sepdk/inc/control.h | 2 +- + drivers/platform/x86/sepdk/inc/lwpmudrv.h | 15 ++++++----- + .../x86/sepdk/include/lwpmudrv_struct.h | 6 ++--- + drivers/platform/x86/sepdk/sep/apic.c | 4 +-- + .../x86/socwatchhv/inc/swhv_structs.h | 25 ++++++++++--------- + 5 files changed, 26 insertions(+), 26 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/inc/control.h b/drivers/platform/x86/sepdk/inc/control.h +index 73ecc2efeaa5..5a94c3ae0fed 100644 +--- a/drivers/platform/x86/sepdk/inc/control.h ++++ b/drivers/platform/x86/sepdk/inc/control.h +@@ -102,7 +102,7 @@ struct GLOBAL_STATE_NODE_S { + typedef struct CPU_STATE_NODE_S CPU_STATE_NODE; + typedef CPU_STATE_NODE * CPU_STATE; + struct CPU_STATE_NODE_S { +- S32 apic_id; // Processor ID on the system bus ++ U32 apic_id; // Processor ID on the system bus + PVOID apic_linear_addr; // linear address of local apic + PVOID apic_physical_addr; // physical address of local apic + +diff --git a/drivers/platform/x86/sepdk/inc/lwpmudrv.h b/drivers/platform/x86/sepdk/inc/lwpmudrv.h +index 37c8109a0e8b..994121d28ef2 100644 +--- a/drivers/platform/x86/sepdk/inc/lwpmudrv.h 
++++ b/drivers/platform/x86/sepdk/inc/lwpmudrv.h +@@ -454,9 +454,8 @@ int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf); + + extern shared_buf_t **samp_buf_per_cpu; + +-#define MAX_NR_PCPUS 8 + #define MAX_NR_VCPUS 8 +-#define MAX_NR_VMS 6 ++#define MAX_NR_VMS 4 + #define MAX_MSR_LIST_NUM 15 + #define MAX_GROUP_NUM 1 + +@@ -490,21 +489,21 @@ struct profiling_msr_ops_list { + }; + + struct profiling_vcpu_pcpu_map { +- int32_t vcpu_id; +- int32_t pcpu_id; +- int32_t apic_id; ++ int16_t vcpu_id; ++ int16_t pcpu_id; ++ uint32_t apic_id; + }; + + struct profiling_vm_info { +- int32_t vm_id; ++ uint16_t vm_id; + u_char guid[16]; + char vm_name[16]; +- int32_t num_vcpus; ++ uint16_t num_vcpus; + struct profiling_vcpu_pcpu_map cpu_map[MAX_NR_VCPUS]; + }; + + struct profiling_vm_info_list { +- int32_t num_vms; ++ uint16_t num_vms; + struct profiling_vm_info vm_list[MAX_NR_VMS]; + }; + +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +index 82819e5e11b7..3af04d4ed829 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +@@ -1646,14 +1646,14 @@ typedef CPU_MAP_TRACE_NODE * CPU_MAP_TRACE; + struct CPU_MAP_TRACE_NODE_S { + U64 tsc; + U32 os_id; +- U32 vcpu_id; +- U32 pcpu_id; ++ U16 vcpu_id; ++ U16 pcpu_id; + U8 is_static : 1; + U8 initial : 1; + U8 reserved1 : 6; + U8 reserved2; + U16 reserved3; +- U64 reserved4; ++ U32 reserved4; + }; + + #define CPU_MAP_TRACE_tsc(x) ((x)->tsc) +diff --git a/drivers/platform/x86/sepdk/sep/apic.c b/drivers/platform/x86/sepdk/sep/apic.c +index 693c526d63de..8f8bc5635ced 100755 +--- a/drivers/platform/x86/sepdk/sep/apic.c ++++ b/drivers/platform/x86/sepdk/sep/apic.c +@@ -65,7 +65,7 @@ static VOID apic_Get_APIC_ID(S32 cpu) + U32 apic_id = 0; + CPU_STATE pcpu; + #if defined(DRV_SEP_ACRN_ON) +- U32 i; ++ U16 i; + #endif + + SEP_DRV_LOG_TRACE_IN("CPU: %d.", cpu); +@@ -108,7 
+108,7 @@ static VOID apic_Get_APIC_ID(S32 cpu) + "apic_Get_APIC_ID: Error in reading APIC ID on ACRN\n"); + } else { + for (i = 0; i < vm_info_list->num_vms; i++) { +- if (vm_info_list->vm_list[i].vm_id == 0xFFFFFFFF) { ++ if (vm_info_list->vm_list[i].vm_id == 0xFFFF) { + CPU_STATE_apic_id(pcpu) = + vm_info_list->vm_list[i] + .cpu_map[cpu] +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_structs.h b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h +index d5fd717511ba..0393a95e4875 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_structs.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h +@@ -199,30 +199,31 @@ struct vm_switch_trace { + uint64_t vm_enter_tsc; + uint64_t vm_exit_tsc; + uint64_t vm_exit_reason; +- int32_t os_id; ++ uint16_t os_id; ++ uint16_t reserved; + } __attribute__((aligned(32))); + #define VM_SWITCH_TRACE_SIZE ((uint64_t)sizeof(struct vm_switch_trace)) + +-#define MAX_NR_VCPUS 8 +-#define MAX_NR_VMS 6 ++#define CONFIG_MAX_VCPUS_PER_VM 8 ++#define CONFIG_MAX_VM_NUM 6 + + struct profiling_vcpu_pcpu_map { +- int32_t vcpu_id; +- int32_t pcpu_id; +- int32_t apic_id; ++ int16_t vcpu_id; ++ int16_t pcpu_id; ++ uint32_t apic_id; + } __attribute__((aligned(8))); + + struct profiling_vm_info { +- int32_t vm_id_num; +- unsigned char guid[16]; ++ uint16_t vm_id_num; ++ uint8_t guid[16]; + char vm_name[16]; +- int32_t num_vcpus; +- struct profiling_vcpu_pcpu_map cpu_map[MAX_NR_VCPUS]; ++ uint16_t num_vcpus; ++ struct profiling_vcpu_pcpu_map cpu_map[CONFIG_MAX_VCPUS_PER_VM]; + } __attribute__((aligned(8))); + + struct profiling_vm_info_list { +- int32_t num_vms; +- struct profiling_vm_info vm_list[MAX_NR_VMS]; ++ uint16_t num_vms; ++ struct profiling_vm_info vm_list[CONFIG_MAX_VM_NUM]; + } __attribute__((aligned(8))); + + /* +-- +2.17.1 + diff --git a/patches/0006-rpmb-enable-emmc-specific-read-data-fixup.security b/patches/0006-rpmb-enable-emmc-specific-read-data-fixup.security new file mode 100644 index 0000000000..fd944b18d2 --- 
/dev/null +++ b/patches/0006-rpmb-enable-emmc-specific-read-data-fixup.security @@ -0,0 +1,77 @@ +From 504f4cf7e5ef75d9ab4ef4da871b31934547640d Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Sun, 7 Aug 2016 11:27:08 +0300 +Subject: [PATCH 06/65] rpmb: enable emmc specific read data fixup + +For eMMC the block count of the RPMB read operation is not indicated in +the original RPMB Data Read Request packet. +This might be different then the implementation of other protocol +standards. +This patch implements a fixup for this behavior. + +V6: New in the series. +V7: Resend +V8: Resend. +V9: Scan all the frames in the sequence. +V10: Fix kdoc + +Change-Id: I34a4aeccbd0294b2c7c83837faa4ba5a54b9be48 +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +Tested-by: Avri Altman +--- + drivers/char/rpmb/core.c | 31 +++++++++++++++++++++++++++++++ + 1 file changed, 31 insertions(+) + +diff --git a/drivers/char/rpmb/core.c b/drivers/char/rpmb/core.c +index aa0ea4c3f1ce..c9e62193b8d1 100644 +--- a/drivers/char/rpmb/core.c ++++ b/drivers/char/rpmb/core.c +@@ -34,6 +34,36 @@ void rpmb_dev_put(struct rpmb_dev *rdev) + } + EXPORT_SYMBOL_GPL(rpmb_dev_put); + ++/** ++ * rpmb_cmd_fixup() - fixup rpmb command ++ * @rdev: rpmb device ++ * @cmds: rpmb command list ++ * @ncmds: number of commands ++ */ ++static void rpmb_cmd_fixup(struct rpmb_dev *rdev, ++ struct rpmb_cmd *cmds, u32 ncmds) ++{ ++ int i; ++ ++ if (RPMB_TYPE_HW(rdev->ops->type) != RPMB_TYPE_EMMC) ++ return; ++ ++ /* Fixup RPMB_READ_DATA specific to eMMC ++ * The block count of the RPMB read operation is not indicated ++ * in the original RPMB Data Read Request packet. ++ * This is different then implementation for other protocol ++ * standards. 
++ */ ++ for (i = 0; i < ncmds; i++) { ++ struct rpmb_frame_jdec *frame = cmds[i].frames; ++ ++ if (frame->req_resp == cpu_to_be16(RPMB_READ_DATA)) { ++ dev_dbg(&rdev->dev, "Fixing up READ_DATA frame to block_count=0\n"); ++ frame->block_count = 0; ++ } ++ } ++} ++ + /** + * rpmb_cmd_seq() - send RPMB command sequence + * @rdev: rpmb device +@@ -56,6 +86,7 @@ int rpmb_cmd_seq(struct rpmb_dev *rdev, struct rpmb_cmd *cmds, u32 ncmds) + mutex_lock(&rdev->lock); + err = -EOPNOTSUPP; + if (rdev->ops && rdev->ops->cmd_seq) { ++ rpmb_cmd_fixup(rdev, cmds, ncmds); + err = rdev->ops->cmd_seq(rdev->dev.parent, rdev->target, + cmds, ncmds); + } +-- +2.17.1 + diff --git a/patches/0006-trusty-arm64-fiq-support.trusty b/patches/0006-trusty-arm64-fiq-support.trusty new file mode 100644 index 0000000000..f5637aff95 --- /dev/null +++ b/patches/0006-trusty-arm64-fiq-support.trusty @@ -0,0 +1,266 @@ +From 9bd76d702484673d3e89eca3e9fbfecd9172e91f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Mon, 14 Apr 2014 17:18:40 -0700 +Subject: [PATCH 06/63] trusty: arm64 fiq support + +Change-Id: I907fbaa2b9d1697b204dad6c16d9027ef3bb0a58 +--- + drivers/trusty/Kconfig | 8 ++ + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-fiq-arm64-glue.S | 54 ++++++++++ + drivers/trusty/trusty-fiq-arm64.c | 140 +++++++++++++++++++++++++ + include/linux/trusty/smcall.h | 4 + + 5 files changed, 207 insertions(+) + create mode 100644 drivers/trusty/trusty-fiq-arm64-glue.S + create mode 100644 drivers/trusty/trusty-fiq-arm64.c + +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index 3c725e29b399..fc1061deb876 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -20,4 +20,12 @@ config TRUSTY_FIQ_ARM + select TRUSTY_FIQ + default y + ++config TRUSTY_FIQ_ARM64 ++ tristate ++ depends on TRUSTY ++ depends on ARM64 ++ select FIQ_GLUE ++ select TRUSTY_FIQ ++ default y ++ + endmenu +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index 
a01c82485eb6..e162a4061e14 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -6,3 +6,4 @@ obj-$(CONFIG_TRUSTY) += trusty.o + obj-$(CONFIG_TRUSTY) += trusty-irq.o + obj-$(CONFIG_TRUSTY_FIQ) += trusty-fiq.o + obj-$(CONFIG_TRUSTY_FIQ_ARM) += trusty-fiq-arm.o ++obj-$(CONFIG_TRUSTY_FIQ_ARM64) += trusty-fiq-arm64.o trusty-fiq-arm64-glue.o +diff --git a/drivers/trusty/trusty-fiq-arm64-glue.S b/drivers/trusty/trusty-fiq-arm64-glue.S +new file mode 100644 +index 000000000000..6994b3a94fc3 +--- /dev/null ++++ b/drivers/trusty/trusty-fiq-arm64-glue.S +@@ -0,0 +1,54 @@ ++/* ++ * Copyright (C) 2013 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++ ++.macro push reg1,reg2,remregs:vararg ++ .ifnb \remregs ++ push \remregs ++ .endif ++ stp \reg1, \reg2, [sp, #-16]! 
++.endm ++ ++.macro pop reg1,reg2,remregs:vararg ++ ldp \reg1, \reg2, [sp], #16 ++ .ifnb \remregs ++ pop \remregs ++ .endif ++.endm ++ ++ENTRY(trusty_fiq_glue_arm64) ++ sub sp, sp, #S_FRAME_SIZE - S_LR ++ push x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, \ ++ x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, \ ++ x26, x27, x28, x29 ++ ldr x0, =SMC_FC64_GET_FIQ_REGS ++ smc #0 ++ stp x0, x1, [sp, #S_PC] /* original pc, cpsr */ ++ tst x1, PSR_MODE_MASK ++ csel x2, x2, x3, eq /* sp el0, sp el1 */ ++ stp x30, x2, [sp, #S_LR] /* lr, original sp */ ++ mov x0, sp ++ mov x1, x3 ++ bl trusty_fiq_handler ++ pop x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, \ ++ x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, \ ++ x26, x27, x28, x29 ++ ldr x30, [sp], #S_FRAME_SIZE - S_LR /* load LR and restore SP */ ++ ldr x0, =SMC_FC_FIQ_EXIT ++ smc #0 ++ b . /* should not get here */ +diff --git a/drivers/trusty/trusty-fiq-arm64.c b/drivers/trusty/trusty-fiq-arm64.c +new file mode 100644 +index 000000000000..df05a98f235d +--- /dev/null ++++ b/drivers/trusty/trusty-fiq-arm64.c +@@ -0,0 +1,140 @@ ++/* ++ * Copyright (C) 2013 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "trusty-fiq.h" ++ ++extern void trusty_fiq_glue_arm64(void); ++ ++static struct device *trusty_dev; ++static DEFINE_PER_CPU(void *, fiq_stack); ++static struct fiq_glue_handler *current_handler; ++static DEFINE_MUTEX(fiq_glue_lock); ++ ++void trusty_fiq_handler(struct pt_regs *regs, void *svc_sp) ++{ ++ current_handler->fiq(current_handler, regs, svc_sp); ++} ++ ++static void smp_nop_call(void *info) ++{ ++ /* If this call is reached, the fiq handler is not currently running */ ++} ++ ++static void fiq_glue_clear_handler(void) ++{ ++ int cpu; ++ int ret; ++ void *stack; ++ ++ for_each_possible_cpu(cpu) { ++ stack = per_cpu(fiq_stack, cpu); ++ if (!stack) ++ continue; ++ ++ ret = trusty_fast_call64(trusty_dev, SMC_FC64_SET_FIQ_HANDLER, ++ cpu, 0, 0); ++ if (ret) { ++ pr_err("%s: SMC_FC_SET_FIQ_HANDLER(%d, 0, 0) failed 0x%x, skip free stack\n", ++ __func__, cpu, ret); ++ continue; ++ } ++ ++ per_cpu(fiq_stack, cpu) = NULL; ++ smp_call_function_single(cpu, smp_nop_call, NULL, true); ++ free_pages((unsigned long)stack, THREAD_SIZE_ORDER); ++ } ++} ++ ++int fiq_glue_register_handler(struct fiq_glue_handler *handler) ++{ ++ int ret; ++ int cpu; ++ void *stack; ++ unsigned long irqflags; ++ ++ if (!handler || !handler->fiq) ++ return -EINVAL; ++ ++ mutex_lock(&fiq_glue_lock); ++ ++ if (!trusty_dev) { ++ ret = -ENODEV; ++ goto err_no_trusty; ++ } ++ if (current_handler) { ++ ret = -EBUSY; ++ goto err_busy; ++ } ++ ++ current_handler = handler; ++ ++ for_each_possible_cpu(cpu) { ++ stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); ++ if (WARN_ON(!stack)) { ++ ret = -ENOMEM; ++ goto err_alloc_fiq_stack; ++ } ++ per_cpu(fiq_stack, cpu) = stack; ++ stack += THREAD_START_SP; ++ ++ local_irq_save(irqflags); ++ ret = trusty_fast_call64(trusty_dev, SMC_FC64_SET_FIQ_HANDLER, ++ cpu, (uintptr_t)trusty_fiq_glue_arm64, ++ (uintptr_t)stack); ++ local_irq_restore(irqflags); ++ 
if (ret) { ++ pr_err("%s: SMC_FC_SET_FIQ_HANDLER(%d, %p, %p) failed 0x%x\n", ++ __func__, cpu, trusty_fiq_glue_arm64, ++ stack, ret); ++ ret = -EINVAL; ++ goto err_set_fiq_handler; ++ } ++ } ++ ++ mutex_unlock(&fiq_glue_lock); ++ return 0; ++ ++err_set_fiq_handler: ++err_alloc_fiq_stack: ++ fiq_glue_clear_handler(); ++err_busy: ++err_no_trusty: ++ mutex_unlock(&fiq_glue_lock); ++ return ret; ++} ++ ++int trusty_fiq_arch_probe(struct platform_device *pdev) ++{ ++ mutex_lock(&fiq_glue_lock); ++ trusty_dev = pdev->dev.parent; ++ mutex_unlock(&fiq_glue_lock); ++ ++ return 0; ++} ++ ++void trusty_fiq_arch_remove(struct platform_device *pdev) ++{ ++ mutex_lock(&fiq_glue_lock); ++ fiq_glue_clear_handler(); ++ trusty_dev = NULL; ++ mutex_unlock(&fiq_glue_lock); ++} +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index 4344683f6c61..e8704974d3e3 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -66,6 +66,10 @@ + #define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1) + #define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2) + #define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3) ++#define SMC_FC_FIQ_ENTER SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 4) ++ ++#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5) ++#define SMC_FC64_GET_FIQ_REGS SMC_FASTCALL64_NR (SMC_ENTITY_SECURE_MONITOR, 6) + + #define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7) + #define SMC_FC_CPU_RESUME SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8) +-- +2.17.1 + diff --git a/patches/0006-usb-typec-Introduce-typec_get_drvdata.usb-typec b/patches/0006-usb-typec-Introduce-typec_get_drvdata.usb-typec new file mode 100644 index 0000000000..849f3a3ebd --- /dev/null +++ b/patches/0006-usb-typec-Introduce-typec_get_drvdata.usb-typec @@ -0,0 +1,75 @@ +From 49ad3c90db4b5f526b4c5d6ec3b899af603736b2 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: 
Tue, 1 Oct 2019 12:21:38 +0300 +Subject: [PATCH 06/18] usb: typec: Introduce typec_get_drvdata() + +Leaving the private driver_data pointer of the port device +to the port drivers. + +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/class.c | 11 +++++++++++ + include/linux/usb/typec.h | 4 ++++ + 2 files changed, 15 insertions(+) + +diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c +index 3835e2d9fba6..9fab0be8f08c 100644 +--- a/drivers/usb/typec/class.c ++++ b/drivers/usb/typec/class.c +@@ -1492,6 +1492,16 @@ EXPORT_SYMBOL_GPL(typec_set_mode); + + /* --------------------------------------- */ + ++/** ++ * typec_get_drvdata - Return private driver data pointer ++ * @port: USB Type-C port ++ */ ++void *typec_get_drvdata(struct typec_port *port) ++{ ++ return dev_get_drvdata(&port->dev); ++} ++EXPORT_SYMBOL_GPL(typec_get_drvdata); ++ + /** + * typec_port_register_altmode - Register USB Type-C Port Alternate Mode + * @port: USB Type-C Port that supports the alternate mode +@@ -1604,6 +1614,7 @@ struct typec_port *typec_register_port(struct device *parent, + port->dev.fwnode = cap->fwnode; + port->dev.type = &typec_port_dev_type; + dev_set_name(&port->dev, "port%d", id); ++ dev_set_drvdata(&port->dev, cap->driver_data); + + port->sw = typec_switch_get(&port->dev); + if (IS_ERR(port->sw)) { +diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h +index 7df4ecabc78a..8b90cd77331c 100644 +--- a/include/linux/usb/typec.h ++++ b/include/linux/usb/typec.h +@@ -179,6 +179,7 @@ struct typec_partner_desc { + * @sw: Cable plug orientation switch + * @mux: Multiplexer switch for Alternate/Accessory Modes + * @fwnode: Optional fwnode of the port ++ * @driver_data: Private pointer for driver specific info + * @try_role: Set data role preference for DRP port + * @dr_set: Set Data Role + * @pr_set: Set Power Role +@@ -198,6 +199,7 @@ struct typec_capability { + struct typec_switch *sw; + struct typec_mux *mux; + struct fwnode_handle *fwnode; ++ void 
*driver_data; + + int (*try_role)(const struct typec_capability *, + int role); +@@ -241,6 +243,8 @@ int typec_set_orientation(struct typec_port *port, + enum typec_orientation typec_get_orientation(struct typec_port *port); + int typec_set_mode(struct typec_port *port, int mode); + ++void *typec_get_drvdata(struct typec_port *port); ++ + int typec_find_port_power_role(const char *name); + int typec_find_power_role(const char *name); + int typec_find_port_data_role(const char *name); +-- +2.17.1 + diff --git a/patches/0006-x86-intel_pmc_core-Clean-up-Remove-comma-after-th.core-ehl b/patches/0006-x86-intel_pmc_core-Clean-up-Remove-comma-after-th.core-ehl new file mode 100644 index 0000000000..3830ed33c1 --- /dev/null +++ b/patches/0006-x86-intel_pmc_core-Clean-up-Remove-comma-after-th.core-ehl @@ -0,0 +1,79 @@ +From 097218db5e843e749f412c74ee8a3271104750a5 Mon Sep 17 00:00:00 2001 +From: Gayatri Kammela +Date: Tue, 3 Sep 2019 12:43:44 -0700 +Subject: [PATCH 06/12] x86/intel_pmc_core: Clean up: Remove comma after the + termination line + +It is common practice to place a comma after the last entry in an +initialized array to avoid the need to add one in future patches that +extend the array. But when the last element is a termination marker, the +comma could be harmful. Any new entries must be added before the +terminator (and the comma would prevent the compiler from issuing a +warning about an entry after the terminator). + +Cc: Peter Zijlstra +Cc: Srinivas Pandruvada +Cc: Andy Shevchenko +Cc: Kan Liang +Cc: David E. 
Box +Cc: Rajneesh Bhardwaj +Cc: Tony Luck +Suggested-by: Andy Shevchenko +Reviewed-by: Tony Luck +Signed-off-by: Gayatri Kammela +--- + drivers/platform/x86/intel_pmc_core.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c +index 94a008efb09b..6ad829915689 100644 +--- a/drivers/platform/x86/intel_pmc_core.c ++++ b/drivers/platform/x86/intel_pmc_core.c +@@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = { + {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1}, + {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2}, + {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3}, +- {}, ++ {} + }; + + static const struct pmc_bit_map spt_mphy_map[] = { +@@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = { + {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13}, + {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14}, + {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15}, +- {}, ++ {} + }; + + static const struct pmc_bit_map spt_pfear_map[] = { +@@ -113,7 +113,7 @@ static const struct pmc_bit_map spt_pfear_map[] = { + {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1}, + {"CSME_RTC", SPT_PMC_BIT_CSME_RTC}, + {"CSME_PSF", SPT_PMC_BIT_CSME_PSF}, +- {}, ++ {} + }; + + static const struct pmc_bit_map spt_ltr_show_map[] = { +@@ -299,7 +299,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = { + cnp_slps0_dbg0_map, + cnp_slps0_dbg1_map, + cnp_slps0_dbg2_map, +- NULL, ++ NULL + }; + + static const struct pmc_bit_map cnp_ltr_show_map[] = { +@@ -820,7 +820,7 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids); + + static const struct pci_device_id pmc_pci_ids[] = { + { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0}, +- { 0, }, ++ { 0, } + }; + + /* +-- +2.17.1 + diff --git a/patches/0007-ASoC-Intel-Skylake-Add-FIRMWARE_CONFIG-IPC-request.audio b/patches/0007-ASoC-Intel-Skylake-Add-FIRMWARE_CONFIG-IPC-request.audio new file mode 100644 index 0000000000..f599c5edde --- /dev/null +++ 
b/patches/0007-ASoC-Intel-Skylake-Add-FIRMWARE_CONFIG-IPC-request.audio @@ -0,0 +1,266 @@ +From 956dd149f7e42361a9443f1f319cae3d43283146 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Fri, 22 Mar 2019 14:19:49 +0100 +Subject: [PATCH 007/193] ASoC: Intel: Skylake: Add FIRMWARE_CONFIG IPC request + +Implement interface for retrieving firmware configuration. Skylake +driver will use this data instead of hardcoded values in updates to +come. + +Most params are currently unused. In time driver dependency on fw config +will increase, and with it, more parsing will be unveiled. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-sst-ipc.c | 122 ++++++++++++++++++++++++++ + sound/soc/intel/skylake/skl-sst-ipc.h | 72 +++++++++++++++ + sound/soc/intel/skylake/skl.h | 1 + + 3 files changed, 195 insertions(+) + +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c +index 667cdddc289f..e9e11ec4c97b 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.c ++++ b/sound/soc/intel/skylake/skl-sst-ipc.c +@@ -11,6 +11,7 @@ + #include "skl.h" + #include "skl-sst-dsp.h" + #include "skl-sst-ipc.h" ++#include "skl-topology.h" + #include "sound/hdaudio_ext.h" + + +@@ -1067,3 +1068,124 @@ int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc, struct skl_ipc_d0ix_msg *msg) + return ret; + } + EXPORT_SYMBOL_GPL(skl_ipc_set_d0ix); ++ ++int skl_ipc_fw_cfg_get(struct sst_generic_ipc *ipc, struct skl_fw_cfg *cfg) ++{ ++ struct skl_ipc_large_config_msg msg = {0}; ++ struct skl_tlv *tlv; ++ size_t bytes = 0, offset = 0; ++ u8 *payload = NULL; ++ int ret; ++ ++ msg.module_id = 0; ++ msg.instance_id = 0; ++ msg.large_param_id = SKL_BASEFW_FIRMWARE_CONFIG; ++ ++ ret = skl_ipc_get_large_config(ipc, &msg, (u32 **)&payload, &bytes); ++ if (ret) ++ goto exit; ++ ++ while (offset < bytes) { ++ tlv = (struct skl_tlv *)(payload + offset); ++ ++ switch (tlv->type) { ++ case SKL_FW_CFG_FW_VERSION: ++ memcpy(&cfg->fw_version, tlv->value, ++ 
sizeof(cfg->fw_version)); ++ break; ++ ++ case SKL_FW_CFG_MEMORY_RECLAIMED: ++ cfg->memory_reclaimed = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_SLOW_CLOCK_FREQ_HZ: ++ cfg->slow_clock_freq_hz = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_FAST_CLOCK_FREQ_HZ: ++ cfg->fast_clock_freq_hz = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_ALH_SUPPORT_LEVEL: ++ cfg->alh_support = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_IPC_DL_MAILBOX_BYTES: ++ cfg->ipc_dl_mailbox_bytes = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_IPC_UL_MAILBOX_BYTES: ++ cfg->ipc_ul_mailbox_bytes = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_TRACE_LOG_BYTES: ++ cfg->trace_log_bytes = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_MAX_PPL_COUNT: ++ cfg->max_ppl_count = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_MAX_ASTATE_COUNT: ++ cfg->max_astate_count = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_MAX_MODULE_PIN_COUNT: ++ cfg->max_module_pin_count = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_MODULES_COUNT: ++ cfg->modules_count = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_MAX_MOD_INST_COUNT: ++ cfg->max_mod_inst_count = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT: ++ cfg->max_ll_tasks_per_pri_count = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_LL_PRI_COUNT: ++ cfg->ll_pri_count = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_MAX_DP_TASKS_COUNT: ++ cfg->max_dp_tasks_count = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_MAX_LIBS_COUNT: ++ cfg->max_libs_count = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_XTAL_FREQ_HZ: ++ cfg->xtal_freq_hz = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_UAOL_SUPPORT: ++ cfg->uaol_support = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_POWER_GATING_POLICY: ++ cfg->power_gating_policy = *tlv->value; ++ break; ++ ++ case SKL_FW_CFG_DMA_BUFFER_CONFIG: ++ case SKL_FW_CFG_SCHEDULER_CONFIG: ++ case SKL_FW_CFG_CLOCKS_CONFIG: ++ break; ++ ++ default: ++ dev_info(ipc->dev, "Unrecognized fw param: %d\n", ++ tlv->type); ++ break; ++ } ++ ++ offset 
+= sizeof(*tlv) + tlv->length; ++ } ++ ++exit: ++ kfree(payload); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(skl_ipc_fw_cfg_get); +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h +index 8c4fa5e5f7de..5dfd6317ff4b 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.h ++++ b/sound/soc/intel/skylake/skl-sst-ipc.h +@@ -132,6 +132,12 @@ struct skl_base_outfmt_cfg { + struct skl_audio_data_format out_fmt; + } __packed; + ++struct skl_tlv { ++ u32 type; ++ u32 length; ++ u8 value[0]; ++}; ++ + enum skl_ipc_pipeline_state { + PPL_INVALID_STATE = 0, + PPL_UNINITIALIZED = 1, +@@ -187,6 +193,69 @@ struct skl_lib_info { + enum skl_basefw_runtime_param { + SKL_BASEFW_ASTATE_TABLE = 4, + SKL_BASEFW_DMA_CONTROL = 5, ++ SKL_BASEFW_FIRMWARE_CONFIG = 7, ++}; ++ ++enum skl_fw_cfg_params { ++ SKL_FW_CFG_FW_VERSION = 0, ++ SKL_FW_CFG_MEMORY_RECLAIMED, ++ SKL_FW_CFG_SLOW_CLOCK_FREQ_HZ, ++ SKL_FW_CFG_FAST_CLOCK_FREQ_HZ, ++ SKL_FW_CFG_DMA_BUFFER_CONFIG, ++ SKL_FW_CFG_ALH_SUPPORT_LEVEL, ++ SKL_FW_CFG_IPC_DL_MAILBOX_BYTES, ++ SKL_FW_CFG_IPC_UL_MAILBOX_BYTES, ++ SKL_FW_CFG_TRACE_LOG_BYTES, ++ SKL_FW_CFG_MAX_PPL_COUNT, ++ SKL_FW_CFG_MAX_ASTATE_COUNT, ++ SKL_FW_CFG_MAX_MODULE_PIN_COUNT, ++ SKL_FW_CFG_MODULES_COUNT, ++ SKL_FW_CFG_MAX_MOD_INST_COUNT, ++ SKL_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT, ++ SKL_FW_CFG_LL_PRI_COUNT, ++ SKL_FW_CFG_MAX_DP_TASKS_COUNT, ++ SKL_FW_CFG_MAX_LIBS_COUNT, ++ SKL_FW_CFG_SCHEDULER_CONFIG, ++ SKL_FW_CFG_XTAL_FREQ_HZ, ++ SKL_FW_CFG_CLOCKS_CONFIG, ++ SKL_FW_CFG_UAOL_SUPPORT, ++ SKL_FW_CFG_POWER_GATING_POLICY, ++ SKL_FW_CFG_ASSERT_MODE, ++}; ++ ++struct skl_fw_version { ++ u16 major; ++ u16 minor; ++ u16 hotfix; ++ u16 build; ++}; ++ ++enum skl_alh_support_level { ++ ALH_NO_SUPPORT = 0x00000, ++ ALH_CAVS_1_8_CNL = 0x10000, ++}; ++ ++struct skl_fw_cfg { ++ struct skl_fw_version fw_version; ++ u32 memory_reclaimed; ++ u32 slow_clock_freq_hz; ++ u32 fast_clock_freq_hz; ++ enum skl_alh_support_level alh_support; ++ u32 ipc_dl_mailbox_bytes; 
++ u32 ipc_ul_mailbox_bytes; ++ u32 trace_log_bytes; ++ u32 max_ppl_count; ++ u32 max_astate_count; ++ u32 max_module_pin_count; ++ u32 modules_count; ++ u32 max_mod_inst_count; ++ u32 max_ll_tasks_per_pri_count; ++ u32 ll_pri_count; ++ u32 max_dp_tasks_count; ++ u32 max_libs_count; ++ u32 xtal_freq_hz; ++ u32 uaol_support; ++ u32 power_gating_policy; + }; + + struct skl_ipc_init_instance_msg { +@@ -288,4 +357,7 @@ int skl_ipc_process_notification(struct sst_generic_ipc *ipc, + struct skl_ipc_header header); + void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data, + size_t tx_size); ++ ++int skl_ipc_fw_cfg_get(struct sst_generic_ipc *ipc, struct skl_fw_cfg *cfg); ++ + #endif /* __SKL_IPC_H */ +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index 6379ac571fb0..22bfc6b8bc7b 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -109,6 +109,7 @@ struct skl_dev { + + /* Populate module information */ + struct list_head module_list; ++ struct skl_fw_cfg fw_cfg; + + /* Is firmware loaded */ + bool fw_loaded; +-- +2.17.1 + diff --git a/patches/0007-PTP-Add-support-for-Intel-PMC-Timed-GPIO-Contr.felipeb-5.4 b/patches/0007-PTP-Add-support-for-Intel-PMC-Timed-GPIO-Contr.felipeb-5.4 new file mode 100644 index 0000000000..d85f28e6a8 --- /dev/null +++ b/patches/0007-PTP-Add-support-for-Intel-PMC-Timed-GPIO-Contr.felipeb-5.4 @@ -0,0 +1,494 @@ +From 402c92da7bb0af4917ffa1c4836e86b224f4fb9d Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Tue, 12 Feb 2019 10:17:55 +0200 +Subject: [PATCH 07/14] PTP: Add support for Intel PMC Timed GPIO Controller + +Add a driver supporting Intel Timed GPIO controller available as part +of some Intel PMCs. 
+ +Signed-off-by: Felipe Balbi +--- + drivers/ptp/Kconfig | 8 + + drivers/ptp/Makefile | 1 + + drivers/ptp/ptp-intel-pmc-tgpio.c | 438 ++++++++++++++++++++++++++++++ + 3 files changed, 447 insertions(+) + create mode 100644 drivers/ptp/ptp-intel-pmc-tgpio.c + +diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig +index 0517272a268e..c0e6e9672750 100644 +--- a/drivers/ptp/Kconfig ++++ b/drivers/ptp/Kconfig +@@ -107,6 +107,14 @@ config PTP_1588_CLOCK_PCH + To compile this driver as a module, choose M here: the module + will be called ptp_pch. + ++config PTP_INTEL_PMC_TGPIO ++ tristate "Intel PMC Timed GPIO" ++ depends on X86 ++ depends on ACPI ++ imply PTP_1588_CLOCK ++ help ++ This driver adds support for Intel PMC Timed GPIO Controller ++ + config PTP_1588_CLOCK_KVM + tristate "KVM virtual PTP clock" + depends on PTP_1588_CLOCK +diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile +index 677d1d178a3e..ff89c90ace82 100644 +--- a/drivers/ptp/Makefile ++++ b/drivers/ptp/Makefile +@@ -7,6 +7,7 @@ ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o + obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o + obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o + obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o ++obj-$(CONFIG_PTP_INTEL_PMC_TGPIO) += ptp-intel-pmc-tgpio.o + obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o + obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o + obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o +diff --git a/drivers/ptp/ptp-intel-pmc-tgpio.c b/drivers/ptp/ptp-intel-pmc-tgpio.c +new file mode 100644 +index 000000000000..571c6604299e +--- /dev/null ++++ b/drivers/ptp/ptp-intel-pmc-tgpio.c +@@ -0,0 +1,438 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Intel Timed GPIO Controller Driver ++ * ++ * Copyright (C) 2018 Intel Corporation ++ * Author: Felipe Balbi ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define TGPIOCTL 0x00 ++#define TGPIOCOMPV31_0 0x10 ++#define TGPIOCOMPV63_32 
0x14 ++#define TGPIOPIV31_0 0x18 ++#define TGPIOPIV63_32 0x1c ++#define TGPIOTCV31_0 0x20 ++#define TGPIOTCV63_32 0x24 ++#define TGPIOECCV31_0 0x28 ++#define TGPIOECCV63_32 0x2c ++#define TGPIOEC31_0 0x30 ++#define TGPIOEC63_32 0x34 ++ ++/* Control Register */ ++#define TGPIOCTL_EN BIT(0) ++#define TGPIOCTL_DIR BIT(1) ++#define TGPIOCTL_EP GENMASK(3, 2) ++#define TGPIOCTL_EP_RISING_EDGE (0 << 2) ++#define TGPIOCTL_EP_FALLING_EDGE (1 << 2) ++#define TGPIOCTL_EP_TOGGLE_EDGE (2 << 2) ++#define TGPIOCTL_PM BIT(4) ++ ++#define NSECS_PER_SEC 1000000000 ++#define TGPIO_MAX_ADJ_TIME 999999900 ++ ++struct intel_pmc_tgpio { ++ struct ptp_clock_info info; ++ struct ptp_clock *clock; ++ struct dentry *root; ++ struct debugfs_regset32 *regset; ++ ++ struct mutex lock; ++ struct device *dev; ++ void __iomem *base; ++ ++ struct task_struct *event_thread; ++ bool input; ++}; ++#define to_intel_pmc_tgpio(i) (container_of((i), struct intel_pmc_tgpio, info)) ++ ++static const struct debugfs_reg32 intel_pmc_tgpio_regs[] = { ++ { ++ .name = "TGPIOCTL", ++ .offset = TGPIOCTL ++ }, ++ { ++ .name = "TGPIOCOMPV31_0", ++ .offset = TGPIOCOMPV31_0 ++ }, ++ { ++ .name = "TGPIOCOMPV63_32", ++ .offset = TGPIOCOMPV63_32 ++ }, ++ { ++ .name = "TGPIOPIV31_0", ++ .offset = TGPIOPIV31_0 ++ }, ++ { ++ .name = "TGPIOPIV63_32", ++ .offset = TGPIOPIV63_32 ++ }, ++ { ++ .name = "TGPIOTCV31_0", ++ .offset = TGPIOTCV31_0 ++ }, ++ { ++ .name = "TGPIOTCV63_32", ++ .offset = TGPIOTCV63_32 ++ }, ++ { ++ .name = "TGPIOECCV31_0", ++ .offset = TGPIOECCV31_0 ++ }, ++ { ++ .name = "TGPIOECCV63_32", ++ .offset = TGPIOECCV63_32 ++ }, ++ { ++ .name = "TGPIOEC31_0", ++ .offset = TGPIOEC31_0 ++ }, ++ { ++ .name = "TGPIOEC63_32", ++ .offset = TGPIOEC63_32 ++ }, ++}; ++ ++static inline u64 intel_pmc_tgpio_readq(void __iomem *base, u32 offset) ++{ ++ return lo_hi_readq(base + offset); ++} ++ ++static inline void intel_pmc_tgpio_writeq(void __iomem *base, u32 offset, u64 v) ++{ ++ return lo_hi_writeq(v, base + offset); ++} 
++ ++static inline u32 intel_pmc_tgpio_readl(void __iomem *base, u32 offset) ++{ ++ return readl(base + offset); ++} ++ ++static inline void intel_pmc_tgpio_writel(void __iomem *base, u32 offset, u32 value) ++{ ++ writel(value, base + offset); ++} ++ ++static struct ptp_pin_desc intel_pmc_tgpio_pin_config[] = { ++ { \ ++ .name = "pin0", \ ++ .index = 0, \ ++ .func = PTP_PF_NONE, \ ++ .chan = 0, \ ++ } ++}; ++ ++static int intel_pmc_tgpio_gettime64(struct ptp_clock_info *info, ++ struct timespec64 *ts) ++{ ++ struct intel_pmc_tgpio *tgpio = to_intel_pmc_tgpio(info); ++ u64 now; ++ ++ mutex_lock(&tgpio->lock); ++ now = get_art_ns_now(); ++ *ts = ns_to_timespec64(now); ++ mutex_unlock(&tgpio->lock); ++ ++ return 0; ++} ++ ++static int intel_pmc_tgpio_settime64(struct ptp_clock_info *info, ++ const struct timespec64 *ts) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static int intel_pmc_tgpio_event_thread(void *_tgpio) ++{ ++ struct intel_pmc_tgpio *tgpio = _tgpio; ++ u64 reg; ++ ++ while (!kthread_should_stop()) { ++ bool input; ++ int i; ++ ++ mutex_lock(&tgpio->lock); ++ input = tgpio->input; ++ mutex_unlock(&tgpio->lock); ++ ++ if (!input) ++ schedule(); ++ ++ reg = intel_pmc_tgpio_readq(tgpio->base, TGPIOEC31_0); ++ ++ for (i = 0; i < reg; i++) { ++ struct ptp_clock_event event; ++ ++ event.type = PTP_CLOCK_EXTTS; ++ event.index = 0; ++ event.timestamp = intel_pmc_tgpio_readq(tgpio->base, ++ TGPIOTCV31_0); ++ ++ ptp_clock_event(tgpio->clock, &event); ++ } ++ schedule_timeout_interruptible(10); ++ } ++ ++ return 0; ++} ++ ++static int intel_pmc_tgpio_config_input(struct intel_pmc_tgpio *tgpio, ++ struct ptp_extts_request *extts, int on) ++{ ++ u32 ctrl; ++ bool input; ++ ++ ctrl = intel_pmc_tgpio_readl(tgpio->base, TGPIOCTL); ++ ctrl &= ~TGPIOCTL_EN; ++ intel_pmc_tgpio_writel(tgpio->base, TGPIOCTL, ctrl); ++ ++ if (on) { ++ ctrl |= TGPIOCTL_DIR; ++ ++ if (extts->flags & PTP_RISING_EDGE && ++ extts->flags & PTP_FALLING_EDGE) ++ ctrl |= TGPIOCTL_EP_TOGGLE_EDGE; ++ else if 
(extts->flags & PTP_RISING_EDGE) ++ ctrl |= TGPIOCTL_EP_RISING_EDGE; ++ else if (extts->flags & PTP_FALLING_EDGE) ++ ctrl |= TGPIOCTL_EP_FALLING_EDGE; ++ ++ /* gotta program all other bits before EN bit is set */ ++ intel_pmc_tgpio_writel(tgpio->base, TGPIOCTL, ctrl); ++ ctrl |= TGPIOCTL_EN; ++ input = true; ++ } else { ++ ctrl &= ~(TGPIOCTL_DIR | TGPIOCTL_EN); ++ input = false; ++ } ++ ++ intel_pmc_tgpio_writel(tgpio->base, TGPIOCTL, ctrl); ++ tgpio->input = input; ++ ++ if (input) ++ wake_up_process(tgpio->event_thread); ++ ++ return 0; ++} ++ ++static int intel_pmc_tgpio_config_output(struct intel_pmc_tgpio *tgpio, ++ struct ptp_perout_request *perout, int on) ++{ ++ u32 ctrl; ++ ++ ctrl = intel_pmc_tgpio_readl(tgpio->base, TGPIOCTL); ++ if (on) { ++ struct ptp_clock_time *period = &perout->period; ++ struct ptp_clock_time *start = &perout->start; ++ ++ intel_pmc_tgpio_writel(tgpio->base, TGPIOCOMPV63_32, ++ start->sec); ++ intel_pmc_tgpio_writel(tgpio->base, TGPIOCOMPV31_0, ++ start->nsec); ++ ++ intel_pmc_tgpio_writeq(tgpio->base, TGPIOPIV63_32, ++ period->sec); ++ intel_pmc_tgpio_writeq(tgpio->base, TGPIOPIV31_0, ++ period->nsec); ++ ++ ctrl &= ~TGPIOCTL_DIR; ++ if (perout->flags & PTP_PEROUT_ONE_SHOT) ++ ctrl &= ~TGPIOCTL_PM; ++ else ++ ctrl |= TGPIOCTL_PM; ++ ++ /* gotta program all other bits before EN bit is set */ ++ intel_pmc_tgpio_writel(tgpio->base, TGPIOCTL, ctrl); ++ ++ ctrl |= TGPIOCTL_EN; ++ intel_pmc_tgpio_writel(tgpio->base, TGPIOCTL, ctrl); ++ } else { ++ ctrl &= ~(TGPIOCTL_EN | TGPIOCTL_PM); ++ intel_pmc_tgpio_writel(tgpio->base, TGPIOCTL, ctrl); ++ } ++ ++ return 0; ++} ++ ++static int intel_pmc_tgpio_enable(struct ptp_clock_info *info, ++ struct ptp_clock_request *req, int on) ++{ ++ struct intel_pmc_tgpio *tgpio = to_intel_pmc_tgpio(info); ++ int ret = -EOPNOTSUPP; ++ ++ mutex_lock(&tgpio->lock); ++ switch (req->type) { ++ case PTP_CLK_REQ_EXTTS: ++ ret = intel_pmc_tgpio_config_input(tgpio, &req->extts, on); ++ break; ++ case 
PTP_CLK_REQ_PEROUT: ++ ret = intel_pmc_tgpio_config_output(tgpio, &req->perout, on); ++ break; ++ default: ++ break; ++ } ++ mutex_unlock(&tgpio->lock); ++ ++ return ret; ++} ++ ++static int intel_pmc_tgpio_get_time_fn(ktime_t *device_time, ++ struct system_counterval_t *system_counter, void *_tgpio) ++{ ++ get_tsc_ns(system_counter, device_time); ++ return 0; ++} ++ ++static int intel_pmc_tgpio_getcrosststamp(struct ptp_clock_info *info, ++ struct system_device_crosststamp *cts) ++{ ++ struct intel_pmc_tgpio *tgpio = to_intel_pmc_tgpio(info); ++ ++ return get_device_system_crosststamp(intel_pmc_tgpio_get_time_fn, tgpio, ++ NULL, cts); ++} ++ ++static int intel_pmc_tgpio_counttstamp(struct ptp_clock_info *info, ++ struct ptp_event_count_tstamp *count) ++{ ++ struct intel_pmc_tgpio *tgpio = to_intel_pmc_tgpio(info); ++ u32 dt_hi_tmp; ++ u32 dt_hi; ++ u32 dt_lo; ++ ++ dt_hi_tmp = intel_pmc_tgpio_readl(tgpio->base, TGPIOTCV63_32); ++ dt_lo = intel_pmc_tgpio_readl(tgpio->base, TGPIOTCV31_0); ++ ++ count->event_count = intel_pmc_tgpio_readl(tgpio->base, TGPIOECCV63_32); ++ count->event_count <<= 32; ++ count->event_count |= intel_pmc_tgpio_readl(tgpio->base, TGPIOECCV31_0); ++ ++ dt_hi = intel_pmc_tgpio_readl(tgpio->base, TGPIOTCV63_32); ++ ++ if (dt_hi_tmp != dt_hi && dt_lo & 0x80000000) ++ count->device_time.sec = dt_hi_tmp; ++ else ++ count->device_time.sec = dt_hi; ++ ++ count->device_time.nsec = dt_lo; ++ ++ return 0; ++} ++ ++static int intel_pmc_tgpio_verify(struct ptp_clock_info *ptp, unsigned int pin, ++ enum ptp_pin_function func, unsigned int chan) ++{ ++ return 0; ++} ++ ++static const struct ptp_clock_info intel_pmc_tgpio_info = { ++ .owner = THIS_MODULE, ++ .name = "Intel PMC TGPIO", ++ .max_adj = 50000000, ++ .n_pins = 1, ++ .n_ext_ts = 1, ++ .n_per_out = 1, ++ .pin_config = intel_pmc_tgpio_pin_config, ++ .gettime64 = intel_pmc_tgpio_gettime64, ++ .settime64 = intel_pmc_tgpio_settime64, ++ .enable = intel_pmc_tgpio_enable, ++ .getcrosststamp = 
intel_pmc_tgpio_getcrosststamp, ++ .counttstamp = intel_pmc_tgpio_counttstamp, ++ .verify = intel_pmc_tgpio_verify, ++}; ++ ++static int intel_pmc_tgpio_probe(struct platform_device *pdev) ++{ ++ struct intel_pmc_tgpio *tgpio; ++ struct device *dev; ++ struct resource *res; ++ ++ dev = &pdev->dev; ++ tgpio = devm_kzalloc(dev, sizeof(*tgpio), GFP_KERNEL); ++ if (!tgpio) ++ return -ENOMEM; ++ ++ tgpio->dev = dev; ++ tgpio->info = intel_pmc_tgpio_info; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ tgpio->base = devm_ioremap_resource(dev, res); ++ if (!tgpio->base) ++ return -ENOMEM; ++ ++ tgpio->regset = devm_kzalloc(dev, sizeof(*tgpio->regset), GFP_KERNEL); ++ if (!tgpio->regset) ++ return -ENOMEM; ++ ++ tgpio->regset->regs = intel_pmc_tgpio_regs; ++ tgpio->regset->nregs = ARRAY_SIZE(intel_pmc_tgpio_regs); ++ tgpio->regset->base = tgpio->base; ++ ++ tgpio->root = debugfs_create_dir(dev_name(dev), NULL); ++ debugfs_create_regset32("regdump", S_IRUGO, tgpio->root, tgpio->regset); ++ ++ mutex_init(&tgpio->lock); ++ platform_set_drvdata(pdev, tgpio); ++ ++ tgpio->event_thread = kthread_create(intel_pmc_tgpio_event_thread, ++ tgpio, dev_name(tgpio->dev)); ++ if (IS_ERR(tgpio->event_thread)) ++ return PTR_ERR(tgpio->event_thread); ++ ++ tgpio->clock = ptp_clock_register(&tgpio->info, &pdev->dev); ++ if (IS_ERR(tgpio->clock)) ++ return PTR_ERR(tgpio->clock); ++ ++ wake_up_process(tgpio->event_thread); ++ ++ return 0; ++} ++ ++static int intel_pmc_tgpio_remove(struct platform_device *pdev) ++{ ++ struct intel_pmc_tgpio *tgpio = platform_get_drvdata(pdev); ++ ++ debugfs_remove_recursive(tgpio->root); ++ ptp_clock_unregister(tgpio->clock); ++ ++ return 0; ++} ++ ++static const struct acpi_device_id intel_pmc_acpi_match[] = { ++ { "INTC1021", 0 }, /* EHL */ ++ { "INTC1022", 0 }, /* EHL */ ++ { "INTC1023", 0 }, /* TGL */ ++ { "INTC1024", 0 }, /* TGL */ ++ { }, ++}; ++ ++MODULE_ALIAS("acpi*:INTC1021:*"); ++MODULE_ALIAS("acpi*:INTC1022:*"); 
++MODULE_ALIAS("acpi*:INTC1023:*"); ++MODULE_ALIAS("acpi*:INTC1024:*"); ++ ++static struct platform_driver intel_pmc_tgpio_driver = { ++ .probe = intel_pmc_tgpio_probe, ++ .remove = intel_pmc_tgpio_remove, ++ .driver = { ++ .name = "intel-pmc-tgpio", ++ .acpi_match_table = ACPI_PTR(intel_pmc_acpi_match), ++ }, ++}; ++ ++module_platform_driver(intel_pmc_tgpio_driver); ++ ++MODULE_AUTHOR("Felipe Balbi "); ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Intel PMC Timed GPIO Controller Driver"); +-- +2.17.1 + diff --git a/patches/0007-TO-BE-FOLDED-pinctrl-tigerlake-Update-pin-list-accord.lpss b/patches/0007-TO-BE-FOLDED-pinctrl-tigerlake-Update-pin-list-accord.lpss new file mode 100644 index 0000000000..1caabebf9a --- /dev/null +++ b/patches/0007-TO-BE-FOLDED-pinctrl-tigerlake-Update-pin-list-accord.lpss @@ -0,0 +1,44 @@ +From 3f4132eed18f22aed10937d03d4e18ee7f1c7386 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Tue, 9 Jul 2019 19:47:13 +0300 +Subject: [PATCH 07/40] TO BE FOLDED: pinctrl: tigerlake: Update pin list + according to K v1.0 + +Signed-off-by: Andy Shevchenko +--- + drivers/pinctrl/intel/pinctrl-tigerlake.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c +index 93796395a41d..b8f0414d0c73 100644 +--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c ++++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c +@@ -818,10 +818,10 @@ static const struct pinctrl_pin_desc tglk_pins[] = { + PINCTRL_PIN(62, "TBT_LSX2_B"), + PINCTRL_PIN(63, "TBT_LSX3_A"), + PINCTRL_PIN(64, "TBT_LSX3_B"), +- PINCTRL_PIN(65, "TBT_LSX4_A"), +- PINCTRL_PIN(66, "TBT_LSX4_B"), +- PINCTRL_PIN(67, "TBT_LSX5_A"), +- PINCTRL_PIN(68, "TBT_LSX5_B"), ++ PINCTRL_PIN(65, "GPP_C_17"), ++ PINCTRL_PIN(66, "GPP_C_18"), ++ PINCTRL_PIN(67, "GPP_C_19"), ++ PINCTRL_PIN(68, "GPP_C_20"), + PINCTRL_PIN(69, "PCIE_LNK_DOWN"), + PINCTRL_PIN(70, "PCHHOTB"), + PINCTRL_PIN(71, "DNX_FORCE_RELOAD"), +@@ 
-835,8 +835,8 @@ static const struct pinctrl_pin_desc tglk_pins[] = { + PINCTRL_PIN(78, "GPP_D_6"), + PINCTRL_PIN(79, "GPP_D_7"), + PINCTRL_PIN(80, "GPP_D_8"), +- PINCTRL_PIN(81, "GPP_D_9"), +- PINCTRL_PIN(82, "GPP_D_10"), ++ PINCTRL_PIN(81, "HVM_HDA_SDI"), ++ PINCTRL_PIN(82, "HVM_HDA_SDO"), + PINCTRL_PIN(83, "GPP_D_11"), + PINCTRL_PIN(84, "GPP_D_12"), + PINCTRL_PIN(85, "GPP_D_13"), +-- +2.17.1 + diff --git a/patches/0007-VHM-add-API-to-get-vm-info.acrn b/patches/0007-VHM-add-API-to-get-vm-info.acrn new file mode 100644 index 0000000000..0c7e0ae54d --- /dev/null +++ b/patches/0007-VHM-add-API-to-get-vm-info.acrn @@ -0,0 +1,105 @@ +From 1e177fb05270a7d2cce91333daf223991cb3efbb Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:55 +0800 +Subject: [PATCH 007/150] VHM: add API to get vm info + +Added API vhm_get_vm_info: get guest vm's max_vcpu & max_gfn + +Change-Id: Ibe668c75e893092a1e5ea824aa09d9b65825fabb +Tracked-On: 218445 +Signed-off-by: Jason Chen CJ +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/vhm/vhm_mm.c | 9 +++++++-- + drivers/vhm/vhm_vm_mngt.c | 17 +++++++++++++++++ + include/linux/vhm/vhm_vm_mngt.h | 7 +++++++ + 3 files changed, 31 insertions(+), 2 deletions(-) + +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index ea7604b19aaf..61ebb8c508d2 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -117,6 +117,7 @@ int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) + { + struct guest_memseg *seg; + u64 base; ++ int max_gfn; + + seg = kzalloc(sizeof(struct guest_memseg), GFP_KERNEL); + if (seg == NULL) +@@ -134,9 +135,13 @@ int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) + strncpy(seg->name, memseg->name, SPECNAMELEN + 1); + seg->gpa = memseg->gpa; + ++ max_gfn = (seg->gpa + seg->len) >> PAGE_SHIFT; ++ if (vm->max_gfn < max_gfn) ++ vm->max_gfn = max_gfn; ++ + pr_info("VHM: alloc memseg[%s] with len=0x%lx, 
base=0x%llx," +- " and its guest gpa = 0x%llx\n", +- seg->name, seg->len, seg->base, seg->gpa); ++ " and its guest gpa = 0x%llx, vm max_gfn 0x%x\n", ++ seg->name, seg->len, seg->base, seg->gpa, vm->max_gfn); + + seg->vma_count = 0; + mutex_lock(&vm->seg_lock); +diff --git a/drivers/vhm/vhm_vm_mngt.c b/drivers/vhm/vhm_vm_mngt.c +index 048ab41f4f9c..d1aa4ba1a4f0 100644 +--- a/drivers/vhm/vhm_vm_mngt.c ++++ b/drivers/vhm/vhm_vm_mngt.c +@@ -95,6 +95,23 @@ void put_vm(struct vhm_vm *vm) + mutex_unlock(&vhm_vm_list_lock); + } + ++int vhm_get_vm_info(unsigned long vmid, struct vm_info *info) ++{ ++ struct vhm_vm *vm; ++ ++ vm = find_get_vm(vmid); ++ if (unlikely(vm == NULL)) { ++ pr_err("vhm: failed to find vm from vmid %ld\n", ++ vmid); ++ return -EINVAL; ++ } ++ /*TODO: hardcode max_vcpu here, should be fixed by getting at runtime */ ++ info->max_vcpu = 4; ++ info->max_gfn = vm->max_gfn; ++ put_vm(vm); ++ return 0; ++} ++ + int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, + unsigned long msi_data) + { +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index fb02c00ec5e2..77c21c4bba7a 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -69,14 +69,21 @@ struct vhm_vm { + long refcnt; + struct mutex seg_lock; + struct list_head memseg_list; ++ int max_gfn; + spinlock_t ioreq_client_lock; + struct list_head ioreq_client_list; + struct vhm_request_buffer *req_buf; + struct page *pg; + }; + ++struct vm_info { ++ int max_vcpu; ++ int max_gfn; ++}; ++ + struct vhm_vm *find_get_vm(unsigned long vmid); + void put_vm(struct vhm_vm *vm); ++int vhm_get_vm_info(unsigned long vmid, struct vm_info *info); + int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, + unsigned long msi_data); + +-- +2.17.1 + diff --git a/patches/0007-drm-i915-switch-to-drm_fb_helper_remove_conflicting_pc.drm b/patches/0007-drm-i915-switch-to-drm_fb_helper_remove_conflicting_pc.drm new file mode 100644 index 
0000000000..b3bdf10a41 --- /dev/null +++ b/patches/0007-drm-i915-switch-to-drm_fb_helper_remove_conflicting_pc.drm @@ -0,0 +1,81 @@ +From c7031843043546547a6bcfc1e221a81431b77013 Mon Sep 17 00:00:00 2001 +From: Gerd Hoffmann +Date: Thu, 22 Aug 2019 11:06:45 +0200 +Subject: [PATCH 007/690] drm/i915: switch to + drm_fb_helper_remove_conflicting_pci_framebuffers + +No need for a home-grown version, the generic helper should work just +fine. It also handles vgacon removal these days, see commit +1c74ca7a1a9a ("drm/fb-helper: call vga_remove_vgacon automatically."), +so that can be removed too. + +Signed-off-by: Gerd Hoffmann +Reviewed-by: Daniel Vetter +Link: http://patchwork.freedesktop.org/patch/msgid/20190822090645.25410-4-kraxel@redhat.com +--- + drivers/gpu/drm/i915/i915_drv.c | 41 ++------------------------------- + 1 file changed, 2 insertions(+), 39 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c +index bb6f86c7067a..e080151c0696 100644 +--- a/drivers/gpu/drm/i915/i915_drv.c ++++ b/drivers/gpu/drm/i915/i915_drv.c +@@ -422,31 +422,6 @@ static int i915_driver_modeset_probe(struct drm_device *dev) + return ret; + } + +-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +-{ +- struct apertures_struct *ap; +- struct pci_dev *pdev = dev_priv->drm.pdev; +- struct i915_ggtt *ggtt = &dev_priv->ggtt; +- bool primary; +- int ret; +- +- ap = alloc_apertures(1); +- if (!ap) +- return -ENOMEM; +- +- ap->ranges[0].base = ggtt->gmadr.start; +- ap->ranges[0].size = ggtt->mappable_end; +- +- primary = +- pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; +- +- ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary); +- +- kfree(ap); +- +- return ret; +-} +- + static void intel_init_dpio(struct drm_i915_private *dev_priv) + { + /* +@@ -1249,21 +1224,9 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) + if (ret) + goto err_perf; + +- /* +- * WARNING: 
Apparently we must kick fbdev drivers before vgacon, +- * otherwise the vga fbdev driver falls over. +- */ +- ret = i915_kick_out_firmware_fb(dev_priv); +- if (ret) { +- DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); +- goto err_ggtt; +- } +- +- ret = vga_remove_vgacon(pdev); +- if (ret) { +- DRM_ERROR("failed to remove conflicting VGA console\n"); ++ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb"); ++ if (ret) + goto err_ggtt; +- } + + ret = i915_ggtt_init_hw(dev_priv); + if (ret) +-- +2.17.1 + diff --git a/patches/0007-phy-fix-phy_id-detection-for-C22-C45-that-var.connectivity b/patches/0007-phy-fix-phy_id-detection-for-C22-C45-that-var.connectivity new file mode 100644 index 0000000000..b4b185d06c --- /dev/null +++ b/patches/0007-phy-fix-phy_id-detection-for-C22-C45-that-var.connectivity @@ -0,0 +1,76 @@ +From 264f7dad2c42c723ed7100512ac2c59dec573135 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Thu, 4 Jul 2019 15:23:15 +0800 +Subject: [PATCH 007/108] phy: fix phy_id detection for C22 & C45 that varies + in check condition + +For C22, PHY ID may be all 0s and all 1s, so add the condition of all +0s back. + +For C45, get_phy_c45_ids() set all 1s to phy_id to indicate there is +no C45 devices and set all 0s to phy_id if there is one C45 device +present. We add extra logics to check for valid device ID (Not All 1s +and All 0s) and only set phy_id=0 if at least one valid device ID is +present. 
+ +Fixes: 6436cbcd735a phy: fix phy_id detection also for broken hardware + +Signed-off-by: Ong Boon Leong +--- + drivers/net/phy/phy_device.c | 25 +++++++++++++++++++++---- + 1 file changed, 21 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index adb66a2fae18..bbbe877707c8 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -708,6 +708,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, + int i, reg_addr; + const int num_ids = ARRAY_SIZE(c45_ids->device_ids); + u32 *devs = &c45_ids->devices_in_package; ++ u32 valid_did = 0; + + /* Find first non-zero Devices In package. Device zero is reserved + * for 802.3 c45 complied PHYs, so don't probe it at first. +@@ -752,8 +753,16 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, + if (phy_reg < 0) + return -EIO; + c45_ids->device_ids[i] |= phy_reg; ++ ++ if (c45_ids->device_ids[i] && ++ (c45_ids->device_ids[i] & 0x1fffffff) != 0x1fffffff) ++ valid_did |= (1 << i); + } +- *phy_id = 0; ++ if (valid_did) ++ *phy_id = 0; ++ else ++ *phy_id = 0xffffffff; ++ + return 0; + } + +@@ -820,9 +829,17 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) + if (r) + return ERR_PTR(r); + +- /* If the phy_id is mostly Fs, there is no device there */ +- if ((phy_id & 0x1fffffff) == 0x1fffffff) +- return ERR_PTR(-ENODEV); ++ /* For C45, get_phy_c45_ids() sets phy_id to all 1s to indicate ++ * there is no device there. However, for C22, phy_id read from ++ * PHY can be either all 1s or all 0s. 
++ */ ++ if (is_c45) { ++ if ((phy_id & 0x1fffffff) == 0x1fffffff) ++ return ERR_PTR(-ENODEV); ++ } else { ++ if ((phy_id & 0x1fffffff) == 0x1fffffff || phy_id == 0x0) ++ return ERR_PTR(-ENODEV); ++ } + + return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids); + } +-- +2.17.1 + diff --git a/patches/0007-platform-x86-sep-remove-type-field-for-access.sep-socwatch b/patches/0007-platform-x86-sep-remove-type-field-for-access.sep-socwatch new file mode 100644 index 0000000000..2fe38aaea7 --- /dev/null +++ b/patches/0007-platform-x86-sep-remove-type-field-for-access.sep-socwatch @@ -0,0 +1,39 @@ +From 4b3915e0ed4540e076f9277634ed1d3968a988a4 Mon Sep 17 00:00:00 2001 +From: Jon Moeller +Date: Thu, 10 Jan 2019 15:20:28 -0600 +Subject: [PATCH 07/27] platform/x86: sep remove type field for access_ok() in + kernel 5.0. + +Signed-off-by: Jon Moeller +--- + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +index bb53962d2695..f88d6fee9b83 100755 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -4902,11 +4902,20 @@ static OS_STATUS lwpmudrv_Samp_Find_Physical_Address(IOCTL_ARGS arg) + return OS_FAULT; + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) ++ /* 'type' field has been removed */ ++ if (!access_ok((void __user *)search_addr, ++ sizeof(CHIPSET_PCI_SEARCH_ADDR_NODE))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Access not OK!"); ++ return OS_FAULT; ++ } ++#else + if (!access_ok(VERIFY_WRITE, (void __user *)search_addr, + sizeof(CHIPSET_PCI_SEARCH_ADDR_NODE))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Access not OK!"); + return OS_FAULT; + } ++#endif + + if (copy_from_user(&user_addr, (void __user *)search_addr, + sizeof(CHIPSET_PCI_SEARCH_ADDR_NODE))) { +-- +2.17.1 + diff --git a/patches/0007-rpmb-add-sysfs-class-ABI-documentation.security 
b/patches/0007-rpmb-add-sysfs-class-ABI-documentation.security new file mode 100644 index 0000000000..e7fbe315ba --- /dev/null +++ b/patches/0007-rpmb-add-sysfs-class-ABI-documentation.security @@ -0,0 +1,65 @@ +From 40c533e6afa6ad75b894339d9ce882e81c18a85c Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Sun, 13 Mar 2016 13:36:52 +0200 +Subject: [PATCH 07/65] rpmb: add sysfs-class ABI documentation + +V2: resend +V3: add more verbose description +V4: resend +V5: adjust date and kernel version +V6: adjust date and kernel version +V7: adjust date and kernel version +V8: adjust date and kernel version +V9: adjust date and kernel version +V10: adjust date and kernel version + +Change-Id: I2d71ca467e5960ca93c904e92cfcf69591a3de59 +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +--- + Documentation/ABI/testing/sysfs-class-rpmb | 20 ++++++++++++++++++++ + MAINTAINERS | 1 + + 2 files changed, 21 insertions(+) + create mode 100644 Documentation/ABI/testing/sysfs-class-rpmb + +diff --git a/Documentation/ABI/testing/sysfs-class-rpmb b/Documentation/ABI/testing/sysfs-class-rpmb +new file mode 100644 +index 000000000000..a017b15eabb3 +--- /dev/null ++++ b/Documentation/ABI/testing/sysfs-class-rpmb +@@ -0,0 +1,20 @@ ++What: /sys/class/rpmb/ ++Date: May 2019 ++KernelVersion: 5.2 ++Contact: Tomas Winkler ++Description: ++ The rpmb/ class sub-directory belongs to RPMB device class. ++ ++ Few storage technologies such is EMMC, UFS, and NVMe support ++ Replay Protected Memory Block (RPMB) hardware partition with ++ common protocol and similar frame layout. ++ Such a partition provides authenticated and replay protected access, ++ hence suitable as a secure storage. ++ ++What: /sys/class/rpmb/rpmbN/ ++Date: May 2019 ++KernelVersion: 5.2 ++Contact: Tomas Winkler ++Description: ++ The /sys/class/rpmb/rpmbN directory is created for ++ each RPMB registered device. 
+diff --git a/MAINTAINERS b/MAINTAINERS +index a9bb0d4fe7ff..af01f3b1e00d 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -13982,6 +13982,7 @@ L: linux-kernel@vger.kernel.org + S: Supported + F: drivers/char/rpmb/* + F: include/linux/rpmb.h ++F: Documentation/ABI/testing/sysfs-class-rpmb + + RTL2830 MEDIA DRIVER + M: Antti Palosaari +-- +2.17.1 + diff --git a/patches/0007-trusty-fiq-arm64-Allow-multiple-fiq-handlers.trusty b/patches/0007-trusty-fiq-arm64-Allow-multiple-fiq-handlers.trusty new file mode 100644 index 0000000000..5fdb4e372f --- /dev/null +++ b/patches/0007-trusty-fiq-arm64-Allow-multiple-fiq-handlers.trusty @@ -0,0 +1,138 @@ +From 3792c07f840a8b2fe79df019f4cda6cb47baa686 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Fri, 2 May 2014 19:15:44 -0700 +Subject: [PATCH 07/63] trusty: fiq-arm64: Allow multiple fiq handlers. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +If multiple fiq handlers are reqistered call them all. There is +currently no api to remove handlers. 
+ +Change-Id: I1d4bd936081d690ea6f1ec0c041f43a5f7717733 +Signed-off-by: Arve Hjønnevåg +--- + drivers/trusty/trusty-fiq-arm64.c | 76 ++++++++++++++++++++++--------- + 1 file changed, 54 insertions(+), 22 deletions(-) + +diff --git a/drivers/trusty/trusty-fiq-arm64.c b/drivers/trusty/trusty-fiq-arm64.c +index df05a98f235d..8b9a40887587 100644 +--- a/drivers/trusty/trusty-fiq-arm64.c ++++ b/drivers/trusty/trusty-fiq-arm64.c +@@ -26,12 +26,19 @@ extern void trusty_fiq_glue_arm64(void); + + static struct device *trusty_dev; + static DEFINE_PER_CPU(void *, fiq_stack); +-static struct fiq_glue_handler *current_handler; ++static struct fiq_glue_handler *fiq_handlers; + static DEFINE_MUTEX(fiq_glue_lock); + + void trusty_fiq_handler(struct pt_regs *regs, void *svc_sp) + { +- current_handler->fiq(current_handler, regs, svc_sp); ++ struct fiq_glue_handler *handler; ++ ++ for (handler = ACCESS_ONCE(fiq_handlers); handler; ++ handler = ACCESS_ONCE(handler->next)) { ++ /* Barrier paired with smp_wmb in fiq_glue_register_handler */ ++ smp_read_barrier_depends(); ++ handler->fiq(handler, regs, svc_sp); ++ } + } + + static void smp_nop_call(void *info) +@@ -64,29 +71,13 @@ static void fiq_glue_clear_handler(void) + } + } + +-int fiq_glue_register_handler(struct fiq_glue_handler *handler) ++static int fiq_glue_set_handler(void) + { + int ret; + int cpu; + void *stack; + unsigned long irqflags; + +- if (!handler || !handler->fiq) +- return -EINVAL; +- +- mutex_lock(&fiq_glue_lock); +- +- if (!trusty_dev) { +- ret = -ENODEV; +- goto err_no_trusty; +- } +- if (current_handler) { +- ret = -EBUSY; +- goto err_busy; +- } +- +- current_handler = handler; +- + for_each_possible_cpu(cpu) { + stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); + if (WARN_ON(!stack)) { +@@ -109,16 +100,57 @@ int fiq_glue_register_handler(struct fiq_glue_handler *handler) + goto err_set_fiq_handler; + } + } ++ return 0; ++ ++err_alloc_fiq_stack: ++err_set_fiq_handler: ++ fiq_glue_clear_handler(); 
++ return ret; ++} ++ ++int fiq_glue_register_handler(struct fiq_glue_handler *handler) ++{ ++ int ret; ++ ++ if (!handler || !handler->fiq) { ++ ret = -EINVAL; ++ goto err_bad_arg; ++ } ++ ++ mutex_lock(&fiq_glue_lock); ++ ++ if (!trusty_dev) { ++ ret = -ENODEV; ++ goto err_no_trusty; ++ } ++ ++ handler->next = fiq_handlers; ++ /* ++ * Write barrier paired with smp_read_barrier_depends in ++ * trusty_fiq_handler. Make sure next pointer is updated before ++ * fiq_handlers so trusty_fiq_handler does not see an uninitialized ++ * value and terminate early or crash. ++ */ ++ smp_wmb(); ++ fiq_handlers = handler; ++ ++ smp_call_function(smp_nop_call, NULL, true); ++ ++ if (!handler->next) { ++ ret = fiq_glue_set_handler(); ++ if (ret) ++ goto err_set_fiq_handler; ++ } + + mutex_unlock(&fiq_glue_lock); + return 0; + + err_set_fiq_handler: +-err_alloc_fiq_stack: +- fiq_glue_clear_handler(); +-err_busy: ++ fiq_handlers = handler->next; + err_no_trusty: + mutex_unlock(&fiq_glue_lock); ++err_bad_arg: ++ pr_err("%s: failed, %d\n", __func__, ret); + return ret; + } + +-- +2.17.1 + diff --git a/patches/0007-usb-typec-Separate-the-operations-vector.usb-typec b/patches/0007-usb-typec-Separate-the-operations-vector.usb-typec new file mode 100644 index 0000000000..fc96a29244 --- /dev/null +++ b/patches/0007-usb-typec-Separate-the-operations-vector.usb-typec @@ -0,0 +1,241 @@ +From fcddff64270e0635ebfb39dff968ed29898b508d Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Tue, 1 Oct 2019 12:21:38 +0300 +Subject: [PATCH 07/18] usb: typec: Separate the operations vector + +Introducing struct typec_operations which has the same +callbacks as struct typec_capability. The old callbacks are +kept for now, but after all users have been converted, they +will be removed. 
+ +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/class.c | 90 +++++++++++++++++++++++++-------------- + include/linux/usb/typec.h | 19 +++++++++ + 2 files changed, 76 insertions(+), 33 deletions(-) + +diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c +index 9fab0be8f08c..542be63795db 100644 +--- a/drivers/usb/typec/class.c ++++ b/drivers/usb/typec/class.c +@@ -59,6 +59,7 @@ struct typec_port { + struct typec_mux *mux; + + const struct typec_capability *cap; ++ const struct typec_operations *ops; + }; + + #define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev) +@@ -961,11 +962,6 @@ preferred_role_store(struct device *dev, struct device_attribute *attr, + return -EOPNOTSUPP; + } + +- if (!port->cap->try_role) { +- dev_dbg(dev, "Setting preferred role not supported\n"); +- return -EOPNOTSUPP; +- } +- + role = sysfs_match_string(typec_roles, buf); + if (role < 0) { + if (sysfs_streq(buf, "none")) +@@ -974,9 +970,18 @@ preferred_role_store(struct device *dev, struct device_attribute *attr, + return -EINVAL; + } + +- ret = port->cap->try_role(port->cap, role); +- if (ret) +- return ret; ++ if (port->ops && port->ops->try_role) { ++ ret = port->ops->try_role(port, role); ++ if (ret) ++ return ret; ++ } else if (port->cap && port->cap->try_role) { ++ ret = port->cap->try_role(port->cap, role); ++ if (ret) ++ return ret; ++ } else { ++ dev_dbg(dev, "Setting preferred role not supported\n"); ++ return -EOPNOTSUPP; ++ } + + port->prefer_role = role; + return size; +@@ -1005,11 +1010,6 @@ static ssize_t data_role_store(struct device *dev, + struct typec_port *port = to_typec_port(dev); + int ret; + +- if (!port->cap->dr_set) { +- dev_dbg(dev, "data role swapping not supported\n"); +- return -EOPNOTSUPP; +- } +- + ret = sysfs_match_string(typec_data_roles, buf); + if (ret < 0) + return ret; +@@ -1020,9 +1020,19 @@ static ssize_t data_role_store(struct device *dev, + goto unlock_and_ret; + } + +- ret = port->cap->dr_set(port->cap, 
ret); +- if (ret) ++ if (port->ops && port->ops->dr_set) { ++ ret = port->ops->dr_set(port, ret); ++ if (ret) ++ goto unlock_and_ret; ++ } else if (port->cap && port->cap->dr_set) { ++ ret = port->cap->dr_set(port->cap, ret); ++ if (ret) ++ goto unlock_and_ret; ++ } else { ++ dev_dbg(dev, "data role swapping not supported\n"); ++ ret = -EOPNOTSUPP; + goto unlock_and_ret; ++ } + + ret = size; + unlock_and_ret: +@@ -1055,11 +1065,6 @@ static ssize_t power_role_store(struct device *dev, + return -EOPNOTSUPP; + } + +- if (!port->cap->pr_set) { +- dev_dbg(dev, "power role swapping not supported\n"); +- return -EOPNOTSUPP; +- } +- + if (port->pwr_opmode != TYPEC_PWR_MODE_PD) { + dev_dbg(dev, "partner unable to swap power role\n"); + return -EIO; +@@ -1077,11 +1082,21 @@ static ssize_t power_role_store(struct device *dev, + goto unlock_and_ret; + } + +- ret = port->cap->pr_set(port->cap, ret); +- if (ret) ++ if (port->ops && port->ops->pr_set) { ++ ret = port->ops->pr_set(port, ret); ++ if (ret) ++ goto unlock_and_ret; ++ } else if (port->cap && port->cap->pr_set) { ++ ret = port->cap->pr_set(port->cap, ret); ++ if (ret) ++ goto unlock_and_ret; ++ } else { ++ dev_dbg(dev, "power role swapping not supported\n"); ++ ret = -EOPNOTSUPP; + goto unlock_and_ret; +- ++ } + ret = size; ++ + unlock_and_ret: + mutex_unlock(&port->port_type_lock); + return ret; +@@ -1108,7 +1123,8 @@ port_type_store(struct device *dev, struct device_attribute *attr, + int ret; + enum typec_port_type type; + +- if (!port->cap->port_type_set || port->fixed_role != TYPEC_PORT_DRP) { ++ if ((!port->ops || !port->ops->port_type_set) || ++ !port->cap->port_type_set || port->fixed_role != TYPEC_PORT_DRP) { + dev_dbg(dev, "changing port type not supported\n"); + return -EOPNOTSUPP; + } +@@ -1125,7 +1141,10 @@ port_type_store(struct device *dev, struct device_attribute *attr, + goto unlock_and_ret; + } + +- ret = port->cap->port_type_set(port->cap, type); ++ if (port->ops && port->ops->port_type_set) ++ ret = 
port->ops->port_type_set(port, type); ++ else ++ ret = port->cap->port_type_set(port->cap, type); + if (ret) + goto unlock_and_ret; + +@@ -1181,18 +1200,22 @@ static ssize_t vconn_source_store(struct device *dev, + return -EOPNOTSUPP; + } + +- if (!port->cap->vconn_set) { +- dev_dbg(dev, "VCONN swapping not supported\n"); +- return -EOPNOTSUPP; +- } +- + ret = kstrtobool(buf, &source); + if (ret) + return ret; + +- ret = port->cap->vconn_set(port->cap, (enum typec_role)source); +- if (ret) +- return ret; ++ if (port->ops && port->ops->vconn_set) { ++ ret = port->ops->vconn_set(port, source); ++ if (ret) ++ return ret; ++ } else if (port->cap && port->cap->vconn_set) { ++ ret = port->cap->vconn_set(port->cap, (enum typec_role)source); ++ if (ret) ++ return ret; ++ } else { ++ dev_dbg(dev, "VCONN swapping not supported\n"); ++ return -EOPNOTSUPP; ++ } + + return size; + } +@@ -1597,6 +1620,7 @@ struct typec_port *typec_register_port(struct device *parent, + + port->id = id; + port->cap = cap; ++ port->ops = cap->ops; + port->port_type = cap->type; + port->fixed_role = cap->type; + port->port_roles = cap->data; +diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h +index 8b90cd77331c..6c95a9ff43c6 100644 +--- a/include/linux/usb/typec.h ++++ b/include/linux/usb/typec.h +@@ -168,6 +168,22 @@ struct typec_partner_desc { + struct usb_pd_identity *identity; + }; + ++/* ++ * struct typec_operations - USB Type-C Port Operations ++ * @try_role: Set data role preference for DRP port ++ * @dr_set: Set Data Role ++ * @pr_set: Set Power Role ++ * @vconn_set: Source VCONN ++ * @port_type_set: Set port type ++ */ ++struct typec_operations { ++ int (*try_role)(struct typec_port *port, int role); ++ int (*dr_set)(struct typec_port *port, enum typec_data_role); ++ int (*pr_set)(struct typec_port *port, enum typec_role); ++ int (*vconn_set)(struct typec_port *port, bool source); ++ int (*port_type_set)(struct typec_port *port, enum typec_port_type); ++}; ++ + /* + * 
struct typec_capability - USB Type-C Port Capabilities + * @type: Supported power role of the port +@@ -180,6 +196,7 @@ struct typec_partner_desc { + * @mux: Multiplexer switch for Alternate/Accessory Modes + * @fwnode: Optional fwnode of the port + * @driver_data: Private pointer for driver specific info ++ * @ops: Port operations vector + * @try_role: Set data role preference for DRP port + * @dr_set: Set Data Role + * @pr_set: Set Power Role +@@ -201,6 +218,8 @@ struct typec_capability { + struct fwnode_handle *fwnode; + void *driver_data; + ++ const struct typec_operations *ops; ++ + int (*try_role)(const struct typec_capability *, + int role); + +-- +2.17.1 + diff --git a/patches/0007-x86-intel_pmc_core-Create-platform-dependent-pmc-.core-ehl b/patches/0007-x86-intel_pmc_core-Create-platform-dependent-pmc-.core-ehl new file mode 100644 index 0000000000..609a22e979 --- /dev/null +++ b/patches/0007-x86-intel_pmc_core-Create-platform-dependent-pmc-.core-ehl @@ -0,0 +1,166 @@ +From 091d7e3078341b7d9d17ced44540e10c29b7706f Mon Sep 17 00:00:00 2001 +From: Gayatri Kammela +Date: Tue, 3 Sep 2019 16:45:15 -0700 +Subject: [PATCH 07/12] x86/intel_pmc_core: Create platform dependent pmc + bitmap structs + +The current implementation of pmc_core driver allows to reuse, but does +not reflect the exact number and names of IPs for a newer platform which +does not necessarily support all the IPs in the entries. The names and +number of these IPs might differ from its previous platforms. The number +of PCH IPs per platform is calculated based on PPFEAR_NUM_ENTRIES +defined, where each entry represents a bucket (8 bits). The platform can +support 'n' entries, but not necessarily all 'n*8' IPs. + +Create platform dependent bitmap structures to specify the exact number, +names of IPs while reusing the existing IPs. + +The changes in this patch are preparatory to accommodate future SoCs +that might reuse the CNL/ICL PCH IPs, and to reflect the exact number of +IPs with its names. 
+ +Cc: Peter Zijlstra +Cc: Srinivas Pandruvada +Cc: Andy Shevchenko +Cc: Kan Liang +Cc: David E. Box +Cc: Rajneesh Bhardwaj +Cc: Tony Luck +Reviewed-by: Tony Luck +Signed-off-by: Gayatri Kammela +--- + drivers/platform/x86/intel_pmc_core.c | 46 ++++++++++++++++++++------- + drivers/platform/x86/intel_pmc_core.h | 2 +- + 2 files changed, 35 insertions(+), 13 deletions(-) + +diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c +index 6ad829915689..c6a0c29d3476 100644 +--- a/drivers/platform/x86/intel_pmc_core.c ++++ b/drivers/platform/x86/intel_pmc_core.c +@@ -116,6 +116,11 @@ static const struct pmc_bit_map spt_pfear_map[] = { + {} + }; + ++static const struct pmc_bit_map *ext_spt_pfear_map[] = { ++ spt_pfear_map, ++ NULL ++}; ++ + static const struct pmc_bit_map spt_ltr_show_map[] = { + {"SOUTHPORT_A", SPT_PMC_LTR_SPA}, + {"SOUTHPORT_B", SPT_PMC_LTR_SPB}, +@@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = { + }; + + static const struct pmc_reg_map spt_reg_map = { +- .pfear_sts = spt_pfear_map, ++ .pfear_sts = ext_spt_pfear_map, + .mphy_sts = spt_mphy_map, + .pll_sts = spt_pll_map, + .ltr_show_sts = spt_ltr_show_map, +@@ -233,7 +238,15 @@ static const struct pmc_bit_map cnp_pfear_map[] = { + {"PSF6", BIT(5)}, + {"PSF7", BIT(6)}, + {"PSF8", BIT(7)}, ++ {} ++}; ++ ++static const struct pmc_bit_map *ext_cnp_pfear_map[] = { ++ cnp_pfear_map, ++ NULL ++}; + ++static const struct pmc_bit_map icl_pfear_map[] = { + /* Icelake generation onwards only */ + {"RES_65", BIT(0)}, + {"RES_66", BIT(1)}, +@@ -246,6 +259,12 @@ static const struct pmc_bit_map cnp_pfear_map[] = { + {} + }; + ++static const struct pmc_bit_map *ext_icl_pfear_map[] = { ++ cnp_pfear_map, ++ icl_pfear_map, ++ NULL ++}; ++ + static const struct pmc_bit_map cnp_slps0_dbg0_map[] = { + {"AUDIO_D3", BIT(0)}, + {"OTG_D3", BIT(1)}, +@@ -333,7 +352,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = { + }; + + static const struct pmc_reg_map 
cnp_reg_map = { +- .pfear_sts = cnp_pfear_map, ++ .pfear_sts = ext_cnp_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slps0_dbg_maps = cnp_slps0_dbg_maps, + .ltr_show_sts = cnp_ltr_show_map, +@@ -349,7 +368,7 @@ static const struct pmc_reg_map cnp_reg_map = { + }; + + static const struct pmc_reg_map icl_reg_map = { +- .pfear_sts = cnp_pfear_map, ++ .pfear_sts = ext_icl_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slps0_dbg_maps = cnp_slps0_dbg_maps, + .ltr_show_sts = cnp_ltr_show_map, +@@ -411,20 +430,20 @@ static int pmc_core_check_read_lock_bit(void) + #if IS_ENABLED(CONFIG_DEBUG_FS) + static bool slps0_dbg_latch; + +-static void pmc_core_display_map(struct seq_file *s, int index, +- u8 pf_reg, const struct pmc_bit_map *pf_map) ++static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip, ++ u8 pf_reg, const struct pmc_bit_map **pf_map) + { + seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n", +- index, pf_map[index].name, +- pf_map[index].bit_mask & pf_reg ? "Off" : "On"); ++ ip, pf_map[idx][index].name, ++ pf_map[idx][index].bit_mask & pf_reg ? 
"Off" : "On"); + } + + static int pmc_core_ppfear_show(struct seq_file *s, void *unused) + { + struct pmc_dev *pmcdev = s->private; +- const struct pmc_bit_map *map = pmcdev->map->pfear_sts; ++ const struct pmc_bit_map **maps = pmcdev->map->pfear_sts; + u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; +- int index, iter; ++ int index, iter, idx, ip = 0; + + iter = pmcdev->map->ppfear0_offset; + +@@ -432,9 +451,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused) + index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) + pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); + +- for (index = 0; map[index].name && +- index < pmcdev->map->ppfear_buckets * 8; index++) +- pmc_core_display_map(s, index, pf_regs[index / 8], map); ++ for (idx = 0; maps[idx]; idx++) { ++ for (index = 0; maps[idx][index].name && ++ index < pmcdev->map->ppfear_buckets * 8; ip++, index++) ++ pmc_core_display_map(s, index, idx, ip, ++ pf_regs[index / 8], maps); ++ } + + return 0; + } +diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h +index fdee5772e532..4b9f9ad6b692 100644 +--- a/drivers/platform/x86/intel_pmc_core.h ++++ b/drivers/platform/x86/intel_pmc_core.h +@@ -213,7 +213,7 @@ struct pmc_bit_map { + * captures them to have a common implementation. 
+ */ + struct pmc_reg_map { +- const struct pmc_bit_map *pfear_sts; ++ const struct pmc_bit_map **pfear_sts; + const struct pmc_bit_map *mphy_sts; + const struct pmc_bit_map *pll_sts; + const struct pmc_bit_map **slps0_dbg_maps; +-- +2.17.1 + diff --git a/patches/0008-ASoC-Intel-Skylake-Add-HARDWARE_CONFIG-IPC-request.audio b/patches/0008-ASoC-Intel-Skylake-Add-HARDWARE_CONFIG-IPC-request.audio new file mode 100644 index 0000000000..de0a4203f4 --- /dev/null +++ b/patches/0008-ASoC-Intel-Skylake-Add-HARDWARE_CONFIG-IPC-request.audio @@ -0,0 +1,212 @@ +From bbd9879efaa4a232f7fa4acc39550bf660e19deb Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 20 Mar 2019 20:59:28 +0100 +Subject: [PATCH 008/193] ASoC: Intel: Skylake: Add HARDWARE_CONFIG IPC request + +Driver requests this property to discover underlying HW configuration. +Internally hw config is split between core config followed by +capabilities e.g.: i2s, gpdma. + +Most params are currently unused. In time driver dependency on hw config +will increase, and with it, more parsing will be unveiled. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-messages.c | 1 + + sound/soc/intel/skylake/skl-sst-ipc.c | 87 ++++++++++++++++++++++++++ + sound/soc/intel/skylake/skl-sst-ipc.h | 46 ++++++++++++++ + sound/soc/intel/skylake/skl.h | 1 + + 4 files changed, 135 insertions(+) + +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index bb0b843ed187..5ccf9572c988 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -320,6 +320,7 @@ int skl_free_dsp(struct skl_dev *skl) + + skl->dsp_ops->cleanup(bus->dev, skl); + ++ kfree(skl->hw_cfg.i2s_caps.ctrl_base_addr); + kfree(skl->cores.state); + kfree(skl->cores.usage_count); + +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c +index e9e11ec4c97b..91b5440c643d 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.c ++++ b/sound/soc/intel/skylake/skl-sst-ipc.c +@@ -1189,3 +1189,90 @@ int skl_ipc_fw_cfg_get(struct sst_generic_ipc *ipc, struct skl_fw_cfg *cfg) + return ret; + } + EXPORT_SYMBOL_GPL(skl_ipc_fw_cfg_get); ++ ++int skl_ipc_hw_cfg_get(struct sst_generic_ipc *ipc, struct skl_hw_cfg *cfg) ++{ ++ struct skl_ipc_large_config_msg msg = {0}; ++ struct skl_tlv *tlv; ++ size_t size, bytes = 0, offset = 0; ++ u8 *payload = NULL; ++ int ret; ++ ++ msg.module_id = 0; ++ msg.instance_id = 0; ++ msg.large_param_id = SKL_BASEFW_HARDWARE_CONFIG; ++ ++ ret = skl_ipc_get_large_config(ipc, &msg, (u32 **)&payload, &bytes); ++ if (ret) ++ goto exit; ++ ++ while (offset < bytes) { ++ tlv = (struct skl_tlv *)(payload + offset); ++ ++ switch (tlv->type) { ++ case SKL_HW_CFG_CAVS_VER: ++ cfg->cavs_version = *tlv->value; ++ break; ++ ++ case SKL_HW_CFG_DSP_CORES: ++ cfg->dsp_cores = *tlv->value; ++ break; ++ ++ case SKL_HW_CFG_MEM_PAGE_BYTES: ++ cfg->mem_page_bytes = *tlv->value; ++ break; ++ ++ case SKL_HW_CFG_TOTAL_PHYS_MEM_PAGES: ++ cfg->total_phys_mem_pages = *tlv->value; ++ break; ++ ++ case 
SKL_HW_CFG_I2S_CAPS: ++ cfg->i2s_caps.version = tlv->value[0]; ++ size = tlv->value[1]; ++ cfg->i2s_caps.ctrl_count = size; ++ if (!size) ++ break; ++ ++ size *= sizeof(*cfg->i2s_caps.ctrl_base_addr); ++ cfg->i2s_caps.ctrl_base_addr = ++ kmemdup(&tlv->value[2], size, GFP_KERNEL); ++ if (!cfg->i2s_caps.ctrl_base_addr) { ++ ret = -ENOMEM; ++ goto exit; ++ } ++ break; ++ ++ case SKL_HW_CFG_GATEWAY_COUNT: ++ cfg->gateway_count = *tlv->value; ++ break; ++ ++ case SKL_HW_CFG_HP_EBB_COUNT: ++ cfg->hp_ebb_count = *tlv->value; ++ break; ++ ++ case SKL_HW_CFG_LP_EBB_COUNT: ++ cfg->lp_ebb_count = *tlv->value; ++ break; ++ ++ case SKL_HW_CFG_EBB_SIZE_BYTES: ++ cfg->ebb_size_bytes = *tlv->value; ++ break; ++ ++ case SKL_HW_CFG_GPDMA_CAPS: ++ case SKL_HW_CFG_UAOL_CAPS: ++ break; ++ ++ default: ++ dev_info(ipc->dev, "Unrecognized hw param: %d\n", ++ tlv->type); ++ break; ++ } ++ ++ offset += sizeof(*tlv) + tlv->length; ++ } ++ ++exit: ++ kfree(payload); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(skl_ipc_hw_cfg_get); +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h +index 5dfd6317ff4b..c54272609f0a 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.h ++++ b/sound/soc/intel/skylake/skl-sst-ipc.h +@@ -194,6 +194,7 @@ enum skl_basefw_runtime_param { + SKL_BASEFW_ASTATE_TABLE = 4, + SKL_BASEFW_DMA_CONTROL = 5, + SKL_BASEFW_FIRMWARE_CONFIG = 7, ++ SKL_BASEFW_HARDWARE_CONFIG = 8, + }; + + enum skl_fw_cfg_params { +@@ -258,6 +259,50 @@ struct skl_fw_cfg { + u32 power_gating_policy; + }; + ++enum skl_hw_cfg_params { ++ SKL_HW_CFG_CAVS_VER, ++ SKL_HW_CFG_DSP_CORES, ++ SKL_HW_CFG_MEM_PAGE_BYTES, ++ SKL_HW_CFG_TOTAL_PHYS_MEM_PAGES, ++ SKL_HW_CFG_I2S_CAPS, ++ SKL_HW_CFG_GPDMA_CAPS, ++ SKL_HW_CFG_GATEWAY_COUNT, ++ SKL_HW_CFG_HP_EBB_COUNT, ++ SKL_HW_CFG_LP_EBB_COUNT, ++ SKL_HW_CFG_EBB_SIZE_BYTES, ++ SKL_HW_CFG_UAOL_CAPS ++}; ++ ++enum skl_cavs_version { ++ SKL_CAVS_VER_1_5 = 0x10005, ++ SKL_CAVS_VER_1_8 = 0x10008, ++}; ++ ++enum skl_i2s_version { ++ 
SKL_I2S_VER_15_SKYLAKE = 0x00000, ++ SKL_I2S_VER_15_BROXTON = 0x10000, ++ SKL_I2S_VER_15_BROXTON_P = 0x20000, ++ SKL_I2S_VER_18_KBL_CNL = 0x30000, ++}; ++ ++struct skl_i2s_caps { ++ enum skl_i2s_version version; ++ u32 ctrl_count; ++ u32 *ctrl_base_addr; ++}; ++ ++struct skl_hw_cfg { ++ enum skl_cavs_version cavs_version; ++ u32 dsp_cores; ++ u32 mem_page_bytes; ++ u32 total_phys_mem_pages; ++ struct skl_i2s_caps i2s_caps; ++ u32 gateway_count; ++ u32 hp_ebb_count; ++ u32 lp_ebb_count; ++ u32 ebb_size_bytes; ++}; ++ + struct skl_ipc_init_instance_msg { + u32 module_id; + u32 instance_id; +@@ -359,5 +404,6 @@ void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data, + size_t tx_size); + + int skl_ipc_fw_cfg_get(struct sst_generic_ipc *ipc, struct skl_fw_cfg *cfg); ++int skl_ipc_hw_cfg_get(struct sst_generic_ipc *ipc, struct skl_hw_cfg *cfg); + + #endif /* __SKL_IPC_H */ +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index 22bfc6b8bc7b..e20712cfc549 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -110,6 +110,7 @@ struct skl_dev { + /* Populate module information */ + struct list_head module_list; + struct skl_fw_cfg fw_cfg; ++ struct skl_hw_cfg hw_cfg; + + /* Is firmware loaded */ + bool fw_loaded; +-- +2.17.1 + diff --git a/patches/0008-Bug-Fix-to-fix-incorrect-osid-value.sep-socwatch b/patches/0008-Bug-Fix-to-fix-incorrect-osid-value.sep-socwatch new file mode 100644 index 0000000000..22f36d506b --- /dev/null +++ b/patches/0008-Bug-Fix-to-fix-incorrect-osid-value.sep-socwatch @@ -0,0 +1,26 @@ +From ac44eae8c09c49e127bb2f87c4ae59a363187976 Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Thu, 10 Jan 2019 17:22:38 -0800 +Subject: [PATCH 08/27] Bug Fix to fix incorrect osid value + +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/sepdk/include/lwpmudrv_defines.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git 
a/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h +index aeee9516bef2..8346ea72d587 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h +@@ -511,7 +511,7 @@ extern "C" { + #define OS_ID_MODEM 1 + #define OS_ID_ANDROID 2 + #define OS_ID_SECVM 3 +-#define OS_ID_ACORN (U32)(-1) ++#define OS_ID_ACORN 0xFFFF + + #define PERF_HW_VER4 (5) + #if defined(__cplusplus) +-- +2.17.1 + diff --git a/patches/0008-VHM-add-API-to-do-guest-gpa2hpa-translation.acrn b/patches/0008-VHM-add-API-to-do-guest-gpa2hpa-translation.acrn new file mode 100644 index 0000000000..e0cd4370c0 --- /dev/null +++ b/patches/0008-VHM-add-API-to-do-guest-gpa2hpa-translation.acrn @@ -0,0 +1,142 @@ +From bfc89875d95b3f5a645dcfd85880dcbfe933a715 Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:55 +0800 +Subject: [PATCH 008/150] VHM: add API to do guest gpa2hpa translation + +Added API vhm_vm_gpa2hpa: do translation between gpa and hpa for +corresponding guest. 
+ +Change-Id: I5ccdc3c6ac73d02d854878957093895c7f0cbee6 +Tracked-On: 218445 +Signed-off-by: Jason Chen CJ +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/vhm/vhm_hypercall.c | 5 +++++ + drivers/vhm/vhm_vm_mngt.c | 16 ++++++++++++++++ + include/linux/vhm/acrn_common.h | 5 +++++ + include/linux/vhm/acrn_hv_defs.h | 2 ++ + include/linux/vhm/acrn_vhm_mm.h | 6 ++++++ + include/linux/vhm/vhm_hypercall.h | 1 + + include/linux/vhm/vhm_vm_mngt.h | 1 + + 7 files changed, 36 insertions(+) + +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index dc87d30151d5..384b86e60c9c 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -73,6 +73,11 @@ inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) + return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); + } + ++inline long hcall_vm_gpa2hpa(unsigned long vmid, unsigned long gpa2hpa) ++{ ++ return acrn_hypercall2(HC_VM_GPA2HPA, vmid, gpa2hpa); ++} ++ + inline long vhm_create_vm(struct vhm_vm *vm, unsigned long ioctl_param) + { + long ret = 0; +diff --git a/drivers/vhm/vhm_vm_mngt.c b/drivers/vhm/vhm_vm_mngt.c +index d1aa4ba1a4f0..8f1a00777dd4 100644 +--- a/drivers/vhm/vhm_vm_mngt.c ++++ b/drivers/vhm/vhm_vm_mngt.c +@@ -130,6 +130,22 @@ int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, + return 0; + } + ++unsigned long vhm_vm_gpa2hpa(unsigned long vmid, unsigned long gpa) ++{ ++ struct vm_gpa2hpa gpa2hpa; ++ int ret; ++ ++ gpa2hpa.gpa = gpa; ++ gpa2hpa.hpa = -1UL; /* Init value as invalid gpa */ ++ ret = hcall_vm_gpa2hpa(vmid, virt_to_phys(&gpa2hpa)); ++ if (ret < 0) { ++ pr_err("vhm: failed to inject!\n"); ++ return -EFAULT; ++ } ++ mb(); ++ return gpa2hpa.hpa; ++} ++ + void vm_list_add(struct list_head *list) + { + list_add(list, &vhm_vm_list); +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 6345f4ec6a47..7aaf78327eae 100644 +--- 
a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -217,4 +217,9 @@ struct acrn_nmi_entry { + unsigned long vcpuid; /* IN: -1 means vcpu0 */ + } __attribute__((aligned(8))); + ++struct vm_gpa2hpa { ++ unsigned long gpa; /* IN: gpa to translation */ ++ unsigned long hpa; /* OUT: -1 means invalid gpa */ ++} __attribute__((aligned(8))); ++ + #endif /* ACRN_COMMON_H */ +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 7b438cc01b48..d527a8fa8435 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -86,9 +86,11 @@ + #define HC_SET_IOREQ_BUFFER _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00) + #define HC_NOTIFY_REQUEST_FINISH _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x01) + ++ + /* Guest memory management */ + #define HC_ID_MEM_BASE 0x300UL + #define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00) ++#define HC_VM_GPA2HPA _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01) + + #define ACRN_DOM0_VMID (0UL) + #define ACRN_INVALID_VMID (-1UL) +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index e701254bc249..1af6fd3aa11b 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -70,6 +70,12 @@ + #define MMU_MEM_ATTR_ALL_WB 0x00000047 + #define MMU_MEM_ATTR_ALL_WC 0x00000207 + ++/* 1:1 mapping for service OS */ ++static inline unsigned long acrn_hpa2gpa(unsigned long hpa) ++{ ++ return hpa; ++} ++ + void *map_guest_phys(unsigned long vmid, u64 uos_phys, size_t size); + int unmap_guest_phys(unsigned long vmid, u64 uos_phys); + int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index e372ea48fa81..f1ed9a07e708 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -144,6 +144,7 @@ inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer); + inline long hcall_notify_req_finish(unsigned long 
vmid, + unsigned long vcpu_mask); + inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap); ++inline long hcall_vm_gpa2hpa(unsigned long vmid, unsigned long gpa2hpa); + inline long vhm_create_vm(struct vhm_vm *vm, unsigned long ioctl_param); + inline long vhm_resume_vm(struct vhm_vm *vm); + inline long vhm_pause_vm(struct vhm_vm *vm); +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index 77c21c4bba7a..5edacb31dc1b 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -86,6 +86,7 @@ void put_vm(struct vhm_vm *vm); + int vhm_get_vm_info(unsigned long vmid, struct vm_info *info); + int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, + unsigned long msi_data); ++unsigned long vhm_vm_gpa2hpa(unsigned long vmid, unsigned long gpa); + + void vm_list_add(struct list_head *list); + void vm_mutex_lock(struct mutex *mlock); +-- +2.17.1 + diff --git a/patches/0008-char-rpmb-add-device-attributes.security b/patches/0008-char-rpmb-add-device-attributes.security new file mode 100644 index 0000000000..513bff71d1 --- /dev/null +++ b/patches/0008-char-rpmb-add-device-attributes.security @@ -0,0 +1,184 @@ +From 8ab1603d0c5869d52c7ae21fbaff047ae7dc3c6f Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Sun, 28 Feb 2016 23:59:39 +0200 +Subject: [PATCH 08/65] char: rpmb: add device attributes + +Add attribute type that displays underlay storage type technology +EMMC, UFS, and attribute id, that displays underlay storage device id. +For EMMC this would be content of CID and for UFS serial number from +the device descriptor. + +V2: resend +V3: set kernel version to 4.7 +V4: update target date to Maj +V5: update date and kernel version +V6: 1. Add simulation device type + 2. Update date and kernel version + 3. Use binary attribute for id + 4. use simple sprintf instead of scnprintf + 5. Add more verbose documenation +V7: resend +V8: update date and kernel version +V9: 1. 
update date and kernel version + 2. add new rd_cnt_max and wr_cnt_max attributes. + 3. Use SIM as a suffix of the device type. +V10: update date and kernel version + +Change-Id: If25a96f1371d8fea5820f6e06366bc0945d32faa +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +--- + Documentation/ABI/testing/sysfs-class-rpmb | 37 ++++++++++ + drivers/char/rpmb/core.c | 84 ++++++++++++++++++++++ + 2 files changed, 121 insertions(+) + +diff --git a/Documentation/ABI/testing/sysfs-class-rpmb b/Documentation/ABI/testing/sysfs-class-rpmb +index a017b15eabb3..00e76ff3cddf 100644 +--- a/Documentation/ABI/testing/sysfs-class-rpmb ++++ b/Documentation/ABI/testing/sysfs-class-rpmb +@@ -18,3 +18,40 @@ Contact: Tomas Winkler + Description: + The /sys/class/rpmb/rpmbN directory is created for + each RPMB registered device. ++ ++What: /sys/class/rpmb/rpmbN/type ++Date: May 2019 ++KernelVersion: 5.2 ++Contact: Tomas Winkler ++Description: ++ The /sys/class/rpmb/rpmbN/type file contains device ++ underlying storage type technology: EMMC, UFS, NVMe. ++ In case of simulated device it will have :SIM suffix ++ i.e EMMC:SIM. ++ ++What: /sys/class/rpmb/rpmbN/id ++Date: May 2019 ++KernelVersion: 5.2 ++Contact: Tomas Winkler ++Description: ++ The /sys/class/rpmb/rpmbN/id file contains unique device id ++ in a binary form as defined by underlying storage device. ++ In case of multiple RPMB devices a user can determine correct ++ device. ++ The content can be parsed according the storage device type. ++ ++What: /sys/class/rpmb/rpmbN/wr_cnt_max ++Date: May 2019 ++KernelVersion: 5.2 ++Contact: Tomas Winkler ++Description: ++ The /sys/class/rpmb/rpmbN/wr_cnt_max file contains ++ number of blocks that can be reliable written in a single request. ++ ++What: /sys/class/rpmb/rpmbN/rd_cnt_max ++Date: May 2019 ++KernelVersion: 5.2 ++Contact: Tomas Winkler ++Description: ++ The /sys/class/rpmb/rpmbN/rd_cnt_max file contains ++ number of blocks that can be read in a single request. 
+diff --git a/drivers/char/rpmb/core.c b/drivers/char/rpmb/core.c +index c9e62193b8d1..dd0afa0a3e24 100644 +--- a/drivers/char/rpmb/core.c ++++ b/drivers/char/rpmb/core.c +@@ -218,6 +218,88 @@ struct rpmb_dev *rpmb_dev_find_by_device(struct device *parent, u8 target) + } + EXPORT_SYMBOL_GPL(rpmb_dev_find_by_device); + ++static ssize_t type_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ const char *sim; ++ ssize_t ret; ++ ++ sim = (rdev->ops->type & RPMB_TYPE_SIM) ? ":SIM" : ""; ++ switch (RPMB_TYPE_HW(rdev->ops->type)) { ++ case RPMB_TYPE_EMMC: ++ ret = sprintf(buf, "EMMC%s\n", sim); ++ break; ++ case RPMB_TYPE_UFS: ++ ret = sprintf(buf, "UFS%s\n", sim); ++ break; ++ case RPMB_TYPE_NVME: ++ ret = sprintf(buf, "NVMe%s\n", sim); ++ break; ++ default: ++ ret = sprintf(buf, "UNKNOWN\n"); ++ break; ++ } ++ ++ return ret; ++} ++static DEVICE_ATTR_RO(type); ++ ++static ssize_t id_read(struct file *file, struct kobject *kobj, ++ struct bin_attribute *attr, char *buf, ++ loff_t off, size_t count) ++{ ++ struct device *dev = kobj_to_dev(kobj); ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ size_t sz = min_t(size_t, rdev->ops->dev_id_len, PAGE_SIZE); ++ ++ if (!rdev->ops->dev_id) ++ return 0; ++ ++ return memory_read_from_buffer(buf, count, &off, rdev->ops->dev_id, sz); ++} ++static BIN_ATTR_RO(id, 0); ++ ++static ssize_t wr_cnt_max_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ ++ return sprintf(buf, "%u\n", rdev->ops->wr_cnt_max); ++} ++static DEVICE_ATTR_RO(wr_cnt_max); ++ ++static ssize_t rd_cnt_max_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ ++ return sprintf(buf, "%u\n", rdev->ops->rd_cnt_max); ++} ++static DEVICE_ATTR_RO(rd_cnt_max); ++ ++static struct attribute *rpmb_attrs[] = { ++ &dev_attr_type.attr, ++ &dev_attr_wr_cnt_max.attr, ++ 
&dev_attr_rd_cnt_max.attr, ++ NULL, ++}; ++ ++static struct bin_attribute *rpmb_bin_attributes[] = { ++ &bin_attr_id, ++ NULL, ++}; ++ ++static struct attribute_group rpmb_attr_group = { ++ .attrs = rpmb_attrs, ++ .bin_attrs = rpmb_bin_attributes, ++}; ++ ++static const struct attribute_group *rpmb_attr_groups[] = { ++ &rpmb_attr_group, ++ NULL ++}; ++ + /** + * rpmb_dev_unregister() - unregister RPMB partition from the RPMB subsystem + * @rdev: the rpmb device to unregister +@@ -337,6 +419,8 @@ struct rpmb_dev *rpmb_dev_register(struct device *dev, u8 target, + dev_set_name(&rdev->dev, "rpmb%d", id); + rdev->dev.class = &rpmb_class; + rdev->dev.parent = dev; ++ rdev->dev.groups = rpmb_attr_groups; ++ + ret = device_register(&rdev->dev); + if (ret) + goto exit; +-- +2.17.1 + diff --git a/patches/0008-counter-introduce-support-for-Intel-QEP-Encode.felipeb-5.4 b/patches/0008-counter-introduce-support-for-Intel-QEP-Encode.felipeb-5.4 new file mode 100644 index 0000000000..ec9d5ad6f0 --- /dev/null +++ b/patches/0008-counter-introduce-support-for-Intel-QEP-Encode.felipeb-5.4 @@ -0,0 +1,737 @@ +From 4ba63a7d840bd3b4f26324571f048d10155486e5 Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Tue, 6 Nov 2018 12:57:33 +0200 +Subject: [PATCH 08/14] counter: introduce support for Intel QEP Encoder + +Add support for Intel PSE Quadrature Encoder + +Signed-off-by: Felipe Balbi +--- + drivers/counter/Kconfig | 6 + + drivers/counter/Makefile | 1 + + drivers/counter/intel-qep.c | 689 ++++++++++++++++++++++++++++++++++++ + 3 files changed, 696 insertions(+) + create mode 100644 drivers/counter/intel-qep.c + +diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig +index 2967d0a9ff91..f280cd721350 100644 +--- a/drivers/counter/Kconfig ++++ b/drivers/counter/Kconfig +@@ -59,4 +59,10 @@ config FTM_QUADDEC + To compile this driver as a module, choose M here: the + module will be called ftm-quaddec. 
+ ++config INTEL_QEP ++ tristate "Intel Quadrature Encoder" ++ depends on PCI ++ help ++ Support for Intel Quadrature Encoder Devices ++ + endif # COUNTER +diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile +index 40d35522937d..cf291cfd8cf0 100644 +--- a/drivers/counter/Makefile ++++ b/drivers/counter/Makefile +@@ -9,3 +9,4 @@ obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o + obj-$(CONFIG_STM32_TIMER_CNT) += stm32-timer-cnt.o + obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o + obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o ++obj-$(CONFIG_INTEL_QEP) += intel-qep.o +diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c +new file mode 100644 +index 000000000000..fa410a333b05 +--- /dev/null ++++ b/drivers/counter/intel-qep.c +@@ -0,0 +1,689 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * intel-qep.c - Intel Quadrature Encoder Driver ++ * ++ * Copyright (C) 2019 Intel Corporation - https://www.intel.com ++ * ++ * Author: Felipe Balbi ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define INTEL_QEPCON 0x00 ++#define INTEL_QEPFLT 0x04 ++#define INTEL_QEPCOUNT 0x08 ++#define INTEL_QEPMAX 0x0c ++#define INTEL_QEPWDT 0x10 ++#define INTEL_QEPCAPDIV 0x14 ++#define INTEL_QEPCNTR 0x18 ++#define INTEL_QEPCAPBUF 0x1c ++#define INTEL_QEPINT_STAT 0x20 ++#define INTEL_QEPINT_MASK 0x24 ++ ++/* QEPCON */ ++#define INTEL_QEPCON_EN BIT(0) ++#define INTEL_QEPCON_FLT_EN BIT(1) ++#define INTEL_QEPCON_EDGE_A BIT(2) ++#define INTEL_QEPCON_EDGE_B BIT(3) ++#define INTEL_QEPCON_EDGE_INDX BIT(4) ++#define INTEL_QEPCON_SWPAB BIT(5) ++#define INTEL_QEPCON_OP_MODE BIT(6) ++#define INTEL_QEPCON_PH_ERR BIT(7) ++#define INTEL_QEPCON_COUNT_RST_MODE BIT(8) ++#define INTEL_QEPCON_INDX_GATING_MASK GENMASK(10, 9) ++#define INTEL_QEPCON_INDX_GATING(n) (((n) & 3) << 9) ++#define INTEL_QEPCON_INDX_PAL_PBL INTEL_QEPCON_INDX_GATING(0) ++#define INTEL_QEPCON_INDX_PAL_PBH INTEL_QEPCON_INDX_GATING(1) 
++#define INTEL_QEPCON_INDX_PAH_PBL INTEL_QEPCON_INDX_GATING(2) ++#define INTEL_QEPCON_INDX_PAH_PBH INTEL_QEPCON_INDX_GATING(3) ++#define INTEL_QEPCON_CAP_MODE BIT(11) ++#define INTEL_QEPCON_FIFO_THRE_MASK GENMASK(14, 12) ++#define INTEL_QEPCON_FIFO_THRE(n) ((((n) - 1) & 7) << 12) ++#define INTEL_QEPCON_FIFO_EMPTY BIT(15) ++ ++/* QEPFLT */ ++#define INTEL_QEPFLT_MAX_COUNT(n) ((n) & 0x1fffff) ++ ++/* QEPINT */ ++#define INTEL_QEPINT_FIFOCRIT BIT(5) ++#define INTEL_QEPINT_FIFOENTRY BIT(4) ++#define INTEL_QEPINT_QEPDIR BIT(3) ++#define INTEL_QEPINT_QEPRST_UP BIT(2) ++#define INTEL_QEPINT_QEPRST_DOWN BIT(1) ++#define INTEL_QEPINT_WDT BIT(0) ++ ++#define INTEL_QEP_DIRECTION_FORWARD 1 ++#define INTEL_QEP_DIRECTION_BACKWARD !INTEL_QEP_DIRECTION_FORWARD ++ ++#define INTEL_QEP_COUNTER_EXT_RW(_name) \ ++{ \ ++ .name = #_name, \ ++ .read = _name##_read, \ ++ .write = _name##_write, \ ++} ++ ++#define INTEL_QEP_COUNTER_EXT_RO(_name) \ ++{ \ ++ .name = #_name, \ ++ .read = _name##_read, \ ++} ++ ++#define INTEL_QEP_COUNTER_COUNT_EXT_RW(_name) \ ++{ \ ++ .name = #_name, \ ++ .read = _name##_read, \ ++ .write = _name##_write, \ ++} ++ ++#define INTEL_QEP_COUNTER_COUNT_EXT_RO(_name) \ ++{ \ ++ .name = #_name, \ ++ .read = _name##_read, \ ++} ++ ++struct intel_qep { ++ struct counter_device counter; ++ struct mutex lock; ++ struct pci_dev *pci; ++ struct device *dev; ++ void __iomem *regs; ++ u32 interrupt; ++ int direction; ++ bool enabled; ++}; ++ ++#define counter_to_qep(c) (container_of((c), struct intel_qep, counter)) ++ ++static inline u32 intel_qep_readl(void __iomem *base, u32 offset) ++{ ++ return readl(base + offset); ++} ++ ++static inline void intel_qep_writel(void __iomem *base, u32 offset, u32 value) ++{ ++ writel(value, base + offset); ++} ++ ++static const struct pci_device_id intel_qep_id_table[] = { ++ /* EHL */ ++ { PCI_VDEVICE(INTEL, 0x4bc3), }, ++ { PCI_VDEVICE(INTEL, 0x4b81), }, ++ { PCI_VDEVICE(INTEL, 0x4b82), }, ++ { PCI_VDEVICE(INTEL, 0x4b83), }, ++ { } /* 
Terminating Entry */ ++}; ++MODULE_DEVICE_TABLE(pci, intel_qep_id_table); ++ ++static void intel_qep_init(struct intel_qep *qep, bool reset) ++{ ++ u32 reg; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ reg &= ~INTEL_QEPCON_EN; ++ intel_qep_writel(qep->regs, INTEL_QEPCON, reg); ++ ++ /* make sure periperal is disabled by reading one more time */ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ ++ if (reset) { ++ reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN); ++ reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B | ++ INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE; ++ } ++ ++ intel_qep_writel(qep->regs, INTEL_QEPCON, reg); ++ ++ intel_qep_writel(qep->regs, INTEL_QEPWDT, 0x1000); ++ intel_qep_writel(qep->regs, INTEL_QEPINT_MASK, 0x0); ++ ++ qep->direction = INTEL_QEP_DIRECTION_FORWARD; ++} ++ ++static irqreturn_t intel_qep_irq_thread(int irq, void *_qep) ++{ ++ struct intel_qep *qep = _qep; ++ u32 stat; ++ ++ mutex_lock(&qep->lock); ++ ++ stat = qep->interrupt; ++ if (stat & INTEL_QEPINT_FIFOCRIT) ++ dev_dbg(qep->dev, "Fifo Critical\n"); ++ ++ if (stat & INTEL_QEPINT_FIFOENTRY) ++ dev_dbg(qep->dev, "Fifo Entry\n"); ++ ++ if (stat & INTEL_QEPINT_QEPDIR) ++ qep->direction = !qep->direction; ++ ++ if (stat & INTEL_QEPINT_QEPRST_UP) ++ qep->direction = INTEL_QEP_DIRECTION_FORWARD; ++ ++ if (stat & INTEL_QEPINT_QEPRST_DOWN) ++ qep->direction = INTEL_QEP_DIRECTION_BACKWARD; ++ ++ if (stat & INTEL_QEPINT_WDT) ++ dev_dbg(qep->dev, "Watchdog\n"); ++ ++ intel_qep_writel(qep->regs, INTEL_QEPINT_MASK, 0x00); ++ mutex_unlock(&qep->lock); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t intel_qep_irq(int irq, void *_qep) ++{ ++ struct intel_qep *qep = _qep; ++ u32 stat; ++ ++ stat = intel_qep_readl(qep->regs, INTEL_QEPINT_STAT); ++ if (stat) { ++ qep->interrupt = stat; ++ intel_qep_writel(qep->regs, INTEL_QEPINT_MASK, 0xffffffff); ++ intel_qep_writel(qep->regs, INTEL_QEPINT_STAT, stat); ++ return IRQ_WAKE_THREAD; ++ } ++ ++ return IRQ_HANDLED; ++} ++ 
++enum intel_qep_synapse_action { ++ INTEL_QEP_SYNAPSE_ACTION_RISING_EDGE, ++ INTEL_QEP_SYNAPSE_ACTION_FALLING_EDGE, ++}; ++ ++static enum counter_synapse_action intel_qep_synapse_actions[] = { ++ [INTEL_QEP_SYNAPSE_ACTION_RISING_EDGE] = ++ COUNTER_SYNAPSE_ACTION_RISING_EDGE, ++ ++ [INTEL_QEP_SYNAPSE_ACTION_FALLING_EDGE] = ++ COUNTER_SYNAPSE_ACTION_FALLING_EDGE, ++}; ++ ++enum intel_qep_count_function { ++ INTEL_QEP_ENCODER_MODE_NORMAL, ++ INTEL_QEP_ENCODER_MODE_SWAPPED, ++}; ++ ++static const enum counter_count_function intel_qep_count_functions[] = { ++ [INTEL_QEP_ENCODER_MODE_NORMAL] = ++ COUNTER_COUNT_FUNCTION_QUADRATURE_X4, ++ ++ [INTEL_QEP_ENCODER_MODE_SWAPPED] = ++ COUNTER_COUNT_FUNCTION_QUADRATURE_X4_SWAPPED, ++}; ++ ++static int intel_qep_count_read(struct counter_device *counter, ++ struct counter_count *count, ++ struct counter_count_read_value *val) ++{ ++ struct intel_qep *const qep = counter->priv; ++ uint32_t cntval; ++ ++ cntval = intel_qep_readl(qep, INTEL_QEPCOUNT); ++ counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cntval); ++ ++ return 0; ++} ++ ++static int intel_qep_count_write(struct counter_device *counter, ++ struct counter_count *count, ++ struct counter_count_write_value *val) ++{ ++ struct intel_qep *const qep = counter->priv; ++ u32 cnt; ++ int err; ++ ++ err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val); ++ if (err) ++ return err; ++ ++ intel_qep_writel(qep->regs, INTEL_QEPMAX, cnt); ++ ++ return 0; ++} ++ ++static int intel_qep_function_get(struct counter_device *counter, ++ struct counter_count *count, size_t *function) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ if (reg & INTEL_QEPCON_SWPAB) ++ *function = INTEL_QEP_ENCODER_MODE_SWAPPED; ++ else ++ *function = INTEL_QEP_ENCODER_MODE_NORMAL; ++ ++ return 0; ++} ++ ++static int intel_qep_function_set(struct counter_device *counter, ++ struct counter_count *count, size_t 
function) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ if (function == INTEL_QEP_ENCODER_MODE_SWAPPED) ++ reg |= INTEL_QEPCON_SWPAB; ++ else ++ reg &= ~INTEL_QEPCON_SWPAB; ++ intel_qep_writel(qep->regs, INTEL_QEPCON, reg); ++ ++ return 0; ++} ++ ++static int intel_qep_action_get(struct counter_device *counter, ++ struct counter_count *count, struct counter_synapse *synapse, ++ size_t *action) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ ++ *action = reg & synapse->signal->id ? ++ INTEL_QEP_SYNAPSE_ACTION_RISING_EDGE : ++ INTEL_QEP_SYNAPSE_ACTION_FALLING_EDGE; ++ ++ return 0; ++} ++ ++static int intel_qep_action_set(struct counter_device *counter, ++ struct counter_count *count, ++ struct counter_synapse *synapse, size_t action) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ ++ if (action == INTEL_QEP_SYNAPSE_ACTION_RISING_EDGE) ++ reg |= synapse->signal->id; ++ else ++ reg &= ~synapse->signal->id; ++ ++ intel_qep_writel(qep->regs, INTEL_QEPCON, reg); ++ ++ return 0; ++} ++ ++static const struct counter_ops intel_qep_counter_ops = { ++ .count_read = intel_qep_count_read, ++ .count_write = intel_qep_count_write, ++ ++ .function_get = intel_qep_function_get, ++ .function_set = intel_qep_function_set, ++ ++ .action_get = intel_qep_action_get, ++ .action_set = intel_qep_action_set, ++}; ++ ++static struct counter_signal intel_qep_signals[] = { ++ { ++ .id = INTEL_QEPCON_EDGE_A, ++ .name = "Phase A", ++ }, ++ { ++ .id = INTEL_QEPCON_EDGE_B, ++ .name = "Phase B", ++ }, ++ { ++ .id = INTEL_QEPCON_EDGE_INDX, ++ .name = "Index", ++ }, ++}; ++ ++static struct counter_synapse intel_qep_count_synapses[] = { ++ { ++ .actions_list = intel_qep_synapse_actions, ++ .num_actions = ARRAY_SIZE(intel_qep_synapse_actions), ++ .signal = 
&intel_qep_signals[0], ++ }, ++ { ++ .actions_list = intel_qep_synapse_actions, ++ .num_actions = ARRAY_SIZE(intel_qep_synapse_actions), ++ .signal = &intel_qep_signals[1], ++ }, ++ { ++ .actions_list = intel_qep_synapse_actions, ++ .num_actions = ARRAY_SIZE(intel_qep_synapse_actions), ++ .signal = &intel_qep_signals[2], ++ }, ++}; ++ ++static ssize_t ceiling_read(struct counter_device *counter, ++ struct counter_count *count, void *priv, char *buf) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPMAX); ++ ++ return snprintf(buf, PAGE_SIZE, "%d\n", reg); ++} ++ ++static ssize_t ceiling_write(struct counter_device *counter, ++ struct counter_count *count, void *priv, const char *buf, ++ size_t len) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 max; ++ int ret; ++ ++ ret = kstrtou32(buf, 0, &max); ++ if (ret < 0) ++ return ret; ++ ++ intel_qep_writel(qep->regs, INTEL_QEPMAX, max); ++ ++ return len; ++} ++ ++static ssize_t enable_read(struct counter_device *counter, ++ struct counter_count *count, void *priv, char *buf) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ ++ return snprintf(buf, PAGE_SIZE, "%d\n", !!(reg & INTEL_QEPCON_EN)); ++} ++ ++static ssize_t enable_write(struct counter_device *counter, ++ struct counter_count *count, void *priv, const char *buf, ++ size_t len) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ u32 val; ++ int ret; ++ ++ ret = kstrtou32(buf, 0, &val); ++ if (ret < 0) ++ return ret; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ ++ if (val) ++ reg |= INTEL_QEPCON_EN; ++ else ++ reg &= ~INTEL_QEPCON_EN; ++ ++ intel_qep_writel(qep->regs, INTEL_QEPCON, reg); ++ ++ return len; ++} ++ ++static ssize_t direction_read(struct counter_device *counter, ++ struct counter_count *count, void *priv, char *buf) ++{ ++ struct intel_qep *qep = 
counter_to_qep(counter); ++ ++ return snprintf(buf, PAGE_SIZE, "%s\n", qep->direction ? ++ "forward" : "backward"); ++} ++ ++static const struct counter_count_ext intel_qep_count_ext[] = { ++ INTEL_QEP_COUNTER_COUNT_EXT_RW(ceiling), ++ INTEL_QEP_COUNTER_COUNT_EXT_RW(enable), ++ INTEL_QEP_COUNTER_COUNT_EXT_RO(direction), ++}; ++ ++static struct counter_count intel_qep_counter_count[] = { ++ { ++ .id = 0, ++ .name = "Channel 1 Count", ++ .functions_list = intel_qep_count_functions, ++ .num_functions = ARRAY_SIZE(intel_qep_count_functions), ++ .synapses = intel_qep_count_synapses, ++ .num_synapses = ARRAY_SIZE(intel_qep_count_synapses), ++ .ext = intel_qep_count_ext, ++ .num_ext = ARRAY_SIZE(intel_qep_count_ext), ++ }, ++}; ++ ++static ssize_t noise_read(struct counter_device *counter, void *priv, char *buf) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ ++ if (!(reg & INTEL_QEPCON_FLT_EN)) ++ return snprintf(buf, PAGE_SIZE, "0\n"); ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPFLT); ++ ++ return snprintf(buf, PAGE_SIZE, "%d\n", INTEL_QEPFLT_MAX_COUNT(reg)); ++} ++ ++static ssize_t noise_write(struct counter_device *counter, void *priv, ++ const char *buf, size_t len) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ u32 max; ++ int ret; ++ ++ ret = kstrtou32(buf, 0, &max); ++ if (ret < 0) ++ return ret; ++ ++ if (max > 0x1fffff) ++ max = 0x1ffff; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ ++ if (max == 0) { ++ reg &= ~INTEL_QEPCON_FLT_EN; ++ } else { ++ reg |= INTEL_QEPCON_FLT_EN; ++ intel_qep_writel(qep->regs, INTEL_QEPFLT, max); ++ } ++ ++ intel_qep_writel(qep->regs, INTEL_QEPCON, reg); ++ ++ return len; ++} ++ ++static ssize_t preset_read(struct counter_device *counter, void *priv, char *buf) ++{ ++ return snprintf(buf, PAGE_SIZE, "0\n"); ++} ++ ++static ssize_t preset_enable_read(struct counter_device *counter, void *priv, ++ char *buf) ++{ ++ struct 
intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ return snprintf(buf, PAGE_SIZE, "%d\n", ++ !(reg & INTEL_QEPCON_COUNT_RST_MODE)); ++} ++ ++static ssize_t preset_enable_write(struct counter_device *counter, void *priv, ++ const char *buf, size_t len) ++{ ++ struct intel_qep *qep = counter_to_qep(counter); ++ u32 reg; ++ u32 val; ++ int ret; ++ ++ ret = kstrtou32(buf, 0, &val); ++ if (ret < 0) ++ return ret; ++ ++ reg = intel_qep_readl(qep->regs, INTEL_QEPCON); ++ ++ if (val) ++ reg &= ~INTEL_QEPCON_COUNT_RST_MODE; ++ else ++ reg |= INTEL_QEPCON_COUNT_RST_MODE; ++ ++ intel_qep_writel(qep->regs, INTEL_QEPCON, reg); ++ ++ return len; ++} ++ ++static const struct counter_device_ext intel_qep_ext[] = { ++ INTEL_QEP_COUNTER_EXT_RW(noise), ++ INTEL_QEP_COUNTER_EXT_RO(preset), ++ INTEL_QEP_COUNTER_EXT_RW(preset_enable) ++}; ++ ++static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id) ++{ ++ struct intel_qep *qep; ++ struct device *dev = &pci->dev; ++ void __iomem *regs; ++ int ret; ++ int irq; ++ ++ qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL); ++ if (!qep) ++ return -ENOMEM; ++ ++ ret = pcim_enable_device(pci); ++ if (ret) ++ return ret; ++ ++ pci_set_master(pci); ++ ++ ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci)); ++ if (ret) ++ return ret; ++ ++ regs = pcim_iomap_table(pci)[0]; ++ if (!regs) ++ return -ENOMEM; ++ ++ qep->pci = pci; ++ qep->dev = dev; ++ qep->regs = regs; ++ mutex_init(&qep->lock); ++ ++ intel_qep_init(qep, true); ++ pci_set_drvdata(pci, qep); ++ ++ qep->counter.name = pci_name(pci); ++ qep->counter.parent = dev; ++ qep->counter.ops = &intel_qep_counter_ops; ++ qep->counter.counts = intel_qep_counter_count; ++ qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count); ++ qep->counter.signals = intel_qep_signals; ++ qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals); ++ qep->counter.ext = intel_qep_ext; ++ qep->counter.num_ext = 
ARRAY_SIZE(intel_qep_ext); ++ qep->counter.priv = qep; ++ ++ ret = counter_register(&qep->counter); ++ if (ret) ++ return ret; ++ ++ ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES); ++ if (ret < 0) ++ goto err_irq_vectors; ++ ++ irq = pci_irq_vector(pci, 0); ++ ret = devm_request_threaded_irq(&pci->dev, irq, intel_qep_irq, ++ intel_qep_irq_thread, IRQF_SHARED | IRQF_TRIGGER_RISING, ++ "intel-qep", qep); ++ if (ret) ++ goto err_irq; ++ ++ pm_runtime_set_autosuspend_delay(dev, 1000); ++ pm_runtime_use_autosuspend(dev); ++ pm_runtime_put_noidle(dev); ++ pm_runtime_allow(dev); ++ ++ return 0; ++ ++err_irq: ++ pci_free_irq_vectors(pci); ++ ++err_irq_vectors: ++ counter_unregister(&qep->counter); ++ ++ return ret; ++} ++ ++static void intel_qep_remove(struct pci_dev *pci) ++{ ++ struct intel_qep *qep = pci_get_drvdata(pci); ++ struct device *dev = &pci->dev; ++ ++ pm_runtime_forbid(dev); ++ pm_runtime_get_noresume(dev); ++ ++ intel_qep_writel(qep->regs, INTEL_QEPCON, 0); ++ pci_free_irq_vectors(pci); ++ counter_unregister(&qep->counter); ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int intel_qep_suspend(struct device *dev) ++{ ++ return 0; ++} ++ ++static int intel_qep_resume(struct device *dev) ++{ ++ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); ++ struct intel_qep *qep = pci_get_drvdata(pdev); ++ ++ intel_qep_init(qep, false); ++ ++ return 0; ++} ++ ++static int intel_qep_runtime_suspend(struct device *dev) ++{ ++ return 0; ++} ++ ++static int intel_qep_runtime_resume(struct device *dev) ++{ ++ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); ++ struct intel_qep *qep = pci_get_drvdata(pdev); ++ ++ intel_qep_init(qep, false); ++ ++ return 0; ++} ++#endif ++ ++static const struct dev_pm_ops intel_qep_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(intel_qep_suspend, ++ intel_qep_resume) ++ SET_RUNTIME_PM_OPS(intel_qep_runtime_suspend, intel_qep_runtime_resume, ++ NULL) ++}; ++ ++static struct pci_driver intel_qep_driver = { ++ .name = 
"intel-qep", ++ .id_table = intel_qep_id_table, ++ .probe = intel_qep_probe, ++ .remove = intel_qep_remove, ++ .driver = { ++ .pm = &intel_qep_pm_ops, ++ } ++}; ++ ++module_pci_driver(intel_qep_driver); ++ ++MODULE_AUTHOR("Felipe Balbi "); ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Intel Quadrature Encoder Driver"); +-- +2.17.1 + diff --git a/patches/0008-drm-panfrost-Add-missing-check-for-pfdev-regulator.drm b/patches/0008-drm-panfrost-Add-missing-check-for-pfdev-regulator.drm new file mode 100644 index 0000000000..356a5bb8c5 --- /dev/null +++ b/patches/0008-drm-panfrost-Add-missing-check-for-pfdev-regulator.drm @@ -0,0 +1,37 @@ +From 3cacefa7f546b74d6ebb2cf0785dce91dc08c85e Mon Sep 17 00:00:00 2001 +From: Steven Price +Date: Thu, 22 Aug 2019 10:32:18 +0100 +Subject: [PATCH 008/690] drm/panfrost: Add missing check for pfdev->regulator + +When modifying panfrost_devfreq_target() to support a device without a +regulator defined I missed the check on the error path. Let's add it. + +Reported-by: Dan Carpenter +Fixes: e21dd290881b ("drm/panfrost: Enable devfreq to work without regulator") +Signed-off-by: Steven Price +Signed-off-by: Rob Herring +Link: https://patchwork.freedesktop.org/patch/msgid/20190822093218.26014-1-steven.price@arm.com +--- + drivers/gpu/drm/panfrost/panfrost_devfreq.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c +index 12ff77dacc95..c1eb8cfe6aeb 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c ++++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c +@@ -53,8 +53,10 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, + if (err) { + dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate, + err); +- regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt, +- pfdev->devfreq.cur_volt); ++ if (pfdev->regulator) ++ regulator_set_voltage(pfdev->regulator, ++ pfdev->devfreq.cur_volt, 
++ pfdev->devfreq.cur_volt); + return err; + } + +-- +2.17.1 + diff --git a/patches/0008-net-phy-mdio_bus-make-mdiobus_scan-also-cover.connectivity b/patches/0008-net-phy-mdio_bus-make-mdiobus_scan-also-cover.connectivity new file mode 100644 index 0000000000..4cb486f9fa --- /dev/null +++ b/patches/0008-net-phy-mdio_bus-make-mdiobus_scan-also-cover.connectivity @@ -0,0 +1,35 @@ +From aea4bb209ef294338ae11c0d3712334ab37aabf1 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Fri, 2 Aug 2019 11:04:51 +0800 +Subject: [PATCH 008/108] net: phy: mdio_bus: make mdiobus_scan also cover PHY + that only talks C45 + +Make mdiobus_scan() to try harder to look for any PHY that only talks C45. + +Signed-off-by: Ong Boon Leong +--- + drivers/net/phy/mdio_bus.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index 2e29ab841b4d..eda7703fed4b 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -514,8 +514,12 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) + int err; + + phydev = get_phy_device(bus, addr, false); +- if (IS_ERR(phydev)) +- return phydev; ++ if (IS_ERR(phydev)) { ++ /* Try C45 to ensure we don't miss PHY that only talks C45 */ ++ phydev = get_phy_device(bus, addr, true); ++ if (IS_ERR(phydev)) ++ return phydev; ++ } + + /* + * For DT, see if the auto-probed phy has a correspoding child +-- +2.17.1 + diff --git a/patches/0008-pinctrl-intel-Add-Intel-Whitley-pin-controller-suppor.lpss b/patches/0008-pinctrl-intel-Add-Intel-Whitley-pin-controller-suppor.lpss new file mode 100644 index 0000000000..146b0700bb --- /dev/null +++ b/patches/0008-pinctrl-intel-Add-Intel-Whitley-pin-controller-suppor.lpss @@ -0,0 +1,287 @@ +From 7fdeaaf40a62201514b94071444d62d82f740b4b Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Fri, 23 Nov 2018 17:13:12 +0200 +Subject: [PATCH 08/40] pinctrl: intel: Add Intel Whitley pin controller + support + +TODO: 
SoC or PCH codename? + +Lewisburg-R sounds not good enough, can we use Whitley? +Otherwise we would need to unify with Lewisburg. Which one is better? + +This driver adds pinctrl/GPIO support for Intel Whitley SoC. The +GPIO controller is based on the next generation GPIO hardware but still +compatible with the one supported by the Intel core pinctrl/GPIO driver. + +Signed-off-by: Andy Shevchenko +--- + drivers/pinctrl/intel/Kconfig | 9 + + drivers/pinctrl/intel/Makefile | 1 + + drivers/pinctrl/intel/pinctrl-whitley.c | 228 ++++++++++++++++++++++++ + 3 files changed, 238 insertions(+) + create mode 100644 drivers/pinctrl/intel/pinctrl-whitley.c + +diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig +index c091eb2f0aeb..9bc86cb2592e 100644 +--- a/drivers/pinctrl/intel/Kconfig ++++ b/drivers/pinctrl/intel/Kconfig +@@ -130,4 +130,13 @@ config PINCTRL_TIGERLAKE + help + This pinctrl driver provides an interface that allows configuring + of Intel Tiger Lake PCH pins and using them as GPIOs. ++ ++config PINCTRL_WHITLEY ++ tristate "Intel Whitley pinctrl and GPIO driver" ++ depends on ACPI ++ select PINCTRL_INTEL ++ help ++ This pinctrl driver provides an interface that allows configuring ++ of Intel Whitley pins and using them as GPIOs. 
++ + endif +diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile +index 5e92aba018ac..22ca296e78ce 100644 +--- a/drivers/pinctrl/intel/Makefile ++++ b/drivers/pinctrl/intel/Makefile +@@ -15,3 +15,4 @@ obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o + obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o + obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o + obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o ++obj-$(CONFIG_PINCTRL_WHITLEY) += pinctrl-whitley.o +diff --git a/drivers/pinctrl/intel/pinctrl-whitley.c b/drivers/pinctrl/intel/pinctrl-whitley.c +new file mode 100644 +index 000000000000..a5e0fff3260b +--- /dev/null ++++ b/drivers/pinctrl/intel/pinctrl-whitley.c +@@ -0,0 +1,228 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Intel Whitley PCH pinctrl/GPIO driver ++ * ++ * Copyright (C) 2018, Intel Corporation ++ * Author: Andy Shevchenko ++ */ ++ ++#include ++#include ++#include ++ ++#include ++ ++#include "pinctrl-intel.h" ++ ++#define ICX_PAD_OWN 0x020 ++#define ICX_PADCFGLOCK 0x080 ++#define ICX_HOSTSW_OWN 0x0b0 ++#define ICX_GPI_IS 0x100 ++#define ICX_GPI_IE 0x120 ++ ++#define ICX_GPP(r, s, e) \ ++ { \ ++ .reg_num = (r), \ ++ .base = (s), \ ++ .size = ((e) - (s) + 1), \ ++ } ++ ++#define ICX_COMMUNITY(b, s, e, g) \ ++ { \ ++ .barno = (b), \ ++ .padown_offset = ICX_PAD_OWN, \ ++ .padcfglock_offset = ICX_PADCFGLOCK, \ ++ .hostown_offset = ICX_HOSTSW_OWN, \ ++ .is_offset = ICX_GPI_IS, \ ++ .ie_offset = ICX_GPI_IE, \ ++ .pin_base = (s), \ ++ .npins = ((e) - (s) + 1), \ ++ .gpps = (g), \ ++ .ngpps = ARRAY_SIZE(g), \ ++ } ++ ++/* Whitley */ ++static const struct pinctrl_pin_desc icxsp_pins[] = { ++ /* FIVRDEBUG */ ++ PINCTRL_PIN(0, "FIVR_CLKREF"), ++ PINCTRL_PIN(1, "FIVR_PRB_DIG_0"), ++ PINCTRL_PIN(2, "FIVR_PRB_DIG_1"), ++ PINCTRL_PIN(3, "FBRK_N"), ++ /* JTAG */ ++ PINCTRL_PIN(4, "TCK"), ++ PINCTRL_PIN(5, "TDI"), ++ PINCTRL_PIN(6, "TDO"), ++ PINCTRL_PIN(7, "TMS"), ++ PINCTRL_PIN(8, "TRST_N"), ++ /* JTAG1 */ ++ 
PINCTRL_PIN(9, "DEBUG_EN_N"), ++ PINCTRL_PIN(10, "PRDY_N"), ++ PINCTRL_PIN(11, "PREQ_N"), ++ /* MBP1 */ ++ PINCTRL_PIN(12, "MBP0_N"), ++ PINCTRL_PIN(13, "MBP1_N"), ++ PINCTRL_PIN(14, "MBP2_N"), ++ PINCTRL_PIN(15, "MBP3_N"), ++ /* MBP2 */ ++ PINCTRL_PIN(16, "MCP_SPARE0"), ++ PINCTRL_PIN(17, "MCP_SPARE1"), ++ PINCTRL_PIN(18, "MCP_SPARE2"), ++ PINCTRL_PIN(19, "MCP_SPARE3"), ++ /* MCP */ ++ PINCTRL_PIN(20, "MCP_MBP0_N"), ++ PINCTRL_PIN(21, "MCP_MBP1_N"), ++ PINCTRL_PIN(22, "MCPSMBUSSCL"), ++ PINCTRL_PIN(23, "MCPSMBUSSDA"), ++ /* MISC1 */ ++ PINCTRL_PIN(24, "CATERR_N"), ++ PINCTRL_PIN(25, "ERROR0_N"), ++ PINCTRL_PIN(26, "ERROR1_N"), ++ PINCTRL_PIN(27, "ERROR2_N"), ++ /* MISC2 */ ++ PINCTRL_PIN(28, "SPDSMBUSSCL0"), ++ PINCTRL_PIN(29, "SPDSMBUSSDA0"), ++ PINCTRL_PIN(30, "MEMHOT_OUT_N"), ++ PINCTRL_PIN(31, "MEMHOT_IN_N"), ++ /* MISC3 */ ++ PINCTRL_PIN(32, "MSMI_N"), ++ PINCTRL_PIN(33, "PECI"), ++ PINCTRL_PIN(34, "PROCHOT_N"), ++ /* MISC4 */ ++ PINCTRL_PIN(35, "SPDSMBUSSCL1"), ++ PINCTRL_PIN(36, "SPDSMBUSSDA1"), ++ PINCTRL_PIN(37, "VPPSMBUSSCL"), ++ PINCTRL_PIN(38, "VPPSMBUSSDA"), ++ /* MISC5 */ ++ PINCTRL_PIN(39, "ENH_MCECC_DIS"), ++ PINCTRL_PIN(40, "DMI_MODE_OVERRIDE"), ++ PINCTRL_PIN(41, "VLN_DISABLE"), ++ PINCTRL_PIN(42, "MCSMBUS_ALRT_N"), ++ PINCTRL_PIN(43, "MEMTRIP_N"), ++ /* PMAX */ ++ PINCTRL_PIN(44, "VSENSEPMAX"), ++ /* PTI */ ++ PINCTRL_PIN(45, "PTI_0"), ++ PINCTRL_PIN(46, "PTI_1"), ++ PINCTRL_PIN(47, "PTI_10"), ++ PINCTRL_PIN(48, "PTI_11"), ++ PINCTRL_PIN(49, "PTI_12"), ++ PINCTRL_PIN(50, "PTI_13"), ++ PINCTRL_PIN(51, "PTI_14"), ++ PINCTRL_PIN(52, "PTI_15"), ++ PINCTRL_PIN(53, "PTI_2"), ++ PINCTRL_PIN(54, "PTI_3"), ++ PINCTRL_PIN(55, "PTI_4"), ++ PINCTRL_PIN(56, "PTI_5"), ++ PINCTRL_PIN(57, "PTI_6"), ++ PINCTRL_PIN(58, "PTI_7"), ++ PINCTRL_PIN(59, "PTI_8"), ++ PINCTRL_PIN(60, "PTI_9"), ++ PINCTRL_PIN(61, "PTI_STB_0"), ++ PINCTRL_PIN(62, "PTI_STB_1"), ++ /* RESET2 */ ++ PINCTRL_PIN(63, "EAR_N"), ++ PINCTRL_PIN(64, "PMSYNC"), ++ PINCTRL_PIN(65, "PMSYNC_CLK"), ++ 
PINCTRL_PIN(66, "THERMTRIP_N"), ++ /* RESET3 */ ++ PINCTRL_PIN(67, "TSC_SYNC"), ++ PINCTRL_PIN(68, "NMI"), ++ PINCTRL_PIN(69, "PM_FAST_WAKE_N"), ++ /* SPARE */ ++ PINCTRL_PIN(70, "LGSPARE_0"), ++ PINCTRL_PIN(71, "LGSPARE_1"), ++ PINCTRL_PIN(72, "LGSPARE_2"), ++ PINCTRL_PIN(73, "LGSPARE_3"), ++ PINCTRL_PIN(74, "LGSPARE_4"), ++ PINCTRL_PIN(75, "LGSPARE_5"), ++ PINCTRL_PIN(76, "LGSPARE_6"), ++ /* STRAP */ ++ PINCTRL_PIN(77, "SAFE_MODE_BOOT"), ++ PINCTRL_PIN(78, "PROCDIS_N"), ++ PINCTRL_PIN(79, "SOCKET_ID_2"), ++ PINCTRL_PIN(80, "FRMAGENT"), ++ PINCTRL_PIN(81, "BIST_ENABLE"), ++ PINCTRL_PIN(82, "TXT_PLTEN"), ++ PINCTRL_PIN(83, "SOCKET_ID_1"), ++ PINCTRL_PIN(84, "TAP_ODT_EN"), ++ PINCTRL_PIN(85, "EX_LEGACY_SKT"), ++ PINCTRL_PIN(86, "TXT_AGENT"), ++ PINCTRL_PIN(87, "SOCKET_ID_0"), ++ PINCTRL_PIN(88, "BMCINIT"), ++ /* SVID1 */ ++ PINCTRL_PIN(89, "SVIDALERT0_N"), ++ PINCTRL_PIN(90, "SVIDCLK0"), ++ PINCTRL_PIN(91, "SVIDDATA0"), ++ /* SVID2 */ ++ PINCTRL_PIN(92, "SVIDALERT1_N"), ++ PINCTRL_PIN(93, "SVIDCLK1"), ++ PINCTRL_PIN(94, "SVIDDATA1"), ++}; ++ ++static const struct intel_padgroup icxsp_community0_gpps[] = { ++ ICX_GPP(0, 0, 3), /* FIVRDEBUG */ ++ ICX_GPP(1, 4, 8), /* JTAG */ ++ ICX_GPP(2, 9, 11), /* JTAG1 */ ++ ICX_GPP(3, 12, 15), /* MBP1 */ ++ ICX_GPP(4, 16, 19), /* MBP2 */ ++ ICX_GPP(5, 20, 23), /* MCP */ ++ ICX_GPP(6, 24, 27), /* MISC1 */ ++ ICX_GPP(7, 28, 31), /* MISC2 */ ++ ICX_GPP(8, 32, 34), /* MISC3 */ ++ ICX_GPP(9, 35, 38), /* MISC4 */ ++ ICX_GPP(10, 39, 43), /* MISC5 */ ++ ICX_GPP(11, 44, 44), /* PMAX */ ++ ICX_GPP(12, 45, 62), /* PTI */ ++ ICX_GPP(13, 63, 66), /* RESET2 */ ++ ICX_GPP(14, 67, 69), /* RESET3 */ ++ ICX_GPP(15, 70, 76), /* SPARE */ ++ ICX_GPP(16, 77, 88), /* STRAP */ ++ ICX_GPP(17, 89, 91), /* SVID1 */ ++ ICX_GPP(18, 92, 94), /* SVID2 */ ++}; ++ ++static const struct intel_community icxsp_communities[] = { ++ ICX_COMMUNITY(0, 0, 94, icxsp_community0_gpps), /* WEST */ ++}; ++ ++static const struct intel_pingroup icxsp_groups[] = { ++ /* PLACE 
HOLDER */ ++}; ++ ++static const struct intel_function icxsp_functions[] = { ++ /* PLACE HOLDER */ ++}; ++ ++static const struct intel_pinctrl_soc_data icxsp_soc_data = { ++ .pins = icxsp_pins, ++ .npins = ARRAY_SIZE(icxsp_pins), ++ .groups = icxsp_groups, ++ .ngroups = ARRAY_SIZE(icxsp_groups), ++ .functions = icxsp_functions, ++ .nfunctions = ARRAY_SIZE(icxsp_functions), ++ .communities = icxsp_communities, ++ .ncommunities = ARRAY_SIZE(icxsp_communities), ++}; ++ ++static const struct acpi_device_id icxsp_pinctrl_acpi_match[] = { ++ { "", (kernel_ulong_t)&icxsp_soc_data }, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, icxsp_pinctrl_acpi_match); ++ ++static INTEL_PINCTRL_PM_OPS(icxsp_pinctrl_pm_ops); ++ ++static struct platform_driver icxsp_pinctrl_driver = { ++ .probe = intel_pinctrl_probe_by_hid, ++ .driver = { ++ .name = "whitley-pinctrl", ++ .acpi_match_table = icxsp_pinctrl_acpi_match, ++ .pm = &icxsp_pinctrl_pm_ops, ++ }, ++}; ++ ++module_platform_driver(icxsp_pinctrl_driver); ++ ++MODULE_AUTHOR("Andy Shevchenko "); ++MODULE_DESCRIPTION("Intel Whitley PCH pinctrl/GPIO driver"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0008-trusty-Add-trusty-logging-driver.trusty b/patches/0008-trusty-Add-trusty-logging-driver.trusty new file mode 100644 index 0000000000..4a6a6e7b59 --- /dev/null +++ b/patches/0008-trusty-Add-trusty-logging-driver.trusty @@ -0,0 +1,367 @@ +From 1ec87e31a602ea6705784c143dcde54255e1d270 Mon Sep 17 00:00:00 2001 +From: Riley Andrews +Date: Thu, 5 May 2016 14:42:41 -0700 +Subject: [PATCH 08/63] trusty: Add trusty logging driver. + +This driver is the consumer side of a ringbuffer of log data +that the secure operating system dumps prints into. Trusty +printfs will be dumped into the kernel log after smc calls +and during panics. 
+ +Change-Id: Iadc939b60940330e8fe02a52f3e397da7833c2fa +--- + drivers/trusty/Kconfig | 5 + + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-log.c | 274 ++++++++++++++++++++++++++++++++++ + drivers/trusty/trusty-log.h | 22 +++ + include/linux/trusty/smcall.h | 1 + + 5 files changed, 303 insertions(+) + create mode 100644 drivers/trusty/trusty-log.c + create mode 100644 drivers/trusty/trusty-log.h + +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index fc1061deb876..ea75813254c0 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -28,4 +28,9 @@ config TRUSTY_FIQ_ARM64 + select TRUSTY_FIQ + default y + ++config TRUSTY_LOG ++ tristate ++ depends on TRUSTY ++ default y ++ + endmenu +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index e162a4061e14..641ee2a6e830 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -7,3 +7,4 @@ obj-$(CONFIG_TRUSTY) += trusty-irq.o + obj-$(CONFIG_TRUSTY_FIQ) += trusty-fiq.o + obj-$(CONFIG_TRUSTY_FIQ_ARM) += trusty-fiq-arm.o + obj-$(CONFIG_TRUSTY_FIQ_ARM64) += trusty-fiq-arm64.o trusty-fiq-arm64-glue.o ++obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +new file mode 100644 +index 000000000000..e8dcced2ff1d +--- /dev/null ++++ b/drivers/trusty/trusty-log.c +@@ -0,0 +1,274 @@ ++/* ++ * Copyright (C) 2015 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "trusty-log.h" ++ ++#define TRUSTY_LOG_SIZE (PAGE_SIZE * 2) ++#define TRUSTY_LINE_BUFFER_SIZE 256 ++ ++struct trusty_log_state { ++ struct device *dev; ++ struct device *trusty_dev; ++ ++ /* ++ * This lock is here to ensure only one consumer will read ++ * from the log ring buffer at a time. ++ */ ++ spinlock_t lock; ++ struct log_rb *log; ++ uint32_t get; ++ ++ struct page *log_pages; ++ ++ struct notifier_block call_notifier; ++ struct notifier_block panic_notifier; ++ char line_buffer[TRUSTY_LINE_BUFFER_SIZE]; ++}; ++ ++static int log_read_line(struct trusty_log_state *s, int put, int get) ++{ ++ struct log_rb *log = s->log; ++ int i; ++ char c = '\0'; ++ size_t max_to_read = min((size_t)(put - get), ++ sizeof(s->line_buffer) - 1); ++ size_t mask = log->sz - 1; ++ ++ for (i = 0; i < max_to_read && c != '\n';) ++ s->line_buffer[i++] = c = log->data[get++ & mask]; ++ s->line_buffer[i] = '\0'; ++ ++ return i; ++} ++ ++static void trusty_dump_logs(struct trusty_log_state *s) ++{ ++ struct log_rb *log = s->log; ++ uint32_t get, put, alloc; ++ int read_chars; ++ ++ BUG_ON(!is_power_of_2(log->sz)); ++ ++ /* ++ * For this ring buffer, at any given point, alloc >= put >= get. ++ * The producer side of the buffer is not locked, so the put and alloc ++ * pointers must be read in a defined order (put before alloc) so ++ * that the above condition is maintained. A read barrier is needed ++ * to make sure the hardware and compiler keep the reads ordered. ++ */ ++ get = s->get; ++ while ((put = log->put) != get) { ++ /* Make sure that the read of put occurs before the read of log data */ ++ rmb(); ++ ++ /* Read a line from the log */ ++ read_chars = log_read_line(s, put, get); ++ ++ /* Force the loads from log_read_line to complete. 
*/ ++ rmb(); ++ alloc = log->alloc; ++ ++ /* ++ * Discard the line that was just read if the data could ++ * have been corrupted by the producer. ++ */ ++ if (alloc - get > log->sz) { ++ pr_err("trusty: log overflow."); ++ get = alloc - log->sz; ++ continue; ++ } ++ pr_info("trusty: %s", s->line_buffer); ++ get += read_chars; ++ } ++ s->get = get; ++} ++ ++static int trusty_log_call_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct trusty_log_state *s; ++ unsigned long flags; ++ ++ if (action != TRUSTY_CALL_RETURNED) ++ return NOTIFY_DONE; ++ ++ s = container_of(nb, struct trusty_log_state, call_notifier); ++ spin_lock_irqsave(&s->lock, flags); ++ trusty_dump_logs(s); ++ spin_unlock_irqrestore(&s->lock, flags); ++ return NOTIFY_OK; ++} ++ ++static int trusty_log_panic_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct trusty_log_state *s; ++ ++ /* ++ * Don't grab the spin lock to hold up the panic notifier, even ++ * though this is racy. 
++ */ ++ s = container_of(nb, struct trusty_log_state, panic_notifier); ++ pr_info("trusty-log panic notifier - trusty version %s", ++ trusty_version_str_get(s->trusty_dev)); ++ trusty_dump_logs(s); ++ return NOTIFY_OK; ++} ++ ++static bool trusty_supports_logging(struct device *device) ++{ ++ int result; ++ ++ result = trusty_std_call32(device, SMC_SC_SHARED_LOG_VERSION, ++ TRUSTY_LOG_API_VERSION, 0, 0); ++ if (result == SM_ERR_UNDEFINED_SMC) { ++ pr_info("trusty-log not supported on secure side.\n"); ++ return false; ++ } else if (result < 0) { ++ pr_err("trusty std call (SMC_SC_SHARED_LOG_VERSION) failed: %d\n", ++ result); ++ return false; ++ } ++ ++ if (result == TRUSTY_LOG_API_VERSION) { ++ return true; ++ } else { ++ pr_info("trusty-log unsupported api version: %d, supported: %d\n", ++ result, TRUSTY_LOG_API_VERSION); ++ return false; ++ } ++} ++ ++static int trusty_log_probe(struct platform_device *pdev) ++{ ++ struct trusty_log_state *s; ++ int result; ++ phys_addr_t pa; ++ ++ dev_dbg(&pdev->dev, "%s\n", __func__); ++ if (!trusty_supports_logging(pdev->dev.parent)) { ++ return -ENXIO; ++ } ++ ++ s = kzalloc(sizeof(*s), GFP_KERNEL); ++ if (!s) { ++ result = -ENOMEM; ++ goto error_alloc_state; ++ } ++ ++ spin_lock_init(&s->lock); ++ s->dev = &pdev->dev; ++ s->trusty_dev = s->dev->parent; ++ s->get = 0; ++ s->log_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, ++ get_order(TRUSTY_LOG_SIZE)); ++ if (!s->log_pages) { ++ result = -ENOMEM; ++ goto error_alloc_log; ++ } ++ s->log = page_address(s->log_pages); ++ ++ pa = page_to_phys(s->log_pages); ++ result = trusty_std_call32(s->trusty_dev, ++ SMC_SC_SHARED_LOG_ADD, ++ (u32)(pa), (u32)(pa >> 32), ++ TRUSTY_LOG_SIZE); ++ if (result < 0) { ++ pr_err("trusty std call (SMC_SC_SHARED_LOG_ADD) failed: %d %pa\n", ++ result, &pa); ++ goto error_std_call; ++ } ++ ++ s->call_notifier.notifier_call = trusty_log_call_notify; ++ result = trusty_call_notifier_register(s->trusty_dev, ++ &s->call_notifier); ++ if (result < 0) { ++ 
dev_err(&pdev->dev, ++ "failed to register trusty call notifier\n"); ++ goto error_call_notifier; ++ } ++ ++ s->panic_notifier.notifier_call = trusty_log_panic_notify; ++ result = atomic_notifier_chain_register(&panic_notifier_list, ++ &s->panic_notifier); ++ if (result < 0) { ++ dev_err(&pdev->dev, ++ "failed to register panic notifier\n"); ++ goto error_panic_notifier; ++ } ++ platform_set_drvdata(pdev, s); ++ ++ return 0; ++ ++error_panic_notifier: ++ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); ++error_call_notifier: ++ trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, ++ (u32)pa, (u32)(pa >> 32), 0); ++error_std_call: ++ __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE)); ++error_alloc_log: ++ kfree(s); ++error_alloc_state: ++ return result; ++} ++ ++static int trusty_log_remove(struct platform_device *pdev) ++{ ++ int result; ++ struct trusty_log_state *s = platform_get_drvdata(pdev); ++ phys_addr_t pa = page_to_phys(s->log_pages); ++ ++ dev_dbg(&pdev->dev, "%s\n", __func__); ++ ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &s->panic_notifier); ++ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); ++ ++ result = trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, ++ (u32)pa, (u32)(pa >> 32), 0); ++ if (result) { ++ pr_err("trusty std call (SMC_SC_SHARED_LOG_RM) failed: %d\n", ++ result); ++ } ++ __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE)); ++ kfree(s); ++ ++ return 0; ++} ++ ++static const struct of_device_id trusty_test_of_match[] = { ++ { .compatible = "android,trusty-log-v1", }, ++ {}, ++}; ++ ++static struct platform_driver trusty_log_driver = { ++ .probe = trusty_log_probe, ++ .remove = trusty_log_remove, ++ .driver = { ++ .name = "trusty-log", ++ .owner = THIS_MODULE, ++ .of_match_table = trusty_test_of_match, ++ }, ++}; ++ ++module_platform_driver(trusty_log_driver); +diff --git a/drivers/trusty/trusty-log.h b/drivers/trusty/trusty-log.h +new file mode 100644 +index 
000000000000..09f60213e1f6 +--- /dev/null ++++ b/drivers/trusty/trusty-log.h +@@ -0,0 +1,22 @@ ++#ifndef _TRUSTY_LOG_H_ ++#define _TRUSTY_LOG_H_ ++ ++/* ++ * Ring buffer that supports one secure producer thread and one ++ * linux side consumer thread. ++ */ ++struct log_rb { ++ volatile uint32_t alloc; ++ volatile uint32_t put; ++ uint32_t sz; ++ volatile char data[0]; ++} __packed; ++ ++#define SMC_SC_SHARED_LOG_VERSION SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 0) ++#define SMC_SC_SHARED_LOG_ADD SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 1) ++#define SMC_SC_SHARED_LOG_RM SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 2) ++ ++#define TRUSTY_LOG_API_VERSION 1 ++ ++#endif ++ +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index e8704974d3e3..aaad5cee6143 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -51,6 +51,7 @@ + #define SMC_ENTITY_RESERVED 5 /* Reserved for future use */ + #define SMC_ENTITY_TRUSTED_APP 48 /* Trusted Application calls */ + #define SMC_ENTITY_TRUSTED_OS 50 /* Trusted OS calls */ ++#define SMC_ENTITY_LOGGING 51 /* Used for secure -> nonsecure logging */ + #define SMC_ENTITY_SECURE_MONITOR 60 /* Trusted OS calls internal to secure monitor */ + + /* FC = Fast call, SC = Standard call */ +-- +2.17.1 + diff --git a/patches/0008-usb-typec-tcpm-Start-using-struct-typec_operatio.usb-typec b/patches/0008-usb-typec-tcpm-Start-using-struct-typec_operatio.usb-typec new file mode 100644 index 0000000000..bdb167b341 --- /dev/null +++ b/patches/0008-usb-typec-tcpm-Start-using-struct-typec_operatio.usb-typec @@ -0,0 +1,136 @@ +From 11beb05a36f6b9cde3ed4ee810ded69c60ea8378 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Tue, 1 Oct 2019 12:21:39 +0300 +Subject: [PATCH 08/18] usb: typec: tcpm: Start using struct typec_operations + +Supplying the operation callbacks as part of a struct +typec_operations instead of as part of struct +typec_capability during port registration. 
+ +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/tcpm/tcpm.c | 47 ++++++++++++++++------------------- + 1 file changed, 21 insertions(+), 26 deletions(-) + +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c +index 5f61d9977a15..b254c2abf652 100644 +--- a/drivers/usb/typec/tcpm/tcpm.c ++++ b/drivers/usb/typec/tcpm/tcpm.c +@@ -390,12 +390,6 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port) + return SRC_UNATTACHED; + } + +-static inline +-struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap) +-{ +- return container_of(cap, struct tcpm_port, typec_caps); +-} +- + static bool tcpm_port_is_disconnected(struct tcpm_port *port) + { + return (!port->attached && port->cc1 == TYPEC_CC_OPEN && +@@ -3970,10 +3964,9 @@ void tcpm_pd_hard_reset(struct tcpm_port *port) + } + EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset); + +-static int tcpm_dr_set(const struct typec_capability *cap, +- enum typec_data_role data) ++static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data) + { +- struct tcpm_port *port = typec_cap_to_tcpm(cap); ++ struct tcpm_port *port = typec_get_drvdata(p); + int ret; + + mutex_lock(&port->swap_lock); +@@ -4038,10 +4031,9 @@ static int tcpm_dr_set(const struct typec_capability *cap, + return ret; + } + +-static int tcpm_pr_set(const struct typec_capability *cap, +- enum typec_role role) ++static int tcpm_pr_set(struct typec_port *p, enum typec_role role) + { +- struct tcpm_port *port = typec_cap_to_tcpm(cap); ++ struct tcpm_port *port = typec_get_drvdata(p); + int ret; + + mutex_lock(&port->swap_lock); +@@ -4082,10 +4074,9 @@ static int tcpm_pr_set(const struct typec_capability *cap, + return ret; + } + +-static int tcpm_vconn_set(const struct typec_capability *cap, +- enum typec_role role) ++static int tcpm_vconn_set(struct typec_port *p, bool source) + { +- struct tcpm_port *port = typec_cap_to_tcpm(cap); ++ struct tcpm_port *port = typec_get_drvdata(p); + int ret; + + 
mutex_lock(&port->swap_lock); +@@ -4096,7 +4087,7 @@ static int tcpm_vconn_set(const struct typec_capability *cap, + goto port_unlock; + } + +- if (role == port->vconn_role) { ++ if (source == port->vconn_role) { + ret = 0; + goto port_unlock; + } +@@ -4122,9 +4113,9 @@ static int tcpm_vconn_set(const struct typec_capability *cap, + return ret; + } + +-static int tcpm_try_role(const struct typec_capability *cap, int role) ++static int tcpm_try_role(struct typec_port *p, int role) + { +- struct tcpm_port *port = typec_cap_to_tcpm(cap); ++ struct tcpm_port *port = typec_get_drvdata(p); + struct tcpc_dev *tcpc = port->tcpc; + int ret = 0; + +@@ -4331,10 +4322,9 @@ static void tcpm_init(struct tcpm_port *port) + tcpm_set_state(port, PORT_RESET, 0); + } + +-static int tcpm_port_type_set(const struct typec_capability *cap, +- enum typec_port_type type) ++static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type) + { +- struct tcpm_port *port = typec_cap_to_tcpm(cap); ++ struct tcpm_port *port = typec_get_drvdata(p); + + mutex_lock(&port->lock); + if (type == port->port_type) +@@ -4359,6 +4349,14 @@ static int tcpm_port_type_set(const struct typec_capability *cap, + return 0; + } + ++static const struct typec_operations tcpm_ops = { ++ .try_role = tcpm_try_role, ++ .dr_set = tcpm_dr_set, ++ .pr_set = tcpm_pr_set, ++ .vconn_set = tcpm_vconn_set, ++ .port_type_set = tcpm_port_type_set ++}; ++ + void tcpm_tcpc_reset(struct tcpm_port *port) + { + mutex_lock(&port->lock); +@@ -4772,11 +4770,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) + port->typec_caps.fwnode = tcpc->fwnode; + port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */ + port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */ +- port->typec_caps.dr_set = tcpm_dr_set; +- port->typec_caps.pr_set = tcpm_pr_set; +- port->typec_caps.vconn_set = tcpm_vconn_set; +- port->typec_caps.try_role = tcpm_try_role; +- 
port->typec_caps.port_type_set = tcpm_port_type_set; ++ port->typec_caps.driver_data = port; ++ port->typec_caps.ops = &tcpm_ops; + + port->partner_desc.identity = &port->partner_ident; + port->port_type = port->typec_caps.type; +-- +2.17.1 + diff --git a/patches/0008-x86-intel_pmc_core-Make-debugfs-entry-for-pch_ip_.core-ehl b/patches/0008-x86-intel_pmc_core-Make-debugfs-entry-for-pch_ip_.core-ehl new file mode 100644 index 0000000000..d47acbdad4 --- /dev/null +++ b/patches/0008-x86-intel_pmc_core-Make-debugfs-entry-for-pch_ip_.core-ehl @@ -0,0 +1,41 @@ +From cfc427c418c17a20639af9081356de15a79e15dc Mon Sep 17 00:00:00 2001 +From: Gayatri Kammela +Date: Wed, 25 Sep 2019 19:01:03 -0700 +Subject: [PATCH 08/12] x86/intel_pmc_core: Make debugfs entry for + pch_ip_power_gating_status conditional + +Check if the platform supports and only then add a debugfs entry for PCH +IP power gating status. + +Cc: Peter Zijlstra +Cc: Srinivas Pandruvada +Cc: Andy Shevchenko +Cc: Kan Liang +Cc: David E. Box +Cc: Rajneesh Bhardwaj +Cc: Tony Luck +Reviewed-by: Tony Luck +Signed-off-by: Gayatri Kammela +--- + drivers/platform/x86/intel_pmc_core.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c +index c6a0c29d3476..ea43a5989c96 100644 +--- a/drivers/platform/x86/intel_pmc_core.c ++++ b/drivers/platform/x86/intel_pmc_core.c +@@ -788,8 +788,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) + debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev, + &pmc_core_dev_state); + +- debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev, +- &pmc_core_ppfear_fops); ++ if (pmcdev->map->pfear_sts) ++ debugfs_create_file("pch_ip_power_gating_status", 0444, dir, ++ pmcdev, &pmc_core_ppfear_fops); + + debugfs_create_file("ltr_ignore", 0644, dir, pmcdev, + &pmc_core_ltr_ignore_ops); +-- +2.17.1 + diff --git 
a/patches/0009-ASoC-Intel-Skylake-Unify-firmware-loading-mechanism.audio b/patches/0009-ASoC-Intel-Skylake-Unify-firmware-loading-mechanism.audio new file mode 100644 index 0000000000..f532dfbbe4 --- /dev/null +++ b/patches/0009-ASoC-Intel-Skylake-Unify-firmware-loading-mechanism.audio @@ -0,0 +1,337 @@ +From 1436144c2849ba05c1f795bb13e24f18a893283b Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Fri, 22 Mar 2019 19:34:24 +0100 +Subject: [PATCH 009/193] ASoC: Intel: Skylake: Unify firmware loading + mechanism + +There are certain operations we want to do before and after firmware +loading e.g.: disabling/ enabling power and clock gating. To make code +coherent, provide skl_init_fw as a unified way for loading dsp firmware. + +In consequence, this change provides CNL load library support during fw +initialization which was previously missing. + +skl_dsp_fw_ops already takes care of fw and library load customization. +New post-load additions in form of fw and hw config assignments make +this change even more welcome. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 27 ----------------- + sound/soc/intel/skylake/cnl-sst-dsp.h | 1 - + sound/soc/intel/skylake/cnl-sst.c | 19 ------------ + sound/soc/intel/skylake/skl-messages.c | 8 ----- + sound/soc/intel/skylake/skl-pcm.c | 22 ++------------ + sound/soc/intel/skylake/skl-sst-dsp.c | 1 + + sound/soc/intel/skylake/skl-sst-dsp.h | 2 -- + sound/soc/intel/skylake/skl-sst.c | 41 ++++++++++++++++++++++---- + sound/soc/intel/skylake/skl.h | 2 +- + 9 files changed, 40 insertions(+), 83 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index dd0eaee0a77a..666a11a2e40b 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -591,33 +591,6 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + } + EXPORT_SYMBOL_GPL(bxt_sst_dsp_init); + +-int bxt_sst_init_fw(struct device *dev, struct skl_dev *skl) +-{ +- int ret; +- struct sst_dsp *sst = skl->dsp; +- +- ret = sst->fw_ops.load_fw(sst); +- if (ret < 0) { +- dev_err(dev, "Load base fw failed: %x\n", ret); +- return ret; +- } +- +- skl_dsp_init_core_state(sst); +- +- if (skl->lib_count > 1) { +- ret = sst->fw_ops.load_library(sst, skl->lib_info, +- skl->lib_count); +- if (ret < 0) { +- dev_err(dev, "Load Library failed : %x\n", ret); +- return ret; +- } +- } +- skl->is_first_boot = false; +- +- return 0; +-} +-EXPORT_SYMBOL_GPL(bxt_sst_init_fw); +- + void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) + { + +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.h b/sound/soc/intel/skylake/cnl-sst-dsp.h +index 7bd4d2a8fdfa..50f4a53a607c 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.h ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.h +@@ -97,7 +97,6 @@ void cnl_ipc_free(struct sst_generic_ipc *ipc); + int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + const char *fw_name, struct skl_dsp_loader_ops dsp_ops, + struct skl_dev 
**dsp); +-int cnl_sst_init_fw(struct device *dev, struct skl_dev *skl); + void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + + #endif /*__CNL_SST_DSP_H__*/ +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 1b4379fb3ffb..abb0d0f8ae8d 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -452,25 +452,6 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + } + EXPORT_SYMBOL_GPL(cnl_sst_dsp_init); + +-int cnl_sst_init_fw(struct device *dev, struct skl_dev *skl) +-{ +- int ret; +- struct sst_dsp *sst = skl->dsp; +- +- ret = skl->dsp->fw_ops.load_fw(sst); +- if (ret < 0) { +- dev_err(dev, "load base fw failed: %d", ret); +- return ret; +- } +- +- skl_dsp_init_core_state(sst); +- +- skl->is_first_boot = false; +- +- return 0; +-} +-EXPORT_SYMBOL_GPL(cnl_sst_init_fw); +- + void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) + { + if (skl->dsp->fw) +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index 5ccf9572c988..e91fb3d4cb5e 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -173,7 +173,6 @@ static const struct skl_dsp_ops dsp_ops[] = { + .num_cores = 2, + .loader_ops = skl_get_loader_ops, + .init = skl_sst_dsp_init, +- .init_fw = skl_sst_init_fw, + .cleanup = skl_sst_dsp_cleanup + }, + { +@@ -181,7 +180,6 @@ static const struct skl_dsp_ops dsp_ops[] = { + .num_cores = 2, + .loader_ops = skl_get_loader_ops, + .init = skl_sst_dsp_init, +- .init_fw = skl_sst_init_fw, + .cleanup = skl_sst_dsp_cleanup + }, + { +@@ -189,7 +187,6 @@ static const struct skl_dsp_ops dsp_ops[] = { + .num_cores = 2, + .loader_ops = bxt_get_loader_ops, + .init = bxt_sst_dsp_init, +- .init_fw = bxt_sst_init_fw, + .cleanup = bxt_sst_dsp_cleanup + }, + { +@@ -197,7 +194,6 @@ static const struct skl_dsp_ops dsp_ops[] = { + .num_cores = 2, + .loader_ops = 
bxt_get_loader_ops, + .init = bxt_sst_dsp_init, +- .init_fw = bxt_sst_init_fw, + .cleanup = bxt_sst_dsp_cleanup + }, + { +@@ -205,7 +201,6 @@ static const struct skl_dsp_ops dsp_ops[] = { + .num_cores = 4, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, +- .init_fw = cnl_sst_init_fw, + .cleanup = cnl_sst_dsp_cleanup + }, + { +@@ -213,7 +208,6 @@ static const struct skl_dsp_ops dsp_ops[] = { + .num_cores = 4, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, +- .init_fw = cnl_sst_init_fw, + .cleanup = cnl_sst_dsp_cleanup + }, + { +@@ -221,7 +215,6 @@ static const struct skl_dsp_ops dsp_ops[] = { + .num_cores = 4, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, +- .init_fw = cnl_sst_init_fw, + .cleanup = cnl_sst_dsp_cleanup + }, + { +@@ -229,7 +222,6 @@ static const struct skl_dsp_ops dsp_ops[] = { + .num_cores = 4, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, +- .init_fw = cnl_sst_init_fw, + .cleanup = cnl_sst_dsp_cleanup + }, + }; +diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c +index eea38868b700..20a7c497a6aa 100644 +--- a/sound/soc/intel/skylake/skl-pcm.c ++++ b/sound/soc/intel/skylake/skl-pcm.c +@@ -1408,7 +1408,6 @@ static int skl_platform_soc_probe(struct snd_soc_component *component) + { + struct hdac_bus *bus = dev_get_drvdata(component->dev); + struct skl_dev *skl = bus_to_skl(bus); +- const struct skl_dsp_ops *ops; + int ret; + + pm_runtime_get_sync(component->dev); +@@ -1424,25 +1423,10 @@ static int skl_platform_soc_probe(struct snd_soc_component *component) + return ret; + } + +- /* load the firmwares, since all is set */ +- ops = skl_get_dsp_ops(skl->pci->device); +- if (!ops) +- return -EIO; +- +- /* +- * Disable dynamic clock and power gating during firmware +- * and library download +- */ +- skl->enable_miscbdcge(component->dev, false); +- skl->clock_power_gating(component->dev, false); +- +- ret = ops->init_fw(component->dev, skl); +- 
skl->enable_miscbdcge(component->dev, true); +- skl->clock_power_gating(component->dev, true); +- if (ret < 0) { +- dev_err(component->dev, "Failed to boot first fw: %d\n", ret); ++ ret = skl_sst_init_fw(skl); ++ if (ret < 0) + return ret; +- } ++ + skl_populate_modules(skl); + skl->update_d0i3c = skl_update_d0i3c; + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c +index 225706d148d8..0eecf26986f9 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.c ++++ b/sound/soc/intel/skylake/skl-sst-dsp.c +@@ -44,6 +44,7 @@ void skl_dsp_init_core_state(struct sst_dsp *ctx) + skl->cores.usage_count[i] = 0; + } + } ++EXPORT_SYMBOL_GPL(skl_dsp_init_core_state); + + /* Get the mask for all enabled cores */ + unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx) +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index f8878d0bb765..f42358f6977f 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -239,8 +239,6 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + const char *fw_name, struct skl_dsp_loader_ops dsp_ops, + struct skl_dev **dsp); +-int skl_sst_init_fw(struct device *dev, struct skl_dev *skl); +-int bxt_sst_init_fw(struct device *dev, struct skl_dev *skl); + void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index c3796ac1ae78..dc92208b67f5 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -554,30 +554,59 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + } + EXPORT_SYMBOL_GPL(skl_sst_dsp_init); + +-int skl_sst_init_fw(struct device *dev, struct skl_dev *skl) ++int skl_sst_init_fw(struct 
skl_dev *skl) + { +- int ret; + struct sst_dsp *sst = skl->dsp; ++ struct device *dev = skl->dev; ++ int (*lp_check)(struct sst_dsp *dsp, bool state); ++ int ret; ++ ++ lp_check = skl->ipc.ops.check_dsp_lp_on; ++ skl->enable_miscbdcge(dev, false); ++ skl->clock_power_gating(dev, false); + + ret = sst->fw_ops.load_fw(sst); + if (ret < 0) { + dev_err(dev, "Load base fw failed : %d\n", ret); +- return ret; ++ goto exit; ++ } ++ ++ if (!skl->is_first_boot) ++ goto library_load; ++ /* Disable power check during cfg setup */ ++ skl->ipc.ops.check_dsp_lp_on = NULL; ++ ++ ret = skl_ipc_fw_cfg_get(&skl->ipc, &skl->fw_cfg); ++ if (ret < 0) { ++ dev_err(dev, "Failed to get fw cfg: %d\n", ret); ++ goto exit; ++ } ++ ++ ret = skl_ipc_hw_cfg_get(&skl->ipc, &skl->hw_cfg); ++ if (ret < 0) { ++ dev_err(dev, "Failed to get hw cfg: %d\n", ret); ++ goto exit; + } + + skl_dsp_init_core_state(sst); + ++library_load: + if (skl->lib_count > 1) { + ret = sst->fw_ops.load_library(sst, skl->lib_info, + skl->lib_count); + if (ret < 0) { +- dev_err(dev, "Load Library failed : %x\n", ret); +- return ret; ++ dev_err(dev, "Load library failed : %x\n", ret); ++ goto exit; + } + } ++ + skl->is_first_boot = false; ++exit: ++ skl->ipc.ops.check_dsp_lp_on = lp_check; ++ skl->enable_miscbdcge(dev, true); ++ skl->clock_power_gating(dev, true); + +- return 0; ++ return ret; + } + EXPORT_SYMBOL_GPL(skl_sst_init_fw); + +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index e20712cfc549..e5bc6d6fda21 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -160,7 +160,6 @@ struct skl_dsp_ops { + int irq, const char *fw_name, + struct skl_dsp_loader_ops loader_ops, + struct skl_dev **skl_sst); +- int (*init_fw)(struct device *dev, struct skl_dev *skl); + void (*cleanup)(struct device *dev, struct skl_dev *skl); + }; + +@@ -174,6 +173,7 @@ struct nhlt_specific_cfg *skl_get_ep_blob(struct skl_dev *skl, u32 instance, + int 
skl_nhlt_update_topology_bin(struct skl_dev *skl); + int skl_init_dsp(struct skl_dev *skl); + int skl_free_dsp(struct skl_dev *skl); ++int skl_sst_init_fw(struct skl_dev *skl); + int skl_suspend_late_dsp(struct skl_dev *skl); + int skl_suspend_dsp(struct skl_dev *skl); + int skl_resume_dsp(struct skl_dev *skl); +-- +2.17.1 + diff --git a/patches/0009-Get-vcpu-pcpu-mapping.sep-socwatch b/patches/0009-Get-vcpu-pcpu-mapping.sep-socwatch new file mode 100644 index 0000000000..302345d42f --- /dev/null +++ b/patches/0009-Get-vcpu-pcpu-mapping.sep-socwatch @@ -0,0 +1,319 @@ +From 351d2ac0be16c5f418d2444320d98a326790e14f Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Fri, 25 Jan 2019 15:24:05 -0800 +Subject: [PATCH 09/27] Get vcpu pcpu mapping + +In virtualization platforms, SEP collects samples from multiple guest OSes, +But to associate a sample to the guest we need VCPU-PCPU-OSID mapping info. + +So, added an IOCTL that allows to get VCPU PCPU mapping information on all the guest OSes + +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/sepdk/inc/lwpmudrv.h | 6 +- + .../x86/sepdk/include/lwpmudrv_ioctl.h | 3 + + .../x86/sepdk/include/lwpmudrv_struct.h | 36 +++- + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 162 +++++++++++++++++- + 4 files changed, 195 insertions(+), 12 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/inc/lwpmudrv.h b/drivers/platform/x86/sepdk/inc/lwpmudrv.h +index 994121d28ef2..ae8a3aee26a1 100644 +--- a/drivers/platform/x86/sepdk/inc/lwpmudrv.h ++++ b/drivers/platform/x86/sepdk/inc/lwpmudrv.h +@@ -454,8 +454,8 @@ int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf); + + extern shared_buf_t **samp_buf_per_cpu; + +-#define MAX_NR_VCPUS 8 +-#define MAX_NR_VMS 4 ++#define MAX_NR_VCPUS 4 ++#define MAX_NR_VMS 4 + #define MAX_MSR_LIST_NUM 15 + #define MAX_GROUP_NUM 1 + +@@ -504,7 +504,7 @@ struct profiling_vm_info { + + struct profiling_vm_info_list { + uint16_t num_vms; +- struct profiling_vm_info 
vm_list[MAX_NR_VMS]; ++ struct profiling_vm_info vm_list[MAX_NR_VMS+1]; + }; + + struct profiling_version_info { +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h +index 3b60274826c6..9713b19c0e5c 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h +@@ -112,6 +112,9 @@ extern "C" { + #define DRV_OPERATION_GET_AGENT_MODE 93 + #define DRV_OPERATION_INIT_DRIVER 94 + #define DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER 95 ++#define DRV_OPERATION_GET_NUM_VM 96 ++#define DRV_OPERATION_GET_VCPU_MAP 97 ++ + // Only used by MAC OS + #define DRV_OPERATION_GET_ASLR_OFFSET 997 // this may not need + #define DRV_OPERATION_SET_OSX_VERSION 998 +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +index 3af04d4ed829..629750152fdb 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +@@ -1646,14 +1646,14 @@ typedef CPU_MAP_TRACE_NODE * CPU_MAP_TRACE; + struct CPU_MAP_TRACE_NODE_S { + U64 tsc; + U32 os_id; +- U16 vcpu_id; +- U16 pcpu_id; ++ U32 vcpu_id; ++ U32 pcpu_id; + U8 is_static : 1; + U8 initial : 1; + U8 reserved1 : 6; + U8 reserved2; + U16 reserved3; +- U32 reserved4; ++ U64 tsc_offset; + }; + + #define CPU_MAP_TRACE_tsc(x) ((x)->tsc) +@@ -1663,6 +1663,28 @@ struct CPU_MAP_TRACE_NODE_S { + #define CPU_MAP_TRACE_is_static(x) ((x)->is_static) + #define CPU_MAP_TRACE_initial(x) ((x)->initial) + ++#define MAX_NUM_VCPU 64 ++#define MAX_NUM_VM 16 ++ ++typedef struct CPU_MAP_TRACE_LIST_NODE_S CPU_MAP_TRACE_LIST_NODE; ++typedef CPU_MAP_TRACE_LIST_NODE * CPU_MAP_TRACE_LIST; ++ ++struct CPU_MAP_TRACE_LIST_NODE_S { ++ U32 osid; ++ U8 num_entries; ++ U8 reserved1; ++ U16 reserved2; ++ CPU_MAP_TRACE_NODE entries[MAX_NUM_VCPU]; ++}; ++ ++typedef struct VM_OSID_MAP_NODE_S VM_OSID_MAP_NODE; ++typedef 
VM_OSID_MAP_NODE * VM_OSID_MAP; ++struct VM_OSID_MAP_NODE_S { ++ U32 num_vms; ++ U32 reserved1; ++ U32 osid[MAX_NUM_VM]; ++}; ++ + typedef struct VM_SWITCH_TRACE_NODE_S VM_SWITCH_TRACE_NODE; + typedef VM_SWITCH_TRACE_NODE * VM_SWITCH_TRACE; + +@@ -1675,10 +1697,10 @@ struct VM_SWITCH_TRACE_NODE_S { + U64 reserved2; + }; + +-#define VM_SWITCH_TRACE_tsc(x) ((x)->tsc) +-#define VM_SWITCH_TRACE_from_os_id(x) ((x)->from_os_id) +-#define VM_SWITCH_TRACE_to_os_id(x) ((x)->to_os_id) +-#define VM_SWITCH_TRACE_reason(x) ((x)->reason) ++#define VM_SWITCH_TRACE_tsc(x) ((x)->tsc) ++#define VM_SWITCH_TRACE_from_os_id(x) ((x)->from_os_id) ++#define VM_SWITCH_TRACE_to_os_id(x) ((x)->to_os_id) ++#define VM_SWITCH_TRACE_reason(x) ((x)->reason) + + typedef struct EMON_BUFFER_DRIVER_HELPER_NODE_S EMON_BUFFER_DRIVER_HELPER_NODE; + typedef EMON_BUFFER_DRIVER_HELPER_NODE * EMON_BUFFER_DRIVER_HELPER; +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +index f88d6fee9b83..bfc52cb4d494 100755 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -1675,7 +1675,7 @@ static OS_STATUS lwpmudrv_Pause(void) + #if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Pause_Op, NULL); + #endif +- /* ++ /* + * This means that the PAUSE state has been reached. + */ + CHANGE_DRIVER_STATE(STATE_BIT_PAUSING, DRV_STATE_PAUSED); +@@ -6246,6 +6246,154 @@ static OS_STATUS lwpmudrv_Get_Agent_Mode(IOCTL_ARGS args) + return status; + } + ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Get_Num_Of_Vms(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function to get number of VMS available ++ * @brief Returns status. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_Num_Of_Vms(IOCTL_ARGS args) ++ ++{ ++ VM_OSID_MAP_NODE vm_map; ++#if defined(DRV_SEP_ACRN_ON) ++ U32 i; ++#endif ++ if (args->buf_drv_to_usr == NULL) { ++ SEP_PRINT_ERROR("Invalid arguments (buf_drv_to_usr is NULL)!"); ++ return OS_INVALID; ++ } ++ ++ if (args->len_drv_to_usr != sizeof(VM_OSID_MAP_NODE)) { ++ SEP_PRINT_ERROR( ++ "Invalid arguments (unexpected len_drv_to_usr value)!"); ++ return OS_INVALID; ++ } ++ ++ memset(&vm_map, 0, sizeof(VM_OSID_MAP_NODE)); ++ ++#if defined(DRV_SEP_ACRN_ON) ++ if (vm_info_list == NULL) { ++ vm_info_list = ++ CONTROL_Allocate_Memory(sizeof(struct profiling_vm_info_list)); ++ } ++ memset(vm_info_list, 0, sizeof(struct profiling_vm_info_list)); ++ ++ BUG_ON(!virt_addr_valid(vm_info_list)); ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, ++ virt_to_phys(vm_info_list)); ++ ++ vm_map.num_vms = 0; ++ for (i = 0; i < vm_info_list->num_vms; i++) { ++ if (vm_info_list->vm_list[i].num_vcpus != 0) { ++ vm_map.osid[i] = (U32)vm_info_list->vm_list[i].vm_id; ++ vm_map.num_vms++; ++ } ++ } ++ ++#endif ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, ++ &vm_map, args->len_drv_to_usr)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ return OS_FAULT; ++ } ++ ++ return OS_SUCCESS; ++ ++} ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Get_Cpu_Map_Info(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function to get pcpu-vcpu mapping info ++ * @brief Returns status. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Get_Cpu_Map_Info(IOCTL_ARGS args) ++{ ++ CPU_MAP_TRACE_LIST cpumap; ++ DRV_STATUS status = OS_SUCCESS; ++#if defined(DRV_SEP_ACRN_ON) ++ U32 i, j; ++#endif ++ ++ if ((args->buf_drv_to_usr == NULL) || ++ (args->len_drv_to_usr != sizeof(CPU_MAP_TRACE_LIST_NODE))) { ++ SEP_PRINT_ERROR("Invalid drv_to_usr arguments!"); ++ return OS_INVALID; ++ } ++ ++ if ((args->buf_usr_to_drv == NULL) || ++ (args->len_usr_to_drv != sizeof(CPU_MAP_TRACE_LIST_NODE))) { ++ SEP_PRINT_ERROR("Invalid usr_to_drv arguments!"); ++ return OS_INVALID; ++ } ++ ++ cpumap = (CPU_MAP_TRACE_LIST) ++ CONTROL_Allocate_Memory(sizeof(CPU_MAP_TRACE_LIST_NODE)); ++ if (cpumap == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure"); ++ return OS_NO_MEM; ++ } ++ ++ if (copy_from_user(cpumap, (void __user *)args->buf_usr_to_drv, ++ sizeof(CPU_MAP_TRACE_LIST_NODE))) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); ++ status = OS_FAULT; ++ goto cleanup; ++ } ++ ++#if defined(DRV_SEP_ACRN_ON) ++ if (vm_info_list == NULL) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("vm_info_list is NULL!"); ++ status = OS_INVALID; ++ goto cleanup; ++ } ++ ++ SEP_DRV_LOG_TRACE("CPU mapping for osid %d ", cpumap->osid); ++ for (i = 0; i < vm_info_list->num_vms; i++) { ++ if (vm_info_list->vm_list[i].vm_id == cpumap->osid) { ++ for (j = 0; ++ j < vm_info_list->vm_list[i].num_vcpus; j++) { ++ UTILITY_Read_TSC(&(cpumap->entries[j].tsc)); ++ cpumap->entries[j].is_static = 1; ++ cpumap->entries[j].vcpu_id = ++ vm_info_list->vm_list[i].cpu_map[j].vcpu_id; ++ cpumap->entries[j].pcpu_id = ++ vm_info_list->vm_list[i].cpu_map[j].pcpu_id; ++ cpumap->entries[j].os_id = ++ vm_info_list->vm_list[i].vm_id; ++ cpumap->num_entries++; ++ } ++ } ++ } ++#endif ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, ++ cpumap, args->len_drv_to_usr)) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); ++ status = OS_FAULT; ++ goto cleanup; ++ } ++ ++cleanup: ++ cpumap = 
CONTROL_Free_Memory(cpumap); ++ return status; ++} ++ ++ + /******************************************************************************* + * External Driver functions - Open + * This function is common to all drivers +@@ -6367,7 +6515,7 @@ static IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_INODE struct file *filp, + UTILITY_Driver_Set_Active_Ioctl(cmd); + + switch (cmd) { +- /* ++ /* + * Common IOCTL commands + */ + +@@ -6538,6 +6686,16 @@ static IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_INODE struct file *filp, + status = lwpmudrv_Get_Agent_Mode(&local_args); + break; + ++ case DRV_OPERATION_GET_VCPU_MAP: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_CPU_MAP\n"); ++ status = lwpmudrv_Get_Cpu_Map_Info(&local_args); ++ break; ++ ++ case DRV_OPERATION_GET_NUM_VM: ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NUM_VM\n"); ++ status = lwpmudrv_Get_Num_Of_Vms(&local_args); ++ break; ++ + /* + * EMON-specific IOCTL commands + */ +-- +2.17.1 + diff --git a/patches/0009-MAINTAINERS-Add-Steven-and-Alyssa-as-panfrost-reviewer.drm b/patches/0009-MAINTAINERS-Add-Steven-and-Alyssa-as-panfrost-reviewer.drm new file mode 100644 index 0000000000..85d444f776 --- /dev/null +++ b/patches/0009-MAINTAINERS-Add-Steven-and-Alyssa-as-panfrost-reviewer.drm @@ -0,0 +1,38 @@ +From da5dfc9f82fae0e08bb5c8d032d05282869d56b4 Mon Sep 17 00:00:00 2001 +From: Rob Herring +Date: Thu, 22 Aug 2019 20:33:57 -0500 +Subject: [PATCH 009/690] MAINTAINERS: Add Steven and Alyssa as panfrost + reviewers + +Add Steven Price and Alyssa Rosenzweig as reviewers as they have been the +primary reviewers already. 
+ +Cc: Steven Price +Cc: Alyssa Rosenzweig +Cc: Tomeu Vizoso +Signed-off-by: Rob Herring +Acked-by: Neil Armstrong +Acked-by: Steven Price +Acked-by: Tomeu Vizoso +Reviewed-by: Alyssa Rosenzweig +Link: https://patchwork.freedesktop.org/patch/msgid/20190823013357.932-1-robh@kernel.org +--- + MAINTAINERS | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/MAINTAINERS b/MAINTAINERS +index e51a68bf8ca8..b6544351cb25 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -1272,6 +1272,8 @@ F: Documentation/gpu/afbc.rst + ARM MALI PANFROST DRM DRIVER + M: Rob Herring + M: Tomeu Vizoso ++R: Steven Price ++R: Alyssa Rosenzweig + L: dri-devel@lists.freedesktop.org + S: Supported + T: git git://anongit.freedesktop.org/drm/drm-misc +-- +2.17.1 + diff --git a/patches/0009-VHM-add-passthrough-device-support.acrn b/patches/0009-VHM-add-passthrough-device-support.acrn new file mode 100644 index 0000000000..f44ddff15f --- /dev/null +++ b/patches/0009-VHM-add-passthrough-device-support.acrn @@ -0,0 +1,402 @@ +From 96bb522d0189858fc2914d3b7ae72eaa514f5968 Mon Sep 17 00:00:00 2001 +From: Binbin Wu +Date: Fri, 31 Aug 2018 10:58:55 +0800 +Subject: [PATCH 009/150] VHM: add passthrough device support + +add following ioctl in vhm_dev to support device passthrough +- assign, deassign pass-through device + ACRN_ASSIGN_PTDEV + ACRN_DEASSIGN_PTDEV +- set, reset pass-through device intr info + ACRN_SET_PTDEV_INTR_INFO + ACRN_RESET_PTDEV_INTR_INFO +reuse exist ioctl to support device passthrough +- BAR mapping + ACRN_IOC_SET_MEMSEG +- MSI support + ACRN_VM_PCI_MSIX_REMAP + +Change-Id: I94bbee48e8de1faf70804061c65c2e2855e6bf0f +Tracked-On: 218445 +Signed-off-by: Gao, Shiqing +Signed-off-by: Binbin Wu +Signed-off-by: Edwin Zhai +Signed-off-by: Jason Chen CJ +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/char/vhm/vhm_dev.c | 25 +++++ + drivers/vhm/vhm_hypercall.c | 175 +++++++++++++++++++++++++++++ + include/linux/vhm/acrn_common.h | 43 
+++++++ + include/linux/vhm/acrn_hv_defs.h | 8 ++ + include/linux/vhm/vhm_hypercall.h | 8 ++ + include/linux/vhm/vhm_ioctl_defs.h | 9 ++ + 6 files changed, 268 insertions(+) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 4bee160998bc..e551392710ef 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -267,6 +267,31 @@ static long vhm_dev_ioctl(struct file *filep, + break; + } + ++ case IC_ASSIGN_PTDEV: { ++ ret = vhm_assign_ptdev(vm, ioctl_param); ++ break; ++ } ++ ++ case IC_DEASSIGN_PTDEV: { ++ ret = vhm_deassign_ptdev(vm, ioctl_param); ++ break; ++ } ++ ++ case IC_SET_PTDEV_INTR_INFO: { ++ ret = vhm_set_ptdev_intr_info(vm, ioctl_param); ++ break; ++ } ++ ++ case IC_RESET_PTDEV_INTR_INFO: { ++ ret = vhm_reset_ptdev_intr_info(vm, ioctl_param); ++ break; ++ } ++ ++ case IC_VM_PCI_MSIX_REMAP: { ++ ret = vhm_remap_pci_msix(vm, ioctl_param); ++ break; ++ } ++ + default: + pr_warn("Unknown IOCTL 0x%x\n", ioctl_num); + ret = 0; +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index 384b86e60c9c..0f3f6c1c5f4c 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -50,14 +50,30 @@ + */ + #include + #include ++#include + #include + #include + ++/* max num of pass-through devices using msix */ ++#define MAX_ENTRY 3 ++ ++struct table_iomems { ++ /* device's virtual BDF */ ++ unsigned short virt_bdf; ++ /* virtual base address of MSI-X table in memory space after ioremap */ ++ unsigned long mmap_addr; ++} tables[MAX_ENTRY]; ++ + inline long hcall_inject_msi(unsigned long vmid, unsigned long msi) + { + return acrn_hypercall2(HC_INJECT_MSI, vmid, msi); + } + ++inline long hcall_remap_pci_msix(unsigned long vmid, unsigned long msix) ++{ ++ return acrn_hypercall2(HC_VM_PCI_MSIX_REMAP, vmid, msix); ++} ++ + inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) + { + return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); +@@ -211,3 +227,162 @@ inline 
long vhm_pulse_irqline(struct vhm_vm *vm, unsigned long ioctl_param) + + return ret; + } ++ ++inline long vhm_assign_ptdev(struct vhm_vm *vm, unsigned long ioctl_param) ++{ ++ long ret = 0; ++ uint16_t bdf; ++ ++ if (copy_from_user(&bdf, ++ (void *)ioctl_param, sizeof(uint16_t))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_ASSIGN_PTDEV, vm->vmid, ++ virt_to_phys(&bdf)); ++ if (ret < 0) { ++ pr_err("vhm: failed to assign ptdev!\n"); ++ return -EFAULT; ++ } ++ ++ return ret; ++} ++ ++inline long vhm_deassign_ptdev(struct vhm_vm *vm, unsigned long ioctl_param) ++{ ++ long ret = 0; ++ uint16_t bdf; ++ ++ if (copy_from_user(&bdf, ++ (void *)ioctl_param, sizeof(uint16_t))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_DEASSIGN_PTDEV, vm->vmid, ++ virt_to_phys(&bdf)); ++ if (ret < 0) { ++ pr_err("vhm: failed to deassign ptdev!\n"); ++ return -EFAULT; ++ } ++ ++ return ret; ++} ++ ++inline long vhm_set_ptdev_intr_info(struct vhm_vm *vm, ++ unsigned long ioctl_param) ++{ ++ long ret = 0; ++ struct acrn_ptdev_irq pt_irq; ++ int i; ++ ++ if (copy_from_user(&pt_irq, ++ (void *)ioctl_param, sizeof(pt_irq))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_SET_PTDEV_INTR_INFO, vm->vmid, ++ virt_to_phys(&pt_irq)); ++ if (ret < 0) { ++ pr_err("vhm: failed to set intr info for ptdev!\n"); ++ return -EFAULT; ++ } ++ ++ if (pt_irq.msix.table_paddr) { ++ for (i = 0; i < MAX_ENTRY; i++) { ++ if (tables[i].virt_bdf) ++ continue; ++ ++ tables[i].virt_bdf = pt_irq.virt_bdf; ++ tables[i].mmap_addr = (unsigned long) ++ ioremap_nocache(pt_irq.msix.table_paddr, ++ pt_irq.msix.table_size); ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++inline long vhm_reset_ptdev_intr_info(struct vhm_vm *vm, ++ unsigned long ioctl_param) ++{ ++ long ret = 0; ++ struct acrn_ptdev_irq pt_irq; ++ int i; ++ ++ if (copy_from_user(&pt_irq, ++ (void *)ioctl_param, sizeof(pt_irq))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_RESET_PTDEV_INTR_INFO, vm->vmid, ++ virt_to_phys(&pt_irq)); ++ if (ret < 0) { 
++ pr_err("vhm: failed to reset intr info for ptdev!\n"); ++ return -EFAULT; ++ } ++ ++ if (pt_irq.msix.table_paddr) { ++ for (i = 0; i < MAX_ENTRY; i++) { ++ if (tables[i].virt_bdf) ++ continue; ++ ++ tables[i].virt_bdf = pt_irq.virt_bdf; ++ tables[i].mmap_addr = (unsigned long) ++ ioremap_nocache(pt_irq.msix.table_paddr, ++ pt_irq.msix.table_size); ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++inline long vhm_remap_pci_msix(struct vhm_vm *vm, unsigned long ioctl_param) ++{ ++ long ret = 0; ++ struct acrn_vm_pci_msix_remap msix_remap; ++ ++ if (copy_from_user(&msix_remap, ++ (void *)ioctl_param, sizeof(msix_remap))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_VM_PCI_MSIX_REMAP, vm->vmid, ++ virt_to_phys(&msix_remap)); ++ ++ if (copy_to_user((void *)ioctl_param, ++ &msix_remap, sizeof(msix_remap))) ++ return -EFAULT; ++ ++ if (msix_remap.msix) { ++ void __iomem *msix_entry; ++ int i; ++ ++ for (i = 0; i < MAX_ENTRY; i++) { ++ if (tables[i].virt_bdf == msix_remap.virt_bdf) ++ break; ++ } ++ ++ if (!tables[i].mmap_addr) ++ return -EFAULT; ++ ++ msix_entry = (void *)(tables[i].mmap_addr + ++ msix_remap.msix_entry_index * ++ PCI_MSIX_ENTRY_SIZE); ++ ++ /* mask the entry when setup */ ++ writel(PCI_MSIX_ENTRY_CTRL_MASKBIT, ++ msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL); ++ ++ /* setup the msi entry */ ++ writel((uint32_t)msix_remap.msi_addr, ++ msix_entry + PCI_MSIX_ENTRY_LOWER_ADDR); ++ writel((uint32_t)(msix_remap.msi_addr >> 32), ++ msix_entry + PCI_MSIX_ENTRY_UPPER_ADDR); ++ writel(msix_remap.msi_data, ++ msix_entry + PCI_MSIX_ENTRY_DATA); ++ ++ /* unmask the entry */ ++ writel(msix_remap.vector_ctl & ++ PCI_MSIX_ENTRY_CTRL_MASKBIT, ++ msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL); ++ } ++ ++ return ret; ++} +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 7aaf78327eae..71b8c2606f57 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -105,6 +105,15 @@ enum request_direction { + DIRECTION_MAX, 
+ } __attribute__((aligned(4))); + ++/* ++ * IRQ type for ptdev ++ */ ++enum irq_type { ++ IRQ_INTX, ++ IRQ_MSI, ++ IRQ_MSIX, ++} __attribute__((aligned(4))); ++ + struct msr_request { + enum request_direction direction; + long index; +@@ -222,4 +231,38 @@ struct vm_gpa2hpa { + unsigned long hpa; /* OUT: -1 means invalid gpa */ + } __attribute__((aligned(8))); + ++struct acrn_ptdev_irq { ++ enum irq_type type; ++ unsigned short virt_bdf; /* IN: Device virtual BDF# */ ++ unsigned short phys_bdf; /* IN: Device physical BDF# */ ++ union { ++ struct { ++ int virt_pin; /* IN: virtual IOAPIC pin */ ++ int phys_pin; /* IN: physical IOAPIC pin */ ++ bool pic_pin; /* IN: pin from PIC? */ ++ } intx; ++ struct { ++ int vector_cnt; /* IN: vector count of MSI/MSIX */ ++ ++ /* IN: physcial address of MSI-X table */ ++ unsigned long table_paddr; ++ ++ /* IN: size of MSI-X table (round up to 4K) */ ++ int table_size; ++ } msix; ++ }; ++} __attribute__((aligned(8))); ++ ++struct acrn_vm_pci_msix_remap { ++ unsigned short virt_bdf; /* IN: Device virtual BDF# */ ++ unsigned short phys_bdf; /* IN: Device physical BDF# */ ++ unsigned short msi_ctl; /* IN: PCI MSI/x cap control data */ ++ unsigned long msi_addr; /* IN/OUT: msi address to fix */ ++ unsigned int msi_data; /* IN/OUT: msi data to fix */ ++ int msix; /* IN: 0 - MSI, 1 - MSI-X */ ++ int msix_entry_index; /* IN: MSI-X the entry table index */ ++ /* IN: Vector Control for MSI-X Entry, field defined in MSIX spec */ ++ unsigned int vector_ctl; ++} __attribute__((aligned(8))); ++ + #endif /* ACRN_COMMON_H */ +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index d527a8fa8435..3e43da56813d 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -92,6 +92,14 @@ + #define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00) + #define HC_VM_GPA2HPA _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01) + ++/* PCI assignment*/ ++#define HC_ID_PCI_BASE 0x400UL ++#define HC_ASSIGN_PTDEV 
_HC_ID(HC_ID, HC_ID_PCI_BASE + 0x00) ++#define HC_DEASSIGN_PTDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x01) ++#define HC_VM_PCI_MSIX_REMAP _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x02) ++#define HC_SET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x03) ++#define HC_RESET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x04) ++ + #define ACRN_DOM0_VMID (0UL) + #define ACRN_INVALID_VMID (-1UL) + #define ACRN_INVALID_HPA (-1UL) +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index f1ed9a07e708..ce579e3734ff 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -140,6 +140,7 @@ static inline long acrn_hypercall4(unsigned long hyp_id, unsigned long param1, + } + + inline long hcall_inject_msi(unsigned long vmid, unsigned long msi); ++inline long hcall_remap_pci_msix(unsigned long vmid, unsigned long msix); + inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer); + inline long hcall_notify_req_finish(unsigned long vmid, + unsigned long vcpu_mask); +@@ -153,5 +154,12 @@ inline long vhm_query_vm_state(struct vhm_vm *vm); + inline long vhm_assert_irqline(struct vhm_vm *vm, unsigned long ioctl_param); + inline long vhm_deassert_irqline(struct vhm_vm *vm, unsigned long ioctl_param); + inline long vhm_pulse_irqline(struct vhm_vm *vm, unsigned long ioctl_param); ++inline long vhm_assign_ptdev(struct vhm_vm *vm, unsigned long ioctl_param); ++inline long vhm_deassign_ptdev(struct vhm_vm *vm, unsigned long ioctl_param); ++inline long vhm_set_ptdev_intr_info(struct vhm_vm *vm, ++ unsigned long ioctl_param); ++inline long vhm_reset_ptdev_intr_info(struct vhm_vm *vm, ++ unsigned long ioctl_param); ++inline long vhm_remap_pci_msix(struct vhm_vm *vm, unsigned long ioctl_param); + + #endif /* VHM_HYPERCALL_H */ +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 3be6aca40844..8d03d38b788d 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ 
b/include/linux/vhm/vhm_ioctl_defs.h +@@ -79,11 +79,20 @@ + #define IC_ATTACH_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x03) + #define IC_DESTROY_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x04) + ++ + /* Guest memory management */ + #define IC_ID_MEM_BASE 0x300UL + #define IC_ALLOC_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x00) + #define IC_SET_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x01) + ++/* PCI assignment*/ ++#define IC_ID_PCI_BASE 0x400UL ++#define IC_ASSIGN_PTDEV _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x00) ++#define IC_DEASSIGN_PTDEV _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x01) ++#define IC_VM_PCI_MSIX_REMAP _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x02) ++#define IC_SET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x03) ++#define IC_RESET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x04) ++ + #define SPECNAMELEN 63 + + enum { +-- +2.17.1 + diff --git a/patches/0009-char-rpmb-provide-a-user-space-interface.security b/patches/0009-char-rpmb-provide-a-user-space-interface.security new file mode 100644 index 0000000000..f427417c04 --- /dev/null +++ b/patches/0009-char-rpmb-provide-a-user-space-interface.security @@ -0,0 +1,1156 @@ +From 2279f47abe2ac51b7b496fa1293a219c5885ee1e Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Thu, 16 Jul 2015 12:29:50 +0300 +Subject: [PATCH 09/65] char: rpmb: provide a user space interface + +The user space API is achieved via two synchronous IOCTLs. +Simplified one, RPMB_IOC_REQ_CMD, were read result cycles is performed +by the framework on behalf the user and second, RPMB_IOC_SEQ_CMD where +the whole RPMB sequence including RESULT_READ is supplied by the caller. +The latter is intended for easier adjusting of the applications that +use MMC_IOC_MULTI_CMD ioctl. + +V2: use memdup_user +V3: commit message fix +V4: resend +V5: 1. Add RPMB_IOC_SEQ_CMD API. + 2. Export uapi rpmb.h header +V6: 1. Remove #include . + 2. Add ioctl documentation. +V7: 1. copy_from_user the value of the frame pointer. + 2. 
Fix possible macro side-effect due to macro argument reuse. +V8: 1. Fix kdoc errors + 2. Move IOCTL to a different range due to conflict + 3. Change license to dual BSD/GPL +V9: 1. Add version and capability ioctls and drop the request ioctl + 2. Use zero based frame count: 0 means only meted are in a frame. + 2. Add SPDX identifiers. + 3. Fix comment typo in uapi/linux/rpmb.h +V10: + 1. Rebase on 5.0 + 2. Rebase on 5.1 + 3. Drop rpmb_compat_ioctl + +Change-Id: I00f2b5d5c92982fa2a3814a8bc56a3fecd19456f +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +Tested-by: Avri Altman +--- + Documentation/ioctl/ioctl-number.txt | 351 +++++++++++++++++++++++++++ + MAINTAINERS | 1 + + drivers/char/rpmb/Kconfig | 7 + + drivers/char/rpmb/Makefile | 1 + + drivers/char/rpmb/cdev.c | 285 ++++++++++++++++++++++ + drivers/char/rpmb/core.c | 9 +- + drivers/char/rpmb/rpmb-cdev.h | 17 ++ + include/linux/rpmb.h | 107 +------- + include/uapi/linux/rpmb.h | 192 +++++++++++++++ + 9 files changed, 871 insertions(+), 99 deletions(-) + create mode 100644 Documentation/ioctl/ioctl-number.txt + create mode 100644 drivers/char/rpmb/cdev.c + create mode 100644 drivers/char/rpmb/rpmb-cdev.h + create mode 100644 include/uapi/linux/rpmb.h + +diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt +new file mode 100644 +index 000000000000..7342b1174449 +--- /dev/null ++++ b/Documentation/ioctl/ioctl-number.txt +@@ -0,0 +1,351 @@ ++Ioctl Numbers ++19 October 1999 ++Michael Elizabeth Chastain ++ ++ ++If you are adding new ioctl's to the kernel, you should use the _IO ++macros defined in : ++ ++ _IO an ioctl with no parameters ++ _IOW an ioctl with write parameters (copy_from_user) ++ _IOR an ioctl with read parameters (copy_to_user) ++ _IOWR an ioctl with both write and read parameters. ++ ++'Write' and 'read' are from the user's point of view, just like the ++system calls 'write' and 'read'. 
For example, a SET_FOO ioctl would ++be _IOW, although the kernel would actually read data from user space; ++a GET_FOO ioctl would be _IOR, although the kernel would actually write ++data to user space. ++ ++The first argument to _IO, _IOW, _IOR, or _IOWR is an identifying letter ++or number from the table below. Because of the large number of drivers, ++many drivers share a partial letter with other drivers. ++ ++If you are writing a driver for a new device and need a letter, pick an ++unused block with enough room for expansion: 32 to 256 ioctl commands. ++You can register the block by patching this file and submitting the ++patch to Linus Torvalds. Or you can e-mail me at and ++I'll register one for you. ++ ++The second argument to _IO, _IOW, _IOR, or _IOWR is a sequence number ++to distinguish ioctls from each other. The third argument to _IOW, ++_IOR, or _IOWR is the type of the data going into the kernel or coming ++out of the kernel (e.g. 'int' or 'struct foo'). NOTE! Do NOT use ++sizeof(arg) as the third argument as this results in your ioctl thinking ++it passes an argument of type size_t. ++ ++Some devices use their major number as the identifier; this is OK, as ++long as it is unique. Some devices are irregular and don't follow any ++convention at all. ++ ++Following this convention is good because: ++ ++(1) Keeping the ioctl's globally unique helps error checking: ++ if a program calls an ioctl on the wrong device, it will get an ++ error rather than some unexpected behaviour. ++ ++(2) The 'strace' build procedure automatically finds ioctl numbers ++ defined with _IO, _IOW, _IOR, or _IOWR. ++ ++(3) 'strace' can decode numbers back into useful names when the ++ numbers are unique. ++ ++(4) People looking for ioctls can grep for them more easily when ++ this convention is used to define the ioctl numbers. ++ ++(5) When following the convention, the driver code can use generic ++ code to copy the parameters between user and kernel space. 
++ ++This table lists ioctls visible from user land for Linux/x86. It contains ++most drivers up to 2.6.31, but I know I am missing some. There has been ++no attempt to list non-X86 architectures or ioctls from drivers/staging/. ++ ++Code Seq#(hex) Include File Comments ++======================================================== ++0x00 00-1F linux/fs.h conflict! ++0x00 00-1F scsi/scsi_ioctl.h conflict! ++0x00 00-1F linux/fb.h conflict! ++0x00 00-1F linux/wavefront.h conflict! ++0x02 all linux/fd.h ++0x03 all linux/hdreg.h ++0x04 D2-DC linux/umsdos_fs.h Dead since 2.6.11, but don't reuse these. ++0x06 all linux/lp.h ++0x09 all linux/raid/md_u.h ++0x10 00-0F drivers/char/s390/vmcp.h ++0x10 10-1F arch/s390/include/uapi/sclp_ctl.h ++0x10 20-2F arch/s390/include/uapi/asm/hypfs.h ++0x12 all linux/fs.h ++ linux/blkpg.h ++0x1b all InfiniBand Subsystem ++0x20 all drivers/cdrom/cm206.h ++0x22 all scsi/sg.h ++'!' 00-1F uapi/linux/seccomp.h ++'#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem ++'$' 00-0F linux/perf_counter.h, linux/perf_event.h ++'%' 00-0F include/uapi/linux/stm.h ++ System Trace Module subsystem ++ ++'&' 00-07 drivers/firewire/nosy-user.h ++'1' 00-1F PPS kit from Ulrich Windl ++ ++'2' 01-04 linux/i2o.h ++'3' 00-0F drivers/s390/char/raw3270.h conflict! ++'3' 00-1F linux/suspend_ioctls.h conflict! ++ and kernel/power/user.c ++'8' all SNP8023 advanced NIC card ++ ++';' 64-7F linux/vfio.h ++'@' 00-0F linux/radeonfb.h conflict! ++'@' 00-0F drivers/video/aty/aty128fb.c conflict! ++'A' 00-1F linux/apm_bios.h conflict! ++'A' 00-0F linux/agpgart.h conflict! ++ and drivers/char/agp/compat_ioctl.h ++'A' 00-7F sound/asound.h conflict! ++'B' 00-1F linux/cciss_ioctl.h conflict! ++'B' 00-0F include/linux/pmu.h conflict! ++'B' C0-FF advanced bbus ++ ++'C' all linux/soundcard.h conflict! ++'C' 01-2F linux/capi.h conflict! ++'C' F0-FF drivers/net/wan/cosa.h conflict! 
++'D' all arch/s390/include/asm/dasd.h ++'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h ++'D' 05 drivers/scsi/pmcraid.h ++'E' all linux/input.h conflict! ++'E' 00-0F xen/evtchn.h conflict! ++'F' all linux/fb.h conflict! ++'F' 01-02 drivers/scsi/pmcraid.h conflict! ++'F' 20 drivers/video/fsl-diu-fb.h conflict! ++'F' 20 drivers/video/intelfb/intelfb.h conflict! ++'F' 20 linux/ivtvfb.h conflict! ++'F' 20 linux/matroxfb.h conflict! ++'F' 20 drivers/video/aty/atyfb_base.c conflict! ++'F' 00-0F video/da8xx-fb.h conflict! ++'F' 80-8F linux/arcfb.h conflict! ++'F' DD video/sstfb.h conflict! ++'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict! ++'G' 00-0F linux/gigaset_dev.h conflict! ++'H' 00-7F linux/hiddev.h conflict! ++'H' 00-0F linux/hidraw.h conflict! ++'H' 01 linux/mei.h conflict! ++'H' 02 linux/mei.h conflict! ++'H' 03 linux/mei.h conflict! ++'H' 00-0F sound/asound.h conflict! ++'H' 20-40 sound/asound_fm.h conflict! ++'H' 80-8F sound/sfnt_info.h conflict! ++'H' 10-8F sound/emu10k1.h conflict! ++'H' 10-1F sound/sb16_csp.h conflict! ++'H' 10-1F sound/hda_hwdep.h conflict! ++'H' 40-4F sound/hdspm.h conflict! ++'H' 40-4F sound/hdsp.h conflict! ++'H' 90 sound/usb/usx2y/usb_stream.h ++'H' A0 uapi/linux/usb/cdc-wdm.h ++'H' C0-F0 net/bluetooth/hci.h conflict! ++'H' C0-DF net/bluetooth/hidp/hidp.h conflict! ++'H' C0-DF net/bluetooth/cmtp/cmtp.h conflict! ++'H' C0-DF net/bluetooth/bnep/bnep.h conflict! ++'H' F1 linux/hid-roccat.h ++'H' F8-FA sound/firewire.h ++'I' all linux/isdn.h conflict! ++'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict! ++'I' 40-4F linux/mISDNif.h conflict! ++'J' 00-1F drivers/scsi/gdth_ioctl.h ++'K' all linux/kd.h ++'L' 00-1F linux/loop.h conflict! ++'L' 10-1F drivers/scsi/mpt3sas/mpt3sas_ctl.h conflict! ++'L' 20-2F linux/lightnvm.h ++'L' E0-FF linux/ppdd.h encrypted disk device driver ++ ++'M' all linux/soundcard.h conflict! ++'M' 01-16 mtd/mtd-abi.h conflict! 
++ and drivers/mtd/mtdchar.c ++'M' 01-03 drivers/scsi/megaraid/megaraid_sas.h ++'M' 00-0F drivers/video/fsl-diu-fb.h conflict! ++'N' 00-1F drivers/usb/scanner.h ++'N' 40-7F drivers/block/nvme.c ++'O' 00-06 mtd/ubi-user.h UBI ++'P' all linux/soundcard.h conflict! ++'P' 60-6F sound/sscape_ioctl.h conflict! ++'P' 00-0F drivers/usb/class/usblp.c conflict! ++'P' 01-09 drivers/misc/pci_endpoint_test.c conflict! ++'Q' all linux/soundcard.h ++'R' 00-1F linux/random.h conflict! ++'R' 01 linux/rfkill.h conflict! ++'R' C0-DF net/bluetooth/rfcomm.h ++'S' all linux/cdrom.h conflict! ++'S' 80-81 scsi/scsi_ioctl.h conflict! ++'S' 82-FF scsi/scsi.h conflict! ++'S' 00-7F sound/asequencer.h conflict! ++'T' all linux/soundcard.h conflict! ++'T' 00-AF sound/asound.h conflict! ++'T' all arch/x86/include/asm/ioctls.h conflict! ++'T' C0-DF linux/if_tun.h conflict! ++'U' all sound/asound.h conflict! ++'U' 00-CF linux/uinput.h conflict! ++'U' 00-EF linux/usbdevice_fs.h ++'U' C0-CF drivers/bluetooth/hci_uart.h ++'V' all linux/vt.h conflict! ++'V' all linux/videodev2.h conflict! ++'V' C0 linux/ivtvfb.h conflict! ++'V' C0 linux/ivtv.h conflict! ++'V' C0 media/davinci/vpfe_capture.h conflict! ++'V' C0 media/si4713.h conflict! ++'W' 00-1F linux/watchdog.h conflict! ++'W' 00-1F linux/wanrouter.h conflict! (pre 3.9) ++'W' 00-3F sound/asound.h conflict! ++'W' 40-5F drivers/pci/switch/switchtec.c ++'X' all fs/xfs/xfs_fs.h conflict! ++ and fs/xfs/linux-2.6/xfs_ioctl32.h ++ and include/linux/falloc.h ++ and linux/fs.h ++'X' all fs/ocfs2/ocfs_fs.h conflict! ++'X' 01 linux/pktcdvd.h conflict! ++'Y' all linux/cyclades.h ++'Z' 14-15 drivers/message/fusion/mptctl.h ++'[' 00-3F linux/usb/tmc.h USB Test and Measurement Devices ++ ++'a' all linux/atm*.h, linux/sonet.h ATM on linux ++ ++'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver ++'b' 00-FF conflict! bit3 vme host bridge ++ ++'c' all linux/cm4000_cs.h conflict! ++'c' 00-7F linux/comstats.h conflict! 
++'c' 00-7F linux/coda.h conflict! ++'c' 00-1F linux/chio.h conflict! ++'c' 80-9F arch/s390/include/asm/chsc.h conflict! ++'c' A0-AF arch/x86/include/asm/msr.h conflict! ++'d' 00-FF linux/char/drm/drm.h conflict! ++'d' 02-40 pcmcia/ds.h conflict! ++'d' F0-FF linux/digi1.h ++'e' all linux/digi1.h conflict! ++'f' 00-1F linux/ext2_fs.h conflict! ++'f' 00-1F linux/ext3_fs.h conflict! ++'f' 00-0F fs/jfs/jfs_dinode.h conflict! ++'f' 00-0F fs/ext4/ext4.h conflict! ++'f' 00-0F linux/fs.h conflict! ++'f' 00-0F fs/ocfs2/ocfs2_fs.h conflict! ++'g' 00-0F linux/usb/gadgetfs.h ++'g' 20-2F linux/usb/g_printer.h ++'h' 00-7F conflict! Charon filesystem ++ ++'h' 00-1F linux/hpet.h conflict! ++'h' 80-8F fs/hfsplus/ioctl.c ++'i' 00-3F linux/i2o-dev.h conflict! ++'i' 0B-1F linux/ipmi.h conflict! ++'i' 80-8F linux/i8k.h ++'j' 00-3F linux/joystick.h ++'k' 00-0F linux/spi/spidev.h conflict! ++'k' 00-05 video/kyro.h conflict! ++'k' 10-17 linux/hsi/hsi_char.h HSI character device ++'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system ++ ++'l' 40-7F linux/udf_fs_i.h in development: ++ ++'m' 00-09 linux/mmtimer.h conflict! ++'m' all linux/mtio.h conflict! ++'m' all linux/soundcard.h conflict! ++'m' all linux/synclink.h conflict! ++'m' 00-19 drivers/message/fusion/mptctl.h conflict! ++'m' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict! ++'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c ++'n' 80-8F uapi/linux/nilfs2_api.h NILFS2 ++'n' E0-FF linux/matroxfb.h matroxfb ++'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2 ++'o' 00-03 mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps) ++'o' 40-41 mtd/ubi-user.h UBI ++'o' 01-A1 linux/dvb/*.h DVB ++'p' 00-0F linux/phantom.h conflict! (OpenHaptics needs this) ++'p' 00-1F linux/rtc.h conflict! ++'p' 00-3F linux/mc146818rtc.h conflict! 
++'p' 40-7F linux/nvram.h ++'p' 80-9F linux/ppdev.h user-space parport ++ ++'p' A1-A5 linux/pps.h LinuxPPS ++ ++'q' 00-1F linux/serio.h ++'q' 80-FF linux/telephony.h Internet PhoneJACK, Internet LineJACK ++ linux/ixjuser.h ++'r' 00-1F linux/msdos_fs.h and fs/fat/dir.c ++'s' all linux/cdk.h ++'t' 00-7F linux/ppp-ioctl.h ++'t' 80-8F linux/isdn_ppp.h ++'t' 90-91 linux/toshiba.h toshiba and toshiba_acpi SMM ++'u' 00-1F linux/smb_fs.h gone ++'u' 20-3F linux/uvcvideo.h USB video class host driver ++'u' 40-4f linux/udmabuf.h userspace dma-buf misc device ++'v' 00-1F linux/ext2_fs.h conflict! ++'v' 00-1F linux/fs.h conflict! ++'v' 00-0F linux/sonypi.h conflict! ++'v' 00-0F media/v4l2-subdev.h conflict! ++'v' C0-FF linux/meye.h conflict! ++'w' all CERN SCI driver ++'y' 00-1F packet based user level communications ++ ++'z' 00-3F CAN bus card conflict! ++ ++'z' 40-7F CAN bus card conflict! ++ ++'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict! ++'|' 00-7F linux/media.h ++0x80 00-1F linux/fb.h ++0x89 00-06 arch/x86/include/asm/sockios.h ++0x89 0B-DF linux/sockios.h ++0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range ++0x89 E0-EF linux/dn.h PROTOPRIVATE range ++0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range ++0x8B all linux/wireless.h ++0x8C 00-3F WiNRADiO driver ++ ++0x90 00 drivers/cdrom/sbpcd.h ++0x92 00-0F drivers/usb/mon/mon_bin.c ++0x93 60-7F linux/auto_fs.h ++0x94 all fs/btrfs/ioctl.h Btrfs filesystem ++ and linux/fs.h some lifted to vfs/generic ++0x97 00-7F fs/ceph/ioctl.h Ceph file system ++0x99 00-0F 537-Addinboard driver ++ ++0xA0 all linux/sdp/sdp.h Industrial Device Project ++ ++0xA1 0 linux/vtpm_proxy.h TPM Emulator Proxy Driver ++0xA3 80-8F Port ACL in development: ++ ++0xA3 90-9F linux/dtlk.h ++0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem ++0xAA 00-3F linux/uapi/linux/userfaultfd.h ++0xAB 00-1F linux/nbd.h ++0xAC 00-1F linux/raw.h ++0xAD 00 Netfilter device in development: ++ ++0xAE all linux/kvm.h Kernel-based Virtual Machine ++ ++0xAF 00-1F 
linux/fsl_hypervisor.h Freescale hypervisor ++0xB0 all RATIO devices in development: ++ ++0xB1 00-1F PPPoX ++0xB3 00 linux/mmc/ioctl.h ++0xB4 00-0F linux/gpio.h ++0xB5 00-0F uapi/linux/rpmsg.h ++0xB5 80-8F linux/uapi/linux/rpmb.h ++0xB6 all linux/fpga-dfl.h ++0xC0 00-0F linux/usb/iowarrior.h ++0xCA 00-0F uapi/misc/cxl.h ++0xCA 10-2F uapi/misc/ocxl.h ++0xCA 80-BF uapi/scsi/cxlflash_ioctl.h ++0xCB 00-1F CBM serial IEC bus in development: ++ ++0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver ++0xCD 01 linux/reiserfs_fs.h ++0xCF 02 fs/cifs/ioctl.c ++0xDB 00-0F drivers/char/mwave/mwavepub.h ++0xDD 00-3F ZFCP device driver see drivers/s390/scsi/ ++ ++0xE5 00-3F linux/fuse.h ++0xEC 00-01 drivers/platform/chrome/cros_ec_dev.h ChromeOS EC driver ++0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development) ++ ++0xF4 00-1F video/mbxfb.h mbxfb ++ ++0xF6 all LTTng Linux Trace Toolkit Next Generation ++ ++0xFD all linux/dm-ioctl.h +diff --git a/MAINTAINERS b/MAINTAINERS +index af01f3b1e00d..f9f579837aac 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -13981,6 +13981,7 @@ M: Tomas Winkler + L: linux-kernel@vger.kernel.org + S: Supported + F: drivers/char/rpmb/* ++F: include/uapi/linux/rpmb.h + F: include/linux/rpmb.h + F: Documentation/ABI/testing/sysfs-class-rpmb + +diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig +index 431c2823cf70..69dbc1cfe89f 100644 +--- a/drivers/char/rpmb/Kconfig ++++ b/drivers/char/rpmb/Kconfig +@@ -9,3 +9,10 @@ config RPMB + access RPMB partition. + + If unsure, select N. 
++ ++config RPMB_INTF_DEV ++ bool "RPMB character device interface /dev/rpmbN" ++ depends on RPMB ++ help ++ Say yes here if you want to access RPMB from user space ++ via character device interface /dev/rpmb%d +diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile +index 24d4752a9a53..f54b3f30514b 100644 +--- a/drivers/char/rpmb/Makefile ++++ b/drivers/char/rpmb/Makefile +@@ -3,5 +3,6 @@ + + obj-$(CONFIG_RPMB) += rpmb.o + rpmb-objs += core.o ++rpmb-$(CONFIG_RPMB_INTF_DEV) += cdev.o + + ccflags-y += -D__CHECK_ENDIAN__ +diff --git a/drivers/char/rpmb/cdev.c b/drivers/char/rpmb/cdev.c +new file mode 100644 +index 000000000000..49a6a26a5297 +--- /dev/null ++++ b/drivers/char/rpmb/cdev.c +@@ -0,0 +1,285 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2015 - 2019 Intel Corporation. ++ */ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "rpmb-cdev.h" ++ ++static dev_t rpmb_devt; ++#define RPMB_MAX_DEVS MINORMASK ++ ++#define RPMB_DEV_OPEN 0 /** single open bit (position) */ ++/* from MMC_IOC_MAX_CMDS */ ++#define RPMB_MAX_FRAMES 255 ++ ++/** ++ * rpmb_open - the open function ++ * ++ * @inode: pointer to inode structure ++ * @fp: pointer to file structure ++ * ++ * Return: 0 on success, <0 on error ++ */ ++static int rpmb_open(struct inode *inode, struct file *fp) ++{ ++ struct rpmb_dev *rdev; ++ ++ rdev = container_of(inode->i_cdev, struct rpmb_dev, cdev); ++ if (!rdev) ++ return -ENODEV; ++ ++ /* the rpmb is single open! */ ++ if (test_and_set_bit(RPMB_DEV_OPEN, &rdev->status)) ++ return -EBUSY; ++ ++ mutex_lock(&rdev->lock); ++ ++ fp->private_data = rdev; ++ ++ mutex_unlock(&rdev->lock); ++ ++ return nonseekable_open(inode, fp); ++} ++ ++/** ++ * rpmb_release - the cdev release function ++ * ++ * @inode: pointer to inode structure ++ * @fp: pointer to file structure ++ * ++ * Return: 0 always. 
++ */ ++static int rpmb_release(struct inode *inode, struct file *fp) ++{ ++ struct rpmb_dev *rdev = fp->private_data; ++ ++ clear_bit(RPMB_DEV_OPEN, &rdev->status); ++ ++ return 0; ++} ++ ++/** ++ * rpmb_cmd_copy_from_user - copy rpmb command from the user space ++ * ++ * @cmd: internal cmd structure ++ * @ucmd: user space cmd structure ++ * ++ * Return: 0 on success, <0 on error ++ */ ++static int rpmb_cmd_copy_from_user(struct rpmb_cmd *cmd, ++ struct rpmb_ioc_cmd __user *ucmd) ++{ ++ struct rpmb_frame *frames; ++ u64 frames_ptr; ++ ++ if (get_user(cmd->flags, &ucmd->flags)) ++ return -EFAULT; ++ ++ if (get_user(cmd->nframes, &ucmd->nframes)) ++ return -EFAULT; ++ ++ if (cmd->nframes > RPMB_MAX_FRAMES) ++ return -EOVERFLOW; ++ ++ /* some archs have issues with 64bit get_user */ ++ if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) ++ return -EFAULT; ++ ++ frames = memdup_user(u64_to_user_ptr(frames_ptr), ++ rpmb_ioc_frames_len_jdec(cmd->nframes)); ++ if (IS_ERR(frames)) ++ return PTR_ERR(frames); ++ ++ cmd->frames = frames; ++ return 0; ++} ++ ++/** ++ * rpmb_cmd_copy_to_user - copy rpmb command to the user space ++ * ++ * @ucmd: user space cmd structure ++ * @cmd: internal cmd structure ++ * ++ * Return: 0 on success, <0 on error ++ */ ++static int rpmb_cmd_copy_to_user(struct rpmb_ioc_cmd __user *ucmd, ++ struct rpmb_cmd *cmd) ++{ ++ u64 frames_ptr; ++ ++ if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) ++ return -EFAULT; ++ ++ /* some archs have issues with 64bit get_user */ ++ if (copy_to_user(u64_to_user_ptr(frames_ptr), cmd->frames, ++ rpmb_ioc_frames_len_jdec(cmd->nframes))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++/** ++ * rpmb_ioctl_seq_cmd - issue an rpmb command sequence ++ * ++ * @rdev: rpmb device ++ * @ptr: rpmb cmd sequence ++ * ++ * RPMB_IOC_SEQ_CMD handler ++ * ++ * Return: 0 on success, <0 on error ++ */ ++static long rpmb_ioctl_seq_cmd(struct rpmb_dev *rdev, ++ struct rpmb_ioc_seq_cmd __user *ptr) 
++{ ++ __u64 ncmds; ++ struct rpmb_cmd *cmds; ++ struct rpmb_ioc_cmd __user *ucmds; ++ ++ int i; ++ int ret; ++ ++ /* The caller must have CAP_SYS_RAWIO, like mmc ioctl */ ++ if (!capable(CAP_SYS_RAWIO)) ++ return -EPERM; ++ ++ /* some archs have issues with 64bit get_user */ ++ if (copy_from_user(&ncmds, &ptr->num_of_cmds, sizeof(ncmds))) ++ return -EFAULT; ++ ++ if (ncmds > 3) { ++ dev_err(&rdev->dev, "supporting up to 3 packets (%llu)\n", ++ ncmds); ++ return -EINVAL; ++ } ++ ++ cmds = kcalloc(ncmds, sizeof(*cmds), GFP_KERNEL); ++ if (!cmds) ++ return -ENOMEM; ++ ++ ucmds = (struct rpmb_ioc_cmd __user *)ptr->cmds; ++ for (i = 0; i < ncmds; i++) { ++ ret = rpmb_cmd_copy_from_user(&cmds[i], &ucmds[i]); ++ if (ret) ++ goto out; ++ } ++ ++ ret = rpmb_cmd_seq(rdev, cmds, ncmds); ++ if (ret) ++ goto out; ++ ++ for (i = 0; i < ncmds; i++) { ++ ret = rpmb_cmd_copy_to_user(&ucmds[i], &cmds[i]); ++ if (ret) ++ goto out; ++ } ++out: ++ for (i = 0; i < ncmds; i++) ++ kfree(cmds[i].frames); ++ kfree(cmds); ++ return ret; ++} ++ ++static long rpmb_ioctl_ver_cmd(struct rpmb_dev *rdev, ++ struct rpmb_ioc_ver_cmd __user *ptr) ++{ ++ struct rpmb_ioc_ver_cmd ver = { ++ .api_version = RPMB_API_VERSION, ++ }; ++ ++ return copy_to_user(ptr, &ver, sizeof(ver)) ? -EFAULT : 0; ++} ++ ++static long rpmb_ioctl_cap_cmd(struct rpmb_dev *rdev, ++ struct rpmb_ioc_cap_cmd __user *ptr) ++{ ++ struct rpmb_ioc_cap_cmd cap; ++ ++ cap.device_type = rdev->ops->type; ++ cap.target = rdev->target; ++ cap.block_size = rdev->ops->block_size; ++ cap.wr_cnt_max = rdev->ops->wr_cnt_max; ++ cap.rd_cnt_max = rdev->ops->rd_cnt_max; ++ cap.auth_method = rdev->ops->auth_method; ++ cap.capacity = rpmb_get_capacity(rdev); ++ cap.reserved = 0; ++ ++ return copy_to_user(ptr, &cap, sizeof(cap)) ? 
-EFAULT : 0; ++} ++ ++/** ++ * rpmb_ioctl - rpmb ioctl dispatcher ++ * ++ * @fp: a file pointer ++ * @cmd: ioctl command RPMB_IOC_SEQ_CMD RPMB_IOC_VER_CMD RPMB_IOC_CAP_CMD ++ * @arg: ioctl data: rpmb_ioc_ver_cmd rpmb_ioc_cap_cmd pmb_ioc_seq_cmd ++ * ++ * Return: 0 on success; < 0 on error ++ */ ++static long rpmb_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct rpmb_dev *rdev = fp->private_data; ++ void __user *ptr = (void __user *)arg; ++ ++ switch (cmd) { ++ case RPMB_IOC_VER_CMD: ++ return rpmb_ioctl_ver_cmd(rdev, ptr); ++ case RPMB_IOC_CAP_CMD: ++ return rpmb_ioctl_cap_cmd(rdev, ptr); ++ case RPMB_IOC_SEQ_CMD: ++ return rpmb_ioctl_seq_cmd(rdev, ptr); ++ default: ++ dev_err(&rdev->dev, "unsupported ioctl 0x%x.\n", cmd); ++ return -ENOIOCTLCMD; ++ } ++} ++ ++static const struct file_operations rpmb_fops = { ++ .open = rpmb_open, ++ .release = rpmb_release, ++ .unlocked_ioctl = rpmb_ioctl, ++ .owner = THIS_MODULE, ++ .llseek = noop_llseek, ++}; ++ ++void rpmb_cdev_prepare(struct rpmb_dev *rdev) ++{ ++ rdev->dev.devt = MKDEV(MAJOR(rpmb_devt), rdev->id); ++ rdev->cdev.owner = THIS_MODULE; ++ cdev_init(&rdev->cdev, &rpmb_fops); ++} ++ ++void rpmb_cdev_add(struct rpmb_dev *rdev) ++{ ++ cdev_add(&rdev->cdev, rdev->dev.devt, 1); ++} ++ ++void rpmb_cdev_del(struct rpmb_dev *rdev) ++{ ++ if (rdev->dev.devt) ++ cdev_del(&rdev->cdev); ++} ++ ++int __init rpmb_cdev_init(void) ++{ ++ int ret; ++ ++ ret = alloc_chrdev_region(&rpmb_devt, 0, RPMB_MAX_DEVS, "rpmb"); ++ if (ret < 0) ++ pr_err("unable to allocate char dev region\n"); ++ ++ return ret; ++} ++ ++void __exit rpmb_cdev_exit(void) ++{ ++ unregister_chrdev_region(rpmb_devt, RPMB_MAX_DEVS); ++} +diff --git a/drivers/char/rpmb/core.c b/drivers/char/rpmb/core.c +index dd0afa0a3e24..785451fd1736 100644 +--- a/drivers/char/rpmb/core.c ++++ b/drivers/char/rpmb/core.c +@@ -11,6 +11,7 @@ + #include + + #include ++#include "rpmb-cdev.h" + + static DEFINE_IDA(rpmb_ida); + +@@ -313,6 +314,7 @@ int 
rpmb_dev_unregister(struct rpmb_dev *rdev) + return -EINVAL; + + mutex_lock(&rdev->lock); ++ rpmb_cdev_del(rdev); + device_del(&rdev->dev); + mutex_unlock(&rdev->lock); + +@@ -421,10 +423,14 @@ struct rpmb_dev *rpmb_dev_register(struct device *dev, u8 target, + rdev->dev.parent = dev; + rdev->dev.groups = rpmb_attr_groups; + ++ rpmb_cdev_prepare(rdev); ++ + ret = device_register(&rdev->dev); + if (ret) + goto exit; + ++ rpmb_cdev_add(rdev); ++ + dev_dbg(&rdev->dev, "registered device\n"); + + return rdev; +@@ -441,11 +447,12 @@ static int __init rpmb_init(void) + { + ida_init(&rpmb_ida); + class_register(&rpmb_class); +- return 0; ++ return rpmb_cdev_init(); + } + + static void __exit rpmb_exit(void) + { ++ rpmb_cdev_exit(); + class_unregister(&rpmb_class); + ida_destroy(&rpmb_ida); + } +diff --git a/drivers/char/rpmb/rpmb-cdev.h b/drivers/char/rpmb/rpmb-cdev.h +new file mode 100644 +index 000000000000..e59ff0c05e9d +--- /dev/null ++++ b/drivers/char/rpmb/rpmb-cdev.h +@@ -0,0 +1,17 @@ ++/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2018 Intel Corp. 
All rights reserved ++ */ ++#ifdef CONFIG_RPMB_INTF_DEV ++int __init rpmb_cdev_init(void); ++void __exit rpmb_cdev_exit(void); ++void rpmb_cdev_prepare(struct rpmb_dev *rdev); ++void rpmb_cdev_add(struct rpmb_dev *rdev); ++void rpmb_cdev_del(struct rpmb_dev *rdev); ++#else ++static inline int __init rpmb_cdev_init(void) { return 0; } ++static inline void __exit rpmb_cdev_exit(void) {} ++static inline void rpmb_cdev_prepare(struct rpmb_dev *rdev) {} ++static inline void rpmb_cdev_add(struct rpmb_dev *rdev) {} ++static inline void rpmb_cdev_del(struct rpmb_dev *rdev) {} ++#endif /* CONFIG_RPMB_INTF_DEV */ +diff --git a/include/linux/rpmb.h b/include/linux/rpmb.h +index ab8d95b39a69..2e19ce89eed5 100644 +--- a/include/linux/rpmb.h ++++ b/include/linux/rpmb.h +@@ -7,105 +7,14 @@ + + #include + #include ++#include ++#include + #include + +-/** +- * struct rpmb_frame_jdec - rpmb frame as defined by JDEC specs +- * +- * @stuff : stuff bytes +- * @key_mac : The authentication key or the message authentication +- * code (MAC) depending on the request/response type. +- * The MAC will be delivered in the last (or the only) +- * block of data. +- * @data : Data to be written or read by signed access. +- * @nonce : Random number generated by the host for the requests +- * and copied to the response by the RPMB engine. +- * @write_counter: Counter value for the total amount of the successful +- * authenticated data write requests made by the host. +- * @addr : Address of the data to be programmed to or read +- * from the RPMB. Address is the serial number of +- * the accessed block (half sector 256B). +- * @block_count : Number of blocks (half sectors, 256B) requested to be +- * read/programmed. +- * @result : Includes information about the status of the write counter +- * (valid, expired) and result of the access made to the RPMB. +- * @req_resp : Defines the type of request and response to/from the memory. 
+- */ +-struct rpmb_frame_jdec { +- u8 stuff[196]; +- u8 key_mac[32]; +- u8 data[256]; +- u8 nonce[16]; +- __be32 write_counter; +- __be16 addr; +- __be16 block_count; +- __be16 result; +- __be16 req_resp; +-} __packed; +- +-#define RPMB_PROGRAM_KEY 0x0001 /* Program RPMB Authentication Key */ +-#define RPMB_GET_WRITE_COUNTER 0x0002 /* Read RPMB write counter */ +-#define RPMB_WRITE_DATA 0x0003 /* Write data to RPMB partition */ +-#define RPMB_READ_DATA 0x0004 /* Read data from RPMB partition */ +-#define RPMB_RESULT_READ 0x0005 /* Read result request (Internal) */ +- +-#define RPMB_REQ2RESP(_OP) ((_OP) << 8) +-#define RPMB_RESP2REQ(_OP) ((_OP) >> 8) +- +-/** +- * enum rpmb_op_result - rpmb operation results +- * +- * @RPMB_ERR_OK : operation successful +- * @RPMB_ERR_GENERAL : general failure +- * @RPMB_ERR_AUTH : mac doesn't match or ac calculation failure +- * @RPMB_ERR_COUNTER : counter doesn't match or counter increment failure +- * @RPMB_ERR_ADDRESS : address out of range or wrong address alignment +- * @RPMB_ERR_WRITE : data, counter, or result write failure +- * @RPMB_ERR_READ : data, counter, or result read failure +- * @RPMB_ERR_NO_KEY : authentication key not yet programmed +- * +- * @RPMB_ERR_COUNTER_EXPIRED: counter expired +- */ +-enum rpmb_op_result { +- RPMB_ERR_OK = 0x0000, +- RPMB_ERR_GENERAL = 0x0001, +- RPMB_ERR_AUTH = 0x0002, +- RPMB_ERR_COUNTER = 0x0003, +- RPMB_ERR_ADDRESS = 0x0004, +- RPMB_ERR_WRITE = 0x0005, +- RPMB_ERR_READ = 0x0006, +- RPMB_ERR_NO_KEY = 0x0007, +- +- RPMB_ERR_COUNTER_EXPIRED = 0x0080 +-}; +- +-/** +- * enum rpmb_type - type of underlying storage technology +- * +- * @RPMB_TYPE_ANY : any type, used for search only +- * @RPMB_TYPE_EMMC : eMMC (JESD84-B50.1) +- * @RPMB_TYPE_UFS : UFS (JESD220) +- * @RPMB_TYPE_NVME : NVM Express Revision 1.3a +- * @RPMB_TYPE_SIM : Simulation device. 
+- * @RPMB_TYPE_MAX : upper sentinel +- */ +-enum rpmb_type { +- RPMB_TYPE_ANY = 0, +- RPMB_TYPE_EMMC, +- RPMB_TYPE_UFS, +- RPMB_TYPE_NVME, +- +- RPMB_TYPE_SIM = 0x0100, +- RPMB_TYPE_MAX = RPMB_TYPE_SIM | RPMB_TYPE_NVME, +-}; +- +-#define RPMB_TYPE_HW(_type) ((_type) & 0xFF) ++#define RPMB_API_VERSION 0x80000001 + + extern struct class rpmb_class; + +-#define RPMB_F_WRITE BIT(0) +-#define RPMB_F_REL_WRITE BIT(1) +- + /** + * struct rpmb_cmd: rpmb access command + * +@@ -122,10 +31,6 @@ struct rpmb_cmd { + void *frames; + }; + +-enum rpmb_auth_method { +- RPMB_HMAC_ALGO_SHA_256 = 0, +-}; +- + /** + * struct rpmb_ops - RPMB ops to be implemented by underlying block device + * +@@ -163,6 +68,8 @@ struct rpmb_ops { + * @dev : device + * @id : device id + * @target : RPMB target/region within the physical device ++ * @cdev : character dev ++ * @status : device status + * @ops : operation exported by block layer + */ + struct rpmb_dev { +@@ -170,6 +77,10 @@ struct rpmb_dev { + struct device dev; + int id; + u8 target; ++#ifdef CONFIG_RPMB_INTF_DEV ++ struct cdev cdev; ++ unsigned long status; ++#endif /* CONFIG_RPMB_INTF_DEV */ + const struct rpmb_ops *ops; + }; + +diff --git a/include/uapi/linux/rpmb.h b/include/uapi/linux/rpmb.h +new file mode 100644 +index 000000000000..2bcfb2715dfa +--- /dev/null ++++ b/include/uapi/linux/rpmb.h +@@ -0,0 +1,192 @@ ++/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ ++/* ++ * Copyright (C) 2015-2018 Intel Corp. All rights reserved ++ */ ++#ifndef _UAPI_LINUX_RPMB_H_ ++#define _UAPI_LINUX_RPMB_H_ ++ ++#include ++ ++/** ++ * enum rpmb_type - type of underlying storage technology ++ * ++ * @RPMB_TYPE_ANY : any type, used for search only ++ * @RPMB_TYPE_EMMC : eMMC (JESD84-B50.1) ++ * @RPMB_TYPE_UFS : UFS (JESD220) ++ * @RPMB_TYPE_NVME : NVM Express Revision 1.3a ++ * @RPMB_TYPE_SIM : Simulation device. 
++ * @RPMB_TYPE_MAX : upper sentinel ++ */ ++enum rpmb_type { ++ RPMB_TYPE_ANY = 0, ++ RPMB_TYPE_EMMC, ++ RPMB_TYPE_UFS, ++ RPMB_TYPE_NVME, ++ ++ RPMB_TYPE_SIM = 0x0100, ++ RPMB_TYPE_MAX = RPMB_TYPE_SIM | RPMB_TYPE_NVME, ++}; ++ ++#define RPMB_TYPE_HW(_type) ((_type) & 0xFF) ++ ++/** ++ * struct rpmb_frame_jdec - rpmb frame as defined by JDEC specs ++ * ++ * @stuff : stuff bytes ++ * @key_mac : The authentication key or the message authentication ++ * code (MAC) depending on the request/response type. ++ * The MAC will be delivered in the last (or the only) ++ * block of data. ++ * @data : Data to be written or read by signed access. ++ * @nonce : Random number generated by the host for the requests ++ * and copied to the response by the RPMB engine. ++ * @write_counter: Counter value for the total amount of the successful ++ * authenticated data write requests made by the host. ++ * @addr : Address of the data to be programmed to or read ++ * from the RPMB. Address is the serial number of ++ * the accessed block (half sector 256B). ++ * @block_count : Number of blocks (half sectors, 256B) requested to be ++ * read/programmed. ++ * @result : Includes information about the status of the write counter ++ * (valid, expired) and result of the access made to the RPMB. ++ * @req_resp : Defines the type of request and response to/from the memory. 
++ */ ++struct rpmb_frame_jdec { ++ __u8 stuff[196]; ++ __u8 key_mac[32]; ++ __u8 data[256]; ++ __u8 nonce[16]; ++ __be32 write_counter; ++ __be16 addr; ++ __be16 block_count; ++ __be16 result; ++ __be16 req_resp; ++} __attribute__((packed)); ++ ++/* length of the part of the frame used for HMAC computation */ ++#define rpmb_jdec_hmac_data_len \ ++ (sizeof(struct rpmb_frame_jdec) - \ ++ offsetof(struct rpmb_frame_jdec, data)) ++ ++#define RPMB_PROGRAM_KEY 0x0001 /* Program RPMB Authentication Key */ ++#define RPMB_GET_WRITE_COUNTER 0x0002 /* Read RPMB write counter */ ++#define RPMB_WRITE_DATA 0x0003 /* Write data to RPMB partition */ ++#define RPMB_READ_DATA 0x0004 /* Read data from RPMB partition */ ++#define RPMB_RESULT_READ 0x0005 /* Read result request (Internal) */ ++ ++#define RPMB_REQ2RESP(_OP) ((_OP) << 8) ++#define RPMB_RESP2REQ(_OP) ((_OP) >> 8) ++ ++/** ++ * enum rpmb_op_result - rpmb operation results ++ * ++ * @RPMB_ERR_OK: operation successful ++ * @RPMB_ERR_GENERAL: general failure ++ * @RPMB_ERR_AUTH: mac doesn't match or ac calculation failure ++ * @RPMB_ERR_COUNTER: counter doesn't match or counter increment failure ++ * @RPMB_ERR_ADDRESS: address out of range or wrong address alignment ++ * @RPMB_ERR_WRITE: data, counter, or result write failure ++ * @RPMB_ERR_READ: data, counter, or result read failure ++ * @RPMB_ERR_NO_KEY: authentication key not yet programmed ++ * ++ * @RPMB_ERR_COUNTER_EXPIRED: counter expired ++ */ ++enum rpmb_op_result { ++ RPMB_ERR_OK = 0x0000, ++ RPMB_ERR_GENERAL = 0x0001, ++ RPMB_ERR_AUTH = 0x0002, ++ RPMB_ERR_COUNTER = 0x0003, ++ RPMB_ERR_ADDRESS = 0x0004, ++ RPMB_ERR_WRITE = 0x0005, ++ RPMB_ERR_READ = 0x0006, ++ RPMB_ERR_NO_KEY = 0x0007, ++ ++ RPMB_ERR_COUNTER_EXPIRED = 0x0080 ++}; ++ ++#define RPMB_F_READ 0UL ++#define RPMB_F_WRITE (1UL << 0) ++#define RPMB_F_REL_WRITE (1UL << 1) ++ ++enum rpmb_auth_method { ++ RPMB_HMAC_ALGO_SHA_256 = 0, ++}; ++ ++/** ++ * struct rpmb_cmd - rpmb access command ++ * ++ * @flags: 
command flags ++ * 0 - read command ++ * 1 - write command RPMB_F_WRITE ++ * 2 - reliable write RPMB_F_REL_WRITE ++ * @nframes: number of rpmb data frames in the command. ++ * 0 means 1 frame with meta data only. ++ * @frames_ptr: a pointer to the list of rpmb frames ++ */ ++struct rpmb_ioc_cmd { ++ __u32 flags; ++ __u32 nframes; ++ __aligned_u64 frames_ptr; ++}; ++ ++#define rpmb_ioc_cmd_set_frames(_cmd, _ptr) \ ++ (_cmd).frames_ptr = (__aligned_u64)(intptr_t)(_ptr) ++ ++#define rpmb_ioc_cmd_set(_cmd, _flags, _ptr, _n) do { \ ++ struct rpmb_ioc_cmd *icmd = &(_cmd); \ ++ icmd->flags = (_flags); \ ++ icmd->nframes = (_n); \ ++ icmd->frames_ptr = (__aligned_u64)(intptr_t)(_ptr); \ ++} while (0) ++ ++#define rpmb_ioc_frames_len_jdec(_n) \ ++ (((_n) ?: 1) * sizeof(struct rpmb_frame_jdec)) ++ ++/** ++ * struct rpmb_ioc_seq_cmd - rpmb command sequence ++ * ++ * @num_of_cmds: number of commands ++ * @cmds: list of rpmb commands ++ */ ++struct rpmb_ioc_seq_cmd { ++ __u64 num_of_cmds; ++ struct rpmb_ioc_cmd cmds[0]; ++} __attribute__((packed)); ++ ++/** ++ * struct rpmb_ioc_ver_cmd - rpmb api version ++ * ++ * @api_version: rpmb API version. ++ */ ++struct rpmb_ioc_ver_cmd { ++ __u32 api_version; ++} __attribute__((packed)); ++ ++/** ++ * struct rpmb_ioc_cap_cmd - rpmb capabilities ++ * ++ * @device_type: underlying storage device type (defined in rpmb_type above) ++ * @target: rpmb target/region within RPMB partition. ++ * @capacity: storage capacity (in units of 128K) ++ * @block_size: storage data block size (in units of 256B) ++ * @wr_cnt_max: maximal number of block that can be written in a single request. ++ * @rd_cnt_max: maximal number of block that can be read in a single request. ++ * @auth_method: authentication method: currently always HMAC_SHA_256 ++ * @reserved: reserved to align to 4 bytes. 
++ */ ++struct rpmb_ioc_cap_cmd { ++ __u16 device_type; ++ __u16 target; ++ __u16 capacity; ++ __u16 block_size; ++ __u16 wr_cnt_max; ++ __u16 rd_cnt_max; ++ __u16 auth_method; ++ __u16 reserved; ++} __attribute__((packed)); ++ ++#define RPMB_IOC_VER_CMD _IOR(0xB5, 80, struct rpmb_ioc_ver_cmd) ++#define RPMB_IOC_CAP_CMD _IOR(0xB5, 81, struct rpmb_ioc_cap_cmd) ++#define RPMB_IOC_SEQ_CMD _IOWR(0xB5, 82, struct rpmb_ioc_seq_cmd) ++ ++#endif /* _UAPI_LINUX_RPMB_H_ */ +-- +2.17.1 + diff --git a/patches/0009-mfd-intel-lpss-add-children-devices-asynchronously.lpss b/patches/0009-mfd-intel-lpss-add-children-devices-asynchronously.lpss new file mode 100644 index 0000000000..235d4875e5 --- /dev/null +++ b/patches/0009-mfd-intel-lpss-add-children-devices-asynchronously.lpss @@ -0,0 +1,88 @@ +From cabc64ffd3792ad980a507e080a97eeb9c4d28ce Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Thu, 21 Mar 2019 16:00:21 +0300 +Subject: [PATCH 09/40] mfd: intel-lpss: add children devices asynchronously + +TBD + +Signed-off-by: Felipe Balbi +Signed-off-by: Andy Shevchenko +--- + drivers/mfd/intel-lpss.c | 42 ++++++++++++++++++++++++---------------- + 1 file changed, 25 insertions(+), 17 deletions(-) + +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c +index bfe4ff337581..e8eb3ce98d30 100644 +--- a/drivers/mfd/intel-lpss.c ++++ b/drivers/mfd/intel-lpss.c +@@ -10,6 +10,7 @@ + * Jarkko Nikula + */ + ++#include + #include + #include + #include +@@ -371,6 +372,29 @@ static void intel_lpss_unregister_clock(struct intel_lpss *lpss) + intel_lpss_unregister_clock_tree(lpss->clk); + } + ++static void intel_lpss_async_add_devices(void *_lpss, async_cookie_t cookie) ++{ ++ struct intel_lpss *lpss = _lpss; ++ int ret; ++ ++ if (intel_lpss_has_idma(lpss)) { ++ ret = mfd_add_devices(lpss->dev, lpss->devid, ++ &intel_lpss_idma64_cell, 1, lpss->info->mem, ++ lpss->info->irq, NULL); ++ if (ret) ++ dev_warn(lpss->dev, "Failed to add %s, fallback to PIO\n", ++ LPSS_IDMA64_DRIVER_NAME); 
++ } ++ ++ ret = mfd_add_devices(lpss->dev, lpss->devid, lpss->cell, ++ 1, lpss->info->mem, lpss->info->irq, NULL); ++ if (ret) { ++ intel_lpss_debugfs_remove(lpss); ++ intel_lpss_ltr_hide(lpss); ++ intel_lpss_unregister_clock(lpss); ++ } ++} ++ + int intel_lpss_probe(struct device *dev, + const struct intel_lpss_platform_info *info) + { +@@ -417,28 +441,12 @@ int intel_lpss_probe(struct device *dev, + if (ret) + dev_warn(dev, "Failed to create debugfs entries\n"); + +- if (intel_lpss_has_idma(lpss)) { +- ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell, +- 1, info->mem, info->irq, NULL); +- if (ret) +- dev_warn(dev, "Failed to add %s, fallback to PIO\n", +- LPSS_IDMA64_DRIVER_NAME); +- } +- +- ret = mfd_add_devices(dev, lpss->devid, lpss->cell, +- 1, info->mem, info->irq, NULL); +- if (ret) +- goto err_remove_ltr; ++ async_schedule(intel_lpss_async_add_devices, lpss); + + dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); + + return 0; + +-err_remove_ltr: +- intel_lpss_debugfs_remove(lpss); +- intel_lpss_ltr_hide(lpss); +- intel_lpss_unregister_clock(lpss); +- + err_clk_register: + ida_simple_remove(&intel_lpss_devid_ida, lpss->devid); + +-- +2.17.1 + diff --git a/patches/0009-net-stmmac-add-EHL-PSE0-PSE1-1Gbps-PCI-info-a.connectivity b/patches/0009-net-stmmac-add-EHL-PSE0-PSE1-1Gbps-PCI-info-a.connectivity new file mode 100644 index 0000000000..48e192ae32 --- /dev/null +++ b/patches/0009-net-stmmac-add-EHL-PSE0-PSE1-1Gbps-PCI-info-a.connectivity @@ -0,0 +1,114 @@ +From fdbd4da4d50dcbdf35a0fea52defba743d4b6ebf Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Wed, 26 Jun 2019 07:31:47 +0800 +Subject: [PATCH 009/108] net: stmmac: add EHL PSE0 & PSE1 1Gbps PCI info and + PCI ID + +Add EHL PSE0/1 RGMII & SGMII 1Gbps PCI info and PCI ID + +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 72 +++++++++++++++++++ + 1 file changed, 72 insertions(+) + +diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 03e999b77aab..1fcd75aba1be 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -243,6 +243,66 @@ static struct stmmac_pci_info ehl_rgmii1g_pci_info = { + .setup = ehl_rgmii_data, + }; + ++static int ehl_pse0_common_data(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat) ++{ ++ plat->bus_id = 2; ++ plat->phy_addr = 1; ++ return ehl_common_data(pdev, plat); ++} ++ ++static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat) ++{ ++ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; ++ return ehl_pse0_common_data(pdev, plat); ++} ++ ++static struct stmmac_pci_info ehl_pse0_rgmii1g_pci_info = { ++ .setup = ehl_pse0_rgmii1g_data, ++}; ++ ++static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat) ++{ ++ plat->phy_interface = PHY_INTERFACE_MODE_SGMII; ++ return ehl_pse0_common_data(pdev, plat); ++} ++ ++static struct stmmac_pci_info ehl_pse0_sgmii1g_pci_info = { ++ .setup = ehl_pse0_sgmii1g_data, ++}; ++ ++static int ehl_pse1_common_data(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat) ++{ ++ plat->bus_id = 3; ++ plat->phy_addr = 1; ++ return ehl_common_data(pdev, plat); ++} ++ ++static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat) ++{ ++ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; ++ return ehl_pse1_common_data(pdev, plat); ++} ++ ++static struct stmmac_pci_info ehl_pse1_rgmii1g_pci_info = { ++ .setup = ehl_pse1_rgmii1g_data, ++}; ++ ++static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat) ++{ ++ plat->phy_interface = PHY_INTERFACE_MODE_SGMII; ++ return ehl_pse1_common_data(pdev, plat); ++} ++ ++static struct stmmac_pci_info ehl_pse1_sgmii1g_pci_info = { ++ .setup = ehl_pse1_sgmii1g_data, ++}; ++ + static int 
tgl_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) + { +@@ -588,6 +648,10 @@ static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); + #define STMMAC_DEVICE_ID 0x1108 + #define STMMAC_EHL_RGMII1G_ID 0x4b30 + #define STMMAC_EHL_SGMII1G_ID 0x4b31 ++#define STMMAC_EHL_PSE0_RGMII1G_ID 0x4ba0 ++#define STMMAC_EHL_PSE0_SGMII1G_ID 0x4ba1 ++#define STMMAC_EHL_PSE1_RGMII1G_ID 0x4bb0 ++#define STMMAC_EHL_PSE1_SGMII1G_ID 0x4bb1 + #define STMMAC_TGL_SGMII1G_ID 0xa0ac + #define STMMAC_GMAC5_ID 0x7102 + +@@ -602,6 +666,14 @@ static const struct pci_device_id stmmac_id_table[] = { + STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_RGMII1G_ID, ehl_rgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII1G_ID, ehl_sgmii1g_pci_info), ++ STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE0_RGMII1G_ID, ++ ehl_pse0_rgmii1g_pci_info), ++ STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE0_SGMII1G_ID, ++ ehl_pse0_sgmii1g_pci_info), ++ STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE1_RGMII1G_ID, ++ ehl_pse1_rgmii1g_pci_info), ++ STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE1_SGMII1G_ID, ++ ehl_pse1_sgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_TGL_SGMII1G_ID, tgl_sgmii1g_pci_info), + STMMAC_DEVICE(SYNOPSYS, STMMAC_GMAC5_ID, snps_gmac5_pci_info), + {} +-- +2.17.1 + diff --git a/patches/0009-platform-x86-Add-Tiger-Lake-TGL-platform-support-.core-ehl b/patches/0009-platform-x86-Add-Tiger-Lake-TGL-platform-support-.core-ehl new file mode 100644 index 0000000000..295372f7c9 --- /dev/null +++ b/patches/0009-platform-x86-Add-Tiger-Lake-TGL-platform-support-.core-ehl @@ -0,0 +1,108 @@ +From f73ee8066404dd5f3fb67bc00a0cdba9fa17de17 Mon Sep 17 00:00:00 2001 +From: Gayatri Kammela +Date: Tue, 3 Sep 2019 17:02:42 -0700 +Subject: [PATCH 09/12] platform/x86: Add Tiger Lake(TGL) platform support to + intel_pmc_core driver + +Add Tiger Lake to the list of the platforms that intel_pmc_core driver +supports for the pmc_core device. 
+ +Just like ICL, TGL can also reuse all the CNL PCH IPs. Since TGL has +almost the same number of PCH IPs as ICL, reuse ICL's PPFEAR_NUM_ENTRIES +instead of defining a new macro. + +Cc: Peter Zijlstra +Cc: Srinivas Pandruvada +Cc: Andy Shevchenko +Cc: Kan Liang +Cc: David E. Box +Cc: Rajneesh Bhardwaj +Cc: Tony Luck +Reviewed-by: Tony Luck +Signed-off-by: Gayatri Kammela +--- + drivers/platform/x86/intel_pmc_core.c | 40 +++++++++++++++++++++++++-- + 1 file changed, 38 insertions(+), 2 deletions(-) + +diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c +index ea43a5989c96..aef8f6d8bddb 100644 +--- a/drivers/platform/x86/intel_pmc_core.c ++++ b/drivers/platform/x86/intel_pmc_core.c +@@ -190,7 +190,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = { + {"SDX", BIT(4)}, + {"SPE", BIT(5)}, + {"Fuse", BIT(6)}, +- /* Reserved for Cannonlake but valid for Icelake */ ++ /* Reserved for Cannonlake but valid for Icelake and Tigerlake */ + {"SBR8", BIT(7)}, + + {"CSME_FSC", BIT(0)}, +@@ -234,7 +234,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = { + {"HDA_PGD4", BIT(2)}, + {"HDA_PGD5", BIT(3)}, + {"HDA_PGD6", BIT(4)}, +- /* Reserved for Cannonlake but valid for Icelake */ ++ /* Reserved for Cannonlake but valid for Icelake and Tigerlake */ + {"PSF6", BIT(5)}, + {"PSF7", BIT(6)}, + {"PSF8", BIT(7)}, +@@ -265,6 +265,24 @@ static const struct pmc_bit_map *ext_icl_pfear_map[] = { + NULL + }; + ++static const struct pmc_bit_map tgl_pfear_map[] = { ++ /* Tigerlake generation onwards only */ ++ {"PSF9", BIT(0)}, ++ {"RES_66", BIT(1)}, ++ {"RES_67", BIT(2)}, ++ {"RES_68", BIT(3)}, ++ {"RES_69", BIT(4)}, ++ {"RES_70", BIT(5)}, ++ {"TBTLSX", BIT(6)}, ++ {} ++}; ++ ++static const struct pmc_bit_map *ext_tgl_pfear_map[] = { ++ cnp_pfear_map, ++ tgl_pfear_map, ++ NULL ++}; ++ + static const struct pmc_bit_map cnp_slps0_dbg0_map[] = { + {"AUDIO_D3", BIT(0)}, + {"OTG_D3", BIT(1)}, +@@ -383,6 +401,22 @@ static const struct pmc_reg_map 
icl_reg_map = { + .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, + }; + ++static const struct pmc_reg_map tgl_reg_map = { ++ .pfear_sts = ext_tgl_pfear_map, ++ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, ++ .slps0_dbg_maps = cnp_slps0_dbg_maps, ++ .ltr_show_sts = cnp_ltr_show_map, ++ .msr_sts = msr_map, ++ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET, ++ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, ++ .regmap_length = CNP_PMC_MMIO_REG_LEN, ++ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, ++ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, ++ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, ++ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, ++ .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, ++}; ++ + static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) + { + return readb(pmcdev->regbase + offset); +@@ -836,6 +870,8 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { + INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map), + INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map), + INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), ++ INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map), ++ INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map), + {} + }; + +-- +2.17.1 + diff --git a/patches/0009-serial-8250_lpss-Switch-over-to-MSI-interrupts.felipeb-5.4 b/patches/0009-serial-8250_lpss-Switch-over-to-MSI-interrupts.felipeb-5.4 new file mode 100644 index 0000000000..993569aaa8 --- /dev/null +++ b/patches/0009-serial-8250_lpss-Switch-over-to-MSI-interrupts.felipeb-5.4 @@ -0,0 +1,80 @@ +From 389cc5ad1aae992606eb8857c5199abf796d8046 Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Thu, 2 May 2019 08:49:05 +0300 +Subject: [PATCH 09/14] serial: 8250_lpss: Switch over to MSI interrupts + +Some devices support MSI interrupts. Let's at least try to use them in +platforms that provide MSI capability. + +While at that, remove the now duplicated code from qrp_serial_setup(). 
+ +Signed-off-by: Felipe Balbi +--- + drivers/tty/serial/8250/8250_lpss.c | 21 +++++++++------------ + 1 file changed, 9 insertions(+), 12 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c +index 5f72ef3ea574..60eff3240c8a 100644 +--- a/drivers/tty/serial/8250/8250_lpss.c ++++ b/drivers/tty/serial/8250/8250_lpss.c +@@ -221,17 +221,6 @@ static void qrk_serial_exit_dma(struct lpss8250 *lpss) {} + + static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port) + { +- struct pci_dev *pdev = to_pci_dev(port->dev); +- int ret; +- +- pci_set_master(pdev); +- +- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); +- if (ret < 0) +- return ret; +- +- port->irq = pci_irq_vector(pdev, 0); +- + qrk_serial_setup_dma(lpss, port); + return 0; + } +@@ -293,16 +282,22 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) + if (ret) + return ret; + ++ pci_set_master(pdev); ++ + lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL); + if (!lpss) + return -ENOMEM; + ++ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); ++ if (ret < 0) ++ return ret; ++ + lpss->board = (struct lpss8250_board *)id->driver_data; + + memset(&uart, 0, sizeof(struct uart_8250_port)); + + uart.port.dev = &pdev->dev; +- uart.port.irq = pdev->irq; ++ uart.port.irq = pci_irq_vector(pdev, 0); + uart.port.private_data = &lpss->data; + uart.port.type = PORT_16550A; + uart.port.iotype = UPIO_MEM; +@@ -337,6 +332,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) + err_exit: + if (lpss->board->exit) + lpss->board->exit(lpss); ++ pci_free_irq_vectors(pdev); + return ret; + } + +@@ -348,6 +344,7 @@ static void lpss8250_remove(struct pci_dev *pdev) + + if (lpss->board->exit) + lpss->board->exit(lpss); ++ pci_free_irq_vectors(pdev); + } + + static const struct lpss8250_board byt_board = { +-- +2.17.1 + diff --git 
a/patches/0009-trusty-add-couple-non-secure-memory-related-helper-.trusty b/patches/0009-trusty-add-couple-non-secure-memory-related-helper-.trusty new file mode 100644 index 0000000000..c160dd56bd --- /dev/null +++ b/patches/0009-trusty-add-couple-non-secure-memory-related-helper-.trusty @@ -0,0 +1,208 @@ +From 8c7c98945a8bc8e59fc01ba85c91139bc5a3c7e7 Mon Sep 17 00:00:00 2001 +From: Michael Ryleev +Date: Thu, 26 Mar 2015 19:31:25 -0700 +Subject: [PATCH 09/63] trusty: add couple non-secure memory related helper + routines + +trusty_encode_page_info - encodes page physical address, memory +type and other attributes into architecture specific structure +that can be parsed by secure side. + +trusty_call32_mem_buf - can be used by drivers to make +specified smc call with physicaly contigues memory buffer as +an argument. Memory buffer info in retrieved by trusty_encode_page_info +and along with buffer size is encoded into series of 32-bit +smc call parameters. + +Change-Id: I79aadca85e2329bb89469b4c8f183cf0752f7641 +Signed-off-by: Michael Ryleev +--- + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-mem.c | 134 ++++++++++++++++++++++++++++++++++ + include/linux/trusty/trusty.h | 15 ++++ + 3 files changed, 150 insertions(+) + create mode 100644 drivers/trusty/trusty-mem.c + +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index 641ee2a6e830..e527a237cb5d 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -8,3 +8,4 @@ obj-$(CONFIG_TRUSTY_FIQ) += trusty-fiq.o + obj-$(CONFIG_TRUSTY_FIQ_ARM) += trusty-fiq-arm.o + obj-$(CONFIG_TRUSTY_FIQ_ARM64) += trusty-fiq-arm64.o trusty-fiq-arm64-glue.o + obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o ++obj-$(CONFIG_TRUSTY) += trusty-mem.o +diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c +new file mode 100644 +index 000000000000..c55ace25beed +--- /dev/null ++++ b/drivers/trusty/trusty-mem.c +@@ -0,0 +1,134 @@ ++/* ++ * Copyright (C) 2015 Google, Inc. 
++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++ ++static int get_mem_attr(struct page *page, pgprot_t pgprot) ++{ ++#if defined(CONFIG_ARM64) ++ uint64_t mair; ++ uint attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2; ++ ++ asm ("mrs %0, mair_el1\n" : "=&r" (mair)); ++ return (mair >> (attr_index * 8)) & 0xff; ++ ++#elif defined(CONFIG_ARM_LPAE) ++ uint32_t mair; ++ uint attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2); ++ ++ if (attr_index >= 4) { ++ attr_index -= 4; ++ asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair)); ++ } else { ++ asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair)); ++ } ++ return (mair >> (attr_index * 8)) & 0xff; ++ ++#elif defined(CONFIG_ARM) ++ /* check memory type */ ++ switch (pgprot_val(pgprot) & L_PTE_MT_MASK) { ++ case L_PTE_MT_WRITEALLOC: ++ /* Normal: write back write allocate */ ++ return 0xFF; ++ ++ case L_PTE_MT_BUFFERABLE: ++ /* Normal: non-cacheble */ ++ return 0x44; ++ ++ case L_PTE_MT_WRITEBACK: ++ /* Normal: writeback, read allocate */ ++ return 0xEE; ++ ++ case L_PTE_MT_WRITETHROUGH: ++ /* Normal: write through */ ++ return 0xAA; ++ ++ case L_PTE_MT_UNCACHED: ++ /* strongly ordered */ ++ return 0x00; ++ ++ case L_PTE_MT_DEV_SHARED: ++ case L_PTE_MT_DEV_NONSHARED: ++ /* device */ ++ return 0x04; ++ ++ default: ++ return -EINVAL; ++ } ++#else ++ return 0; ++#endif ++} ++ ++int trusty_encode_page_info(struct ns_mem_page_info *inf, ++ struct page *page, pgprot_t pgprot) ++{ ++ int mem_attr; ++ uint64_t pte; ++ ++ 
if (!inf || !page) ++ return -EINVAL; ++ ++ /* get physical address */ ++ pte = (uint64_t) page_to_phys(page); ++ ++ /* get memory attributes */ ++ mem_attr = get_mem_attr(page, pgprot); ++ if (mem_attr < 0) ++ return mem_attr; ++ ++ /* add other attributes */ ++#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE) ++ pte |= pgprot_val(pgprot); ++#elif defined(CONFIG_ARM) ++ if (pgprot_val(pgprot) & L_PTE_USER) ++ pte |= (1 << 6); ++ if (pgprot_val(pgprot) & L_PTE_RDONLY) ++ pte |= (1 << 7); ++ if (pgprot_val(pgprot) & L_PTE_SHARED) ++ pte |= (3 << 8); /* inner sharable */ ++#endif ++ ++ inf->attr = (pte & 0x0000FFFFFFFFFFFFull) | ((uint64_t)mem_attr << 48); ++ return 0; ++} ++ ++int trusty_call32_mem_buf(struct device *dev, u32 smcnr, ++ struct page *page, u32 size, ++ pgprot_t pgprot) ++{ ++ int ret; ++ struct ns_mem_page_info pg_inf; ++ ++ if (!dev || !page) ++ return -EINVAL; ++ ++ ret = trusty_encode_page_info(&pg_inf, page, pgprot); ++ if (ret) ++ return ret; ++ ++ if (SMC_IS_FASTCALL(smcnr)) { ++ return trusty_fast_call32(dev, smcnr, ++ (u32)pg_inf.attr, ++ (u32)(pg_inf.attr >> 32), size); ++ } else { ++ return trusty_std_call32(dev, smcnr, ++ (u32)pg_inf.attr, ++ (u32)(pg_inf.attr >> 32), size); ++ } ++} ++ +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index abb77f1db74d..d084d9d68a7b 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -16,6 +16,9 @@ + + #include + #include ++#include ++#include ++ + + #ifdef CONFIG_TRUSTY + s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2); +@@ -53,4 +56,16 @@ int trusty_call_notifier_register(struct device *dev, + int trusty_call_notifier_unregister(struct device *dev, + struct notifier_block *n); + const char *trusty_version_str_get(struct device *dev); ++ ++struct ns_mem_page_info { ++ uint64_t attr; ++}; ++ ++int trusty_encode_page_info(struct ns_mem_page_info *inf, ++ struct page *page, pgprot_t pgprot); ++ ++int 
trusty_call32_mem_buf(struct device *dev, u32 smcnr, ++ struct page *page, u32 size, ++ pgprot_t pgprot); ++ + #endif +-- +2.17.1 + diff --git a/patches/0009-usb-typec-tps6598x-Start-using-struct-typec_oper.usb-typec b/patches/0009-usb-typec-tps6598x-Start-using-struct-typec_oper.usb-typec new file mode 100644 index 0000000000..04b07d1603 --- /dev/null +++ b/patches/0009-usb-typec-tps6598x-Start-using-struct-typec_oper.usb-typec @@ -0,0 +1,137 @@ +From 68e716da3e8c30983f950e315eef4e9760a2576d Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Tue, 1 Oct 2019 12:21:39 +0300 +Subject: [PATCH 09/18] usb: typec: tps6598x: Start using struct + typec_operations + +Supplying the operation callbacks as part of a struct +typec_operations instead of as part of struct +typec_capability during port registration. After this there +is not need to keep the capabilities stored anywhere in the +driver. + +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/tps6598x.c | 49 +++++++++++++++++++----------------- + 1 file changed, 26 insertions(+), 23 deletions(-) + +diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c +index a38d1409f15b..0698addd1185 100644 +--- a/drivers/usb/typec/tps6598x.c ++++ b/drivers/usb/typec/tps6598x.c +@@ -94,7 +94,6 @@ struct tps6598x { + struct typec_port *port; + struct typec_partner *partner; + struct usb_pd_identity partner_identity; +- struct typec_capability typec_cap; + }; + + /* +@@ -307,11 +306,10 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd, + return 0; + } + +-static int +-tps6598x_dr_set(const struct typec_capability *cap, enum typec_data_role role) ++static int tps6598x_dr_set(struct typec_port *port, enum typec_data_role role) + { +- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap); + const char *cmd = (role == TYPEC_DEVICE) ? 
"SWUF" : "SWDF"; ++ struct tps6598x *tps = typec_get_drvdata(port); + u32 status; + int ret; + +@@ -338,11 +336,10 @@ tps6598x_dr_set(const struct typec_capability *cap, enum typec_data_role role) + return ret; + } + +-static int +-tps6598x_pr_set(const struct typec_capability *cap, enum typec_role role) ++static int tps6598x_pr_set(struct typec_port *port, enum typec_role role) + { +- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap); + const char *cmd = (role == TYPEC_SINK) ? "SWSk" : "SWSr"; ++ struct tps6598x *tps = typec_get_drvdata(port); + u32 status; + int ret; + +@@ -369,6 +366,11 @@ tps6598x_pr_set(const struct typec_capability *cap, enum typec_role role) + return ret; + } + ++static const struct typec_operations tps6598x_ops = { ++ .dr_set = tps6598x_dr_set, ++ .pr_set = tps6598x_pr_set, ++}; ++ + static irqreturn_t tps6598x_interrupt(int irq, void *data) + { + struct tps6598x *tps = data; +@@ -448,6 +450,7 @@ static const struct regmap_config tps6598x_regmap_config = { + + static int tps6598x_probe(struct i2c_client *client) + { ++ struct typec_capability typec_cap = { }; + struct tps6598x *tps; + u32 status; + u32 conf; +@@ -492,40 +495,40 @@ static int tps6598x_probe(struct i2c_client *client) + if (ret < 0) + return ret; + +- tps->typec_cap.revision = USB_TYPEC_REV_1_2; +- tps->typec_cap.pd_revision = 0x200; +- tps->typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE; +- tps->typec_cap.pr_set = tps6598x_pr_set; +- tps->typec_cap.dr_set = tps6598x_dr_set; ++ typec_cap.revision = USB_TYPEC_REV_1_2; ++ typec_cap.pd_revision = 0x200; ++ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE; ++ typec_cap.driver_data = tps; ++ typec_cap.ops = &tps6598x_ops; + + switch (TPS_SYSCONF_PORTINFO(conf)) { + case TPS_PORTINFO_SINK_ACCESSORY: + case TPS_PORTINFO_SINK: +- tps->typec_cap.type = TYPEC_PORT_SNK; +- tps->typec_cap.data = TYPEC_PORT_UFP; ++ typec_cap.type = TYPEC_PORT_SNK; ++ typec_cap.data = TYPEC_PORT_UFP; + break; + case 
TPS_PORTINFO_DRP_UFP_DRD: + case TPS_PORTINFO_DRP_DFP_DRD: +- tps->typec_cap.type = TYPEC_PORT_DRP; +- tps->typec_cap.data = TYPEC_PORT_DRD; ++ typec_cap.type = TYPEC_PORT_DRP; ++ typec_cap.data = TYPEC_PORT_DRD; + break; + case TPS_PORTINFO_DRP_UFP: +- tps->typec_cap.type = TYPEC_PORT_DRP; +- tps->typec_cap.data = TYPEC_PORT_UFP; ++ typec_cap.type = TYPEC_PORT_DRP; ++ typec_cap.data = TYPEC_PORT_UFP; + break; + case TPS_PORTINFO_DRP_DFP: +- tps->typec_cap.type = TYPEC_PORT_DRP; +- tps->typec_cap.data = TYPEC_PORT_DFP; ++ typec_cap.type = TYPEC_PORT_DRP; ++ typec_cap.data = TYPEC_PORT_DFP; + break; + case TPS_PORTINFO_SOURCE: +- tps->typec_cap.type = TYPEC_PORT_SRC; +- tps->typec_cap.data = TYPEC_PORT_DFP; ++ typec_cap.type = TYPEC_PORT_SRC; ++ typec_cap.data = TYPEC_PORT_DFP; + break; + default: + return -ENODEV; + } + +- tps->port = typec_register_port(&client->dev, &tps->typec_cap); ++ tps->port = typec_register_port(&client->dev, &typec_cap); + if (IS_ERR(tps->port)) + return PTR_ERR(tps->port); + +-- +2.17.1 + diff --git a/patches/0010-ASoC-Intel-Skylake-Reload-libraries-on-D0-entry-for-.audio b/patches/0010-ASoC-Intel-Skylake-Reload-libraries-on-D0-entry-for-.audio new file mode 100644 index 0000000000..bcdb7b6ded --- /dev/null +++ b/patches/0010-ASoC-Intel-Skylake-Reload-libraries-on-D0-entry-for-.audio @@ -0,0 +1,83 @@ +From 8369327bb3c47195278bdf4ae362fdbbc8a171de Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 8 Oct 2018 12:33:18 +0200 +Subject: [PATCH 010/193] ASoC: Intel: Skylake: Reload libraries on D0 entry + for CNL + +As libraries are lost in D3, they need to be reloaded on D0 entry. +Cannonlake's library load process is equivalent to Broxton's, thus reuse +the existing one. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 3 ++- + sound/soc/intel/skylake/cnl-sst.c | 11 +++++++++++ + sound/soc/intel/skylake/skl-sst-dsp.h | 2 ++ + 3 files changed, 15 insertions(+), 1 deletion(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 666a11a2e40b..c0e9a05b53cb 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -45,7 +45,7 @@ static unsigned int bxt_get_errorcode(struct sst_dsp *ctx) + return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE); + } + +-static int ++int + bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count) + { + struct snd_dma_buffer dmab; +@@ -88,6 +88,7 @@ bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count) + skl_release_library(linfo, lib_count); + return ret; + } ++EXPORT_SYMBOL_GPL(bxt_load_library); + + /* + * First boot sequence has some extra steps. Core 0 waits for power +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index abb0d0f8ae8d..35dae6ed6668 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -178,6 +178,16 @@ static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id) + return ret; + } + ++ if (cnl->lib_count > 1) { ++ ret = ctx->fw_ops.load_library(ctx, cnl->lib_info, ++ cnl->lib_count); ++ if (ret < 0) { ++ dev_err(ctx->dev, ++ "reload libs failed: %d\n", ret); ++ return ret; ++ } ++ } ++ + cnl->cores.state[core_id] = SKL_DSP_RUNNING; + return ret; + } +@@ -272,6 +282,7 @@ static const struct skl_dsp_fw_ops cnl_fw_ops = { + .set_state_D3 = cnl_set_dsp_D3, + .load_fw = cnl_load_base_firmware, + .get_fw_errcode = cnl_get_errno, ++ .load_library = bxt_load_library, + }; + + static struct sst_ops cnl_ops = { +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index f42358f6977f..f181e7e3e003 100644 +--- 
a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -241,6 +241,8 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + struct skl_dev **dsp); + void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); ++int bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, ++ int lib_count); + + int snd_skl_parse_manifest(struct sst_dsp *ctx, const struct firmware *fw, + unsigned int offset, int index); +-- +2.17.1 + diff --git a/patches/0010-FIXUP-mfd-intel-lpss-Probe-UART-devices-synchronously.lpss b/patches/0010-FIXUP-mfd-intel-lpss-Probe-UART-devices-synchronously.lpss new file mode 100644 index 0000000000..55e9abd4dd --- /dev/null +++ b/patches/0010-FIXUP-mfd-intel-lpss-Probe-UART-devices-synchronously.lpss @@ -0,0 +1,65 @@ +From 73082baf22046ca05b8652912c7eacb8351790f0 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Fri, 15 Jun 2018 14:06:00 +0300 +Subject: [PATCH 10/40] FIXUP: mfd: intel-lpss: Probe UART devices + synchronously + +From Sathyanarayanan Kuppuswamy: + +Issue: With this patch merged, I am not able to get my console working +sometimes ( ~70% of the time). Without this patch console has no issues. 
+ +Signed-off-by: Andy Shevchenko +--- + drivers/mfd/intel-lpss.c | 20 +++++++++++++++++--- + 1 file changed, 17 insertions(+), 3 deletions(-) + +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c +index e8eb3ce98d30..557ee9d96579 100644 +--- a/drivers/mfd/intel-lpss.c ++++ b/drivers/mfd/intel-lpss.c +@@ -372,9 +372,8 @@ static void intel_lpss_unregister_clock(struct intel_lpss *lpss) + intel_lpss_unregister_clock_tree(lpss->clk); + } + +-static void intel_lpss_async_add_devices(void *_lpss, async_cookie_t cookie) ++static int intel_lpss_add_devices(struct intel_lpss *lpss) + { +- struct intel_lpss *lpss = _lpss; + int ret; + + if (intel_lpss_has_idma(lpss)) { +@@ -393,6 +392,12 @@ static void intel_lpss_async_add_devices(void *_lpss, async_cookie_t cookie) + intel_lpss_ltr_hide(lpss); + intel_lpss_unregister_clock(lpss); + } ++ return ret; ++} ++ ++static void intel_lpss_async_add_devices(void *lpss, async_cookie_t cookie) ++{ ++ intel_lpss_add_devices(lpss); + } + + int intel_lpss_probe(struct device *dev, +@@ -441,7 +446,16 @@ int intel_lpss_probe(struct device *dev, + if (ret) + dev_warn(dev, "Failed to create debugfs entries\n"); + +- async_schedule(intel_lpss_async_add_devices, lpss); ++ /* ++ * Probe UART devices synchronously to avoid serial interface ++ * enumeration unpredictability. 
++ */ ++ if (lpss->type == LPSS_DEV_UART) { ++ ret = intel_lpss_add_devices(lpss); ++ if (ret) ++ goto err_clk_register; ++ } else ++ async_schedule(intel_lpss_async_add_devices, lpss); + + dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); + +-- +2.17.1 + diff --git a/patches/0010-Fix-the-sample-data-flushing-issue-on-all-cpu.sep-socwatch b/patches/0010-Fix-the-sample-data-flushing-issue-on-all-cpu.sep-socwatch new file mode 100644 index 0000000000..0a37ebc54e --- /dev/null +++ b/patches/0010-Fix-the-sample-data-flushing-issue-on-all-cpu.sep-socwatch @@ -0,0 +1,101 @@ +From 972b0bd694254a783c57fff2caf5edcb52296703 Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Wed, 30 Jan 2019 16:36:50 -0800 +Subject: [PATCH 10/27] Fix the sample data flushing issue on all cpu buffers + +Currently, per-cpu buffers are flushed by each cpu on SOS. However, +when ACRN SOS launches UOS, it offlines 3 of 4 vcpus, which prevents +flushing sample data from all cpu buffers. + +This patch makes the remaining online cpu to flush data samples from +all cpu buffers. 
+ +Signed-off-by: Min Lim +--- + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 11 +++++++++-- + drivers/platform/x86/sepdk/sep/pmi.c | 17 ++++++++++++++--- + 2 files changed, 23 insertions(+), 5 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +index bfc52cb4d494..e4b9a80efe9a 100755 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -2241,6 +2241,8 @@ static VOID lwpmudrv_ACRN_Buffer_Read( + #endif + ) + { ++ S32 i; ++ + SEP_DRV_LOG_TRACE_IN(""); + + if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { +@@ -2248,7 +2250,9 @@ static VOID lwpmudrv_ACRN_Buffer_Read( + return; + } + +- CONTROL_Invoke_Parallel(PMI_Buffer_Handler, NULL); ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ PMI_Buffer_Handler(&i); ++ } + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + mod_timer(buffer_read_timer, jiffies + buffer_timer_interval); +@@ -4261,7 +4265,10 @@ static OS_STATUS lwpmudrv_Prepare_Stop(void) + + lwpmudrv_ACRN_Flush_Stop_Timer(); + SEP_DRV_LOG_TRACE("Calling final PMI_Buffer_Handler\n"); +- CONTROL_Invoke_Parallel(PMI_Buffer_Handler, NULL); ++ ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { ++ PMI_Buffer_Handler(&i); ++ } + #endif + + SEP_DRV_LOG_TRACE("Outside of all interrupts."); +diff --git a/drivers/platform/x86/sepdk/sep/pmi.c b/drivers/platform/x86/sepdk/sep/pmi.c +index 934473f57efa..516a7f002764 100755 +--- a/drivers/platform/x86/sepdk/sep/pmi.c ++++ b/drivers/platform/x86/sepdk/sep/pmi.c +@@ -442,10 +442,21 @@ VOID PMI_Buffer_Handler(PVOID data) + U64 overflow_status = 0; + + if (!pcb || !cpu_buf || !devices) { ++ SEP_DRV_LOG_ERROR( ++ "Invalid data pointers in PMI_Buffer_Handler!\n"); + return; + } + +- cpu_id = (S32)CONTROL_THIS_CPU(); ++ if (data) { ++ cpu_id = *(S32 *)data; ++ if (cpu_id >= GLOBAL_STATE_num_cpus(driver_state)) { ++ SEP_DRV_LOG_ERROR( ++ "Invalid cpu_id: %d\n", cpu_id); ++ return; ++ } ++ } else 
{ ++ cpu_id = (S32)CONTROL_THIS_CPU(); ++ } + pcpu = &pcb[cpu_id]; + bd = &cpu_buf[cpu_id]; + dev_idx = core_to_dev_map[cpu_id]; +@@ -493,14 +504,14 @@ VOID PMI_Buffer_Handler(PVOID data) + SEP_DRV_LOG_TRACE("payload_size = %x\n", payload_size); + if (header.payload_size > payload_size) { + // Mismatch in payload size in header info +- SEP_PRINT_ERROR( ++ SEP_DRV_LOG_ERROR( + "Mismatch in data size: header=%llu, payload_size=%d\n", + header.payload_size, payload_size); + goto handler_cleanup; + } + if (header.cpu_id != cpu_id) { + // Mismatch in cpu index in header info +- SEP_PRINT_ERROR( ++ SEP_DRV_LOG_ERROR( + "Mismatch in cpu idx: header=%u, buffer=%d\n", + header.cpu_id, cpu_id); + goto handler_cleanup; +-- +2.17.1 + diff --git a/patches/0010-PTP-Add-support-for-Intel-Timed-GPIO-controlle.felipeb-5.4 b/patches/0010-PTP-Add-support-for-Intel-Timed-GPIO-controlle.felipeb-5.4 new file mode 100644 index 0000000000..2a9a9a3b12 --- /dev/null +++ b/patches/0010-PTP-Add-support-for-Intel-Timed-GPIO-controlle.felipeb-5.4 @@ -0,0 +1,636 @@ +From 16f3aedb401f46e871d79722d3e6cbfb19386b96 Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Fri, 3 Aug 2018 15:05:31 +0300 +Subject: [PATCH 10/14] PTP: Add support for Intel Timed GPIO controller + +Add support for PSE's TGPIO controllers. Each controller has a total of +20 pins configurable as output or input. + +Signed-off-by: Felipe Balbi +--- + drivers/ptp/Kconfig | 8 + + drivers/ptp/Makefile | 1 + + drivers/ptp/ptp-intel-tgpio.c | 580 ++++++++++++++++++++++++++++++++++ + 3 files changed, 589 insertions(+) + create mode 100644 drivers/ptp/ptp-intel-tgpio.c + +diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig +index c0e6e9672750..400bdcb9d0d0 100644 +--- a/drivers/ptp/Kconfig ++++ b/drivers/ptp/Kconfig +@@ -107,6 +107,14 @@ config PTP_1588_CLOCK_PCH + To compile this driver as a module, choose M here: the module + will be called ptp_pch. 
+ ++config PTP_INTEL_TGPIO ++ tristate "Intel Timed GPIO" ++ depends on X86 ++ depends on PCI ++ imply PTP_1588_CLOCK ++ help ++ This driver asdds support for Intel Timed GPIO ++ + config PTP_INTEL_PMC_TGPIO + tristate "Intel PMC Timed GPIO" + depends on X86 +diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile +index ff89c90ace82..f7e9dd3ebfff 100644 +--- a/drivers/ptp/Makefile ++++ b/drivers/ptp/Makefile +@@ -7,6 +7,7 @@ ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o + obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o + obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o + obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o ++obj-$(CONFIG_PTP_INTEL_TGPIO) += ptp-intel-tgpio.o + obj-$(CONFIG_PTP_INTEL_PMC_TGPIO) += ptp-intel-pmc-tgpio.o + obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o + obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o +diff --git a/drivers/ptp/ptp-intel-tgpio.c b/drivers/ptp/ptp-intel-tgpio.c +new file mode 100644 +index 000000000000..5d962eb2008f +--- /dev/null ++++ b/drivers/ptp/ptp-intel-tgpio.c +@@ -0,0 +1,580 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Intel Timed GPIO Controller Driver ++ * ++ * Copyright (C) 2018 Intel Corporation ++ * Author: Felipe Balbi ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define TGPIOCTL(n) (((n) * 0x40) + 0x00) ++#define TGPIOCOMPV31_0(n) (((n) * 0x40) + 0x04) ++#define TGPIOCOMPV63_32(n) (((n) * 0x40) + 0x08) ++#define TGPIOPIV31_0(n) (((n) * 0x40) + 0x0c) ++#define TGPIOPIV63_32(n) (((n) * 0x40) + 0x10) ++#define TGPIOTCV31_0(n) (((n) * 0x40) + 0x14) ++#define TGPIOTCV63_32(n) (((n) * 0x40) + 0x18) ++#define TGPIOECCV31_0(n) (((n) * 0x40) + 0x1c) ++#define TGPIOECCV63_32(n) (((n) * 0x40) + 0x20) ++#define TGPIOEC31_0(n) (((n) * 0x40) + 0x24) ++#define TGPIOEC63_32(n) (((n) * 0x40) + 0x28) ++ ++#define TGPIOINTRCTL 0x0500 ++#define TGPIORIS 0x0504 ++#define TGPIOMSC 0x0508 ++#define TGPIOMIS 0x050c ++#define TGPIOICR 0x0510 ++#define TGPIO_CLK_SEL 0x0514 ++#define 
TGPIO_TS_SEL_0 0x0520 ++#define TGPIO_TS_SEL_1 0x0524 ++#define TMT_CLK_SEL 0x0528 ++#define TGPIO_TSC_CTL 0x0530 ++#define TGPIO_TSC_STATUS 0x0534 ++#define TMTCTL_TSG 0x0600 ++#define TMTR_TSG 0x0604 ++#define TMTL_TSG 0x0608 ++#define TMTH_TSG 0x060C ++#define TIMINCA_TSG 0x0610 ++#define TIMADJ_TSG 0x0614 ++#define LXTS_TMT_LOW_TSG 0x0618 ++#define LXTS_TMT_HIGH_TSG 0x061C ++#define LXTS_ART_LOW_TSG 0x0620 ++#define LXTS_ART_HIGH_TSG 0x0624 ++#define RXTS_TMT_LOW_TSG 0x0628 ++#define RXTS_TMT_HIGH_TSG 0x062C ++#define TMTCTL_GLOBAL 0x0640 ++#define TMTR_GLOBAL 0x0644 ++#define TMTL_GLOBAL 0x0648 ++#define TMTH_GLOBAL 0x064C ++#define TIMINCA_GLOBAL 0x0650 ++#define TIMADJ_GLOBAL 0x0654 ++#define LXTS_TMT_LOW_GLOBAL 0x0658 ++#define LXTS_TMT_HIGH_GLOBAL 0x065C ++#define LXTS_ART_LOW_GLOBAL 0x0660 ++#define LXTS_ART_HIGH_GLOBAL 0x0664 ++#define RXTS_TMT_LOW_GLOBAL 0x0668 ++#define RXTS_TMT_HIGH_GLOBAL 0x066C ++#define TMTCTL_WORKING 0x0680 ++#define TMTR_WORKING 0x0684 ++#define TMTL_WORKING 0x0688 ++#define TMTH_WORKING 0x068C ++#define TIMINCA_WORKING 0x0690 ++#define TIMADJ_WORKING 0x0694 ++#define LXTS_TMT_LOW_WORKING 0x0698 ++#define LXTS_TMT_HIGH_WORKING 0x069C ++#define LXTS_ART_LOW_WORKING 0x06A0 ++#define LXTS_ART_HIGH_WORKING 0x06A4 ++#define RXTS_TMT_LOW_WORKING 0x06A8 ++#define RXTS_TMT_HIGH_WORKING 0x06AC ++ ++/* Control Register */ ++#define TGPIOCTL_EN BIT(0) ++#define TGPIOCTL_DIR BIT(1) ++#define TGPIOCTL_EP GENMASK(3, 2) ++#define TGPIOCTL_EP_RISING_EDGE (0 << 2) ++#define TGPIOCTL_EP_FALLING_EDGE (1 << 2) ++#define TGPIOCTL_EP_TOGGLE_EDGE (2 << 2) ++#define TGPIOCTL_PM BIT(4) ++#define TGPIOCTL_PWS GENMASK(8, 5) ++#define TGPIOCTL_PWS_N(n) (((n) & 0xf) << 5) ++#define TGPIOCTL_ICS BIT(9) ++#define TGPIOCTL_TSCS BIT(10) ++#define TGPIOCTL_OEC BIT(12) ++#define TGPIOCTL_FIT BIT(13) ++#define TGPIOCTL_IEC GENMASK(15, 14) ++#define TGPIOCTL_ECC BIT(16) ++#define TGPIOCTL_PSL GENMASK(24, 17) ++#define TGPIOCTL_TS GENMASK(29, 28) ++#define 
TGPIOCTL_TS_TMT0 (0 << 28) ++#define TGPIOCTL_TS_TMT1 (1 << 28) ++#define TGPIOCTL_TS_TMT2 (2 << 28) ++#define TGPIOCTL_TS_LART (3 << 28) ++ ++/* Timed GPIO Interrupt Status/Mask/Clear registers */ ++#define TGPIOINT_TMT_NSEC_WRAP_GLOBAL BIT(25) ++#define TGPIOINT_TMT_NSEC_WRAP_WORKING BIT(24) ++#define TGPIOINT_TMT_NSEC_WRAP_TSG BIT(23) ++#define TGPIOINT_TADJ_TMT_GLOBAL_CMPLT BIT(22) ++#define TGPIOINT_TADJ_TMT_WORKING_CMPLT BIT(21) ++#define TGPIOINT_TADJ_TMT_TSG_CMPLT BIT(20) ++#define TGPIOINT_EVENT_INTERRUPT(n) BIT((n)) ++ ++/* Tunable Monotonous Timer Control Register */ ++#define TMTCTL_TMT_ENABLE BIT(0) ++ ++#define NSECS_PER_SEC 1000000000 ++#define TGPIO_MAX_ADJ_TIME 999999900 ++ ++struct intel_tgpio { ++ struct ptp_clock_info info; ++ struct ptp_clock *clock; ++ ++ spinlock_t lock; ++ struct device *dev; ++ void __iomem *base; ++ ++ u32 irq_status; ++ u32 irq_mask; ++}; ++#define to_intel_tgpio(i) (container_of((i), struct intel_tgpio, info)) ++ ++static inline u64 to_intel_tgpio_time(struct ptp_clock_time *t) ++{ ++ return t->sec * NSECS_PER_SEC + t->nsec; ++} ++ ++static inline u64 intel_tgpio_readq(void __iomem *base, u32 offset) ++{ ++ return lo_hi_readq(base + offset); ++} ++ ++static inline void intel_tgpio_writeq(void __iomem *base, u32 offset, u64 v) ++{ ++ return lo_hi_writeq(v, base + offset); ++} ++ ++static inline u32 intel_tgpio_readl(void __iomem *base, u32 offset) ++{ ++ return readl(base + offset); ++} ++ ++static inline void intel_tgpio_writel(void __iomem *base, u32 offset, u32 value) ++{ ++ writel(value, base + offset); ++} ++ ++static void intel_tgpio_get_time(struct intel_tgpio *tgpio, ++ struct timespec64 *ts) ++{ ++ (void) intel_tgpio_readl(tgpio->base, TMTR_TSG); ++ ts->tv_nsec = intel_tgpio_readl(tgpio->base, TMTL_TSG); ++ ts->tv_sec = intel_tgpio_readl(tgpio->base, TMTH_TSG); ++} ++ ++static void intel_tgpio_set_time(struct intel_tgpio *tgpio, ++ const struct timespec64 *ts) ++{ ++ /* Disable TMT0 */ ++ 
intel_tgpio_writel(tgpio->base, TMTCTL_TSG, 0x00); ++ ++ intel_tgpio_writel(tgpio->base, TMTR_TSG, 0x00); ++ intel_tgpio_writel(tgpio->base, TMTL_TSG, ts->tv_nsec); ++ intel_tgpio_writel(tgpio->base, TMTH_TSG, (u32) ts->tv_sec); ++ ++ /* Enable TMT0 */ ++ intel_tgpio_writel(tgpio->base, TMTCTL_TSG, TMTCTL_TMT_ENABLE); ++} ++ ++#define TGPIO_PIN(n) \ ++ { \ ++ .name = "pin" __stringify((n)), \ ++ .index = (n), \ ++ .chan = (n), \ ++ .func = PTP_PF_NONE, \ ++ } ++ ++static struct ptp_pin_desc intel_tgpio_pin_config[] = { ++ TGPIO_PIN(0), ++ TGPIO_PIN(1), ++ TGPIO_PIN(2), ++ TGPIO_PIN(3), ++ TGPIO_PIN(4), ++ TGPIO_PIN(5), ++ TGPIO_PIN(6), ++ TGPIO_PIN(7), ++ TGPIO_PIN(8), ++ TGPIO_PIN(9), ++ TGPIO_PIN(10), ++ TGPIO_PIN(11), ++ TGPIO_PIN(12), ++ TGPIO_PIN(13), ++ TGPIO_PIN(14), ++ TGPIO_PIN(15), ++ TGPIO_PIN(16), ++ TGPIO_PIN(17), ++ TGPIO_PIN(18), ++ TGPIO_PIN(19), ++}; ++ ++static int intel_tgpio_adjfine(struct ptp_clock_info *info, long scaled_ppm) ++{ ++ struct intel_tgpio *tgpio = to_intel_tgpio(info); ++ unsigned long flags; ++ u32 reg; ++ bool isgn; ++ ++ spin_lock_irqsave(&tgpio->lock, flags); ++ if (scaled_ppm < 0) { ++ isgn = true; ++ scaled_ppm = -scaled_ppm; ++ } ++ ++ /* ++ * HW uses a 200MHz clock, meaning it has a 5ns period. Just ++ * multiply scaled_ppm by 5 to get our increment. 
++ */ ++ reg = 5 * scaled_ppm; ++ ++ /* bit 31 is sign bit */ ++ reg &= ~BIT(31); ++ reg |= isgn << 31; ++ ++ intel_tgpio_writel(tgpio->base, TIMINCA_GLOBAL, reg); ++ spin_unlock_irqrestore(&tgpio->lock, flags); ++ ++ return 0; ++} ++ ++static int intel_tgpio_adjtime(struct ptp_clock_info *info, s64 delta) ++{ ++ struct intel_tgpio *tgpio = to_intel_tgpio(info); ++ struct timespec64 then; ++ struct timespec64 now; ++ unsigned long flags; ++ ++ if (delta > TGPIO_MAX_ADJ_TIME) ++ return -EINVAL; ++ ++ then = ns_to_timespec64(delta); ++ ++ spin_lock_irqsave(&tgpio->lock, flags); ++ intel_tgpio_get_time(tgpio, &now); ++ now = timespec64_add(now, then); ++ intel_tgpio_set_time(tgpio, &now); ++ spin_unlock_irqrestore(&tgpio->lock, flags); ++ ++ return 0; ++} ++ ++static int intel_tgpio_gettime64(struct ptp_clock_info *info, ++ struct timespec64 *ts) ++{ ++ struct intel_tgpio *tgpio = to_intel_tgpio(info); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&tgpio->lock, flags); ++ intel_tgpio_get_time(tgpio, ts); ++ spin_unlock_irqrestore(&tgpio->lock, flags); ++ ++ return 0; ++} ++ ++static int intel_tgpio_settime64(struct ptp_clock_info *info, ++ const struct timespec64 *ts) ++{ ++ struct intel_tgpio *tgpio = to_intel_tgpio(info); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&tgpio->lock, flags); ++ intel_tgpio_set_time(tgpio, ts); ++ spin_unlock_irqrestore(&tgpio->lock, flags); ++ ++ return 0; ++} ++ ++static int intel_tgpio_config_input(struct intel_tgpio *tgpio, ++ struct ptp_extts_request *extts, int on) ++{ ++ unsigned int index = extts->index; ++ u32 offset; ++ u32 ctrl; ++ ++ offset = TGPIOCTL(index); ++ ctrl = intel_tgpio_readl(tgpio->base, offset); ++ ctrl &= ~(TGPIOCTL_TS | TGPIOCTL_EP | TGPIOCTL_DIR | TGPIOCTL_PWS); ++ ++ if (on) { ++ tgpio->irq_mask |= TGPIOINT_EVENT_INTERRUPT(index); ++ ctrl |= TGPIOCTL_DIR | TGPIOCTL_TS_TMT0; ++ ++ if ((extts->flags & PTP_RISING_EDGE) && ++ (extts->flags & PTP_FALLING_EDGE)) ++ ctrl |= TGPIOCTL_EP_TOGGLE_EDGE; ++ else if 
(extts->flags & PTP_RISING_EDGE) ++ ctrl |= TGPIOCTL_EP_RISING_EDGE; ++ else if (extts->flags & PTP_FALLING_EDGE) ++ ctrl |= TGPIOCTL_EP_FALLING_EDGE; ++ ++ /* gotta program all other bits before EN bit is set */ ++ intel_tgpio_writel(tgpio->base, offset, ctrl); ++ ++ ctrl |= TGPIOCTL_EN; ++ } else { ++ tgpio->irq_mask &= ~TGPIOINT_EVENT_INTERRUPT(index); ++ ctrl &= ~TGPIOCTL_EN; ++ } ++ ++ intel_tgpio_writel(tgpio->base, TGPIOMSC, tgpio->irq_mask); ++ intel_tgpio_writel(tgpio->base, offset, ctrl); ++ ++ return 0; ++} ++ ++static int intel_tgpio_config_output(struct intel_tgpio *tgpio, ++ struct ptp_perout_request *perout, int on) ++{ ++ unsigned int index = perout->index; ++ u32 offset; ++ u32 ctrl; ++ ++ offset = TGPIOCTL(index); ++ ctrl = intel_tgpio_readl(tgpio->base, offset); ++ ctrl &= ~(TGPIOCTL_TS | TGPIOCTL_EP | TGPIOCTL_DIR | TGPIOCTL_PWS); ++ ++ if (on) { ++ struct ptp_clock_time *period = &perout->period; ++ struct ptp_clock_time *start = &perout->start; ++ ++ ctrl |= TGPIOCTL_TS_TMT0 | TGPIOCTL_ECC | TGPIOCTL_PWS_N(2); ++ ++ if (perout->flags & PTP_PEROUT_ONE_SHOT) ++ ctrl &= ~TGPIOCTL_PM; ++ else ++ ctrl |= TGPIOCTL_PM; ++ ++ intel_tgpio_writel(tgpio->base, TGPIOCOMPV31_0(index), ++ start->nsec); ++ intel_tgpio_writel(tgpio->base, TGPIOCOMPV63_32(index), ++ start->sec); ++ ++ intel_tgpio_writeq(tgpio->base, TGPIOPIV31_0(index), ++ to_intel_tgpio_time(period)); ++ ++ /* gotta program all other bits before EN bit is set */ ++ intel_tgpio_writel(tgpio->base, offset, ctrl); ++ ++ ctrl |= TGPIOCTL_EN; ++ } else { ++ ctrl &= ~TGPIOCTL_EN; ++ } ++ ++ intel_tgpio_writel(tgpio->base, offset, ctrl); ++ ++ return 0; ++} ++ ++static int intel_tgpio_enable(struct ptp_clock_info *info, ++ struct ptp_clock_request *req, int on) ++{ ++ struct intel_tgpio *tgpio = to_intel_tgpio(info); ++ unsigned long flags; ++ int ret = -EOPNOTSUPP; ++ ++ spin_lock_irqsave(&tgpio->lock, flags); ++ switch (req->type) { ++ case PTP_CLK_REQ_EXTTS: ++ ret = 
intel_tgpio_config_input(tgpio, &req->extts, on); ++ break; ++ case PTP_CLK_REQ_PEROUT: ++ ret = intel_tgpio_config_output(tgpio, &req->perout, on); ++ break; ++ default: ++ break; ++ } ++ spin_unlock_irqrestore(&tgpio->lock, flags); ++ ++ return ret; ++} ++ ++static int intel_tgpio_get_time_fn(ktime_t *device_time, ++ struct system_counterval_t *system_counter, void *_tgpio) ++{ ++ struct intel_tgpio *tgpio = _tgpio; ++ struct timespec64 ts; ++ u64 cycles; ++ ++ intel_tgpio_get_time(tgpio, &ts); ++ *device_time = timespec64_to_ktime(ts); ++ cycles = intel_tgpio_readq(tgpio->base, LXTS_ART_LOW_GLOBAL); ++ *system_counter = convert_art_to_tsc(cycles); ++ ++ return 0; ++} ++ ++static int intel_tgpio_getcrosststamp(struct ptp_clock_info *info, ++ struct system_device_crosststamp *cts) ++{ ++ struct intel_tgpio *tgpio = to_intel_tgpio(info); ++ ++ return get_device_system_crosststamp(intel_tgpio_get_time_fn, tgpio, ++ NULL, cts); ++} ++ ++static int intel_tgpio_verify(struct ptp_clock_info *ptp, unsigned int pin, ++ enum ptp_pin_function func, unsigned int chan) ++{ ++ return 0; ++} ++ ++static const struct ptp_clock_info intel_tgpio_info = { ++ .owner = THIS_MODULE, ++ .name = "Intel TGPIO", ++ .max_adj = 50000000, ++ .n_pins = 20, ++ .n_ext_ts = 20, ++ .n_per_out = 20, ++ .pin_config = intel_tgpio_pin_config, ++ .adjfine = intel_tgpio_adjfine, ++ .adjtime = intel_tgpio_adjtime, ++ .gettime64 = intel_tgpio_gettime64, ++ .settime64 = intel_tgpio_settime64, ++ .enable = intel_tgpio_enable, ++ .getcrosststamp = intel_tgpio_getcrosststamp, ++ .verify = intel_tgpio_verify, ++}; ++ ++static irqreturn_t intel_tgpio_irq_thread(int irq, void *_tgpio) ++{ ++ struct intel_tgpio *tgpio = _tgpio; ++ unsigned long irq_status; ++ unsigned long pin; ++ ++ spin_lock(&tgpio->lock); ++ ++ irq_status = tgpio->irq_status; ++ for_each_set_bit(pin, &irq_status, BITS_PER_LONG) { ++ struct ptp_clock_event event; ++ ++ event.type = PTP_CLOCK_EXTTS; ++ event.index = pin; ++ event.timestamp = 
intel_tgpio_readq(tgpio->base, ++ TGPIOTCV31_0(pin)); ++ ++ ptp_clock_event(tgpio->clock, &event); ++ } ++ ++ intel_tgpio_writel(tgpio->base, TGPIOMSC, tgpio->irq_mask); ++ spin_unlock(&tgpio->lock); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t intel_tgpio_irq(int irq, void *_tgpio) ++{ ++ struct intel_tgpio *tgpio = _tgpio; ++ u32 intr; ++ ++ intr = intel_tgpio_readl(tgpio->base, TGPIOMIS); ++ if (intr) { ++ tgpio->irq_status = intr; ++ intel_tgpio_writel(tgpio->base, TGPIOMSC, 0x00); ++ intel_tgpio_writel(tgpio->base, TGPIOICR, intr); ++ return IRQ_WAKE_THREAD; ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static void intel_tgpio_disable_all_pins(struct intel_tgpio *tgpio) ++{ ++ int i; ++ ++ for (i = 0; i < 20; i++) ++ intel_tgpio_writel(tgpio->base, TGPIOCTL(i), 0); ++} ++ ++static int intel_tgpio_probe(struct pci_dev *pci, const struct pci_device_id *id) ++{ ++ struct intel_tgpio *tgpio; ++ struct device *dev; ++ int ret; ++ int irq; ++ ++ dev = &pci->dev; ++ tgpio = devm_kzalloc(dev, sizeof(*tgpio), GFP_KERNEL); ++ if (!tgpio) ++ return -ENOMEM; ++ ++ tgpio->dev = dev; ++ tgpio->info = intel_tgpio_info; ++ ++ ret = pcim_enable_device(pci); ++ if (ret) ++ return ret; ++ ++ pci_set_master(pci); ++ ++ ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci)); ++ if (ret) ++ return ret; ++ ++ tgpio->base = pcim_iomap_table(pci)[0]; ++ if (!tgpio->base) { ++ ret = -ENOMEM; ++ goto err1; ++ } ++ ++ tgpio->base += 0x1000; ++ ++ /* make sure all pins are disabled */ ++ intel_tgpio_disable_all_pins(tgpio); ++ ++ /* mask all interrupt events */ ++ intel_tgpio_writel(tgpio->base, TGPIOMIS, 0x00); ++ ++ /* enable TMT0 */ ++ intel_tgpio_writel(tgpio->base, TMTCTL_TSG, TMTCTL_TMT_ENABLE); ++ ++ spin_lock_init(&tgpio->lock); ++ pci_set_drvdata(pci, tgpio); ++ ++ tgpio->clock = ptp_clock_register(&tgpio->info, &pci->dev); ++ if (IS_ERR(tgpio->clock)) ++ return PTR_ERR(tgpio->clock); ++ ++ ret = pci_alloc_irq_vectors(pci, 2, 2, PCI_IRQ_ALL_TYPES); ++ if (ret < 0) ++ goto err0; 
++ ++ irq = pci_irq_vector(pci, 1); ++ ret = devm_request_threaded_irq(dev, irq, intel_tgpio_irq, ++ intel_tgpio_irq_thread, IRQF_TRIGGER_RISING, ++ pci_name(pci), tgpio); ++ if (ret) ++ goto err1; ++ ++ return 0; ++ ++err1: ++ pci_free_irq_vectors(pci); ++ ++err0: ++ ptp_clock_unregister(tgpio->clock); ++ return ret; ++} ++ ++static void intel_tgpio_remove(struct pci_dev *pci) ++{ ++ struct intel_tgpio *tgpio = pci_get_drvdata(pci); ++ ++ pci_free_irq_vectors(pci); ++ ptp_clock_unregister(tgpio->clock); ++} ++ ++static const struct pci_device_id intel_tgpio_id_table[] = { ++ { PCI_VDEVICE(INTEL, 0x4b88), /* EHL */ }, ++ { PCI_VDEVICE(INTEL, 0x4b89), /* EHL */ }, ++ { } /* Terminating Entry */ ++}; ++MODULE_DEVICE_TABLE(pci, intel_tgpio_id_table); ++ ++static struct pci_driver intel_tgpio_driver = { ++ .name = "intel-tgpio", ++ .id_table = intel_tgpio_id_table, ++ .probe = intel_tgpio_probe, ++ .remove = intel_tgpio_remove, ++}; ++ ++module_pci_driver(intel_tgpio_driver); ++ ++MODULE_AUTHOR("Felipe Balbi "); ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Intel Timed GPIO Controller Driver"); +-- +2.17.1 + diff --git a/patches/0010-char-rpmb-add-RPMB-simulation-device.security b/patches/0010-char-rpmb-add-RPMB-simulation-device.security new file mode 100644 index 0000000000..e897dc31fd --- /dev/null +++ b/patches/0010-char-rpmb-add-RPMB-simulation-device.security @@ -0,0 +1,817 @@ +From a901e4d0b36665b176fd89cf0608639482c337a8 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Sun, 28 Feb 2016 10:36:13 +0200 +Subject: [PATCH 10/65] char: rpmb: add RPMB simulation device + +The RPMB partition simulation device is a virtual device that +provides simulation of the RPMB protocol and use kernel memory +as storage. + +Be aware it doesn't promise any real security. 
This driver is +suitable only for testing of the RPMB subsystem or RPMB +applications prior to RPMB key provisioning, as RPMB key +programming can be performed only once in the life time of the +storage device. + +The module currently supports two configuration options via +module parameters +1. max_wr_blks: for specifying max blocks that can be written +in a single command +2. daunits: used to set storage capacity in 128K units. + +V2: remove .owner setting, it is set automatically +V3: 1. Add shutdown handler (similar to ufshcd) + 2. Commit message fix +V4: Use select RPMB in Kconfg to ensure valid configuration. +V5: Revamp the code using the sequence command. +V6: 1. Be more verbose about some errors, after all this is a testing + module. + 2. Fix RPMB_READ_DATA: + a. The number of blocks for eMMC request frame should be 0 + b. Fix missing return before bailing on error + c. Copy all the frames back + 3. Fix RPMB_WRITE_DATA: + a. Compute MAC on result packet + b. Also address should be set in the result frame. + 4. Remove platform device + 5. Update the commit message +V7: Resend. +V8: 1. drop use SHASH_DESC_ON_STACK, + variable length arrays are problematic in C. + 2. Fix typos. + 3. Set out_frames in case of not programmed keys + otherwise read cycle won't return correct answer. +V9: 1. Add SPDX identifiers. + 2. Adjust to new unregister API. + 3. Adjust to the new zero based RPMB frame count. +V10:1. 
Drop Dual license + +Change-Id: Idd0a414c4ce157631f69586f1ed3a6e88cd8a4ee +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +--- + drivers/char/rpmb/Kconfig | 16 + + drivers/char/rpmb/Makefile | 1 + + drivers/char/rpmb/rpmb_sim.c | 715 +++++++++++++++++++++++++++++++++++ + 3 files changed, 732 insertions(+) + create mode 100644 drivers/char/rpmb/rpmb_sim.c + +diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig +index 69dbc1cfe89f..2b91c8e24c92 100644 +--- a/drivers/char/rpmb/Kconfig ++++ b/drivers/char/rpmb/Kconfig +@@ -16,3 +16,19 @@ config RPMB_INTF_DEV + help + Say yes here if you want to access RPMB from user space + via character device interface /dev/rpmb%d ++ ++config RPMB_SIM ++ tristate "RPMB partition device simulator" ++ default n ++ select RPMB ++ select CRYPTO_SHA256 ++ select CRYPTO_HMAC ++ help ++ RPMB partition simulation device is a virtual device that ++ provides simulation of the RPMB protocol and use kernel memory ++ as storage. ++ ++ Be aware it doesn't promise any real security. This driver is ++ suitable only for testing of the RPMB subsystem or RPMB applications ++ prior to RPMB key provisioning. ++ Most people should say N here. +diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile +index f54b3f30514b..e4faa1c99373 100644 +--- a/drivers/char/rpmb/Makefile ++++ b/drivers/char/rpmb/Makefile +@@ -4,5 +4,6 @@ + obj-$(CONFIG_RPMB) += rpmb.o + rpmb-objs += core.o + rpmb-$(CONFIG_RPMB_INTF_DEV) += cdev.o ++obj-$(CONFIG_RPMB_SIM) += rpmb_sim.o + + ccflags-y += -D__CHECK_ENDIAN__ +diff --git a/drivers/char/rpmb/rpmb_sim.c b/drivers/char/rpmb/rpmb_sim.c +new file mode 100644 +index 000000000000..c694866d731b +--- /dev/null ++++ b/drivers/char/rpmb/rpmb_sim.c +@@ -0,0 +1,715 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2015 - 2019 Intel Corporation. 
++ */ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++static const char id[] = "RPMB:SIM"; ++#define CAPACITY_UNIT SZ_128K ++#define CAPACITY_MIN SZ_128K ++#define CAPACITY_MAX SZ_16M ++#define BLK_UNIT SZ_256 ++ ++static unsigned int max_wr_blks = 2; ++module_param(max_wr_blks, uint, 0644); ++MODULE_PARM_DESC(max_wr_blks, "max blocks that can be written in a single command (default: 2)"); ++ ++static unsigned int daunits = 1; ++module_param(daunits, uint, 0644); ++MODULE_PARM_DESC(daunits, "number of data area units of 128K (default: 1)"); ++ ++struct blk { ++ u8 data[BLK_UNIT]; ++}; ++ ++/** ++ * struct rpmb_sim_dev ++ * ++ * @dev: back pointer device ++ * @rdev: rpmb device ++ * @auth_key: Authentication key register which is used to authenticate ++ * accesses when MAC is calculated; ++ * @auth_key_set: true if authentication key was set ++ * @write_counter: Counter value for the total amount of successful ++ * authenticated data write requests made by the host. ++ * The initial value of this register after production is 00000000h. ++ * The value will be incremented by one along with each successful ++ * programming access. The value cannot be reset. 
After the counter ++ * has reached the maximum value of FFFFFFFFh, ++ * it will not be incremented anymore (overflow prevention) ++ * @hash_desc: hmac(sha256) shash descriptor ++ * ++ * @res_frames: frame that holds the result of the last write operation ++ * @out_frames: next read operation result frames ++ * @out_frames_cnt: number of the output frames ++ * ++ * @capacity: size of the partition in bytes multiple of 128K ++ * @blkcnt: block count ++ * @da: data area in blocks ++ */ ++struct rpmb_sim_dev { ++ struct device *dev; ++ struct rpmb_dev *rdev; ++ u8 auth_key[32]; ++ bool auth_key_set; ++ u32 write_counter; ++ struct shash_desc *hash_desc; ++ ++ struct rpmb_frame_jdec res_frames[1]; ++ struct rpmb_frame_jdec *out_frames; ++ unsigned int out_frames_cnt; ++ ++ size_t capacity; ++ size_t blkcnt; ++ struct blk *da; ++}; ++ ++static __be16 op_result(struct rpmb_sim_dev *rsdev, u16 result) ++{ ++ if (!rsdev->auth_key_set) ++ return cpu_to_be16(RPMB_ERR_NO_KEY); ++ ++ if (rsdev->write_counter == 0xFFFFFFFF) ++ result |= RPMB_ERR_COUNTER_EXPIRED; ++ ++ return cpu_to_be16(result); ++} ++ ++static __be16 req_to_resp(u16 req) ++{ ++ return cpu_to_be16(RPMB_REQ2RESP(req)); ++} ++ ++static int rpmb_sim_calc_hmac(struct rpmb_sim_dev *rsdev, ++ struct rpmb_frame_jdec *frames, ++ unsigned int blks, u8 *mac) ++{ ++ struct shash_desc *desc = rsdev->hash_desc; ++ int i; ++ int ret; ++ ++ ret = crypto_shash_init(desc); ++ if (ret) ++ goto out; ++ ++ for (i = 0; i < blks; i++) { ++ ret = crypto_shash_update(desc, frames[i].data, ++ rpmb_jdec_hmac_data_len); ++ if (ret) ++ goto out; ++ } ++ ret = crypto_shash_final(desc, mac); ++out: ++ if (ret) ++ dev_err(rsdev->dev, "digest error = %d", ret); ++ ++ return ret; ++} ++ ++static int rpmb_op_not_programmed(struct rpmb_sim_dev *rsdev, u16 req) ++{ ++ struct rpmb_frame_jdec *res_frame = rsdev->res_frames; ++ ++ res_frame->req_resp = req_to_resp(req); ++ res_frame->result = op_result(rsdev, RPMB_ERR_NO_KEY); ++ ++ rsdev->out_frames 
= res_frame; ++ rsdev->out_frames_cnt = 1; ++ ++ dev_err(rsdev->dev, "not programmed\n"); ++ ++ return 0; ++} ++ ++static int rpmb_op_program_key(struct rpmb_sim_dev *rsdev, ++ struct rpmb_frame_jdec *in_frame, u32 cnt) ++{ ++ struct rpmb_frame_jdec *res_frame = rsdev->res_frames; ++ struct crypto_shash *tfm = rsdev->hash_desc->tfm; ++ u16 req; ++ int ret; ++ u16 err = RPMB_ERR_OK; ++ ++ req = be16_to_cpu(in_frame[0].req_resp); ++ ++ if (req != RPMB_PROGRAM_KEY) ++ return -EINVAL; ++ ++ if (cnt != 1) { ++ dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); ++ return -EINVAL; ++ } ++ ++ if (rsdev->auth_key_set) { ++ dev_err(rsdev->dev, "key already set\n"); ++ err = RPMB_ERR_WRITE; ++ goto out; ++ } ++ ++ ret = crypto_shash_setkey(tfm, in_frame[0].key_mac, 32); ++ if (ret) { ++ dev_err(rsdev->dev, "set key failed = %d\n", ret); ++ err = RPMB_ERR_GENERAL; ++ goto out; ++ } ++ ++ dev_dbg(rsdev->dev, "digest size %u\n", crypto_shash_digestsize(tfm)); ++ ++ memcpy(rsdev->auth_key, in_frame[0].key_mac, 32); ++ rsdev->auth_key_set = true; ++out: ++ ++ memset(res_frame, 0, sizeof(*res_frame)); ++ res_frame->req_resp = req_to_resp(req); ++ res_frame->result = op_result(rsdev, err); ++ ++ return 0; ++} ++ ++static int rpmb_op_get_wr_counter(struct rpmb_sim_dev *rsdev, ++ struct rpmb_frame_jdec *in_frame, u32 cnt) ++{ ++ struct rpmb_frame_jdec *frame; ++ int ret = 0; ++ u16 req; ++ u16 err; ++ ++ req = be16_to_cpu(in_frame[0].req_resp); ++ if (req != RPMB_GET_WRITE_COUNTER) ++ return -EINVAL; ++ ++ if (cnt != 1) { ++ dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); ++ return -EINVAL; ++ } ++ ++ frame = kcalloc(1, sizeof(*frame), GFP_KERNEL); ++ if (!frame) { ++ err = RPMB_ERR_READ; ++ ret = -ENOMEM; ++ rsdev->out_frames = rsdev->res_frames; ++ rsdev->out_frames_cnt = cnt; ++ goto out; ++ } ++ ++ rsdev->out_frames = frame; ++ rsdev->out_frames_cnt = cnt; ++ ++ frame->req_resp = req_to_resp(req); ++ frame->write_counter = 
cpu_to_be32(rsdev->write_counter); ++ memcpy(frame->nonce, in_frame[0].nonce, 16); ++ ++ err = RPMB_ERR_OK; ++ if (rpmb_sim_calc_hmac(rsdev, frame, cnt, frame->key_mac)) ++ err = RPMB_ERR_READ; ++ ++out: ++ rsdev->out_frames[0].req_resp = req_to_resp(req); ++ rsdev->out_frames[0].result = op_result(rsdev, err); ++ ++ return ret; ++} ++ ++static int rpmb_op_write_data(struct rpmb_sim_dev *rsdev, ++ struct rpmb_frame_jdec *in_frame, u32 cnt) ++{ ++ struct rpmb_frame_jdec *res_frame = rsdev->res_frames; ++ u8 mac[32]; ++ u16 req, err, addr, blks; ++ unsigned int i; ++ int ret = 0; ++ ++ req = be16_to_cpu(in_frame[0].req_resp); ++ if (req != RPMB_WRITE_DATA) ++ return -EINVAL; ++ ++ if (rsdev->write_counter == 0xFFFFFFFF) { ++ err = RPMB_ERR_WRITE; ++ goto out; ++ } ++ ++ blks = be16_to_cpu(in_frame[0].block_count); ++ if (blks == 0 || blks > cnt) { ++ dev_err(rsdev->dev, "wrong number of blocks: blks=%u cnt=%u\n", ++ blks, cnt); ++ ret = -EINVAL; ++ err = RPMB_ERR_GENERAL; ++ goto out; ++ } ++ ++ if (blks > max_wr_blks) { ++ err = RPMB_ERR_WRITE; ++ goto out; ++ } ++ ++ addr = be16_to_cpu(in_frame[0].addr); ++ if (addr >= rsdev->blkcnt) { ++ err = RPMB_ERR_ADDRESS; ++ goto out; ++ } ++ ++ if (rpmb_sim_calc_hmac(rsdev, in_frame, blks, mac)) { ++ err = RPMB_ERR_AUTH; ++ goto out; ++ } ++ ++ /* mac is in the last frame */ ++ if (memcmp(mac, in_frame[blks - 1].key_mac, sizeof(mac)) != 0) { ++ err = RPMB_ERR_AUTH; ++ goto out; ++ } ++ ++ if (be32_to_cpu(in_frame[0].write_counter) != rsdev->write_counter) { ++ err = RPMB_ERR_COUNTER; ++ goto out; ++ } ++ ++ if (addr + blks > rsdev->blkcnt) { ++ err = RPMB_ERR_WRITE; ++ goto out; ++ } ++ ++ dev_dbg(rsdev->dev, "Writing = %u blocks at addr = 0x%X\n", blks, addr); ++ err = RPMB_ERR_OK; ++ for (i = 0; i < blks; i++) ++ memcpy(rsdev->da[addr + i].data, in_frame[i].data, BLK_UNIT); ++ ++ rsdev->write_counter++; ++ ++ memset(res_frame, 0, sizeof(*res_frame)); ++ res_frame->req_resp = req_to_resp(req); ++ res_frame->write_counter = 
cpu_to_be32(rsdev->write_counter); ++ res_frame->addr = cpu_to_be16(addr); ++ if (rpmb_sim_calc_hmac(rsdev, res_frame, 1, res_frame->key_mac)) ++ err = RPMB_ERR_READ; ++ ++out: ++ if (err != RPMB_ERR_OK) { ++ memset(res_frame, 0, sizeof(*res_frame)); ++ res_frame->req_resp = req_to_resp(req); ++ } ++ res_frame->result = op_result(rsdev, err); ++ ++ return ret; ++} ++ ++static int rpmb_do_read_data(struct rpmb_sim_dev *rsdev, ++ struct rpmb_frame_jdec *in_frame, u32 cnt) ++{ ++ struct rpmb_frame_jdec *res_frame = rsdev->res_frames; ++ struct rpmb_frame_jdec *out_frames = NULL; ++ u8 mac[32]; ++ u16 req, err, addr, blks; ++ unsigned int i; ++ int ret; ++ ++ req = be16_to_cpu(in_frame->req_resp); ++ if (req != RPMB_READ_DATA) ++ return -EINVAL; ++ ++ /* eMMC intentionally set 0 here */ ++ blks = be16_to_cpu(in_frame->block_count); ++ blks = blks ?: cnt; ++ if (blks > cnt) { ++ dev_err(rsdev->dev, "wrong number of frames cnt %u\n", blks); ++ ret = -EINVAL; ++ err = RPMB_ERR_GENERAL; ++ goto out; ++ } ++ ++ out_frames = kcalloc(blks, sizeof(*out_frames), GFP_KERNEL); ++ if (!out_frames) { ++ ret = -ENOMEM; ++ err = RPMB_ERR_READ; ++ goto out; ++ } ++ ++ ret = 0; ++ addr = be16_to_cpu(in_frame[0].addr); ++ if (addr >= rsdev->blkcnt) { ++ err = RPMB_ERR_ADDRESS; ++ goto out; ++ } ++ ++ if (addr + blks > rsdev->blkcnt) { ++ err = RPMB_ERR_READ; ++ goto out; ++ } ++ ++ dev_dbg(rsdev->dev, "reading = %u blocks at addr = 0x%X\n", blks, addr); ++ for (i = 0; i < blks; i++) { ++ memcpy(out_frames[i].data, rsdev->da[addr + i].data, BLK_UNIT); ++ memcpy(out_frames[i].nonce, in_frame[0].nonce, 16); ++ out_frames[i].req_resp = req_to_resp(req); ++ out_frames[i].addr = in_frame[0].addr; ++ out_frames[i].block_count = cpu_to_be16(blks); ++ } ++ ++ if (rpmb_sim_calc_hmac(rsdev, out_frames, blks, mac)) { ++ err = RPMB_ERR_AUTH; ++ goto out; ++ } ++ ++ memcpy(out_frames[blks - 1].key_mac, mac, sizeof(mac)); ++ ++ err = RPMB_ERR_OK; ++ for (i = 0; i < blks; i++) ++ out_frames[i].result = 
op_result(rsdev, err); ++ ++ rsdev->out_frames = out_frames; ++ rsdev->out_frames_cnt = cnt; ++ ++ return 0; ++ ++out: ++ memset(res_frame, 0, sizeof(*res_frame)); ++ res_frame->req_resp = req_to_resp(req); ++ res_frame->result = op_result(rsdev, err); ++ kfree(out_frames); ++ rsdev->out_frames = res_frame; ++ rsdev->out_frames_cnt = 1; ++ ++ return ret; ++} ++ ++static int rpmb_op_read_data(struct rpmb_sim_dev *rsdev, ++ struct rpmb_frame_jdec *in_frame, u32 cnt) ++{ ++ struct rpmb_frame_jdec *res_frame = rsdev->res_frames; ++ u16 req; ++ ++ req = be16_to_cpu(in_frame->req_resp); ++ if (req != RPMB_READ_DATA) ++ return -EINVAL; ++ ++ memcpy(res_frame, in_frame, sizeof(*res_frame)); ++ ++ rsdev->out_frames = res_frame; ++ rsdev->out_frames_cnt = 1; ++ ++ return 0; ++} ++ ++static int rpmb_op_result_read(struct rpmb_sim_dev *rsdev, ++ struct rpmb_frame_jdec *frames, u32 cnt) ++{ ++ u16 req = be16_to_cpu(frames[0].req_resp); ++ u16 blks = be16_to_cpu(frames[0].block_count); ++ ++ if (req != RPMB_RESULT_READ) ++ return -EINVAL; ++ ++ if (blks != 0) { ++ dev_err(rsdev->dev, "wrong number of frames %u != 0\n", blks); ++ return -EINVAL; ++ } ++ ++ rsdev->out_frames = rsdev->res_frames; ++ rsdev->out_frames_cnt = 1; ++ return 0; ++} ++ ++static int rpmb_sim_write(struct rpmb_sim_dev *rsdev, ++ struct rpmb_frame_jdec *frames, u32 cnt) ++{ ++ u16 req; ++ int ret; ++ ++ if (!frames) ++ return -EINVAL; ++ ++ if (cnt == 0) ++ cnt = 1; ++ ++ req = be16_to_cpu(frames[0].req_resp); ++ if (!rsdev->auth_key_set && req != RPMB_PROGRAM_KEY) ++ return rpmb_op_not_programmed(rsdev, req); ++ ++ switch (req) { ++ case RPMB_PROGRAM_KEY: ++ dev_dbg(rsdev->dev, "rpmb: program key\n"); ++ ret = rpmb_op_program_key(rsdev, frames, cnt); ++ break; ++ case RPMB_WRITE_DATA: ++ dev_dbg(rsdev->dev, "rpmb: write data\n"); ++ ret = rpmb_op_write_data(rsdev, frames, cnt); ++ break; ++ case RPMB_GET_WRITE_COUNTER: ++ dev_dbg(rsdev->dev, "rpmb: get write counter\n"); ++ ret = 
rpmb_op_get_wr_counter(rsdev, frames, cnt); ++ break; ++ case RPMB_READ_DATA: ++ dev_dbg(rsdev->dev, "rpmb: read data\n"); ++ ret = rpmb_op_read_data(rsdev, frames, cnt); ++ break; ++ case RPMB_RESULT_READ: ++ dev_dbg(rsdev->dev, "rpmb: result read\n"); ++ ret = rpmb_op_result_read(rsdev, frames, cnt); ++ break; ++ default: ++ dev_err(rsdev->dev, "unsupported command %u\n", req); ++ ret = -EINVAL; ++ break; ++ } ++ ++ dev_dbg(rsdev->dev, "rpmb: ret=%d\n", ret); ++ ++ return ret; ++} ++ ++static int rpmb_sim_read(struct rpmb_sim_dev *rsdev, ++ struct rpmb_frame_jdec *frames, u32 cnt) ++{ ++ int i; ++ ++ if (!frames) ++ return -EINVAL; ++ ++ if (cnt == 0) ++ cnt = 1; ++ ++ if (!rsdev->out_frames || rsdev->out_frames_cnt == 0) { ++ dev_err(rsdev->dev, "out_frames are not set\n"); ++ return -EINVAL; ++ } ++ ++ if (rsdev->out_frames->req_resp == cpu_to_be16(RPMB_READ_DATA)) ++ rpmb_do_read_data(rsdev, rsdev->out_frames, cnt); ++ ++ for (i = 0; i < min_t(u32, rsdev->out_frames_cnt, cnt); i++) ++ memcpy(&frames[i], &rsdev->out_frames[i], sizeof(frames[i])); ++ ++ if (rsdev->out_frames != rsdev->res_frames) ++ kfree(rsdev->out_frames); ++ ++ rsdev->out_frames = NULL; ++ rsdev->out_frames_cnt = 0; ++ dev_dbg(rsdev->dev, "rpmb: cnt=%d\n", cnt); ++ ++ return 0; ++} ++ ++static int rpmb_sim_cmd_seq(struct device *dev, u8 target, ++ struct rpmb_cmd *cmds, u32 ncmds) ++{ ++ struct rpmb_sim_dev *rsdev; ++ int i; ++ int ret; ++ struct rpmb_cmd *cmd; ++ ++ if (!dev) ++ return -EINVAL; ++ ++ rsdev = dev_get_drvdata(dev); ++ ++ if (!rsdev) ++ return -EINVAL; ++ ++ for (ret = 0, i = 0; i < ncmds && !ret; i++) { ++ cmd = &cmds[i]; ++ if (cmd->flags & RPMB_F_WRITE) ++ ret = rpmb_sim_write(rsdev, cmd->frames, cmd->nframes); ++ else ++ ret = rpmb_sim_read(rsdev, cmd->frames, cmd->nframes); ++ } ++ return ret; ++} ++ ++static int rpmb_sim_get_capacity(struct device *dev, u8 target) ++{ ++ return daunits; ++} ++ ++static struct rpmb_ops rpmb_sim_ops = { ++ .cmd_seq = rpmb_sim_cmd_seq, ++ 
.get_capacity = rpmb_sim_get_capacity, ++ .type = RPMB_TYPE_EMMC | RPMB_TYPE_SIM, ++}; ++ ++static int rpmb_sim_hmac_256_alloc(struct rpmb_sim_dev *rsdev) ++{ ++ struct shash_desc *desc; ++ struct crypto_shash *tfm; ++ ++ tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); ++ if (IS_ERR(tfm)) ++ return PTR_ERR(tfm); ++ ++ desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); ++ if (!desc) { ++ crypto_free_shash(tfm); ++ return -ENOMEM; ++ } ++ ++ desc->tfm = tfm; ++ rsdev->hash_desc = desc; ++ ++ dev_dbg(rsdev->dev, "hamac(sha256) registered\n"); ++ return 0; ++} ++ ++static void rpmb_sim_hmac_256_free(struct rpmb_sim_dev *rsdev) ++{ ++ struct shash_desc *desc = rsdev->hash_desc; ++ ++ if (desc->tfm) ++ crypto_free_shash(desc->tfm); ++ kfree(desc); ++ ++ rsdev->hash_desc = NULL; ++} ++ ++static int rpmb_sim_probe(struct device *dev) ++{ ++ struct rpmb_sim_dev *rsdev; ++ int ret; ++ ++ rsdev = kzalloc(sizeof(*rsdev), GFP_KERNEL); ++ if (!rsdev) ++ return -ENOMEM; ++ ++ rsdev->dev = dev; ++ ++ ret = rpmb_sim_hmac_256_alloc(rsdev); ++ if (ret) ++ goto err; ++ ++ rsdev->capacity = CAPACITY_UNIT * daunits; ++ rsdev->blkcnt = rsdev->capacity / BLK_UNIT; ++ rsdev->da = kzalloc(rsdev->capacity, GFP_KERNEL); ++ if (!rsdev->da) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ rpmb_sim_ops.dev_id_len = strlen(id); ++ rpmb_sim_ops.dev_id = id; ++ rpmb_sim_ops.wr_cnt_max = max_wr_blks; ++ rpmb_sim_ops.rd_cnt_max = max_wr_blks; ++ rpmb_sim_ops.block_size = 1; ++ ++ rsdev->rdev = rpmb_dev_register(rsdev->dev, 0, &rpmb_sim_ops); ++ if (IS_ERR(rsdev->rdev)) { ++ ret = PTR_ERR(rsdev->rdev); ++ goto err; ++ } ++ ++ dev_info(dev, "registered RPMB capacity = %zu of %zu blocks\n", ++ rsdev->capacity, rsdev->blkcnt); ++ ++ dev_set_drvdata(dev, rsdev); ++ ++ return 0; ++err: ++ rpmb_sim_hmac_256_free(rsdev); ++ if (rsdev) ++ kfree(rsdev->da); ++ kfree(rsdev); ++ return ret; ++} ++ ++static int rpmb_sim_remove(struct device *dev) ++{ ++ struct rpmb_sim_dev *rsdev; ++ ++ rsdev = 
dev_get_drvdata(dev); ++ ++ rpmb_dev_unregister(rsdev->rdev); ++ ++ dev_set_drvdata(dev, NULL); ++ ++ rpmb_sim_hmac_256_free(rsdev); ++ ++ kfree(rsdev->da); ++ kfree(rsdev); ++ return 0; ++} ++ ++static void rpmb_sim_shutdown(struct device *dev) ++{ ++ rpmb_sim_remove(dev); ++} ++ ++static int rpmb_sim_match(struct device *dev, struct device_driver *drv) ++{ ++ return 1; ++} ++ ++static struct bus_type rpmb_sim_bus = { ++ .name = "rpmb_sim", ++ .match = rpmb_sim_match, ++}; ++ ++static struct device_driver rpmb_sim_drv = { ++ .name = "rpmb_sim", ++ .probe = rpmb_sim_probe, ++ .remove = rpmb_sim_remove, ++ .shutdown = rpmb_sim_shutdown, ++}; ++ ++static void rpmb_sim_dev_release(struct device *dev) ++{ ++} ++ ++static struct device rpmb_sim_dev; ++ ++static int __init rpmb_sim_init(void) ++{ ++ int ret; ++ struct device *dev = &rpmb_sim_dev; ++ struct device_driver *drv = &rpmb_sim_drv; ++ ++ ret = bus_register(&rpmb_sim_bus); ++ if (ret) ++ return ret; ++ ++ dev->bus = &rpmb_sim_bus; ++ dev->release = rpmb_sim_dev_release; ++ dev_set_name(dev, "%s", "rpmb_sim"); ++ ret = device_register(dev); ++ if (ret) { ++ pr_err("device register failed %d\n", ret); ++ goto err_device; ++ } ++ ++ drv->bus = &rpmb_sim_bus; ++ ret = driver_register(drv); ++ if (ret) { ++ pr_err("driver register failed %d\n", ret); ++ goto err_driver; ++ } ++ ++ return 0; ++ ++err_driver: ++ device_unregister(dev); ++err_device: ++ bus_unregister(&rpmb_sim_bus); ++ return ret; ++} ++ ++static void __exit rpmb_sim_exit(void) ++{ ++ struct device *dev = &rpmb_sim_dev; ++ struct device_driver *drv = &rpmb_sim_drv; ++ ++ device_unregister(dev); ++ driver_unregister(drv); ++ bus_unregister(&rpmb_sim_bus); ++} ++ ++module_init(rpmb_sim_init); ++module_exit(rpmb_sim_exit); ++ ++MODULE_AUTHOR("Tomas Winkler +Date: Fri, 23 Aug 2019 01:20:33 -0700 +Subject: [PATCH 010/690] drm/i915/tgl: Move GTCR register to cope with GAM + MMIO address remap + +GAM registers located in the 0x4xxx range have been relocated 
to 0xCxxx; +this is to make space for global MOCS registers. + +v2: Rename register and bitfield to its new name (suggested by Mika) + +HSD: 399379 +Cc: Daniele Ceraolo Spurio +Signed-off-by: Michel Thierry +Reviewed-by: Lucas De Marchi +Reviewed-by: Mika Kuoppala +Signed-off-by: Lucas De Marchi +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-2-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h | 3 +++ + drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +++++++- + 2 files changed, 10 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h +index edf194d23c6b..1949346e714e 100644 +--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h ++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h +@@ -83,6 +83,9 @@ + #define GEN8_GTCR _MMIO(0x4274) + #define GEN8_GTCR_INVALIDATE (1<<0) + ++#define GEN12_GUC_TLB_INV_CR _MMIO(0xcee8) ++#define GEN12_GUC_TLB_INV_CR_INVALIDATE (1 << 0) ++ + #define GUC_ARAT_C6DIS _MMIO(0xA178) + + #define GUC_SHIM_CONTROL _MMIO(0xc064) +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c +index b1a7a8b9b46a..135f5494463a 100644 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c +@@ -132,9 +132,15 @@ static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) + static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) + { + struct intel_uncore *uncore = ggtt->vm.gt->uncore; ++ struct drm_i915_private *i915 = ggtt->vm.i915; + + gen6_ggtt_invalidate(ggtt); +- intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); ++ ++ if (INTEL_GEN(i915) >= 12) ++ intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, ++ GEN12_GUC_TLB_INV_CR_INVALIDATE); ++ else ++ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); + } + + static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) +-- +2.17.1 + diff --git a/patches/0010-net-stmmac-add-EHL-PSE0-PSE1-2.5Gbps-PCI-info.connectivity 
b/patches/0010-net-stmmac-add-EHL-PSE0-PSE1-2.5Gbps-PCI-info.connectivity new file mode 100644 index 0000000000..2e7d28a86b --- /dev/null +++ b/patches/0010-net-stmmac-add-EHL-PSE0-PSE1-2.5Gbps-PCI-info.connectivity @@ -0,0 +1,47 @@ +From 106bd51092e3b9755231a9371e97f833055134ee Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Sat, 27 Jul 2019 07:49:49 +0800 +Subject: [PATCH 010/108] net: stmmac: add EHL PSE0 & PSE1 2.5Gbps PCI info and + PCI ID + +Add EHL PSE0/1 SGMII 2.5Gbps PCI info and PCI ID + +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 1fcd75aba1be..17ddf71f1fbe 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -650,8 +650,10 @@ static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); + #define STMMAC_EHL_SGMII1G_ID 0x4b31 + #define STMMAC_EHL_PSE0_RGMII1G_ID 0x4ba0 + #define STMMAC_EHL_PSE0_SGMII1G_ID 0x4ba1 ++#define STMMAC_EHL_PSE0_SGMII2G5_ID 0x4ba2 + #define STMMAC_EHL_PSE1_RGMII1G_ID 0x4bb0 + #define STMMAC_EHL_PSE1_SGMII1G_ID 0x4bb1 ++#define STMMAC_EHL_PSE1_SGMII2G5_ID 0x4bb2 + #define STMMAC_TGL_SGMII1G_ID 0xa0ac + #define STMMAC_GMAC5_ID 0x7102 + +@@ -670,10 +672,14 @@ static const struct pci_device_id stmmac_id_table[] = { + ehl_pse0_rgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE0_SGMII1G_ID, + ehl_pse0_sgmii1g_pci_info), ++ STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE0_SGMII2G5_ID, ++ ehl_pse0_sgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE1_RGMII1G_ID, + ehl_pse1_rgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE1_SGMII1G_ID, + ehl_pse1_sgmii1g_pci_info), ++ STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE1_SGMII2G5_ID, ++ ehl_pse1_sgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_TGL_SGMII1G_ID, 
tgl_sgmii1g_pci_info), + STMMAC_DEVICE(SYNOPSYS, STMMAC_GMAC5_ID, snps_gmac5_pci_info), + {} +-- +2.17.1 + diff --git a/patches/0010-platform-x86-Add-Atom-based-Elkhart-Lake-EHL-plat.core-ehl b/patches/0010-platform-x86-Add-Atom-based-Elkhart-Lake-EHL-plat.core-ehl new file mode 100644 index 0000000000..a856aa80a8 --- /dev/null +++ b/patches/0010-platform-x86-Add-Atom-based-Elkhart-Lake-EHL-plat.core-ehl @@ -0,0 +1,74 @@ +From e9b21eb96cfc363fee9a0dd0f22ed074084c2499 Mon Sep 17 00:00:00 2001 +From: Gayatri Kammela +Date: Tue, 3 Sep 2019 17:21:10 -0700 +Subject: [PATCH 10/12] platform/x86: Add Atom based Elkhart Lake(EHL) platform + support to intel_pmc_core driver + +Add Elkhart Lake to the list of the platforms that intel_pmc_core +driver supports for pmc_core device. + +Just like ICL and TGL, EHL can also reuse all the CNL PCH IPs. Also, it +uses the same PCH IPs of TGL, no additional effort is needed to enable +but to simply reuse them. + +Cc: Peter Zijlstra +Cc: Srinivas Pandruvada +Cc: Andy Shevchenko +Cc: Kan Liang +Cc: David E. Box +Cc: Rajneesh Bhardwaj +Cc: Tony Luck +Reviewed-by: Tony Luck +Signed-off-by: Gayatri Kammela +--- + drivers/platform/x86/intel_pmc_core.c | 13 ++++++++++--- + 1 file changed, 10 insertions(+), 3 deletions(-) + +diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c +index aef8f6d8bddb..2047b54fad54 100644 +--- a/drivers/platform/x86/intel_pmc_core.c ++++ b/drivers/platform/x86/intel_pmc_core.c +@@ -190,7 +190,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = { + {"SDX", BIT(4)}, + {"SPE", BIT(5)}, + {"Fuse", BIT(6)}, +- /* Reserved for Cannonlake but valid for Icelake and Tigerlake */ ++ /* ++ * Reserved for Cannonlake but valid for Icelake, ++ * Tigerlake and Elkhart lake. 
++ */ + {"SBR8", BIT(7)}, + + {"CSME_FSC", BIT(0)}, +@@ -234,7 +237,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = { + {"HDA_PGD4", BIT(2)}, + {"HDA_PGD5", BIT(3)}, + {"HDA_PGD6", BIT(4)}, +- /* Reserved for Cannonlake but valid for Icelake and Tigerlake */ ++ /* ++ * Reserved for Cannonlake but valid for Icelake, ++ * Tigerlake and Elkhart lake. ++ */ + {"PSF6", BIT(5)}, + {"PSF7", BIT(6)}, + {"PSF8", BIT(7)}, +@@ -266,7 +272,7 @@ static const struct pmc_bit_map *ext_icl_pfear_map[] = { + }; + + static const struct pmc_bit_map tgl_pfear_map[] = { +- /* Tigerlake generation onwards only */ ++ /* Tigerlake and Elkhart lake generation onwards only */ + {"PSF9", BIT(0)}, + {"RES_66", BIT(1)}, + {"RES_67", BIT(2)}, +@@ -872,6 +878,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { + INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), + INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map), + INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map), ++ INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map), + {} + }; + +-- +2.17.1 + diff --git a/patches/0010-sos-cleanup-hypercall-API.acrn b/patches/0010-sos-cleanup-hypercall-API.acrn new file mode 100644 index 0000000000..94af349d16 --- /dev/null +++ b/patches/0010-sos-cleanup-hypercall-API.acrn @@ -0,0 +1,755 @@ +From 79222262995f06a8edfe50ffed630df91beece40 Mon Sep 17 00:00:00 2001 +From: Mingqiang Chi +Date: Fri, 31 Aug 2018 10:58:56 +0800 +Subject: [PATCH 010/150] sos: cleanup hypercall API + +Put all hypercall APIs into vhm_hypercall.c +other modules need to call hypercall API from +this file. + +Rmove the unused IC_VM_PCI_MSIX_REMAP for PCI MSIX device. 
+ +Change-Id: Id896a8300cf54279151a9d5674ed27a352df3f3f +Tracked-On:218445 +Signed-off-by: Mingqiang Chi +Reviewed-on: +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/char/vhm/vhm_dev.c | 158 ++++++++++++--- + drivers/vhm/vhm_hypercall.c | 320 ++++-------------------------- + include/linux/vhm/vhm_hypercall.h | 63 +++--- + 3 files changed, 204 insertions(+), 337 deletions(-) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index e551392710ef..e12445e68c44 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -153,25 +153,66 @@ static long vhm_dev_ioctl(struct file *filep, + } + + switch (ioctl_num) { +- case IC_CREATE_VM: +- ret = vhm_create_vm(vm, ioctl_param); +- break; ++ case IC_CREATE_VM: { ++ struct acrn_create_vm created_vm; ++ ++ if (copy_from_user(&created_vm, (void *)ioctl_param, ++ sizeof(struct acrn_create_vm))) ++ return -EFAULT; ++ ++ ret = hcall_create_vm(virt_to_phys(&created_vm)); ++ if ((ret < 0) || ++ (created_vm.vmid == ACRN_INVALID_VMID)) { ++ pr_err("vhm: failed to create VM from Hypervisor !\n"); ++ return -EFAULT; ++ } ++ ++ if (copy_to_user((void *)ioctl_param, &created_vm, ++ sizeof(struct acrn_create_vm))) ++ return -EFAULT; + +- case IC_RESUME_VM: +- ret = vhm_resume_vm(vm); ++ vm->vmid = created_vm.vmid; ++ ++ pr_info("vhm: VM %ld created\n", created_vm.vmid); + break; ++ } + +- case IC_PAUSE_VM: +- ret = vhm_pause_vm(vm); ++ case IC_RESUME_VM: { ++ ret = hcall_resume_vm(vm->vmid); ++ if (ret < 0) { ++ pr_err("vhm: failed to start VM %ld!\n", vm->vmid); ++ return -EFAULT; ++ } + break; ++ } + +- case IC_DESTROY_VM: +- ret = vhm_destroy_vm(vm); ++ case IC_PAUSE_VM: { ++ ret = hcall_pause_vm(vm->vmid); ++ if (ret < 0) { ++ pr_err("vhm: failed to pause VM %ld!\n", vm->vmid); ++ return -EFAULT; ++ } + break; ++ } + +- case IC_QUERY_VMSTATE: +- ret = vhm_query_vm_state(vm); ++ case IC_DESTROY_VM: { ++ ret = hcall_destroy_vm(vm->vmid); ++ if (ret < 0) { ++ pr_err("failed to 
destroy VM %ld\n", vm->vmid); ++ return -EFAULT; ++ } ++ vm->vmid = ACRN_INVALID_VMID; + break; ++ } ++ ++ case IC_QUERY_VMSTATE: { ++ ret = hcall_query_vm_state(vm->vmid); ++ if (ret < 0) { ++ pr_err("vhm: failed to query VM State%ld!\n", vm->vmid); ++ return -EFAULT; ++ } ++ return ret; ++ } + + case IC_ALLOC_MEMSEG: { + struct vm_memseg memseg; +@@ -239,17 +280,43 @@ static long vhm_dev_ioctl(struct file *filep, + } + + case IC_ASSERT_IRQLINE: { +- ret = vhm_assert_irqline(vm, ioctl_param); ++ struct acrn_irqline irq; ++ ++ if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) ++ return -EFAULT; ++ ++ ret = hcall_assert_irqline(vm->vmid, virt_to_phys(&irq)); ++ if (ret < 0) { ++ pr_err("vhm: failed to assert irq!\n"); ++ return -EFAULT; ++ } + break; + } +- + case IC_DEASSERT_IRQLINE: { +- ret = vhm_deassert_irqline(vm, ioctl_param); ++ struct acrn_irqline irq; ++ ++ if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) ++ return -EFAULT; ++ ++ ret = hcall_deassert_irqline(vm->vmid, virt_to_phys(&irq)); ++ if (ret < 0) { ++ pr_err("vhm: failed to deassert irq!\n"); ++ return -EFAULT; ++ } + break; + } +- + case IC_PULSE_IRQLINE: { +- ret = vhm_pulse_irqline(vm, ioctl_param); ++ struct acrn_irqline irq; ++ ++ if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) ++ return -EFAULT; ++ ++ ret = hcall_pulse_irqline(vm->vmid, ++ virt_to_phys(&irq)); ++ if (ret < 0) { ++ pr_err("vhm: failed to assert irq!\n"); ++ return -EFAULT; ++ } + break; + } + +@@ -268,27 +335,72 @@ static long vhm_dev_ioctl(struct file *filep, + } + + case IC_ASSIGN_PTDEV: { +- ret = vhm_assign_ptdev(vm, ioctl_param); ++ uint16_t bdf; ++ ++ if (copy_from_user(&bdf, ++ (void *)ioctl_param, sizeof(uint16_t))) ++ return -EFAULT; ++ ++ ret = hcall_assign_ptdev(vm->vmid, virt_to_phys(&bdf)); ++ if (ret < 0) { ++ pr_err("vhm: failed to assign ptdev!\n"); ++ return -EFAULT; ++ } + break; + } +- + case IC_DEASSIGN_PTDEV: { +- ret = vhm_deassign_ptdev(vm, ioctl_param); ++ uint16_t bdf; 
++ ++ if (copy_from_user(&bdf, ++ (void *)ioctl_param, sizeof(uint16_t))) ++ return -EFAULT; ++ ++ ret = hcall_deassign_ptdev(vm->vmid, virt_to_phys(&bdf)); ++ if (ret < 0) { ++ pr_err("vhm: failed to deassign ptdev!\n"); ++ return -EFAULT; ++ } + break; + } + + case IC_SET_PTDEV_INTR_INFO: { +- ret = vhm_set_ptdev_intr_info(vm, ioctl_param); ++ struct acrn_ptdev_irq pt_irq; ++ int i; ++ ++ if (copy_from_user(&pt_irq, ++ (void *)ioctl_param, sizeof(pt_irq))) ++ return -EFAULT; ++ ++ ret = hcall_set_ptdev_intr_info(vm->vmid, ++ virt_to_phys(&pt_irq)); ++ if (ret < 0) { ++ pr_err("vhm: failed to set intr info for ptdev!\n"); ++ return -EFAULT; ++ } ++ + break; + } +- + case IC_RESET_PTDEV_INTR_INFO: { +- ret = vhm_reset_ptdev_intr_info(vm, ioctl_param); ++ struct acrn_ptdev_irq pt_irq; ++ int i; ++ ++ if (copy_from_user(&pt_irq, ++ (void *)ioctl_param, sizeof(pt_irq))) ++ return -EFAULT; ++ ++ ret = hcall_reset_ptdev_intr_info(vm->vmid, ++ virt_to_phys(&pt_irq)); ++ if (ret < 0) { ++ pr_err("vhm: failed to reset intr info for ptdev!\n"); ++ return -EFAULT; ++ } ++ + break; + } + + case IC_VM_PCI_MSIX_REMAP: { +- ret = vhm_remap_pci_msix(vm, ioctl_param); ++ /* This is not used any more */ ++ ret = -EFAULT; + break; + } + +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index 0f3f6c1c5f4c..11ca6b86baed 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -48,341 +48,97 @@ + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +-#include +-#include +-#include ++#include + #include + #include + +-/* max num of pass-through devices using msix */ +-#define MAX_ENTRY 3 +- +-struct table_iomems { +- /* device's virtual BDF */ +- unsigned short virt_bdf; +- /* virtual base address of MSI-X table in memory space after ioremap */ +- unsigned long mmap_addr; +-} tables[MAX_ENTRY]; +- +-inline long hcall_inject_msi(unsigned long vmid, unsigned long msi) ++inline long hcall_create_vm(unsigned long vminfo) + { +- return acrn_hypercall2(HC_INJECT_MSI, vmid, msi); ++ return acrn_hypercall2(HC_CREATE_VM, 0, vminfo); + } + +-inline long hcall_remap_pci_msix(unsigned long vmid, unsigned long msix) ++inline long hcall_resume_vm(unsigned long vmid) + { +- return acrn_hypercall2(HC_VM_PCI_MSIX_REMAP, vmid, msix); ++ return acrn_hypercall1(HC_RESUME_VM, vmid); + } + +-inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) ++inline long hcall_pause_vm(unsigned long vmid) + { +- return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); ++ return acrn_hypercall1(HC_PAUSE_VM, vmid); + } + +-inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu_mask) ++inline long hcall_destroy_vm(unsigned long vmid) + { +- return acrn_hypercall2(HC_NOTIFY_REQUEST_FINISH, vmid, vcpu_mask); ++ return acrn_hypercall1(HC_DESTROY_VM, vmid); + } + +-inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) ++inline long hcall_query_vm_state(unsigned long vmid) + { +- return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); ++ return acrn_hypercall1(HC_QUERY_VMSTATE, vmid); + } + +-inline long hcall_vm_gpa2hpa(unsigned long vmid, unsigned long gpa2hpa) +-{ +- return acrn_hypercall2(HC_VM_GPA2HPA, vmid, gpa2hpa); +-} +- +-inline long vhm_create_vm(struct vhm_vm *vm, unsigned long ioctl_param) ++inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) + { +- long ret = 0; +- struct acrn_create_vm created_vm; +- +- if (copy_from_user(&created_vm, (void 
*)ioctl_param, +- sizeof(struct acrn_create_vm))) +- return -EFAULT; +- +- ret = acrn_hypercall2(HC_CREATE_VM, 0, +- virt_to_phys(&created_vm)); +- if ((ret < 0) || +- (created_vm.vmid == ACRN_INVALID_VMID)) { +- pr_err("vhm: failed to create VM from Hypervisor !\n"); +- return -EFAULT; +- } +- +- if (copy_to_user((void *)ioctl_param, &created_vm, +- sizeof(struct acrn_create_vm))) +- return -EFAULT; +- +- vm->vmid = created_vm.vmid; +- pr_info("vhm: VM %ld created\n", created_vm.vmid); +- +- return ret; ++ return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); + } + +-inline long vhm_resume_vm(struct vhm_vm *vm) ++inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) + { +- long ret = 0; +- +- ret = acrn_hypercall1(HC_RESUME_VM, vm->vmid); +- if (ret < 0) { +- pr_err("vhm: failed to start VM %ld!\n", vm->vmid); +- return -EFAULT; +- } +- +- return ret; ++ return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); + } + +-inline long vhm_pause_vm(struct vhm_vm *vm) ++inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu_mask) + { +- long ret = 0; +- +- ret = acrn_hypercall1(HC_PAUSE_VM, vm->vmid); +- if (ret < 0) { +- pr_err("vhm: failed to pause VM %ld!\n", vm->vmid); +- return -EFAULT; +- } +- +- return ret; ++ return acrn_hypercall2(HC_NOTIFY_REQUEST_FINISH, vmid, vcpu_mask); + } + +-inline long vhm_destroy_vm(struct vhm_vm *vm) ++inline long hcall_assert_irqline(unsigned long vmid, unsigned long irq) + { +- long ret = 0; +- +- ret = acrn_hypercall1(HC_DESTROY_VM, vm->vmid); +- if (ret < 0) { +- pr_err("failed to destroy VM %ld\n", vm->vmid); +- return -EFAULT; +- } +- vm->vmid = ACRN_INVALID_VMID; +- +- return ret; ++ return acrn_hypercall2(HC_ASSERT_IRQLINE, vmid, irq); + } + +-inline long vhm_query_vm_state(struct vhm_vm *vm) ++inline long hcall_deassert_irqline(unsigned long vmid, unsigned long irq) + { +- long ret = 0; +- +- ret = acrn_hypercall1(HC_QUERY_VMSTATE, vm->vmid); +- if (ret < 0) { +- pr_err("vhm: 
failed to query VM State%ld!\n", vm->vmid); +- return -EFAULT; +- } +- +- return ret; ++ return acrn_hypercall2(HC_DEASSERT_IRQLINE, vmid, irq); + } + +-inline long vhm_assert_irqline(struct vhm_vm *vm, unsigned long ioctl_param) ++inline long hcall_pulse_irqline(unsigned long vmid, unsigned long irq) + { +- long ret = 0; +- struct acrn_irqline irq; +- +- if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) +- return -EFAULT; +- +- ret = acrn_hypercall2(HC_ASSERT_IRQLINE, vm->vmid, +- virt_to_phys(&irq)); +- if (ret < 0) { +- pr_err("vhm: failed to assert irq!\n"); +- return -EFAULT; +- } +- +- return ret; ++ return acrn_hypercall2(HC_PULSE_IRQLINE, vmid, irq); + } + +-inline long vhm_deassert_irqline(struct vhm_vm *vm, unsigned long ioctl_param) ++inline long hcall_inject_msi(unsigned long vmid, unsigned long msi) + { +- long ret = 0; +- struct acrn_irqline irq; +- +- if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) +- return -EFAULT; +- +- ret = acrn_hypercall2(HC_DEASSERT_IRQLINE, vm->vmid, +- virt_to_phys(&irq)); +- if (ret < 0) { +- pr_err("vhm: failed to deassert irq!\n"); +- return -EFAULT; +- } +- +- return ret; ++ return acrn_hypercall2(HC_INJECT_MSI, vmid, msi); + } + +-inline long vhm_pulse_irqline(struct vhm_vm *vm, unsigned long ioctl_param) ++inline long hcall_assign_ptdev(unsigned long vmid, unsigned long bdf) + { +- long ret = 0; +- struct acrn_irqline irq; +- +- if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) +- return -EFAULT; +- +- ret = acrn_hypercall2(HC_PULSE_IRQLINE, vm->vmid, +- virt_to_phys(&irq)); +- if (ret < 0) { +- pr_err("vhm: failed to assert irq!\n"); +- return -EFAULT; +- } +- +- return ret; ++ return acrn_hypercall2(HC_ASSIGN_PTDEV, vmid, bdf); + } + +-inline long vhm_assign_ptdev(struct vhm_vm *vm, unsigned long ioctl_param) ++inline long hcall_deassign_ptdev(unsigned long vmid, unsigned long bdf) + { +- long ret = 0; +- uint16_t bdf; +- +- if (copy_from_user(&bdf, +- (void *)ioctl_param, 
sizeof(uint16_t))) +- return -EFAULT; +- +- ret = acrn_hypercall2(HC_ASSIGN_PTDEV, vm->vmid, +- virt_to_phys(&bdf)); +- if (ret < 0) { +- pr_err("vhm: failed to assign ptdev!\n"); +- return -EFAULT; +- } +- +- return ret; ++ return acrn_hypercall2(HC_DEASSIGN_PTDEV, vmid, bdf); + } + +-inline long vhm_deassign_ptdev(struct vhm_vm *vm, unsigned long ioctl_param) ++inline long hcall_set_ptdev_intr_info(unsigned long vmid, unsigned long pt_irq) + { +- long ret = 0; +- uint16_t bdf; +- +- if (copy_from_user(&bdf, +- (void *)ioctl_param, sizeof(uint16_t))) +- return -EFAULT; +- +- ret = acrn_hypercall2(HC_DEASSIGN_PTDEV, vm->vmid, +- virt_to_phys(&bdf)); +- if (ret < 0) { +- pr_err("vhm: failed to deassign ptdev!\n"); +- return -EFAULT; +- } +- +- return ret; ++ return acrn_hypercall2(HC_SET_PTDEV_INTR_INFO, vmid, pt_irq); + } + +-inline long vhm_set_ptdev_intr_info(struct vhm_vm *vm, +- unsigned long ioctl_param) ++inline long hcall_reset_ptdev_intr_info(unsigned long vmid, ++ unsigned long pt_irq) + { +- long ret = 0; +- struct acrn_ptdev_irq pt_irq; +- int i; +- +- if (copy_from_user(&pt_irq, +- (void *)ioctl_param, sizeof(pt_irq))) +- return -EFAULT; +- +- ret = acrn_hypercall2(HC_SET_PTDEV_INTR_INFO, vm->vmid, +- virt_to_phys(&pt_irq)); +- if (ret < 0) { +- pr_err("vhm: failed to set intr info for ptdev!\n"); +- return -EFAULT; +- } +- +- if (pt_irq.msix.table_paddr) { +- for (i = 0; i < MAX_ENTRY; i++) { +- if (tables[i].virt_bdf) +- continue; +- +- tables[i].virt_bdf = pt_irq.virt_bdf; +- tables[i].mmap_addr = (unsigned long) +- ioremap_nocache(pt_irq.msix.table_paddr, +- pt_irq.msix.table_size); +- break; +- } +- } +- +- return ret; ++ return acrn_hypercall2(HC_RESET_PTDEV_INTR_INFO, vmid, pt_irq); + } + +-inline long vhm_reset_ptdev_intr_info(struct vhm_vm *vm, +- unsigned long ioctl_param) ++inline long hcall_remap_pci_msix(unsigned long vmid, unsigned long msi) + { +- long ret = 0; +- struct acrn_ptdev_irq pt_irq; +- int i; +- +- if (copy_from_user(&pt_irq, 
+- (void *)ioctl_param, sizeof(pt_irq))) +- return -EFAULT; +- +- ret = acrn_hypercall2(HC_RESET_PTDEV_INTR_INFO, vm->vmid, +- virt_to_phys(&pt_irq)); +- if (ret < 0) { +- pr_err("vhm: failed to reset intr info for ptdev!\n"); +- return -EFAULT; +- } +- +- if (pt_irq.msix.table_paddr) { +- for (i = 0; i < MAX_ENTRY; i++) { +- if (tables[i].virt_bdf) +- continue; +- +- tables[i].virt_bdf = pt_irq.virt_bdf; +- tables[i].mmap_addr = (unsigned long) +- ioremap_nocache(pt_irq.msix.table_paddr, +- pt_irq.msix.table_size); +- break; +- } +- } +- +- return ret; ++ return acrn_hypercall2(HC_VM_PCI_MSIX_REMAP, vmid, msi); + } + +-inline long vhm_remap_pci_msix(struct vhm_vm *vm, unsigned long ioctl_param) ++inline long hcall_vm_gpa2hpa(unsigned long vmid, unsigned long addr) + { +- long ret = 0; +- struct acrn_vm_pci_msix_remap msix_remap; +- +- if (copy_from_user(&msix_remap, +- (void *)ioctl_param, sizeof(msix_remap))) +- return -EFAULT; +- +- ret = acrn_hypercall2(HC_VM_PCI_MSIX_REMAP, vm->vmid, +- virt_to_phys(&msix_remap)); +- +- if (copy_to_user((void *)ioctl_param, +- &msix_remap, sizeof(msix_remap))) +- return -EFAULT; +- +- if (msix_remap.msix) { +- void __iomem *msix_entry; +- int i; +- +- for (i = 0; i < MAX_ENTRY; i++) { +- if (tables[i].virt_bdf == msix_remap.virt_bdf) +- break; +- } +- +- if (!tables[i].mmap_addr) +- return -EFAULT; +- +- msix_entry = (void *)(tables[i].mmap_addr + +- msix_remap.msix_entry_index * +- PCI_MSIX_ENTRY_SIZE); +- +- /* mask the entry when setup */ +- writel(PCI_MSIX_ENTRY_CTRL_MASKBIT, +- msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL); +- +- /* setup the msi entry */ +- writel((uint32_t)msix_remap.msi_addr, +- msix_entry + PCI_MSIX_ENTRY_LOWER_ADDR); +- writel((uint32_t)(msix_remap.msi_addr >> 32), +- msix_entry + PCI_MSIX_ENTRY_UPPER_ADDR); +- writel(msix_remap.msi_data, +- msix_entry + PCI_MSIX_ENTRY_DATA); +- +- /* unmask the entry */ +- writel(msix_remap.vector_ctl & +- PCI_MSIX_ENTRY_CTRL_MASKBIT, +- msix_entry + 
PCI_MSIX_ENTRY_VECTOR_CTRL); +- } +- +- return ret; ++ return acrn_hypercall2(HC_VM_GPA2HPA, vmid, addr); + } +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index ce579e3734ff..35bb48ae6cd3 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -52,14 +52,12 @@ + #ifndef VHM_HYPERCALL_H + #define VHM_HYPERCALL_H + +-#include +- +-static inline long acrn_hypercall0(unsigned long hyp_id) ++static inline long acrn_hypercall0(unsigned long hcall_id) + { + + /* x86-64 System V ABI register usage */ + register signed long result asm("rax"); +- register unsigned long r8 asm("r8") = hyp_id; ++ register unsigned long r8 asm("r8") = hcall_id; + + /* Execute vmcall */ + asm volatile(".byte 0x0F,0x01,0xC1\n" +@@ -70,12 +68,12 @@ static inline long acrn_hypercall0(unsigned long hyp_id) + return result; + } + +-static inline long acrn_hypercall1(unsigned long hyp_id, unsigned long param1) ++static inline long acrn_hypercall1(unsigned long hcall_id, unsigned long param1) + { + + /* x86-64 System V ABI register usage */ + register signed long result asm("rax"); +- register unsigned long r8 asm("r8") = hyp_id; ++ register unsigned long r8 asm("r8") = hcall_id; + + /* Execute vmcall */ + asm volatile(".byte 0x0F,0x01,0xC1\n" +@@ -86,13 +84,13 @@ static inline long acrn_hypercall1(unsigned long hyp_id, unsigned long param1) + return result; + } + +-static inline long acrn_hypercall2(unsigned long hyp_id, unsigned long param1, ++static inline long acrn_hypercall2(unsigned long hcall_id, unsigned long param1, + unsigned long param2) + { + + /* x86-64 System V ABI register usage */ + register signed long result asm("rax"); +- register unsigned long r8 asm("r8") = hyp_id; ++ register unsigned long r8 asm("r8") = hcall_id; + + /* Execute vmcall */ + asm volatile(".byte 0x0F,0x01,0xC1\n" +@@ -103,13 +101,13 @@ static inline long acrn_hypercall2(unsigned long hyp_id, unsigned long param1, + return result; + } + 
+-static inline long acrn_hypercall3(unsigned long hyp_id, unsigned long param1, ++static inline long acrn_hypercall3(unsigned long hcall_id, unsigned long param1, + unsigned long param2, unsigned long param3) + { + + /* x86-64 System V ABI register usage */ + register signed long result asm("rax"); +- register unsigned long r8 asm("r8") = hyp_id; ++ register unsigned long r8 asm("r8") = hcall_id; + + /* Execute vmcall */ + asm volatile(".byte 0x0F,0x01,0xC1\n" +@@ -120,14 +118,14 @@ static inline long acrn_hypercall3(unsigned long hyp_id, unsigned long param1, + return result; + } + +-static inline long acrn_hypercall4(unsigned long hyp_id, unsigned long param1, ++static inline long acrn_hypercall4(unsigned long hcall_id, unsigned long param1, + unsigned long param2, unsigned long param3, + unsigned long param4) + { + + /* x86-64 System V ABI register usage */ + register signed long result asm("rax"); +- register unsigned long r8 asm("r8") = hyp_id; ++ register unsigned long r8 asm("r8") = hcall_id; + + /* Execute vmcall */ + asm volatile(".byte 0x0F,0x01,0xC1\n" +@@ -139,27 +137,28 @@ static inline long acrn_hypercall4(unsigned long hyp_id, unsigned long param1, + return result; + } + +-inline long hcall_inject_msi(unsigned long vmid, unsigned long msi); +-inline long hcall_remap_pci_msix(unsigned long vmid, unsigned long msix); +-inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer); ++inline long hcall_create_vm(unsigned long vminfo); ++inline long hcall_resume_vm(unsigned long vmid); ++inline long hcall_pause_vm(unsigned long vmid); ++inline long hcall_destroy_vm(unsigned long vmid); ++inline long hcall_query_vm_state(unsigned long vmid); ++inline long hcall_set_memmap(unsigned long vmid, ++ unsigned long memmap); ++inline long hcall_set_ioreq_buffer(unsigned long vmid, ++ unsigned long buffer); + inline long hcall_notify_req_finish(unsigned long vmid, + unsigned long vcpu_mask); +-inline long hcall_set_memmap(unsigned long vmid, 
unsigned long memmap); +-inline long hcall_vm_gpa2hpa(unsigned long vmid, unsigned long gpa2hpa); +-inline long vhm_create_vm(struct vhm_vm *vm, unsigned long ioctl_param); +-inline long vhm_resume_vm(struct vhm_vm *vm); +-inline long vhm_pause_vm(struct vhm_vm *vm); +-inline long vhm_destroy_vm(struct vhm_vm *vm); +-inline long vhm_query_vm_state(struct vhm_vm *vm); +-inline long vhm_assert_irqline(struct vhm_vm *vm, unsigned long ioctl_param); +-inline long vhm_deassert_irqline(struct vhm_vm *vm, unsigned long ioctl_param); +-inline long vhm_pulse_irqline(struct vhm_vm *vm, unsigned long ioctl_param); +-inline long vhm_assign_ptdev(struct vhm_vm *vm, unsigned long ioctl_param); +-inline long vhm_deassign_ptdev(struct vhm_vm *vm, unsigned long ioctl_param); +-inline long vhm_set_ptdev_intr_info(struct vhm_vm *vm, +- unsigned long ioctl_param); +-inline long vhm_reset_ptdev_intr_info(struct vhm_vm *vm, +- unsigned long ioctl_param); +-inline long vhm_remap_pci_msix(struct vhm_vm *vm, unsigned long ioctl_param); ++inline long hcall_assert_irqline(unsigned long vmid, unsigned long irq); ++inline long hcall_deassert_irqline(unsigned long vmid, unsigned long irq); ++inline long hcall_pulse_irqline(unsigned long vmid, unsigned long irq); ++inline long hcall_inject_msi(unsigned long vmid, unsigned long msi); ++inline long hcall_assign_ptdev(unsigned long vmid, unsigned long bdf); ++inline long hcall_deassign_ptdev(unsigned long vmid, unsigned long bdf); ++inline long hcall_set_ptdev_intr_info(unsigned long vmid, ++ unsigned long pt_irq); ++inline long hcall_reset_ptdev_intr_info(unsigned long vmid, ++ unsigned long pt_irq); ++inline long hcall_remap_pci_msix(unsigned long vmid, unsigned long msi); ++inline long hcall_vm_gpa2hpa(unsigned long vmid, unsigned long addr); + + #endif /* VHM_HYPERCALL_H */ +-- +2.17.1 + diff --git a/patches/0010-trusty-add-trusty-virtio-driver.trusty b/patches/0010-trusty-add-trusty-virtio-driver.trusty new file mode 100644 index 
0000000000..61aeeb204a --- /dev/null +++ b/patches/0010-trusty-add-trusty-virtio-driver.trusty @@ -0,0 +1,769 @@ +From 7220d7f6c05eb5e03c21031ad39772038815f261 Mon Sep 17 00:00:00 2001 +From: Michael Ryleev +Date: Mon, 30 Mar 2015 12:43:59 -0700 +Subject: [PATCH 10/63] trusty: add trusty virtio driver + +Trusty virtio driver is responsible for management an +interaction with virtio devices exposed by Trusty. +During initialization, this driver makes an smc +call to retrieve Trusty virtio device descriptor from +secure side, parses it then instantiates and configures +the specified set of virtio devices. + +Change-Id: I3bac25d861db55a0f1408a4344ff5f8e53a75d44 +Signed-off-by: Michael Ryleev +--- + drivers/trusty/Kconfig | 6 + + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-virtio.c | 697 +++++++++++++++++++++++++++++++++ + include/linux/trusty/smcall.h | 8 + + 4 files changed, 712 insertions(+) + create mode 100644 drivers/trusty/trusty-virtio.c + +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index ea75813254c0..2255c0a9a815 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -33,4 +33,10 @@ config TRUSTY_LOG + depends on TRUSTY + default y + ++config TRUSTY_VIRTIO ++ tristate "Trusty virtio support" ++ depends on TRUSTY ++ select VIRTIO ++ default y ++ + endmenu +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index e527a237cb5d..beb89a87f115 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -9,3 +9,4 @@ obj-$(CONFIG_TRUSTY_FIQ_ARM) += trusty-fiq-arm.o + obj-$(CONFIG_TRUSTY_FIQ_ARM64) += trusty-fiq-arm64.o trusty-fiq-arm64-glue.o + obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o + obj-$(CONFIG_TRUSTY) += trusty-mem.o ++obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +new file mode 100644 +index 000000000000..fabbf29bffcc +--- /dev/null ++++ b/drivers/trusty/trusty-virtio.c +@@ -0,0 +1,697 @@ ++/* ++ * Trusty Virtio 
driver ++ * ++ * Copyright (C) 2015 Google, Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define RSC_DESCR_VER 1 ++ ++struct trusty_vdev; ++ ++struct trusty_ctx { ++ struct device *dev; ++ void *shared_va; ++ size_t shared_sz; ++ struct work_struct check_vqs; ++ struct work_struct kick_vqs; ++ struct notifier_block call_notifier; ++ struct list_head vdev_list; ++ struct mutex mlock; /* protects vdev_list */ ++}; ++ ++struct trusty_vring { ++ void *vaddr; ++ phys_addr_t paddr; ++ size_t size; ++ uint align; ++ uint elem_num; ++ u32 notifyid; ++ atomic_t needs_kick; ++ struct fw_rsc_vdev_vring *vr_descr; ++ struct virtqueue *vq; ++ struct trusty_vdev *tvdev; ++}; ++ ++struct trusty_vdev { ++ struct list_head node; ++ struct virtio_device vdev; ++ struct trusty_ctx *tctx; ++ u32 notifyid; ++ uint config_len; ++ void *config; ++ struct fw_rsc_vdev *vdev_descr; ++ uint vring_num; ++ struct trusty_vring vrings[0]; ++}; ++ ++#define vdev_to_tvdev(vd) container_of((vd), struct trusty_vdev, vdev) ++ ++static void check_all_vqs(struct work_struct *work) ++{ ++ uint i; ++ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx, ++ check_vqs); ++ struct trusty_vdev *tvdev; ++ ++ list_for_each_entry(tvdev, &tctx->vdev_list, node) { ++ for (i = 0; i < tvdev->vring_num; i++) ++ vring_interrupt(0, tvdev->vrings[i].vq); ++ } ++} ++ ++static int trusty_call_notify(struct 
notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct trusty_ctx *tctx; ++ ++ if (action != TRUSTY_CALL_RETURNED) ++ return NOTIFY_DONE; ++ ++ tctx = container_of(nb, struct trusty_ctx, call_notifier); ++ schedule_work(&tctx->check_vqs); ++ ++ return NOTIFY_OK; ++} ++ ++static void kick_vq(struct trusty_ctx *tctx, ++ struct trusty_vdev *tvdev, ++ struct trusty_vring *tvr) ++{ ++ int ret; ++ ++ dev_dbg(tctx->dev, "%s: vdev_id=%d: vq_id=%d\n", ++ __func__, tvdev->notifyid, tvr->notifyid); ++ ++ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_KICK_VQ, ++ tvdev->notifyid, tvr->notifyid, 0); ++ if (ret) { ++ dev_err(tctx->dev, "vq notify (%d, %d) returned %d\n", ++ tvdev->notifyid, tvr->notifyid, ret); ++ } ++} ++ ++static void kick_vqs(struct work_struct *work) ++{ ++ uint i; ++ struct trusty_vdev *tvdev; ++ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx, ++ kick_vqs); ++ mutex_lock(&tctx->mlock); ++ list_for_each_entry(tvdev, &tctx->vdev_list, node) { ++ for (i = 0; i < tvdev->vring_num; i++) { ++ struct trusty_vring *tvr = &tvdev->vrings[i]; ++ if (atomic_xchg(&tvr->needs_kick, 0)) ++ kick_vq(tctx, tvdev, tvr); ++ } ++ } ++ mutex_unlock(&tctx->mlock); ++} ++ ++static bool trusty_virtio_notify(struct virtqueue *vq) ++{ ++ struct trusty_vring *tvr = vq->priv; ++ struct trusty_vdev *tvdev = tvr->tvdev; ++ struct trusty_ctx *tctx = tvdev->tctx; ++ ++ atomic_set(&tvr->needs_kick, 1); ++ schedule_work(&tctx->kick_vqs); ++ ++ return true; ++} ++ ++static int trusty_load_device_descr(struct trusty_ctx *tctx, ++ void *va, size_t sz) ++{ ++ int ret; ++ ++ dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va); ++ ++ ret = trusty_call32_mem_buf(tctx->dev->parent, ++ SMC_SC_VIRTIO_GET_DESCR, ++ virt_to_page(va), sz, PAGE_KERNEL); ++ if (ret < 0) { ++ dev_err(tctx->dev, "%s: virtio get descr returned (%d)\n", ++ __func__, ret); ++ return -ENODEV; ++ } ++ return ret; ++} ++ ++static void trusty_virtio_stop(struct trusty_ctx *tctx, void 
*va, size_t sz) ++{ ++ int ret; ++ ++ dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va); ++ ++ ret = trusty_call32_mem_buf(tctx->dev->parent, SMC_SC_VIRTIO_STOP, ++ virt_to_page(va), sz, PAGE_KERNEL); ++ if (ret) { ++ dev_err(tctx->dev, "%s: virtio done returned (%d)\n", ++ __func__, ret); ++ return; ++ } ++} ++ ++static int trusty_virtio_start(struct trusty_ctx *tctx, ++ void *va, size_t sz) ++{ ++ int ret; ++ ++ dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va); ++ ++ ret = trusty_call32_mem_buf(tctx->dev->parent, SMC_SC_VIRTIO_START, ++ virt_to_page(va), sz, PAGE_KERNEL); ++ if (ret) { ++ dev_err(tctx->dev, "%s: virtio start returned (%d)\n", ++ __func__, ret); ++ return -ENODEV; ++ } ++ return 0; ++} ++ ++static void trusty_virtio_reset(struct virtio_device *vdev) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ struct trusty_ctx *tctx = tvdev->tctx; ++ ++ dev_dbg(&vdev->dev, "reset vdev_id=%d\n", tvdev->notifyid); ++ trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_RESET, ++ tvdev->notifyid, 0, 0); ++} ++ ++static u64 trusty_virtio_get_features(struct virtio_device *vdev) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ return tvdev->vdev_descr->dfeatures; ++} ++ ++static int trusty_virtio_finalize_features(struct virtio_device *vdev) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ ++ /* Make sure we don't have any features > 32 bits! 
*/ ++ BUG_ON((u32)vdev->features != vdev->features); ++ ++ tvdev->vdev_descr->gfeatures = vdev->features; ++ return 0; ++} ++ ++static void trusty_virtio_get_config(struct virtio_device *vdev, ++ unsigned offset, void *buf, ++ unsigned len) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ ++ dev_dbg(&vdev->dev, "%s: %d bytes @ offset %d\n", ++ __func__, len, offset); ++ ++ if (tvdev->config) { ++ if (offset + len <= tvdev->config_len) ++ memcpy(buf, tvdev->config + offset, len); ++ } ++} ++ ++static void trusty_virtio_set_config(struct virtio_device *vdev, ++ unsigned offset, const void *buf, ++ unsigned len) ++{ ++ dev_dbg(&vdev->dev, "%s\n", __func__); ++} ++ ++static u8 trusty_virtio_get_status(struct virtio_device *vdev) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ return tvdev->vdev_descr->status; ++} ++ ++static void trusty_virtio_set_status(struct virtio_device *vdev, u8 status) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ tvdev->vdev_descr->status = status; ++} ++ ++static void _del_vqs(struct virtio_device *vdev) ++{ ++ uint i; ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ struct trusty_vring *tvr = &tvdev->vrings[0]; ++ ++ for (i = 0; i < tvdev->vring_num; i++, tvr++) { ++ /* delete vq */ ++ if (tvr->vq) { ++ vring_del_virtqueue(tvr->vq); ++ tvr->vq = NULL; ++ } ++ /* delete vring */ ++ if (tvr->vaddr) { ++ free_pages_exact(tvr->vaddr, tvr->size); ++ tvr->vaddr = NULL; ++ } ++ } ++} ++ ++static void trusty_virtio_del_vqs(struct virtio_device *vdev) ++{ ++ dev_dbg(&vdev->dev, "%s\n", __func__); ++ _del_vqs(vdev); ++} ++ ++ ++static struct virtqueue *_find_vq(struct virtio_device *vdev, ++ unsigned id, ++ void (*callback)(struct virtqueue *vq), ++ const char *name) ++{ ++ struct trusty_vring *tvr; ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ phys_addr_t pa; ++ ++ if (!name) ++ return ERR_PTR(-EINVAL); ++ ++ if (id >= tvdev->vring_num) ++ return ERR_PTR(-EINVAL); ++ ++ tvr = &tvdev->vrings[id]; ++ ++ 
/* actual size of vring (in bytes) */ ++ tvr->size = PAGE_ALIGN(vring_size(tvr->elem_num, tvr->align)); ++ ++ /* allocate memory for the vring. */ ++ tvr->vaddr = alloc_pages_exact(tvr->size, GFP_KERNEL | __GFP_ZERO); ++ if (!tvr->vaddr) { ++ dev_err(&vdev->dev, "vring alloc failed\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ pa = virt_to_phys(tvr->vaddr); ++ /* save vring address to shared structure */ ++ tvr->vr_descr->da = (u32)pa; ++ /* da field is only 32 bit wide. Use previously unused 'reserved' field ++ * to store top 32 bits of 64-bit address ++ */ ++ tvr->vr_descr->reserved = (u32)(pa >> 32); ++ ++ dev_info(&vdev->dev, "vring%d: va(pa) %p(%llx) qsz %d notifyid %d\n", ++ id, tvr->vaddr, (u64)tvr->paddr, tvr->elem_num, tvr->notifyid); ++ ++ tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align, ++ vdev, true, tvr->vaddr, ++ trusty_virtio_notify, callback, name); ++ if (!tvr->vq) { ++ dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n", ++ name); ++ goto err_new_virtqueue; ++ } ++ ++ tvr->vq->priv = tvr; ++ ++ return tvr->vq; ++ ++err_new_virtqueue: ++ free_pages_exact(tvr->vaddr, tvr->size); ++ tvr->vaddr = NULL; ++ return ERR_PTR(-ENOMEM); ++} ++ ++static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, ++ struct virtqueue *vqs[], ++ vq_callback_t *callbacks[], ++ const char *names[]) ++{ ++ uint i; ++ int ret; ++ ++ for (i = 0; i < nvqs; i++) { ++ vqs[i] = _find_vq(vdev, i, callbacks[i], names[i]); ++ if (IS_ERR(vqs[i])) { ++ ret = PTR_ERR(vqs[i]); ++ _del_vqs(vdev); ++ return ret; ++ } ++ } ++ return 0; ++} ++ ++static const char *trusty_virtio_bus_name(struct virtio_device *vdev) ++{ ++ return "trusty-virtio"; ++} ++ ++/* The ops structure which hooks everything together. 
*/ ++static const struct virtio_config_ops trusty_virtio_config_ops = { ++ .get_features = trusty_virtio_get_features, ++ .finalize_features = trusty_virtio_finalize_features, ++ .get = trusty_virtio_get_config, ++ .set = trusty_virtio_set_config, ++ .get_status = trusty_virtio_get_status, ++ .set_status = trusty_virtio_set_status, ++ .reset = trusty_virtio_reset, ++ .find_vqs = trusty_virtio_find_vqs, ++ .del_vqs = trusty_virtio_del_vqs, ++ .bus_name = trusty_virtio_bus_name, ++}; ++ ++static int trusty_virtio_add_device(struct trusty_ctx *tctx, ++ struct fw_rsc_vdev *vdev_descr, ++ struct fw_rsc_vdev_vring *vr_descr, ++ void *config) ++{ ++ int i, ret; ++ struct trusty_vdev *tvdev; ++ ++ tvdev = kzalloc(sizeof(struct trusty_vdev) + ++ vdev_descr->num_of_vrings * sizeof(struct trusty_vring), ++ GFP_KERNEL); ++ if (!tvdev) { ++ dev_err(tctx->dev, "Failed to allocate VDEV\n"); ++ return -ENOMEM; ++ } ++ ++ /* setup vdev */ ++ tvdev->tctx = tctx; ++ tvdev->vdev.dev.parent = tctx->dev; ++ tvdev->vdev.id.device = vdev_descr->id; ++ tvdev->vdev.config = &trusty_virtio_config_ops; ++ tvdev->vdev_descr = vdev_descr; ++ tvdev->notifyid = vdev_descr->notifyid; ++ ++ /* setup config */ ++ tvdev->config = config; ++ tvdev->config_len = vdev_descr->config_len; ++ ++ /* setup vrings and vdev resource */ ++ tvdev->vring_num = vdev_descr->num_of_vrings; ++ ++ for (i = 0; i < tvdev->vring_num; i++, vr_descr++) { ++ struct trusty_vring *tvr = &tvdev->vrings[i]; ++ tvr->tvdev = tvdev; ++ tvr->vr_descr = vr_descr; ++ tvr->align = vr_descr->align; ++ tvr->elem_num = vr_descr->num; ++ tvr->notifyid = vr_descr->notifyid; ++ } ++ ++ /* register device */ ++ ret = register_virtio_device(&tvdev->vdev); ++ if (ret) { ++ dev_err(tctx->dev, ++ "Failed (%d) to register device dev type %u\n", ++ ret, vdev_descr->id); ++ goto err_register; ++ } ++ ++ /* add it to tracking list */ ++ list_add_tail(&tvdev->node, &tctx->vdev_list); ++ ++ return 0; ++ ++err_register: ++ kfree(tvdev); ++ return ret; 
++} ++ ++static int trusty_parse_device_descr(struct trusty_ctx *tctx, ++ void *descr_va, size_t descr_sz) ++{ ++ u32 i; ++ struct resource_table *descr = descr_va; ++ ++ if (descr_sz < sizeof(*descr)) { ++ dev_err(tctx->dev, "descr table is too small (0x%x)\n", ++ (int)descr_sz); ++ return -ENODEV; ++ } ++ ++ if (descr->ver != RSC_DESCR_VER) { ++ dev_err(tctx->dev, "unexpected descr ver (0x%x)\n", ++ (int)descr->ver); ++ return -ENODEV; ++ } ++ ++ if (descr_sz < (sizeof(*descr) + descr->num * sizeof(u32))) { ++ dev_err(tctx->dev, "descr table is too small (0x%x)\n", ++ (int)descr->ver); ++ return -ENODEV; ++ } ++ ++ for (i = 0; i < descr->num; i++) { ++ struct fw_rsc_hdr *hdr; ++ struct fw_rsc_vdev *vd; ++ struct fw_rsc_vdev_vring *vr; ++ void *cfg; ++ size_t vd_sz; ++ ++ u32 offset = descr->offset[i]; ++ ++ if (offset >= descr_sz) { ++ dev_err(tctx->dev, "offset is out of bounds (%u)\n", ++ (uint)offset); ++ return -ENODEV; ++ } ++ ++ /* check space for rsc header */ ++ if ((descr_sz - offset) < sizeof(struct fw_rsc_hdr)) { ++ dev_err(tctx->dev, "no space for rsc header (%u)\n", ++ (uint)offset); ++ return -ENODEV; ++ } ++ hdr = (struct fw_rsc_hdr *)((u8 *)descr + offset); ++ offset += sizeof(struct fw_rsc_hdr); ++ ++ /* check type */ ++ if (hdr->type != RSC_VDEV) { ++ dev_err(tctx->dev, "unsupported rsc type (%u)\n", ++ (uint)hdr->type); ++ continue; ++ } ++ ++ /* got vdev: check space for vdev */ ++ if ((descr_sz - offset) < sizeof(struct fw_rsc_vdev)) { ++ dev_err(tctx->dev, "no space for vdev descr (%u)\n", ++ (uint)offset); ++ return -ENODEV; ++ } ++ vd = (struct fw_rsc_vdev *)((u8 *)descr + offset); ++ ++ /* check space for vrings and config area */ ++ vd_sz = sizeof(struct fw_rsc_vdev) + ++ vd->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) + ++ vd->config_len; ++ ++ if ((descr_sz - offset) < vd_sz) { ++ dev_err(tctx->dev, "no space for vdev (%u)\n", ++ (uint)offset); ++ return -ENODEV; ++ } ++ vr = (struct fw_rsc_vdev_vring *)vd->vring; ++ cfg = (void 
*)(vr + vd->num_of_vrings); ++ ++ trusty_virtio_add_device(tctx, vd, vr, cfg); ++ } ++ ++ return 0; ++} ++ ++static void _remove_devices_locked(struct trusty_ctx *tctx) ++{ ++ struct trusty_vdev *tvdev, *next; ++ ++ list_for_each_entry_safe(tvdev, next, &tctx->vdev_list, node) { ++ list_del(&tvdev->node); ++ unregister_virtio_device(&tvdev->vdev); ++ kfree(tvdev); ++ } ++} ++ ++static void trusty_virtio_remove_devices(struct trusty_ctx *tctx) ++{ ++ mutex_lock(&tctx->mlock); ++ _remove_devices_locked(tctx); ++ mutex_unlock(&tctx->mlock); ++} ++ ++static int trusty_virtio_add_devices(struct trusty_ctx *tctx) ++{ ++ int ret; ++ void *descr_va; ++ size_t descr_sz; ++ size_t descr_buf_sz; ++ ++ /* allocate buffer to load device descriptor into */ ++ descr_buf_sz = PAGE_SIZE; ++ descr_va = alloc_pages_exact(descr_buf_sz, GFP_KERNEL | __GFP_ZERO); ++ if (!descr_va) { ++ dev_err(tctx->dev, "Failed to allocate shared area\n"); ++ return -ENOMEM; ++ } ++ ++ /* load device descriptors */ ++ ret = trusty_load_device_descr(tctx, descr_va, descr_buf_sz); ++ if (ret < 0) { ++ dev_err(tctx->dev, "failed (%d) to load device descr\n", ret); ++ goto err_load_descr; ++ } ++ ++ descr_sz = (size_t)ret; ++ ++ mutex_lock(&tctx->mlock); ++ ++ /* parse device descriptor and add virtio devices */ ++ ret = trusty_parse_device_descr(tctx, descr_va, descr_sz); ++ if (ret) { ++ dev_err(tctx->dev, "failed (%d) to parse device descr\n", ret); ++ goto err_parse_descr; ++ } ++ ++ /* register call notifier */ ++ ret = trusty_call_notifier_register(tctx->dev->parent, ++ &tctx->call_notifier); ++ if (ret) { ++ dev_err(tctx->dev, "%s: failed (%d) to register notifier\n", ++ __func__, ret); ++ goto err_register_notifier; ++ } ++ ++ /* start virtio */ ++ ret = trusty_virtio_start(tctx, descr_va, descr_sz); ++ if (ret) { ++ dev_err(tctx->dev, "failed (%d) to start virtio\n", ret); ++ goto err_start_virtio; ++ } ++ ++ /* attach shared area */ ++ tctx->shared_va = descr_va; ++ tctx->shared_sz = 
descr_buf_sz; ++ ++ mutex_unlock(&tctx->mlock); ++ ++ return 0; ++ ++err_start_virtio: ++ trusty_call_notifier_unregister(tctx->dev->parent, ++ &tctx->call_notifier); ++ cancel_work_sync(&tctx->check_vqs); ++err_register_notifier: ++err_parse_descr: ++ _remove_devices_locked(tctx); ++ mutex_unlock(&tctx->mlock); ++ cancel_work_sync(&tctx->kick_vqs); ++ trusty_virtio_stop(tctx, descr_va, descr_sz); ++err_load_descr: ++ free_pages_exact(descr_va, descr_buf_sz); ++ return ret; ++} ++ ++static int trusty_virtio_probe(struct platform_device *pdev) ++{ ++ int ret; ++ struct trusty_ctx *tctx; ++ ++ dev_info(&pdev->dev, "initializing\n"); ++ ++ tctx = kzalloc(sizeof(*tctx), GFP_KERNEL); ++ if (!tctx) { ++ dev_err(&pdev->dev, "Failed to allocate context\n"); ++ return -ENOMEM; ++ } ++ ++ tctx->dev = &pdev->dev; ++ tctx->call_notifier.notifier_call = trusty_call_notify; ++ mutex_init(&tctx->mlock); ++ INIT_LIST_HEAD(&tctx->vdev_list); ++ INIT_WORK(&tctx->check_vqs, check_all_vqs); ++ INIT_WORK(&tctx->kick_vqs, kick_vqs); ++ platform_set_drvdata(pdev, tctx); ++ ++ ret = trusty_virtio_add_devices(tctx); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to add virtio devices\n"); ++ goto err_add_devices; ++ } ++ ++ dev_info(&pdev->dev, "initializing done\n"); ++ return 0; ++ ++err_add_devices: ++ kfree(tctx); ++ return ret; ++} ++ ++static int trusty_virtio_remove(struct platform_device *pdev) ++{ ++ struct trusty_ctx *tctx = platform_get_drvdata(pdev); ++ ++ dev_err(&pdev->dev, "removing\n"); ++ ++ /* unregister call notifier and wait until workqueue is done */ ++ trusty_call_notifier_unregister(tctx->dev->parent, ++ &tctx->call_notifier); ++ cancel_work_sync(&tctx->check_vqs); ++ ++ /* remove virtio devices */ ++ trusty_virtio_remove_devices(tctx); ++ cancel_work_sync(&tctx->kick_vqs); ++ ++ /* notify remote that shared area goes away */ ++ trusty_virtio_stop(tctx, tctx->shared_va, tctx->shared_sz); ++ ++ /* free shared area */ ++ free_pages_exact(tctx->shared_va, 
tctx->shared_sz); ++ ++ /* free context */ ++ kfree(tctx); ++ return 0; ++} ++ ++static const struct of_device_id trusty_of_match[] = { ++ { ++ .compatible = "android,trusty-virtio-v1", ++ }, ++}; ++ ++MODULE_DEVICE_TABLE(of, trusty_of_match); ++ ++static struct platform_driver trusty_virtio_driver = { ++ .probe = trusty_virtio_probe, ++ .remove = trusty_virtio_remove, ++ .driver = { ++ .name = "trusty-virtio", ++ .owner = THIS_MODULE, ++ .of_match_table = trusty_of_match, ++ }, ++}; ++ ++module_platform_driver(trusty_virtio_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Trusty virtio driver"); +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index aaad5cee6143..a2be2e3579f3 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -78,4 +78,12 @@ + #define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9) + #define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10) + ++/* TRUSTED_OS entity calls */ ++#define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20) ++#define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21) ++#define SMC_SC_VIRTIO_STOP SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22) ++ ++#define SMC_SC_VDEV_RESET SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23) ++#define SMC_SC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24) ++ + #endif /* __LINUX_TRUSTY_SMCALL_H */ +-- +2.17.1 + diff --git a/patches/0010-usb-typec-ucsi-Start-using-struct-typec_operatio.usb-typec b/patches/0010-usb-typec-ucsi-Start-using-struct-typec_operatio.usb-typec new file mode 100644 index 0000000000..6f1a53fc3d --- /dev/null +++ b/patches/0010-usb-typec-ucsi-Start-using-struct-typec_operatio.usb-typec @@ -0,0 +1,80 @@ +From 3b16673e93ccaede600827878bf635d0811f65b0 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Tue, 1 Oct 2019 12:21:39 +0300 +Subject: [PATCH 10/18] usb: typec: ucsi: Start using struct typec_operations + +Supplying the operation 
callbacks as part of a struct +typec_operations instead of as part of struct +typec_capability during port registration. + +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/ucsi/ucsi.c | 22 +++++++++++----------- + 1 file changed, 11 insertions(+), 11 deletions(-) + +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index ba288b964dc8..edd722fb88b8 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -17,9 +17,6 @@ + #include "ucsi.h" + #include "trace.h" + +-#define to_ucsi_connector(_cap_) container_of(_cap_, struct ucsi_connector, \ +- typec_cap) +- + /* + * UCSI_TIMEOUT_MS - PPM communication timeout + * +@@ -713,10 +710,9 @@ static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl) + return ret; + } + +-static int +-ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role) ++static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) + { +- struct ucsi_connector *con = to_ucsi_connector(cap); ++ struct ucsi_connector *con = typec_get_drvdata(port); + struct ucsi_control ctrl; + int ret = 0; + +@@ -748,10 +744,9 @@ ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role) + return ret < 0 ? 
ret : 0; + } + +-static int +-ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role) ++static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) + { +- struct ucsi_connector *con = to_ucsi_connector(cap); ++ struct ucsi_connector *con = typec_get_drvdata(port); + struct ucsi_control ctrl; + int ret = 0; + +@@ -788,6 +783,11 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role) + return ret; + } + ++static const struct typec_operations ucsi_ops = { ++ .dr_set = ucsi_dr_swap, ++ .pr_set = ucsi_pr_swap ++}; ++ + static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con) + { + struct fwnode_handle *fwnode; +@@ -843,8 +843,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + *accessory = TYPEC_ACCESSORY_DEBUG; + + cap->fwnode = ucsi_find_fwnode(con); +- cap->dr_set = ucsi_dr_swap; +- cap->pr_set = ucsi_pr_swap; ++ cap->driver_data = con; ++ cap->ops = &ucsi_ops; + + /* Register the connector */ + con->port = typec_register_port(ucsi->dev, cap); +-- +2.17.1 + diff --git a/patches/0011-ASoC-Intel-Skylake-Unhardcode-dsp-cores-number.audio b/patches/0011-ASoC-Intel-Skylake-Unhardcode-dsp-cores-number.audio new file mode 100644 index 0000000000..e708715d7f --- /dev/null +++ b/patches/0011-ASoC-Intel-Skylake-Unhardcode-dsp-cores-number.audio @@ -0,0 +1,212 @@ +From 786b00db2bd6e083ddb07c99d7b22dcd6f4b50d4 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Fri, 22 Mar 2019 20:11:59 +0100 +Subject: [PATCH 011/193] ASoC: Intel: Skylake: Unhardcode dsp cores number + +While on the quest for unhardcoding the driver, use skl hw_cfg property +instead of hardcoded value to retrieve number of supported dsp cores. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-messages.c | 28 ------------------------- + sound/soc/intel/skylake/skl-sst-dsp.c | 29 +++++++++++++++++++------- + sound/soc/intel/skylake/skl-sst-dsp.h | 2 +- + sound/soc/intel/skylake/skl-sst.c | 4 +++- + sound/soc/intel/skylake/skl.h | 1 - + 5 files changed, 26 insertions(+), 38 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index e91fb3d4cb5e..592da0803150 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -170,56 +170,48 @@ static struct skl_dsp_loader_ops bxt_get_loader_ops(void) + static const struct skl_dsp_ops dsp_ops[] = { + { + .id = 0x9d70, +- .num_cores = 2, + .loader_ops = skl_get_loader_ops, + .init = skl_sst_dsp_init, + .cleanup = skl_sst_dsp_cleanup + }, + { + .id = 0x9d71, +- .num_cores = 2, + .loader_ops = skl_get_loader_ops, + .init = skl_sst_dsp_init, + .cleanup = skl_sst_dsp_cleanup + }, + { + .id = 0x5a98, +- .num_cores = 2, + .loader_ops = bxt_get_loader_ops, + .init = bxt_sst_dsp_init, + .cleanup = bxt_sst_dsp_cleanup + }, + { + .id = 0x3198, +- .num_cores = 2, + .loader_ops = bxt_get_loader_ops, + .init = bxt_sst_dsp_init, + .cleanup = bxt_sst_dsp_cleanup + }, + { + .id = 0x9dc8, +- .num_cores = 4, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, + .cleanup = cnl_sst_dsp_cleanup + }, + { + .id = 0xa348, +- .num_cores = 4, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, + .cleanup = cnl_sst_dsp_cleanup + }, + { + .id = 0x02c8, +- .num_cores = 4, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, + .cleanup = cnl_sst_dsp_cleanup + }, + { + .id = 0x06c8, +- .num_cores = 4, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, + .cleanup = cnl_sst_dsp_cleanup +@@ -245,7 +237,6 @@ int skl_init_dsp(struct skl_dev *skl) + struct skl_dsp_loader_ops loader_ops; + int irq = bus->irq; + const struct skl_dsp_ops 
*ops; +- struct skl_dsp_cores *cores; + int ret; + + /* enable ppcap interrupt */ +@@ -274,29 +265,10 @@ int skl_init_dsp(struct skl_dev *skl) + goto unmap_mmio; + + skl->dsp_ops = ops; +- cores = &skl->cores; +- cores->count = ops->num_cores; +- +- cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL); +- if (!cores->state) { +- ret = -ENOMEM; +- goto unmap_mmio; +- } +- +- cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count), +- GFP_KERNEL); +- if (!cores->usage_count) { +- ret = -ENOMEM; +- goto free_core_state; +- } +- + dev_dbg(bus->dev, "dsp registration status=%d\n", ret); + + return 0; + +-free_core_state: +- kfree(cores->state); +- + unmap_mmio: + iounmap(mmio_base); + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c +index 0eecf26986f9..15acbe80711e 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.c ++++ b/sound/soc/intel/skylake/skl-sst-dsp.c +@@ -8,7 +8,7 @@ + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + #include +- ++#include + #include "../common/sst-dsp.h" + #include "../common/sst-ipc.h" + #include "../common/sst-dsp-priv.h" +@@ -31,18 +31,33 @@ void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state) + * successful first boot. 
Hence core 0 will be running and other cores + * will be reset + */ +-void skl_dsp_init_core_state(struct sst_dsp *ctx) ++int skl_dsp_init_core_state(struct sst_dsp *ctx) + { + struct skl_dev *skl = ctx->thread_context; ++ struct skl_dsp_cores *cores = &skl->cores; + int i; + +- skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING; +- skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1; ++ cores->count = skl->hw_cfg.dsp_cores; ++ cores->state = kcalloc(cores->count, ++ sizeof(*cores->state), GFP_KERNEL); ++ if (!cores->state) ++ return -ENOMEM; ++ ++ cores->usage_count = kcalloc(cores->count, ++ sizeof(*cores->usage_count), GFP_KERNEL); ++ if (!cores->usage_count) { ++ kfree(cores->state); ++ return -ENOMEM; ++ } ++ ++ cores->state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING; ++ cores->usage_count[SKL_DSP_CORE0_ID] = 1; + +- for (i = SKL_DSP_CORE0_ID + 1; i < skl->cores.count; i++) { +- skl->cores.state[i] = SKL_DSP_RESET; +- skl->cores.usage_count[i] = 0; ++ for (i = SKL_DSP_CORE0_ID + 1; i < cores->count; i++) { ++ cores->state[i] = SKL_DSP_RESET; ++ cores->usage_count[i] = 0; + } ++ return 0; + } + EXPORT_SYMBOL_GPL(skl_dsp_init_core_state); + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index f181e7e3e003..ecf6d526f2fc 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -215,7 +215,7 @@ int skl_dsp_acquire_irq(struct sst_dsp *sst); + bool is_skl_dsp_running(struct sst_dsp *ctx); + + unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx); +-void skl_dsp_init_core_state(struct sst_dsp *ctx); ++int skl_dsp_init_core_state(struct sst_dsp *ctx); + int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask); + int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask); + int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask); +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index dc92208b67f5..3553fcf9f930 
100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -588,7 +588,9 @@ int skl_sst_init_fw(struct skl_dev *skl) + goto exit; + } + +- skl_dsp_init_core_state(sst); ++ ret = skl_dsp_init_core_state(sst); ++ if (ret < 0) ++ goto exit; + + library_load: + if (skl->lib_count > 1) { +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index e5bc6d6fda21..6ddf690cd068 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -154,7 +154,6 @@ struct skl_machine_pdata { + + struct skl_dsp_ops { + int id; +- unsigned int num_cores; + struct skl_dsp_loader_ops (*loader_ops)(void); + int (*init)(struct device *dev, void __iomem *mmio_base, + int irq, const char *fw_name, +-- +2.17.1 + diff --git a/patches/0011-PWM-add-DesignWare-PWM-Controller-Driver.felipeb-5.4 b/patches/0011-PWM-add-DesignWare-PWM-Controller-Driver.felipeb-5.4 new file mode 100644 index 0000000000..45b7093075 --- /dev/null +++ b/patches/0011-PWM-add-DesignWare-PWM-Controller-Driver.felipeb-5.4 @@ -0,0 +1,400 @@ +From 0eae404f94028e3a7cc79f7d03d4d23c9284b18a Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Mon, 3 Sep 2018 11:21:03 +0300 +Subject: [PATCH 11/14] PWM: add DesignWare PWM Controller Driver + +Introduce driver for Synopsys DesignWare PWM Controller used on some +of the newest Intel Atom devices. + +Signed-off-by: Felipe Balbi +--- + drivers/pwm/Kconfig | 6 + + drivers/pwm/Makefile | 1 + + drivers/pwm/pwm-dwc.c | 346 ++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 353 insertions(+) + create mode 100644 drivers/pwm/pwm-dwc.c + +diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig +index e3a2518503ed..5972496d5a62 100644 +--- a/drivers/pwm/Kconfig ++++ b/drivers/pwm/Kconfig +@@ -150,6 +150,12 @@ config PWM_CROS_EC + PWM driver for exposing a PWM attached to the ChromeOS Embedded + Controller. 
+ ++config PWM_DWC ++ tristate "DesignWare PWM Controller" ++ depends on PCI ++ help ++ PWM driver for Synopsys DWC PWM Controller attached to a PCI bus. ++ + config PWM_EP93XX + tristate "Cirrus Logic EP93xx PWM support" + depends on ARCH_EP93XX +diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile +index 26326adf71d7..96fd1aa61a59 100644 +--- a/drivers/pwm/Makefile ++++ b/drivers/pwm/Makefile +@@ -13,6 +13,7 @@ obj-$(CONFIG_PWM_BRCMSTB) += pwm-brcmstb.o + obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o + obj-$(CONFIG_PWM_CRC) += pwm-crc.o + obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o ++obj-$(CONFIG_PWM_DWC) += pwm-dwc.o + obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o + obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o + obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o +diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c +new file mode 100644 +index 000000000000..097e14c4c49d +--- /dev/null ++++ b/drivers/pwm/pwm-dwc.c +@@ -0,0 +1,346 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/** ++ * pwm-dwc.c - DesignWare PWM Controller ++ * ++ * Copyright (C) 2018 Intel Corporation ++ * ++ * Author: Felipe Balbi ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define PCI_DEVICE_ID_EHLLP 0x4bb7 ++ ++#define DWC_TIM_LD_CNT(n) ((n) * 0x14) ++#define DWC_TIM_LD_CNT2(n) (((n) * 4) + 0xb0) ++#define DWC_TIM_CUR_VAL(n) (((n) * 0x14) + 0x04) ++#define DWC_TIM_CTRL(n) (((n) * 0x14) + 0x08) ++#define DWC_TIM_EOI(n) (((n) * 0x14) + 0x0c) ++#define DWC_TIM_INT_STS(n) (((n) * 0x14) + 0x10) ++ ++#define DWC_TIMERS_INT_STS 0xa0 ++#define DWC_TIMERS_EOI 0xa4 ++#define DWC_TIMERS_RAW_INT_STS 0xa8 ++#define DWC_TIMERS_COMP_VERSION 0xac ++ ++#define DWC_TIMERS_TOTAL 8 ++ ++/* Timer Control Register */ ++#define DWC_TIM_CTRL_EN BIT(0) ++#define DWC_TIM_CTRL_MODE BIT(1) ++#define DWC_TIM_CTRL_MODE_FREE (0 << 1) ++#define DWC_TIM_CTRL_MODE_USER (1 << 1) ++#define DWC_TIM_CTRL_INT_MASK BIT(2) ++#define DWC_TIM_CTRL_PWM BIT(3) ++ ++struct 
dwc_pwm_driver_data { ++ unsigned long clk_period_ns; ++ int npwm; ++}; ++ ++struct dwc_pwm { ++ struct pwm_chip pwm; ++ struct device *dev; ++ struct mutex lock; ++ ++ unsigned long clk_period_ns; ++ unsigned int version; ++ ++ void __iomem *base; ++ ++ u32 saved_registers[24]; ++}; ++#define to_dwc(p) (container_of((p), struct dwc_pwm, pwm)) ++ ++static inline u32 dwc_readl(void __iomem *base, u32 offset) ++{ ++ return readl(base + offset); ++} ++ ++static inline void dwc_writel(void __iomem *base, u32 offset, u32 value) ++{ ++ writel(value, base + offset); ++} ++ ++static void __dwc_configure(struct dwc_pwm *dwc, int pwm, int duty_ns, ++ int period_ns) ++{ ++ u32 ctrl; ++ u32 high; ++ u32 low; ++ ++ high = DIV_ROUND_CLOSEST(duty_ns, dwc->clk_period_ns) - 1; ++ low = DIV_ROUND_CLOSEST(period_ns - duty_ns, dwc->clk_period_ns) - 1; ++ ++ dwc_writel(dwc->base, DWC_TIM_LD_CNT(pwm), low); ++ dwc_writel(dwc->base, DWC_TIM_LD_CNT2(pwm), high); ++ ++ ctrl = DWC_TIM_CTRL_MODE_USER | DWC_TIM_CTRL_PWM; ++ dwc_writel(dwc->base, DWC_TIM_CTRL(pwm), ctrl); ++} ++ ++static u32 __dwc_duty_ns(struct dwc_pwm *dwc, int pwm) ++{ ++ u32 duty; ++ ++ duty = dwc_readl(dwc->base, DWC_TIM_LD_CNT2(pwm)); ++ duty *= dwc->clk_period_ns; ++ duty += 1; ++ ++ return duty; ++} ++ ++static u32 __dwc_period_ns(struct dwc_pwm *dwc, int pwm, u32 duty) ++{ ++ u32 period; ++ ++ period = dwc_readl(dwc->base, DWC_TIM_LD_CNT(pwm)); ++ period *= dwc->clk_period_ns; ++ period += 1 + duty; ++ ++ return period; ++} ++ ++static bool __dwc_is_enabled(struct dwc_pwm *dwc, int pwm) ++{ ++ return !!dwc_readl(dwc->base, DWC_TIM_CTRL(pwm)) & DWC_TIM_CTRL_EN; ++} ++ ++static void __dwc_set_enable(struct dwc_pwm *dwc, int pwm, int enabled) ++{ ++ u32 reg; ++ ++ reg = dwc_readl(dwc->base, DWC_TIM_CTRL(pwm)); ++ ++ if (enabled) ++ reg |= DWC_TIM_CTRL_EN; ++ else ++ reg &= ~DWC_TIM_CTRL_EN; ++ ++ dwc_writel(dwc->base, DWC_TIM_CTRL(pwm), reg); ++} ++ ++static int dwc_pwm_apply(struct pwm_chip *pwm, struct pwm_device 
*pdev, ++ const struct pwm_state *state) ++{ ++ struct dwc_pwm *dwc = to_dwc(pwm); ++ ++ mutex_lock(&dwc->lock); ++ if (state->enabled) { ++ if (!pwm_is_enabled(pdev)) ++ pm_runtime_get_sync(dwc->dev); ++ } else if (pwm_is_enabled(pdev)) { ++ pm_runtime_mark_last_busy(dwc->dev); ++ pm_runtime_put_autosuspend(dwc->dev); ++ } ++ __dwc_configure(dwc, pdev->hwpwm, state->duty_cycle, state->period); ++ __dwc_set_enable(dwc, pdev->hwpwm, state->enabled); ++ mutex_unlock(&dwc->lock); ++ ++ return 0; ++} ++ ++static void dwc_pwm_get_state(struct pwm_chip *pwm, struct pwm_device *pdev, ++ struct pwm_state *state) ++{ ++ struct dwc_pwm *dwc = to_dwc(pwm); ++ ++ mutex_lock(&dwc->lock); ++ state->enabled = __dwc_is_enabled(dwc, pdev->hwpwm); ++ state->duty_cycle = __dwc_duty_ns(dwc, pdev->hwpwm); ++ state->period = __dwc_period_ns(dwc, pdev->hwpwm, state->duty_cycle); ++ mutex_unlock(&dwc->lock); ++} ++ ++static const struct pwm_ops dwc_pwm_ops = { ++ .apply = dwc_pwm_apply, ++ .get_state = dwc_pwm_get_state, ++ .owner = THIS_MODULE, ++}; ++ ++static int dwc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) ++{ ++ struct dwc_pwm_driver_data *data; ++ struct dwc_pwm *dwc; ++ struct device *dev; ++ int ret; ++ int i; ++ ++ data = (struct dwc_pwm_driver_data *) id->driver_data; ++ dev = &pci->dev; ++ ++ dwc = devm_kzalloc(&pci->dev, sizeof(*dwc), GFP_KERNEL); ++ if (!dwc) { ++ ret = -ENOMEM; ++ goto err0; ++ } ++ ++ dwc->dev = dev; ++ dwc->clk_period_ns = data->clk_period_ns; ++ ++ ret = pcim_enable_device(pci); ++ if (ret) ++ return ret; ++ ++ pci_set_master(pci); ++ ++ ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci)); ++ if (ret) ++ return ret; ++ ++ dwc->base = pcim_iomap_table(pci)[0]; ++ if (!dwc->base) { ++ ret = -ENOMEM; ++ goto err1; ++ } ++ ++ dwc->version = dwc_readl(dwc->base, DWC_TIMERS_COMP_VERSION); ++ ++ /* mask all interrupts and disable all timers */ ++ for (i = 0; i < data->npwm; i++) { ++ dwc_writel(dwc->base, DWC_TIM_CTRL(i), 0); ++ 
dwc_writel(dwc->base, DWC_TIM_LD_CNT(i), 0); ++ dwc_writel(dwc->base, DWC_TIM_CUR_VAL(i), 0); ++ } ++ ++ mutex_init(&dwc->lock); ++ pci_set_drvdata(pci, dwc); ++ ++ dwc->pwm.dev = dev; ++ dwc->pwm.ops = &dwc_pwm_ops; ++ dwc->pwm.npwm = data->npwm; ++ dwc->pwm.base = -1; ++ ++ ret = pwmchip_add(&dwc->pwm); ++ if (ret) ++ goto err2; ++ ++ pm_runtime_set_autosuspend_delay(dev, 1000); ++ pm_runtime_use_autosuspend(dev); ++ pm_runtime_put_noidle(dev); ++ pm_runtime_allow(dev); ++ ++ return 0; ++ ++err2: ++ pci_iounmap(pci, dwc->base); ++ ++err1: ++ pci_release_region(pci, 0); ++ ++err0: ++ return ret; ++} ++ ++static void dwc_pci_remove(struct pci_dev *pci) ++{ ++ struct dwc_pwm *dwc = pci_get_drvdata(pci); ++ int i; ++ ++ pm_runtime_forbid(&pci->dev); ++ pm_runtime_get_noresume(&pci->dev); ++ ++ for (i = 0; i < dwc->pwm.npwm; i++) ++ pwm_disable(&dwc->pwm.pwms[i]); ++ ++ pwmchip_remove(&dwc->pwm); ++ pci_iounmap(pci, dwc->base); ++ pci_release_region(pci, 0); ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int dwc_pci_suspend(struct device *dev) ++{ ++ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); ++ struct dwc_pwm *dwc = pci_get_drvdata(pdev); ++ int i, index_base; ++ ++ for (i = 0; i < DWC_TIMERS_TOTAL; i++) { ++ index_base = i * 3; ++ dwc->saved_registers[index_base] = ++ dwc_readl(dwc->base, DWC_TIM_LD_CNT(i)); ++ dwc->saved_registers[index_base+1] = ++ dwc_readl(dwc->base, DWC_TIM_LD_CNT2(i)); ++ dwc->saved_registers[index_base+2] = ++ dwc_readl(dwc->base, DWC_TIM_CTRL(i)); ++ } ++ ++ return 0; ++} ++ ++static int dwc_pci_resume(struct device *dev) ++{ ++ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); ++ struct dwc_pwm *dwc = pci_get_drvdata(pdev); ++ int i, index_base; ++ ++ for (i = 0; i < DWC_TIMERS_TOTAL; i++) { ++ index_base = i * 3; ++ dwc_writel(dwc->base, DWC_TIM_LD_CNT(i), ++ dwc->saved_registers[index_base]); ++ dwc_writel(dwc->base, DWC_TIM_LD_CNT2(i), ++ dwc->saved_registers[index_base+1]); ++ dwc_writel(dwc->base, 
DWC_TIM_CTRL(i), ++ dwc->saved_registers[index_base+2]); ++ } ++ ++ return 0; ++} ++ ++static int dwc_pci_runtime_suspend(struct device *dev) ++{ ++ /* ++ * The PCI core will handle transition to D3 automatically. We only ++ * need to provide runtime PM hooks for that to happen. ++ */ ++ return 0; ++} ++ ++static int dwc_pci_runtime_resume(struct device *dev) ++{ ++ return 0; ++} ++#endif ++ ++static const struct dev_pm_ops pwm_dwc_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(dwc_pci_suspend, dwc_pci_resume) ++ SET_RUNTIME_PM_OPS(dwc_pci_runtime_suspend, ++ dwc_pci_runtime_resume, NULL) ++}; ++ ++static const struct dwc_pwm_driver_data ehl_driver_data = { ++ .npwm = 8, ++ .clk_period_ns = 10, ++}; ++ ++static const struct pci_device_id dwc_pci_id_table[] = { ++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EHLLP), ++ (kernel_ulong_t) &ehl_driver_data, ++ }, ++ { } /* Terminating Entry */ ++}; ++MODULE_DEVICE_TABLE(pci, dwc_pci_id_table); ++ ++static struct pci_driver dwc_pwm_driver = { ++ .name = "pwm-dwc", ++ .probe = dwc_pci_probe, ++ .remove = dwc_pci_remove, ++ .id_table = dwc_pci_id_table, ++ .driver = { ++ .pm = &pwm_dwc_pm_ops, ++ }, ++}; ++ ++module_pci_driver(dwc_pwm_driver); ++ ++MODULE_AUTHOR("Felipe Balbi "); ++MODULE_DESCRIPTION("DesignWare PWM Controller"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0011-Update-Kconfig-to-default-SEP-to-enabled-and-.sep-socwatch b/patches/0011-Update-Kconfig-to-default-SEP-to-enabled-and-.sep-socwatch new file mode 100644 index 0000000000..87ef063155 --- /dev/null +++ b/patches/0011-Update-Kconfig-to-default-SEP-to-enabled-and-.sep-socwatch @@ -0,0 +1,50 @@ +From 4deb58418baca7be83414b248f66342c4366a7e6 Mon Sep 17 00:00:00 2001 +From: Jon Moeller +Date: Fri, 29 Mar 2019 14:23:46 -0500 +Subject: [PATCH 11/27] Update Kconfig to default SEP to enabled and SoCWatch + to build in X86_64. 
+ +Signed-off-by: Jon Moeller +--- + drivers/platform/x86/sepdk/Kconfig | 1 + + drivers/platform/x86/socwatch/Kconfig | 2 +- + drivers/platform/x86/socwatchhv/Kconfig | 2 +- + 3 files changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/Kconfig b/drivers/platform/x86/sepdk/Kconfig +index 884c5055d304..b119ed6d7c1f 100755 +--- a/drivers/platform/x86/sepdk/Kconfig ++++ b/drivers/platform/x86/sepdk/Kconfig +@@ -7,6 +7,7 @@ config INTEL_SEP + SEP is a command line tool for doing hardware-based sampling using + event-based sampling (EBS). + depends on X86 || X86_64 ++ default y + + config SEP + tristate "SEP kernel driver" +diff --git a/drivers/platform/x86/socwatch/Kconfig b/drivers/platform/x86/socwatch/Kconfig +index 87a7ae205f2d..d6102101f128 100644 +--- a/drivers/platform/x86/socwatch/Kconfig ++++ b/drivers/platform/x86/socwatch/Kconfig +@@ -1,5 +1,5 @@ + menuconfig INTEL_SOCWATCH +- depends on X86 ++ depends on X86 || X86_64 + tristate "SocWatch Driver Support" + default m + help +diff --git a/drivers/platform/x86/socwatchhv/Kconfig b/drivers/platform/x86/socwatchhv/Kconfig +index 3226632de1fc..30354e416872 100644 +--- a/drivers/platform/x86/socwatchhv/Kconfig ++++ b/drivers/platform/x86/socwatchhv/Kconfig +@@ -1,5 +1,5 @@ + menuconfig INTEL_SOCWATCH_HV +- depends on X86 && ACRN_VHM && ACRN_SHARED_BUFFER ++ depends on (X86 || X86_64) && ACRN_VHM && ACRN_SHARED_BUFFER + tristate "SocWatch Hypervisor Driver Support" + default m + help +-- +2.17.1 + diff --git a/patches/0011-drm-i915-tgl-Enable-VD-HCP-MFX-sub-pipe-power-gating.drm b/patches/0011-drm-i915-tgl-Enable-VD-HCP-MFX-sub-pipe-power-gating.drm new file mode 100644 index 0000000000..d8b586f11f --- /dev/null +++ b/patches/0011-drm-i915-tgl-Enable-VD-HCP-MFX-sub-pipe-power-gating.drm @@ -0,0 +1,79 @@ +From 4e3d371603d7a1428fd72c84a0c66f53923e3e35 Mon Sep 17 00:00:00 2001 +From: Michel Thierry +Date: Fri, 23 Aug 2019 01:20:34 -0700 +Subject: [PATCH 011/690] drm/i915/tgl: Enable VD 
HCP/MFX sub-pipe power gating + +HCP/MFX power gating is disabled by default, turn it on for the vd units +available. User space will also issue a MI_FORCE_WAKEUP properly to +wake up proper subwell. + +During driver load, init_clock_gating happens after device_info_init_mmio +read the vdbox disable fuse register, so only present vd units will have +these enabled. + +BSpec: 14214 +HSDES: 1209977827 +Signed-off-by: Michel Thierry +Reviewed-by: Lucas De Marchi +Signed-off-by: Lucas De Marchi +Reviewed-by: Tony Ye +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-3-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/i915_reg.h | 4 ++++ + drivers/gpu/drm/i915/intel_pm.c | 18 +++++++++++++++++- + 2 files changed, 21 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index a092b34c269d..02e1ef10c47e 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -8615,6 +8615,10 @@ enum { + #define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0) + #define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1) + ++#define POWERGATE_ENABLE _MMIO(0xa210) ++#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3) ++#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4) ++ + #define GTFIFODBG _MMIO(0x120000) + #define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) + #define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13) +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index 75ee027abb80..d3ea193cd093 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -9078,6 +9078,22 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) + _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); + } + ++static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) ++{ ++ u32 vd_pg_enable = 0; ++ unsigned int i; ++ ++ /* This is not a WA. 
Enable VD HCP & MFX_ENC powergate */ ++ for (i = 0; i < I915_MAX_VCS; i++) { ++ if (HAS_ENGINE(dev_priv, _VCS(i))) ++ vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) | ++ VDN_MFX_POWERGATE_ENABLE(i); ++ } ++ ++ I915_WRITE(POWERGATE_ENABLE, ++ I915_READ(POWERGATE_ENABLE) | vd_pg_enable); ++} ++ + static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) + { + if (!HAS_PCH_CNP(dev_priv)) +@@ -9598,7 +9614,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv) + void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) + { + if (IS_GEN(dev_priv, 12)) +- dev_priv->display.init_clock_gating = nop_init_clock_gating; ++ dev_priv->display.init_clock_gating = tgl_init_clock_gating; + else if (IS_GEN(dev_priv, 11)) + dev_priv->display.init_clock_gating = icl_init_clock_gating; + else if (IS_CANNONLAKE(dev_priv)) +-- +2.17.1 + diff --git a/patches/0011-ishtp-Add-support-for-Intel-ishtp-eclite-driver.core-ehl b/patches/0011-ishtp-Add-support-for-Intel-ishtp-eclite-driver.core-ehl new file mode 100644 index 0000000000..3ae646b75d --- /dev/null +++ b/patches/0011-ishtp-Add-support-for-Intel-ishtp-eclite-driver.core-ehl @@ -0,0 +1,612 @@ +From b920989980d2c904f578937ee61a3094201712b4 Mon Sep 17 00:00:00 2001 +From: "K Naduvalath, Sumesh" +Date: Sat, 24 Aug 2019 23:04:26 +0530 +Subject: [PATCH 11/12] ishtp: Add support for Intel ishtp eclite driver + +This driver enables the OS to talk to eclite firmware on a dedicated +low power chip over ISH Transport Protocol (ISHTP) + +Signed-off-by: K Naduvalath, Sumesh +Change-Id: I85d748a21e813080caeee44855dc5de7601c2017 +--- + drivers/platform/x86/Kconfig | 8 + + drivers/platform/x86/Makefile | 1 + + drivers/platform/x86/intel_ishtp_eclite.c | 555 ++++++++++++++++++++++ + 3 files changed, 564 insertions(+) + create mode 100644 drivers/platform/x86/intel_ishtp_eclite.c + +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig +index ae21d08c65e8..e1c8598b4d82 100644 +--- 
a/drivers/platform/x86/Kconfig ++++ b/drivers/platform/x86/Kconfig +@@ -1335,6 +1335,14 @@ config PCENGINES_APU2 + To compile this driver as a module, choose M here: the module + will be called pcengines-apuv2. + ++config INTEL_ISHTP_ECLITE ++ tristate "Intel ISHTP eclite controller" ++ depends on INTEL_ISH_HID ++ depends on ACPI ++ help ++ If you say Y here, you get support for talking to eclite fw on a ++ dedicated low power chip over ISH Transport Protocol (ISHTP). ++ + source "drivers/platform/x86/intel_speed_select_if/Kconfig" + + endif # X86_PLATFORM_DEVICES +diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile +index 415104033060..a71682a127e5 100644 +--- a/drivers/platform/x86/Makefile ++++ b/drivers/platform/x86/Makefile +@@ -79,6 +79,7 @@ obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o + obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o + obj-$(CONFIG_INTEL_RST) += intel-rst.o + obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o ++obj-$(CONFIG_INTEL_ISHTP_ECLITE) += intel_ishtp_eclite.o + + obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o + obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o +diff --git a/drivers/platform/x86/intel_ishtp_eclite.c b/drivers/platform/x86/intel_ishtp_eclite.c +new file mode 100644 +index 000000000000..f2fbf768731b +--- /dev/null ++++ b/drivers/platform/x86/intel_ishtp_eclite.c +@@ -0,0 +1,555 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Intel ECLite opregion driver for talking to EClite firmware running on ++ * Intel Integrated Sensor Hub (ISH) using ISH Trasport protocol (ISHTP) ++ * ++ * Copyright (c) 2019, Intel Corporation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define ECLITE_DATA_OPREGION_ID 0x9E ++#define ECLITE_CMD_OPREGION_ID 0x9F ++ ++#define ECL_MSG_DATA 0x1 ++#define ECL_MSG_EVENT 0x2 ++ ++#define ECL_ISH_READ 0x1 ++#define ECL_ISH_WRITE 0x2 ++#define ECL_ISH_HEADER_VERSION 0 ++ ++#define ECL_CL_RX_RING_SIZE 8 ++#define ECL_CL_TX_RING_SIZE 4 ++ ++#define ECL_DATA_OPR_BUFLEN 384 ++ ++#define cmd_opr_offsetof(element) offsetof(struct opregion_cmd, element) ++#define cl_data_to_dev(opr_dev) ishtp_device((opr_dev)->cl_device) ++ ++#ifndef BITS_TO_BYTES ++#define BITS_TO_BYTES(x) (x/8) ++#endif ++ ++struct opregion_cmd { ++ unsigned int command; ++ unsigned int offset; ++ unsigned int length; ++ unsigned int event_id; ++}; ++ ++struct opregion_data { ++ char data[ECL_DATA_OPR_BUFLEN]; ++}; ++ ++struct opregion_context { ++ struct opregion_cmd cmd_area; ++ struct opregion_data data_area; ++}; ++ ++struct ecl_message_header { ++ uint32_t version:2; ++ uint32_t data_type:2; ++ uint32_t request_type:2; ++ uint32_t offset:9; ++ uint32_t data_len:9; ++ uint32_t event:8; ++}; ++ ++struct ecl_message { ++ struct ecl_message_header header; ++ char payload[ECL_DATA_OPR_BUFLEN]; ++}; ++ ++struct ishtp_opregion_dev { ++ struct opregion_context opr_context; ++ struct ishtp_cl *ecl_ishtp_cl; ++ struct ishtp_cl_device *cl_device; ++ struct ishtp_fw_client *fw_client; ++ struct ishtp_cl_rb *rb; ++ struct acpi_handle *acpi_handle; ++ unsigned int dsm_event_id; ++ wait_queue_head_t read_wait; ++ struct work_struct event_work; ++ struct work_struct reset_work; ++}; ++ ++/* eclite ishtp client UUID: 6a19cc4b-d760-4de3-b14d-f25ebd0fbcd9 */ ++static const guid_t ecl_ishtp_guid = ++ GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3, ++ 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9); ++ ++/* ACPI DSM UUID: 91d936a7-1f01-49c6-a6b4-72f00ad8d8a5 */ ++static const guid_t ecl_acpi_guid = ++ GUID_INIT(0x91d936a7, 0x1f01, 0x49c6, 
0xa6, ++ 0xb4, 0x72, 0xf0, 0x0a, 0xd8, 0xd8, 0xa5); ++ ++/** ++ * @ecl_ish_cl_read() - Read data from eclite FW ++ * ++ * @opr_dev - pointer to opregion device ++ * ++ * @This function issues a read request to eclite FW and waits until it ++ * receives a response. When response is received the read data is copied to ++ * opregion buffer. ++ */ ++static int ecl_ish_cl_read(struct ishtp_opregion_dev *opr_dev) ++{ ++ struct ecl_message_header header = { 0 }; ++ int len, rv; ++ ++ header.version = ECL_ISH_HEADER_VERSION; ++ header.data_type = ECL_MSG_DATA; ++ header.request_type = ECL_ISH_READ; ++ header.offset = opr_dev->opr_context.cmd_area.offset; ++ header.data_len = opr_dev->opr_context.cmd_area.length; ++ header.event = opr_dev->opr_context.cmd_area.event_id; ++ len = sizeof(header); ++ ++ rv = ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&header, len); ++ if (rv) { ++ dev_err(cl_data_to_dev(opr_dev), "ish-read : send failed\n"); ++ return rv; ++ } ++ ++ wait_event_interruptible(opr_dev->read_wait, opr_dev->rb != NULL); ++ opr_dev->rb = NULL; ++ ++ return 0; ++} ++ ++/** ++ * @ecl_ish_cl_write() - This function writes data to eclite FW. ++ * ++ * @opr_dev - pointer to opregion device ++ * ++ * This function writes data to eclite FW. 
++ */ ++static int ecl_ish_cl_write(struct ishtp_opregion_dev *opr_dev) ++{ ++ struct ecl_message message = { 0 }; ++ int len; ++ ++ message.header.version = ECL_ISH_HEADER_VERSION; ++ message.header.data_type = ECL_MSG_DATA; ++ message.header.request_type = ECL_ISH_WRITE; ++ message.header.offset = opr_dev->opr_context.cmd_area.offset; ++ message.header.data_len = opr_dev->opr_context.cmd_area.length; ++ message.header.event = opr_dev->opr_context.cmd_area.event_id; ++ len = sizeof(struct ecl_message_header) + message.header.data_len; ++ ++ memcpy(message.payload, ++ opr_dev->opr_context.data_area.data + message.header.offset, ++ message.header.data_len); ++ ++ return ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&message, len); ++ ++} ++ ++static acpi_status ++ecl_opregion_cmd_handler(u32 function, acpi_physical_address address, ++ u32 bits, u64 *value64, ++ void *handler_context, void *region_context) ++{ ++ struct ishtp_opregion_dev *opr_dev; ++ struct opregion_cmd *cmd; ++ ++ if (region_context == NULL || value64 == NULL) ++ return AE_BAD_PARAMETER; ++ ++ if (function == ACPI_READ) ++ return AE_ERROR; ++ ++ opr_dev = (struct ishtp_opregion_dev *) region_context; ++ cmd = &opr_dev->opr_context.cmd_area; ++ ++ switch (address) { ++ case cmd_opr_offsetof(command): ++ cmd->command = (u32)*value64; ++ ++ if (cmd->command == ECL_ISH_READ) ++ return ecl_ish_cl_read(opr_dev); ++ else if (cmd->command == ECL_ISH_WRITE) ++ return ecl_ish_cl_write(opr_dev); ++ ++ return AE_ERROR; ++ ++ case cmd_opr_offsetof(offset): ++ cmd->offset = (u32)*value64; ++ break; ++ case cmd_opr_offsetof(length): ++ cmd->length = (u32)*value64; ++ break; ++ case cmd_opr_offsetof(event_id): ++ cmd->event_id = (u32)*value64; ++ break; ++ default: ++ return AE_ERROR; ++ } ++ ++ return AE_OK; ++} ++ ++static acpi_status ++ecl_opregion_data_handler(u32 function, acpi_physical_address address, ++ u32 bits, u64 *value64, ++ void *handler_context, void *region_context) ++{ ++ struct 
ishtp_opregion_dev *opr_dev; ++ unsigned int bytes = BITS_TO_BYTES(bits); ++ void *data_addr; ++ ++ if (region_context == NULL || value64 == NULL) ++ return AE_BAD_PARAMETER; ++ ++ if (address + bytes > ECL_DATA_OPR_BUFLEN) ++ return AE_BAD_PARAMETER; ++ ++ opr_dev = (struct ishtp_opregion_dev *)region_context; ++ data_addr = &opr_dev->opr_context.data_area.data[address]; ++ ++ if (function == ACPI_READ) ++ memcpy(value64, data_addr, bytes); ++ else if (function == ACPI_WRITE) ++ memcpy(data_addr, value64, bytes); ++ else ++ return AE_BAD_PARAMETER; ++ ++ return AE_OK; ++} ++ ++static int acpi_opregion_init(struct ishtp_opregion_dev *opr_dev) ++{ ++ acpi_status status; ++ struct acpi_device *adev; ++ ++ /* find ECLite device and install opregion handlers */ ++ adev = acpi_dev_get_first_match_dev("INT3538", NULL, -1); ++ if (!adev) { ++ dev_err(cl_data_to_dev(opr_dev), "eclite ACPI device not found\n"); ++ return -ENODEV; ++ } ++ ++ opr_dev->acpi_handle = adev->handle; ++ acpi_dev_put(adev); ++ ++ status = acpi_install_address_space_handler(opr_dev->acpi_handle, ++ ECLITE_CMD_OPREGION_ID, ++ ecl_opregion_cmd_handler, ++ NULL, opr_dev); ++ if (ACPI_FAILURE(status)) { ++ dev_err(cl_data_to_dev(opr_dev), ++ "cmd space handler install failed\n"); ++ return -ENODEV; ++ } ++ ++ status = acpi_install_address_space_handler(opr_dev->acpi_handle, ++ ECLITE_DATA_OPREGION_ID, ++ ecl_opregion_data_handler, ++ NULL, opr_dev); ++ if (ACPI_FAILURE(status)) { ++ dev_err(cl_data_to_dev(opr_dev), ++ "data space handler install failed\n"); ++ ++ acpi_remove_address_space_handler(opr_dev->acpi_handle, ++ ECLITE_CMD_OPREGION_ID, ++ ecl_opregion_cmd_handler); ++ ++ return -ENODEV; ++ } ++ ++ dev_dbg(cl_data_to_dev(opr_dev), "Opregion handlers are installed\n"); ++ ++ return 0; ++} ++ ++static void ecl_acpi_invoke_dsm(struct work_struct *work) ++{ ++ struct ishtp_opregion_dev *opr_dev; ++ union acpi_object *obj; ++ ++ opr_dev = container_of(work, struct ishtp_opregion_dev, event_work); ++ 
++ obj = acpi_evaluate_dsm(opr_dev->acpi_handle, &ecl_acpi_guid, 0, ++ opr_dev->dsm_event_id, NULL); ++ if (!obj) { ++ dev_warn(cl_data_to_dev(opr_dev), "_DSM fn call failed\n"); ++ return; ++ } ++ ++ dev_dbg(cl_data_to_dev(opr_dev), "Exec DSM function code: %d success\n", ++ opr_dev->dsm_event_id); ++ ACPI_FREE(obj); ++} ++ ++static void ecl_ish_process_rx_data(struct ishtp_opregion_dev *opr_dev) ++{ ++ struct ecl_message *message = ++ (struct ecl_message *)opr_dev->rb->buffer.data; ++ ++ dev_dbg(cl_data_to_dev(opr_dev), ++ "[ish_rd] Resp: off : %x, len : %x\n", ++ message->header.offset, ++ message->header.data_len); ++ ++ memcpy(opr_dev->opr_context.data_area.data + message->header.offset, ++ message->payload, message->header.data_len); ++ ++ wake_up_interruptible(&opr_dev->read_wait); ++} ++ ++static void ecl_ish_process_rx_event(struct ishtp_opregion_dev *opr_dev) ++{ ++ struct ecl_message_header *header = ++ (struct ecl_message_header *)opr_dev->rb->buffer.data; ++ ++ dev_dbg(cl_data_to_dev(opr_dev), ++ "[ish_ev] Evt received: %8x\n", header->event); ++ ++ opr_dev->dsm_event_id = header->event; ++ ++ schedule_work(&opr_dev->event_work); ++ ++ opr_dev->rb = NULL; ++} ++ ++static void ecl_ishtp_cl_event_cb(struct ishtp_cl_device *cl_device) ++{ ++ struct ishtp_opregion_dev *opr_dev; ++ struct ecl_message_header *header; ++ struct ishtp_cl_rb *rb; ++ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device); ++ ++ opr_dev = ishtp_get_client_data(ecl_ishtp_cl); ++ ++ while ((rb = ishtp_cl_rx_get_rb(opr_dev->ecl_ishtp_cl)) != NULL) { ++ opr_dev->rb = rb; ++ header = (struct ecl_message_header *)rb->buffer.data; ++ ++ if (header->data_type == ECL_MSG_DATA) ++ ecl_ish_process_rx_data(opr_dev); ++ else if (header->data_type == ECL_MSG_EVENT) ++ ecl_ish_process_rx_event(opr_dev); ++ else ++ /* got an event with wrong data_type, ignore it */ ++ dev_err(cl_data_to_dev(opr_dev), ++ "[ish_cb] Received wrong data_type\n"); ++ ++ ishtp_cl_io_rb_recycle(rb); ++ } ++} ++ 
++static int ecl_ishtp_cl_init(struct ishtp_cl *ecl_ishtp_cl) ++{ ++ int rv; ++ struct ishtp_device *dev; ++ struct ishtp_fw_client *fw_client; ++ struct ishtp_opregion_dev *opr_dev = ++ ishtp_get_client_data(ecl_ishtp_cl); ++ ++ rv = ishtp_cl_link(ecl_ishtp_cl); ++ if (rv) { ++ dev_err(cl_data_to_dev(opr_dev), "ishtp_cl_link failed\n"); ++ return rv; ++ } ++ ++ dev = ishtp_get_ishtp_device(ecl_ishtp_cl); ++ ++ /* Connect to FW client */ ++ ishtp_set_tx_ring_size(ecl_ishtp_cl, ECL_CL_TX_RING_SIZE); ++ ishtp_set_rx_ring_size(ecl_ishtp_cl, ECL_CL_RX_RING_SIZE); ++ ++ fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_guid); ++ if (!fw_client) { ++ dev_err(cl_data_to_dev(opr_dev), "fw client not found\n"); ++ return -ENOENT; ++ } ++ ++ ishtp_cl_set_fw_client_id(ecl_ishtp_cl, ishtp_get_fw_client_id(fw_client)); ++ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_CONNECTING); ++ ++ rv = ishtp_cl_connect(ecl_ishtp_cl); ++ if (rv) { ++ dev_err(cl_data_to_dev(opr_dev), "client connect failed\n"); ++ ++ ishtp_cl_unlink(ecl_ishtp_cl); ++ return rv; ++ } ++ ++ dev_dbg(cl_data_to_dev(opr_dev), "Host connected to fw client\n"); ++ ++ return rv; ++} ++ ++static void ecl_ishtp_cl_deinit(struct ishtp_cl *ecl_ishtp_cl) ++{ ++ ishtp_cl_unlink(ecl_ishtp_cl); ++ ishtp_cl_flush_queues(ecl_ishtp_cl); ++ ishtp_cl_free(ecl_ishtp_cl); ++} ++ ++static void ecl_ishtp_cl_reset_handler(struct work_struct *work) ++{ ++ struct ishtp_opregion_dev *opr_dev; ++ struct ishtp_cl_device *cl_device; ++ struct ishtp_cl *ecl_ishtp_cl; ++ int rv; ++ ++ opr_dev = container_of(work, struct ishtp_opregion_dev, reset_work); ++ ++ cl_device = opr_dev->cl_device; ++ ecl_ishtp_cl = opr_dev->ecl_ishtp_cl; ++ ++ ecl_ishtp_cl_deinit(ecl_ishtp_cl); ++ ++ ecl_ishtp_cl = ishtp_cl_allocate(cl_device); ++ if (!ecl_ishtp_cl) ++ return; ++ ++ ishtp_set_drvdata(cl_device, ecl_ishtp_cl); ++ ishtp_set_client_data(ecl_ishtp_cl, opr_dev); ++ ++ opr_dev->ecl_ishtp_cl = ecl_ishtp_cl; ++ ++ rv = ecl_ishtp_cl_init(ecl_ishtp_cl); 
++ if (rv) { ++ ishtp_cl_free(ecl_ishtp_cl); ++ opr_dev->ecl_ishtp_cl = NULL; ++ } ++ ++ ishtp_register_event_cb(cl_device, ecl_ishtp_cl_event_cb); ++} ++ ++static int ecl_ishtp_cl_probe(struct ishtp_cl_device *cl_device) ++{ ++ struct ishtp_cl *ecl_ishtp_cl; ++ struct ishtp_opregion_dev *opr_dev; ++ int rv; ++ ++ opr_dev = devm_kzalloc(ishtp_device(cl_device), sizeof(*opr_dev), ++ GFP_KERNEL); ++ if (!opr_dev) ++ return -ENOMEM; ++ ++ ecl_ishtp_cl = ishtp_cl_allocate(cl_device); ++ if (!ecl_ishtp_cl) ++ return -ENOMEM; ++ ++ ishtp_set_drvdata(cl_device, ecl_ishtp_cl); ++ ishtp_set_client_data(ecl_ishtp_cl, opr_dev); ++ opr_dev->ecl_ishtp_cl = ecl_ishtp_cl; ++ opr_dev->cl_device = cl_device; ++ ++ init_waitqueue_head(&opr_dev->read_wait); ++ INIT_WORK(&opr_dev->event_work, ecl_acpi_invoke_dsm); ++ INIT_WORK(&opr_dev->reset_work, ecl_ishtp_cl_reset_handler); ++ ++ /* Initialize ish client device */ ++ rv = ecl_ishtp_cl_init(ecl_ishtp_cl); ++ if (rv) { ++ dev_err(cl_data_to_dev(opr_dev), "Client init failed\n"); ++ goto err_exit; ++ } ++ ++ dev_dbg(cl_data_to_dev(opr_dev), "eclite-ishtp client initialised\n"); ++ ++ /* Register a handler for eclite fw events */ ++ ishtp_register_event_cb(cl_device, ecl_ishtp_cl_event_cb); ++ ++ ishtp_get_device(cl_device); ++ ++ /* Now find ACPI device and init opregion handlers */ ++ rv = acpi_opregion_init(opr_dev); ++ if (rv) { ++ dev_err(cl_data_to_dev(opr_dev), "ACPI opregion init failed\n"); ++ ++ goto err_exit; ++ } ++ ++ /* Reprobe devices depending on ECLite - battery, fan, etc. 
*/ ++ acpi_walk_dep_device_list(opr_dev->acpi_handle); ++ ++ return 0; ++ ++err_exit: ++ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_DISCONNECTING); ++ ishtp_cl_disconnect(ecl_ishtp_cl); ++ ecl_ishtp_cl_deinit(ecl_ishtp_cl); ++ ++ ishtp_put_device(cl_device); ++ ++ return rv; ++} ++ ++static int ecl_ishtp_cl_remove(struct ishtp_cl_device *cl_device) ++{ ++ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device); ++ struct ishtp_opregion_dev *opr_dev = ++ ishtp_get_client_data(ecl_ishtp_cl); ++ ++ acpi_remove_address_space_handler(opr_dev->acpi_handle, ++ ECLITE_CMD_OPREGION_ID, ++ ecl_opregion_cmd_handler); ++ ++ acpi_remove_address_space_handler(opr_dev->acpi_handle, ++ ECLITE_DATA_OPREGION_ID, ++ ecl_opregion_data_handler); ++ ++ cancel_work_sync(&opr_dev->reset_work); ++ cancel_work_sync(&opr_dev->event_work); ++ ++ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_DISCONNECTING); ++ ishtp_cl_disconnect(ecl_ishtp_cl); ++ ecl_ishtp_cl_deinit(ecl_ishtp_cl); ++ ++ ishtp_put_device(cl_device); ++ ++ return 0; ++} ++ ++static int ecl_ishtp_cl_reset(struct ishtp_cl_device *cl_device) ++{ ++ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device); ++ struct ishtp_opregion_dev *opr_dev = ++ ishtp_get_client_data(ecl_ishtp_cl); ++ ++ schedule_work(&opr_dev->reset_work); ++ ++ return 0; ++} ++ ++static struct ishtp_cl_driver ecl_ishtp_cl_driver = { ++ .name = "ishtp-eclite", ++ .guid = &ecl_ishtp_guid, ++ .probe = ecl_ishtp_cl_probe, ++ .remove = ecl_ishtp_cl_remove, ++ .reset = ecl_ishtp_cl_reset, ++}; ++ ++static int __init ecl_ishtp_init(void) ++{ ++ return ishtp_cl_driver_register(&ecl_ishtp_cl_driver, THIS_MODULE); ++} ++ ++static void __exit ecl_ishtp_exit(void) ++{ ++ return ishtp_cl_driver_unregister(&ecl_ishtp_cl_driver); ++} ++ ++late_initcall(ecl_ishtp_init); ++module_exit(ecl_ishtp_exit); ++ ++MODULE_DESCRIPTION("ISH ISHTP eclite client opregion driver"); ++MODULE_AUTHOR("K Naduvalath, Sumesh "); ++ ++MODULE_LICENSE("GPL v2"); 
++MODULE_ALIAS("ishtp:*"); +-- +2.17.1 + diff --git a/patches/0011-mfd-intel-lpss-Make-driver-probe-asynchronous.lpss b/patches/0011-mfd-intel-lpss-Make-driver-probe-asynchronous.lpss new file mode 100644 index 0000000000..4a72905764 --- /dev/null +++ b/patches/0011-mfd-intel-lpss-Make-driver-probe-asynchronous.lpss @@ -0,0 +1,45 @@ +From bcc750310ad76103a43204629094a29fabf23f17 Mon Sep 17 00:00:00 2001 +From: Feng Tang +Date: Mon, 4 Jun 2018 15:17:42 +0800 +Subject: [PATCH 11/40] mfd: intel-lpss: Make driver probe asynchronous + +LPSS driver's probe takes quite some time, around 50ms to 100ms +on Apollo Lake platform for all the spi/uart/i2c controllers. Making +the driver probe async can help to reduce the boot time. + +Signed-off-by: Feng Tang +Cc: Andy Shevchenko +Cc: Mika Westerberg +Signed-off-by: Andy Shevchenko +--- + drivers/mfd/intel-lpss-acpi.c | 1 + + drivers/mfd/intel-lpss-pci.c | 1 + + 2 files changed, 2 insertions(+) + +diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c +index c8fe334b5fe8..dea828ba395b 100644 +--- a/drivers/mfd/intel-lpss-acpi.c ++++ b/drivers/mfd/intel-lpss-acpi.c +@@ -138,6 +138,7 @@ static struct platform_driver intel_lpss_acpi_driver = { + .name = "intel-lpss", + .acpi_match_table = intel_lpss_acpi_ids, + .pm = &intel_lpss_acpi_pm_ops, ++ .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + }; + +diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c +index 9355db29d2f9..2f3cf3be06b2 100644 +--- a/drivers/mfd/intel-lpss-pci.c ++++ b/drivers/mfd/intel-lpss-pci.c +@@ -322,6 +322,7 @@ static struct pci_driver intel_lpss_pci_driver = { + .remove = intel_lpss_pci_remove, + .driver = { + .pm = &intel_lpss_pci_pm_ops, ++ .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + }; + +-- +2.17.1 + diff --git a/patches/0011-net-stmmac-add-EHL-SGMII-2.5Gbps-PCI-info-and.connectivity b/patches/0011-net-stmmac-add-EHL-SGMII-2.5Gbps-PCI-info-and.connectivity new file mode 100644 index 0000000000..49f542c427 --- 
/dev/null +++ b/patches/0011-net-stmmac-add-EHL-SGMII-2.5Gbps-PCI-info-and.connectivity @@ -0,0 +1,37 @@ +From 30c2c8b0731e34bdf0caa181e2974eb067b268d9 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Sat, 27 Jul 2019 07:53:45 +0800 +Subject: [PATCH 011/108] net: stmmac: add EHL SGMII 2.5Gbps PCI info and PCI + ID + +Add EHL SGMII 2.5Gbps PCI info and PCI ID + +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 17ddf71f1fbe..3899a2e4154b 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -648,6 +648,7 @@ static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); + #define STMMAC_DEVICE_ID 0x1108 + #define STMMAC_EHL_RGMII1G_ID 0x4b30 + #define STMMAC_EHL_SGMII1G_ID 0x4b31 ++#define STMMAC_EHL_SGMII2G5_ID 0x4b32 + #define STMMAC_EHL_PSE0_RGMII1G_ID 0x4ba0 + #define STMMAC_EHL_PSE0_SGMII1G_ID 0x4ba1 + #define STMMAC_EHL_PSE0_SGMII2G5_ID 0x4ba2 +@@ -668,6 +669,7 @@ static const struct pci_device_id stmmac_id_table[] = { + STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_RGMII1G_ID, ehl_rgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII1G_ID, ehl_sgmii1g_pci_info), ++ STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII2G5_ID, ehl_sgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE0_RGMII1G_ID, + ehl_pse0_rgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_PSE0_SGMII1G_ID, +-- +2.17.1 + diff --git a/patches/0011-tools-rpmb-add-RPBM-access-tool.security b/patches/0011-tools-rpmb-add-RPBM-access-tool.security new file mode 100644 index 0000000000..c136b54c3b --- /dev/null +++ b/patches/0011-tools-rpmb-add-RPBM-access-tool.security @@ -0,0 +1,1211 @@ +From 6a79e07fa29a7fd3b7e94e0d804c0fb545766ac4 Mon Sep 17 00:00:00 
2001 +From: Tomas Winkler +Date: Sun, 20 Mar 2016 11:32:42 +0200 +Subject: [PATCH 11/65] tools rpmb: add RPBM access tool + +Add simple RPMB host testing tool. It can be used +to program key, write and read data block, and retrieve +write counter. + +V2: Resend. +V3: Fix missing objtool. +V4: Add verbose option. +V5: 1. Adjust to the new API. + 2. Exercise both request and sequence ioctls. +V6: 1. Add includes to openssl/rand.h and endian.h. + 2. Fix some signed, unsigned comparisons. + 3. Check results more thoroughly. + 4. use HOSTCFLAGS in compilation. + 5. Allocate frames dynamically. +V7: 1. Fix rpmb_alloc_frames, it has always allocated one frame instead of + requested number. + 2. Use an inline function instead of macro for rw blocking wrapper. +V8: 1. Free frames in write_counter functions. + 2. Enhance error and debug messages. + 3. Tool can be compiled statically if RPMB_STATIC is set + 4. Support for openssl 1.1.0 +V9: 1. Remove request_cmd() interface. + 2. Fix c&p error in rpmb_result_str + 3. Use new version and capability ioctls. + 4. Add get-info argument + 5. Use SPDX identifiers. + 6. Remove inlines conflicts with compiler_types.h + 7. 
Use __packed + +Change-Id: I914bb9918e7a555932b38b11950b22fd5187799b +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +--- + MAINTAINERS | 1 + + tools/Makefile | 14 +- + tools/rpmb/.gitignore | 2 + + tools/rpmb/Makefile | 41 ++ + tools/rpmb/rpmb.c | 1020 +++++++++++++++++++++++++++++++++++++++++ + 5 files changed, 1073 insertions(+), 5 deletions(-) + create mode 100644 tools/rpmb/.gitignore + create mode 100644 tools/rpmb/Makefile + create mode 100644 tools/rpmb/rpmb.c + +diff --git a/MAINTAINERS b/MAINTAINERS +index f9f579837aac..3863d92b1fe9 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -13984,6 +13984,7 @@ F: drivers/char/rpmb/* + F: include/uapi/linux/rpmb.h + F: include/linux/rpmb.h + F: Documentation/ABI/testing/sysfs-class-rpmb ++F: tools/rpmb/ + + RTL2830 MEDIA DRIVER + M: Antti Palosaari +diff --git a/tools/Makefile b/tools/Makefile +index 7e42f7b8bfa7..a4879173e679 100644 +--- a/tools/Makefile ++++ b/tools/Makefile +@@ -27,6 +27,7 @@ help: + @echo ' objtool - an ELF object analysis tool' + @echo ' pci - PCI tools' + @echo ' perf - Linux performance measurement and analysis tool' ++ @echo ' rpmb - Replay protected memory block access tool' + @echo ' selftests - various kernel selftests' + @echo ' spi - spi tools' + @echo ' tmon - thermal monitoring and tuning tool' +@@ -63,7 +64,7 @@ acpi: FORCE + cpupower: FORCE + $(call descend,power/$@) + +-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE ++cgroup firewire hv guest rpmb spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE + $(call descend,$@) + + liblockdep: FORCE +@@ -96,7 +97,7 @@ kvm_stat: FORCE + $(call descend,kvm/$@) + + all: acpi cgroup cpupower gpio hv firewire liblockdep \ +- perf selftests spi turbostat usb \ ++ perf rpmb selftests spi turbostat usb \ + virtio vm bpf x86_energy_perf_policy \ + tmon freefall iio objtool kvm_stat wmi \ + pci debugging +@@ -107,7 +108,7 @@ acpi_install: + 
cpupower_install: + $(call descend,power/$(@:_install=),install) + +-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install: ++cgroup_install firewire_install gpio_install hv_install iio_install rpmb_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install: + $(call descend,$(@:_install=),install) + + liblockdep_install: +@@ -130,7 +131,7 @@ kvm_stat_install: + + install: acpi_install cgroup_install cpupower_install gpio_install \ + hv_install firewire_install iio_install liblockdep_install \ +- perf_install selftests_install turbostat_install usb_install \ ++ perf_install rpmb_install selftests_install turbostat_install usb_install \ + virtio_install vm_install bpf_install x86_energy_perf_policy_install \ + tmon_install freefall_install objtool_install kvm_stat_install \ + wmi_install pci_install debugging_install intel-speed-select_install +@@ -160,6 +161,9 @@ perf_clean: + $(Q)mkdir -p $(PERF_O) . 
+ $(Q)$(MAKE) --no-print-directory -C perf O=$(PERF_O) subdir= clean + ++rpmb_clean: ++ $(call descend,$(@:_clean=),clean) ++ + selftests_clean: + $(call descend,testing/$(@:_clean=),clean) + +@@ -176,7 +180,7 @@ build_clean: + $(call descend,build,clean) + + clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \ +- perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \ ++ perf_clean rpmb_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \ + vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \ + freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \ + gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \ +diff --git a/tools/rpmb/.gitignore b/tools/rpmb/.gitignore +new file mode 100644 +index 000000000000..218f680548e6 +--- /dev/null ++++ b/tools/rpmb/.gitignore +@@ -0,0 +1,2 @@ ++*.o ++rpmb +diff --git a/tools/rpmb/Makefile b/tools/rpmb/Makefile +new file mode 100644 +index 000000000000..69fdd6a36556 +--- /dev/null ++++ b/tools/rpmb/Makefile +@@ -0,0 +1,41 @@ ++# SPDX-License-Identifier: GPL-2.0 ++include ../scripts/Makefile.include ++ ++CC ?= $(CROSS_COMPILE)gcc ++LD ?= $(CROSS_COMPILE)ld ++PKG_CONFIG = $(CROSS_COMPILE)pkg-config ++ ++ifeq ($(srctree),) ++srctree := $(patsubst %/,%,$(dir $(shell pwd))) ++srctree := $(patsubst %/,%,$(dir $(srctree))) ++#$(info Determined 'srctree' to be $(srctree)) ++endif ++ ++INSTALL = install ++prefix ?= /usr/local ++bindir = $(prefix)/bin ++ ++ ++CFLAGS += $(HOSTCFLAGS) ++CFLAGS += -D__EXPORTED_HEADERS__ ++CFLAGS += -Wall -Wextra ++ifdef RPMB_STATIC ++LDFLAGS += -pthread -static ++CFLAGS += -pthread -static ++PKG_STATIC = --static ++endif ++CFLAGS += -I$(srctree)/include/uapi -I$(srctree)/include ++LDLIBS += $(shell $(PKG_CONFIG) --libs $(PKG_STATIC) libcrypto) ++ ++prog := rpmb ++ ++all : $(prog) ++ ++$(prog): rpmb.o ++ ++clean : ++ $(RM) $(prog) *.o ++ ++install: $(prog) ++ $(INSTALL) -m755 -d 
$(DESTDIR)$(bindir) ++ $(INSTALL) $(prog) $(DESTDIR)$(bindir) +diff --git a/tools/rpmb/rpmb.c b/tools/rpmb/rpmb.c +new file mode 100644 +index 000000000000..6b0c2b74e8ce +--- /dev/null ++++ b/tools/rpmb/rpmb.c +@@ -0,0 +1,1020 @@ ++// SPDX-License-Identifier: BSD-3-Clause ++/* ++ * Copyright (C) 2016-2019 Intel Corp. All rights reserved ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "linux/rpmb.h" ++ ++#define RPMB_KEY_SIZE 32 ++#define RPMB_MAC_SIZE 32 ++#define RPMB_NONCE_SIZE 16 ++ ++static bool verbose; ++#define rpmb_dbg(fmt, ARGS...) do { \ ++ if (verbose) \ ++ fprintf(stderr, "rpmb: " fmt, ##ARGS); \ ++} while (0) ++ ++#define rpmb_msg(fmt, ARGS...) \ ++ fprintf(stderr, "rpmb: " fmt, ##ARGS) ++ ++#define rpmb_err(fmt, ARGS...) \ ++ fprintf(stderr, "rpmb: error: " fmt, ##ARGS) ++ ++static const char *rpmb_op_str(uint16_t op) ++{ ++#define RPMB_OP(_op) case RPMB_##_op: return #_op ++ ++ switch (op) { ++ RPMB_OP(PROGRAM_KEY); ++ RPMB_OP(GET_WRITE_COUNTER); ++ RPMB_OP(WRITE_DATA); ++ RPMB_OP(READ_DATA); ++ RPMB_OP(RESULT_READ); ++ break; ++ default: ++ return "unknown"; ++ } ++#undef RPMB_OP ++} ++ ++static const char *rpmb_result_str(enum rpmb_op_result result) ++{ ++#define str(x) #x ++#define RPMB_ERR(_res) case RPMB_ERR_##_res: \ ++ { if (result & RPMB_ERR_COUNTER_EXPIRED) \ ++ return "COUNTER_EXPIRE:" str(_res); \ ++ else \ ++ return str(_res); \ ++ } ++ ++ switch (result & 0x000F) { ++ RPMB_ERR(OK); ++ RPMB_ERR(GENERAL); ++ RPMB_ERR(AUTH); ++ RPMB_ERR(COUNTER); ++ RPMB_ERR(ADDRESS); ++ RPMB_ERR(WRITE); ++ RPMB_ERR(READ); ++ RPMB_ERR(NO_KEY); ++ break; ++ default: ++ return "unknown"; ++ } ++#undef RPMB_ERR ++#undef str ++}; ++ ++static void __dump_buffer(const char *buf) ++{ ++ fprintf(stderr, "%s\n", buf); ++} ++ ++static void ++dump_hex_buffer(const char 
*title, const void *buf, size_t len) ++{ ++#define PBUF_SZ (16 * 3) ++ const unsigned char *_buf = (const unsigned char *)buf; ++ char pbuf[PBUF_SZ]; ++ int j = 0; ++ ++ if (title) ++ fprintf(stderr, "%s\n", title); ++ while (len-- > 0) { ++ snprintf(&pbuf[j], PBUF_SZ - j, "%02X ", *_buf++); ++ j += 3; ++ if (j == PBUF_SZ) { ++ __dump_buffer(pbuf); ++ j = 0; ++ } ++ } ++ if (j) ++ __dump_buffer(pbuf); ++} ++ ++static int open_dev_file(const char *devfile, struct rpmb_ioc_cap_cmd *cap) ++{ ++ struct rpmb_ioc_ver_cmd ver; ++ int fd; ++ int ret; ++ ++ fd = open(devfile, O_RDWR); ++ if (fd < 0) ++ rpmb_err("Cannot open: %s: %s.\n", devfile, strerror(errno)); ++ ++ ret = ioctl(fd, RPMB_IOC_VER_CMD, &ver); ++ if (ret < 0) { ++ rpmb_err("ioctl failure %d: %s.\n", ret, strerror(errno)); ++ goto err; ++ } ++ ++ printf("RPMB API Version %X\n", ver.api_version); ++ ++ ret = ioctl(fd, RPMB_IOC_CAP_CMD, cap); ++ if (ret < 0) { ++ rpmb_err("ioctl failure %d: %s.\n", ret, strerror(errno)); ++ goto err; ++ } ++ ++ rpmb_dbg("RPMB device_type = %hd\n", cap->device_type); ++ rpmb_dbg("RPMB rpmb_target = %hd\n", cap->target); ++ rpmb_dbg("RPMB capacity = %hd\n", cap->capacity); ++ rpmb_dbg("RPMB block_size = %hd\n", cap->block_size); ++ rpmb_dbg("RPMB wr_cnt_max = %hd\n", cap->wr_cnt_max); ++ rpmb_dbg("RPMB rd_cnt_max = %hd\n", cap->rd_cnt_max); ++ rpmb_dbg("RPMB auth_method = %hd\n", cap->auth_method); ++ ++ return fd; ++err: ++ close(fd); ++ return -1; ++} ++ ++static int open_rd_file(const char *datafile, const char *type) ++{ ++ int fd; ++ ++ if (!strcmp(datafile, "-")) ++ fd = STDIN_FILENO; ++ else ++ fd = open(datafile, O_RDONLY); ++ ++ if (fd < 0) ++ rpmb_err("Cannot open %s: %s: %s.\n", ++ type, datafile, strerror(errno)); ++ ++ return fd; ++} ++ ++static int open_wr_file(const char *datafile, const char *type) ++{ ++ int fd; ++ ++ if (!strcmp(datafile, "-")) ++ fd = STDOUT_FILENO; ++ else ++ fd = open(datafile, O_WRONLY | O_CREAT | O_APPEND, 0600); ++ if (fd < 0) ++ 
rpmb_err("Cannot open %s: %s: %s.\n", ++ type, datafile, strerror(errno)); ++ return fd; ++} ++ ++static void close_fd(int fd) ++{ ++ if (fd > 0 && fd != STDIN_FILENO && fd != STDOUT_FILENO) ++ close(fd); ++} ++ ++/* need to just cast out 'const' in write(2) */ ++typedef ssize_t (*rwfunc_t)(int fd, void *buf, size_t count); ++/* blocking rw wrapper */ ++static ssize_t rw(rwfunc_t func, int fd, unsigned char *buf, size_t size) ++{ ++ ssize_t ntotal = 0, n; ++ char *_buf = (char *)buf; ++ ++ do { ++ n = func(fd, _buf + ntotal, size - ntotal); ++ if (n == -1 && errno != EINTR) { ++ ntotal = -1; ++ break; ++ } else if (n > 0) { ++ ntotal += n; ++ } ++ } while (n != 0 && (size_t)ntotal != size); ++ ++ return ntotal; ++} ++ ++static ssize_t read_file(int fd, unsigned char *data, size_t size) ++{ ++ ssize_t ret; ++ ++ ret = rw(read, fd, data, size); ++ if (ret < 0) { ++ rpmb_err("cannot read file: %s\n.", strerror(errno)); ++ } else if ((size_t)ret != size) { ++ rpmb_err("read %zd but must be %zu bytes length.\n", ret, size); ++ ret = -EINVAL; ++ } ++ ++ return ret; ++} ++ ++static ssize_t write_file(int fd, unsigned char *data, size_t size) ++{ ++ ssize_t ret; ++ ++ ret = rw((rwfunc_t)write, fd, data, size); ++ if (ret < 0) { ++ rpmb_err("cannot read file: %s.\n", strerror(errno)); ++ } else if ((size_t)ret != size) { ++ rpmb_err("data is %zd but must be %zu bytes length.\n", ++ ret, size); ++ ret = -EINVAL; ++ } ++ return ret; ++} ++ ++static void dbg_dump_frame(const char *title, const struct rpmb_frame_jdec *f) ++{ ++ uint16_t result, req_resp; ++ ++ if (!verbose) ++ return; ++ ++ if (!f) ++ return; ++ ++ result = be16toh(f->result); ++ req_resp = be16toh(f->req_resp); ++ if (req_resp & 0xf00) ++ req_resp = RPMB_RESP2REQ(req_resp); ++ ++ fprintf(stderr, "--------------- %s ---------------\n", ++ title ? 
title : "start"); ++ fprintf(stderr, "ptr: %p\n", f); ++ dump_hex_buffer("key_mac: ", f->key_mac, 32); ++ dump_hex_buffer("data: ", f->data, 256); ++ dump_hex_buffer("nonce: ", f->nonce, 16); ++ fprintf(stderr, "write_counter: %u\n", be32toh(f->write_counter)); ++ fprintf(stderr, "address: %0X\n", be16toh(f->addr)); ++ fprintf(stderr, "block_count: %u\n", be16toh(f->block_count)); ++ fprintf(stderr, "result %s:%d\n", rpmb_result_str(result), result); ++ fprintf(stderr, "req_resp %s\n", rpmb_op_str(req_resp)); ++ fprintf(stderr, "--------------- End ---------------\n"); ++} ++ ++static struct rpmb_frame_jdec *rpmb_alloc_frames(unsigned int cnt) ++{ ++ return calloc(1, rpmb_ioc_frames_len_jdec(cnt)); ++} ++ ++#if OPENSSL_VERSION_NUMBER < 0x10100000L ++static int rpmb_calc_hmac_sha256(struct rpmb_frame_jdec *frames, ++ size_t blocks_cnt, ++ const unsigned char key[], ++ unsigned int key_size, ++ unsigned char mac[], ++ unsigned int mac_size) ++{ ++ HMAC_CTX ctx; ++ int ret; ++ unsigned int i; ++ ++ /* SSL returns 1 on success 0 on failure */ ++ ++ HMAC_CTX_init(&ctx); ++ ret = HMAC_Init_ex(&ctx, key, key_size, EVP_sha256(), NULL); ++ if (ret == 0) ++ goto out; ++ for (i = 0; i < blocks_cnt; i++) ++ HMAC_Update(&ctx, frames[i].data, hmac_data_len); ++ ++ ret = HMAC_Final(&ctx, mac, &mac_size); ++ if (ret == 0) ++ goto out; ++ if (mac_size != RPMB_MAC_SIZE) ++ ret = 0; ++ ++ ret = 1; ++out: ++ HMAC_CTX_cleanup(&ctx); ++ return ret == 1 ? 
0 : -1; ++} ++#else ++static int rpmb_calc_hmac_sha256(struct rpmb_frame_jdec *frames, ++ size_t blocks_cnt, ++ const unsigned char key[], ++ unsigned int key_size, ++ unsigned char mac[], ++ unsigned int mac_size) ++{ ++ HMAC_CTX *ctx; ++ int ret; ++ unsigned int i; ++ ++ /* SSL returns 1 on success 0 on failure */ ++ ++ ctx = HMAC_CTX_new(); ++ ++ ret = HMAC_Init_ex(ctx, key, key_size, EVP_sha256(), NULL); ++ if (ret == 0) ++ goto out; ++ for (i = 0; i < blocks_cnt; i++) ++ HMAC_Update(ctx, frames[i].data, rpmb_jdec_hmac_data_len); ++ ++ ret = HMAC_Final(ctx, mac, &mac_size); ++ if (ret == 0) ++ goto out; ++ if (mac_size != RPMB_MAC_SIZE) ++ ret = 0; ++ ++ ret = 1; ++out: ++ HMAC_CTX_free(ctx); ++ return ret == 1 ? 0 : -1; ++} ++#endif ++ ++static int rpmb_check_req_resp(uint16_t req, struct rpmb_frame_jdec *frame_out) ++{ ++ if (RPMB_REQ2RESP(req) != be16toh(frame_out->req_resp)) { ++ rpmb_err("RPMB response mismatch %04X != %04X\n.", ++ RPMB_REQ2RESP(req), be16toh(frame_out->req_resp)); ++ return -1; ++ } ++ return 0; ++} ++ ++static int rpmb_check_mac(const unsigned char *key, ++ struct rpmb_frame_jdec *frames_out, ++ unsigned int cnt_out) ++{ ++ unsigned char mac[RPMB_MAC_SIZE]; ++ ++ if (cnt_out == 0) { ++ rpmb_err("RPMB 0 output frames.\n"); ++ return -1; ++ } ++ ++ rpmb_calc_hmac_sha256(frames_out, cnt_out, ++ key, RPMB_KEY_SIZE, ++ mac, RPMB_MAC_SIZE); ++ ++ if (memcmp(mac, frames_out[cnt_out - 1].key_mac, RPMB_MAC_SIZE)) { ++ rpmb_err("RPMB hmac mismatch:\n"); ++ dump_hex_buffer("Result MAC: ", ++ frames_out[cnt_out - 1].key_mac, RPMB_MAC_SIZE); ++ dump_hex_buffer("Expected MAC: ", mac, RPMB_MAC_SIZE); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static int rpmb_ioctl(int fd, uint16_t req, ++ const struct rpmb_frame_jdec *frames_in, ++ unsigned int cnt_in, ++ struct rpmb_frame_jdec *frames_out, ++ unsigned int cnt_out) ++{ ++ int ret; ++ struct __packed { ++ struct rpmb_ioc_seq_cmd h; ++ struct rpmb_ioc_cmd cmd[3]; ++ } iseq = {}; ++ struct 
rpmb_frame_jdec *frame_res = NULL; ++ int i; ++ uint32_t flags; ++ ++ rpmb_dbg("RPMB OP: %s\n", rpmb_op_str(req)); ++ dbg_dump_frame("In Frame: ", frames_in); ++ ++ i = 0; ++ flags = RPMB_F_WRITE; ++ if (req == RPMB_WRITE_DATA || req == RPMB_PROGRAM_KEY) ++ flags |= RPMB_F_REL_WRITE; ++ rpmb_ioc_cmd_set(iseq.cmd[i], flags, frames_in, cnt_in); ++ i++; ++ ++ if (req == RPMB_WRITE_DATA || req == RPMB_PROGRAM_KEY) { ++ frame_res = rpmb_alloc_frames(0); ++ if (!frame_res) ++ return -ENOMEM; ++ frame_res->req_resp = htobe16(RPMB_RESULT_READ); ++ rpmb_ioc_cmd_set(iseq.cmd[i], RPMB_F_WRITE, frame_res, 0); ++ i++; ++ } ++ ++ rpmb_ioc_cmd_set(iseq.cmd[i], 0, frames_out, cnt_out); ++ i++; ++ ++ iseq.h.num_of_cmds = i; ++ ret = ioctl(fd, RPMB_IOC_SEQ_CMD, &iseq); ++ if (ret < 0) ++ rpmb_err("ioctl failure %d: %s.\n", ret, strerror(errno)); ++ ++ ret = rpmb_check_req_resp(req, frames_out); ++ ++ dbg_dump_frame("Res Frame: ", frame_res); ++ dbg_dump_frame("Out Frame: ", frames_out); ++ free(frame_res); ++ return ret; ++} ++ ++static int op_get_info(int nargs, char *argv[]) ++{ ++ int dev_fd; ++ struct rpmb_ioc_cap_cmd cap; ++ ++ if (nargs != 1) ++ return -EINVAL; ++ ++ memset(&cap, 0, sizeof(cap)); ++ dev_fd = open_dev_file(argv[0], &cap); ++ if (dev_fd < 0) ++ return -errno; ++ argv++; ++ ++ printf("RPMB device_type = %hd\n", cap.device_type); ++ printf("RPMB rpmb_target = %hd\n", cap.target); ++ printf("RPMB capacity = %hd\n", cap.capacity); ++ printf("RPMB block_size = %hd\n", cap.block_size); ++ printf("RPMB wr_cnt_max = %hd\n", cap.wr_cnt_max); ++ printf("RPMB rd_cnt_max = %hd\n", cap.rd_cnt_max); ++ printf("RPMB auth_method = %hd\n", cap.auth_method); ++ ++ close(dev_fd); ++ ++ return 0; ++} ++ ++static int op_rpmb_program_key(int nargs, char *argv[]) ++{ ++ int ret; ++ int dev_fd = -1, key_fd = -1; ++ uint16_t req = RPMB_PROGRAM_KEY; ++ struct rpmb_ioc_cap_cmd cap; ++ struct rpmb_frame_jdec *frame_in = NULL, *frame_out = NULL; ++ ++ ret = -EINVAL; ++ if (nargs != 2) ++ 
return ret; ++ ++ dev_fd = open_dev_file(argv[0], &cap); ++ if (dev_fd < 0) ++ goto out; ++ argv++; ++ ++ key_fd = open_rd_file(argv[0], "key file"); ++ if (key_fd < 0) ++ goto out; ++ argv++; ++ ++ frame_in = rpmb_alloc_frames(0); ++ frame_out = rpmb_alloc_frames(0); ++ if (!frame_in || !frame_out) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ frame_in->req_resp = htobe16(req); ++ ++ read_file(key_fd, frame_in->key_mac, RPMB_KEY_SIZE); ++ ++ ret = rpmb_ioctl(dev_fd, req, frame_in, 0, frame_out, 0); ++ if (ret) ++ goto out; ++ ++ if (RPMB_REQ2RESP(req) != be16toh(frame_out->req_resp)) { ++ rpmb_err("RPMB response mismatch.\n"); ++ ret = -1; ++ goto out; ++ } ++ ++ ret = be16toh(frame_out->result); ++ if (ret) ++ rpmb_err("RPMB operation %s failed, %s[0x%04x].\n", ++ rpmb_op_str(req), rpmb_result_str(ret), ret); ++ ++out: ++ free(frame_in); ++ free(frame_out); ++ close_fd(dev_fd); ++ close_fd(key_fd); ++ ++ return ret; ++} ++ ++static int rpmb_get_write_counter(int dev_fd, unsigned int *cnt, ++ const unsigned char *key) ++{ ++ int ret; ++ uint16_t res = 0x000F; ++ uint16_t req = RPMB_GET_WRITE_COUNTER; ++ struct rpmb_frame_jdec *frame_in = NULL; ++ struct rpmb_frame_jdec *frame_out = NULL; ++ ++ frame_in = rpmb_alloc_frames(0); ++ frame_out = rpmb_alloc_frames(0); ++ if (!frame_in || !frame_out) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ frame_in->req_resp = htobe16(req); ++ RAND_bytes(frame_in->nonce, RPMB_NONCE_SIZE); ++ ++ ret = rpmb_ioctl(dev_fd, req, frame_in, 0, frame_out, 0); ++ if (ret) ++ goto out; ++ ++ res = be16toh(frame_out->result); ++ if (res != RPMB_ERR_OK) { ++ ret = -1; ++ goto out; ++ } ++ ++ if (memcmp(&frame_in->nonce, &frame_out->nonce, RPMB_NONCE_SIZE)) { ++ rpmb_err("RPMB NONCE mismatch\n"); ++ dump_hex_buffer("Result NONCE:", ++ &frame_out->nonce, RPMB_NONCE_SIZE); ++ dump_hex_buffer("Expected NONCE: ", ++ &frame_in->nonce, RPMB_NONCE_SIZE); ++ ret = -1; ++ goto out; ++ } ++ ++ if (key) { ++ ret = rpmb_check_mac(key, frame_out, 1); ++ if 
(ret) ++ goto out; ++ } ++ ++ *cnt = be32toh(frame_out->write_counter); ++ ++out: ++ if (ret) ++ rpmb_err("RPMB operation %s failed=%d %s[0x%04x]\n", ++ rpmb_op_str(req), ret, rpmb_result_str(res), res); ++ free(frame_in); ++ free(frame_out); ++ return ret; ++} ++ ++static int op_rpmb_get_write_counter(int nargs, char **argv) ++{ ++ int ret; ++ int dev_fd = -1, key_fd = -1; ++ bool has_key; ++ struct rpmb_ioc_cap_cmd cap; ++ unsigned char key[RPMB_KEY_SIZE]; ++ unsigned int cnt; ++ ++ if (nargs == 2) ++ has_key = true; ++ else if (nargs == 1) ++ has_key = false; ++ else ++ return -EINVAL; ++ ++ ret = -EINVAL; ++ dev_fd = open_dev_file(argv[0], &cap); ++ if (dev_fd < 0) ++ return ret; ++ argv++; ++ ++ if (has_key) { ++ key_fd = open_rd_file(argv[0], "key file"); ++ if (key_fd < 0) ++ goto out; ++ argv++; ++ ++ ret = read_file(key_fd, key, RPMB_KEY_SIZE); ++ if (ret < 0) ++ goto out; ++ ++ ret = rpmb_get_write_counter(dev_fd, &cnt, key); ++ } else { ++ ret = rpmb_get_write_counter(dev_fd, &cnt, NULL); ++ } ++ ++ if (!ret) ++ printf("Counter value: 0x%08x\n", cnt); ++ ++out: ++ close_fd(dev_fd); ++ close_fd(key_fd); ++ return ret; ++} ++ ++static int op_rpmb_read_blocks(int nargs, char **argv) ++{ ++ int i, ret; ++ int dev_fd = -1, data_fd = -1, key_fd = -1; ++ uint16_t req = RPMB_READ_DATA; ++ uint16_t addr, blocks_cnt; ++ unsigned char key[RPMB_KEY_SIZE]; ++ unsigned long numarg; ++ bool has_key; ++ struct rpmb_ioc_cap_cmd cap; ++ struct rpmb_frame_jdec *frame_in = NULL; ++ struct rpmb_frame_jdec *frames_out = NULL; ++ struct rpmb_frame_jdec *frame_out; ++ ++ ret = -EINVAL; ++ if (nargs == 4) ++ has_key = false; ++ else if (nargs == 5) ++ has_key = true; ++ else ++ return ret; ++ ++ dev_fd = open_dev_file(argv[0], &cap); ++ if (dev_fd < 0) ++ goto out; ++ argv++; ++ ++ errno = 0; ++ numarg = strtoul(argv[0], NULL, 0); ++ if (errno || numarg > USHRT_MAX) { ++ rpmb_err("wrong block address\n"); ++ goto out; ++ } ++ addr = (uint16_t)numarg; ++ argv++; ++ ++ errno = 0; 
++ numarg = strtoul(argv[0], NULL, 0); ++ if (errno || numarg > USHRT_MAX) { ++ rpmb_err("wrong blocks count\n"); ++ goto out; ++ } ++ blocks_cnt = (uint16_t)numarg; ++ argv++; ++ ++ if (blocks_cnt == 0) { ++ rpmb_err("wrong blocks count\n"); ++ goto out; ++ } ++ ++ data_fd = open_wr_file(argv[0], "output data"); ++ if (data_fd < 0) ++ goto out; ++ argv++; ++ ++ if (has_key) { ++ key_fd = open_rd_file(argv[0], "key file"); ++ if (key_fd < 0) ++ goto out; ++ argv++; ++ ++ ret = read_file(key_fd, key, RPMB_KEY_SIZE); ++ if (ret < 0) ++ goto out; ++ } ++ ++ ret = 0; ++ frames_out = rpmb_alloc_frames(blocks_cnt); ++ frame_in = rpmb_alloc_frames(0); ++ if (!frames_out || !frame_in) { ++ rpmb_err("Cannot allocate %d RPMB frames\n", blocks_cnt); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ frame_in->req_resp = htobe16(req); ++ frame_in->addr = htobe16(addr); ++ /* eMMc spec ask for 0 here this will be translated by the rpmb layer */ ++ frame_in->block_count = htobe16(blocks_cnt); ++ RAND_bytes(frame_in->nonce, RPMB_NONCE_SIZE); ++ ++ ret = rpmb_ioctl(dev_fd, req, frame_in, 0, frames_out, blocks_cnt); ++ if (ret) ++ goto out; ++ ++ frame_out = &frames_out[blocks_cnt - 1]; ++ ret = be16toh(frame_out->result); ++ if (ret) { ++ rpmb_err("RPMB operation %s failed, %s[0x%04x]\n", ++ rpmb_op_str(req), rpmb_result_str(ret), ret); ++ goto out; ++ } ++ ++ if (has_key) { ++ ret = rpmb_check_mac(key, frames_out, blocks_cnt); ++ if (ret) ++ goto out; ++ } ++ ++ for (i = 0; i < blocks_cnt; i++) { ++ ret = write_file(data_fd, frames_out[i].data, ++ sizeof(frames_out[i].data)); ++ if (ret < 0) ++ goto out; ++ } ++ ++out: ++ free(frame_in); ++ free(frames_out); ++ close_fd(dev_fd); ++ close_fd(data_fd); ++ close_fd(key_fd); ++ ++ return ret; ++} ++ ++static int op_rpmb_write_blocks(int nargs, char **argv) ++{ ++ int ret; ++ int dev_fd = -1, key_fd = -1, data_fd = -1; ++ int i; ++ uint16_t req = RPMB_WRITE_DATA; ++ unsigned char key[RPMB_KEY_SIZE]; ++ unsigned char mac[RPMB_MAC_SIZE]; ++ 
unsigned long numarg; ++ uint16_t addr, blocks_cnt; ++ uint32_t write_counter; ++ struct rpmb_ioc_cap_cmd cap; ++ struct rpmb_frame_jdec *frames_in = NULL; ++ struct rpmb_frame_jdec *frame_out = NULL; ++ ++ ret = -EINVAL; ++ if (nargs != 5) ++ goto out; ++ ++ dev_fd = open_dev_file(argv[0], &cap); ++ if (dev_fd < 0) ++ goto out; ++ argv++; ++ ++ errno = 0; ++ numarg = strtoul(argv[0], NULL, 0); ++ if (errno || numarg > USHRT_MAX) { ++ rpmb_err("wrong block address %s\n", argv[0]); ++ goto out; ++ } ++ addr = (uint16_t)numarg; ++ argv++; ++ ++ errno = 0; ++ numarg = strtoul(argv[0], NULL, 0); ++ if (errno || numarg > USHRT_MAX) { ++ rpmb_err("wrong blocks count\n"); ++ goto out; ++ } ++ blocks_cnt = (uint16_t)numarg; ++ argv++; ++ ++ if (blocks_cnt == 0) { ++ rpmb_err("wrong blocks count\n"); ++ goto out; ++ } ++ ++ data_fd = open_rd_file(argv[0], "data file"); ++ if (data_fd < 0) ++ goto out; ++ argv++; ++ ++ key_fd = open_rd_file(argv[0], "key file"); ++ if (key_fd < 0) ++ goto out; ++ argv++; ++ ++ ret = read_file(key_fd, key, RPMB_KEY_SIZE); ++ if (ret < 0) ++ goto out; ++ ++ frames_in = rpmb_alloc_frames(blocks_cnt); ++ frame_out = rpmb_alloc_frames(0); ++ if (!frames_in || !frame_out) { ++ rpmb_err("can't allocate memory for RPMB outer frames\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ ret = rpmb_get_write_counter(dev_fd, &write_counter, key); ++ if (ret) ++ goto out; ++ ++ for (i = 0; i < blocks_cnt; i++) { ++ frames_in[i].req_resp = htobe16(req); ++ frames_in[i].block_count = htobe16(blocks_cnt); ++ frames_in[i].addr = htobe16(addr); ++ frames_in[i].write_counter = htobe32(write_counter); ++ } ++ ++ for (i = 0; i < blocks_cnt; i++) { ++ ret = read_file(data_fd, frames_in[i].data, ++ sizeof(frames_in[0].data)); ++ if (ret < 0) ++ goto out; ++ } ++ ++ rpmb_calc_hmac_sha256(frames_in, blocks_cnt, ++ key, RPMB_KEY_SIZE, ++ mac, RPMB_MAC_SIZE); ++ memcpy(frames_in[blocks_cnt - 1].key_mac, mac, RPMB_MAC_SIZE); ++ ret = rpmb_ioctl(dev_fd, req, frames_in, 
blocks_cnt, frame_out, 0); ++ if (ret != 0) ++ goto out; ++ ++ ret = be16toh(frame_out->result); ++ if (ret) { ++ rpmb_err("RPMB operation %s failed, %s[0x%04x]\n", ++ rpmb_op_str(req), rpmb_result_str(ret), ret); ++ ret = -1; ++ } ++ ++ if (be16toh(frame_out->addr) != addr) { ++ rpmb_err("RPMB addr mismatchs res=%04x req=%04x\n", ++ be16toh(frame_out->addr), addr); ++ ret = -1; ++ } ++ ++ if (be32toh(frame_out->write_counter) <= write_counter) { ++ rpmb_err("RPMB write counter not incremented res=%x req=%x\n", ++ be32toh(frame_out->write_counter), write_counter); ++ ret = -1; ++ } ++ ++ ret = rpmb_check_mac(key, frame_out, 1); ++out: ++ free(frames_in); ++ free(frame_out); ++ close_fd(dev_fd); ++ close_fd(data_fd); ++ close_fd(key_fd); ++ return ret; ++} ++ ++typedef int (*rpmb_op)(int argc, char *argv[]); ++ ++struct rpmb_cmd { ++ const char *op_name; ++ rpmb_op op; ++ const char *usage; /* usage title */ ++ const char *help; /* help */ ++}; ++ ++static const struct rpmb_cmd cmds[] = { ++ { ++ "get-info", ++ op_get_info, ++ "", ++ " Get RPMB device info\n", ++ }, ++ { ++ "program-key", ++ op_rpmb_program_key, ++ " ", ++ " Program authentication key of 32 bytes length from the KEY_FILE\n" ++ " when KEY_FILE is -, read standard input.\n" ++ " NOTE: This is a one-time programmable irreversible change.\n", ++ }, ++ { ++ "write-counter", ++ op_rpmb_get_write_counter, ++ " [KEY_FILE]", ++ " Rertrive write counter value from the to stdout.\n" ++ " When KEY_FILE is present data is verified via HMAC\n" ++ " when KEY_FILE is -, read standard input.\n" ++ }, ++ { ++ "write-blocks", ++ op_rpmb_write_blocks, ++ "
", ++ " of 256 bytes will be written from the DATA_FILE\n" ++ " to the at block offset
.\n" ++ " When DATA_FILE is -, read from standard input.\n", ++ }, ++ { ++ "read-blocks", ++ op_rpmb_read_blocks, ++ "
[KEY_FILE]", ++ " of 256 bytes will be read from \n" ++ " to the OUTPUT_FILE\n" ++ " When KEY_FILE is present data is verified via HMAC\n" ++ " When OUTPUT/KEY_FILE is -, read from standard input.\n" ++ " When OUTPUT_FILE is -, write to standard output\n", ++ }, ++ ++ { NULL, NULL, NULL, NULL } ++}; ++ ++static void help(const char *prog, const struct rpmb_cmd *cmd) ++{ ++ printf("%s %s %s\n", prog, cmd->op_name, cmd->usage); ++ printf("%s\n", cmd->help); ++} ++ ++static void usage(const char *prog) ++{ ++ int i; ++ ++ printf("\n"); ++ printf("Usage: %s [-v] \n\n", prog); ++ for (i = 0; cmds[i].op_name; i++) ++ printf(" %s %s %s\n", ++ prog, cmds[i].op_name, cmds[i].usage); ++ ++ printf("\n"); ++ printf(" %s -v/--verbose: runs in verbose mode\n", prog); ++ printf(" %s help : shows this help\n", prog); ++ printf(" %s help : shows detailed help\n", prog); ++} ++ ++static bool call_for_help(const char *arg) ++{ ++ return !strcmp(arg, "help") || ++ !strcmp(arg, "-h") || ++ !strcmp(arg, "--help"); ++} ++ ++static bool parse_verbose(const char *arg) ++{ ++ return !strcmp(arg, "-v") || ++ !strcmp(arg, "--verbose"); ++} ++ ++static const ++struct rpmb_cmd *parse_args(const char *prog, int *_argc, char **_argv[]) ++{ ++ int i; ++ int argc = *_argc; ++ char **argv = *_argv; ++ const struct rpmb_cmd *cmd = NULL; ++ bool need_help = false; ++ ++ argc--; argv++; ++ ++ if (argc == 0) ++ goto out; ++ ++ if (call_for_help(argv[0])) { ++ argc--; argv++; ++ if (argc == 0) ++ goto out; ++ ++ need_help = true; ++ } ++ ++ if (parse_verbose(argv[0])) { ++ argc--; argv++; ++ if (argc == 0) ++ goto out; ++ ++ verbose = true; ++ } ++ ++ for (i = 0; cmds[i].op_name; i++) { ++ if (!strncmp(argv[0], cmds[i].op_name, ++ strlen(cmds[i].op_name))) { ++ cmd = &cmds[i]; ++ argc--; argv++; ++ break; ++ } ++ } ++ ++ if (!cmd) ++ goto out; ++ ++ if (need_help || (argc > 0 && call_for_help(argv[0]))) { ++ help(prog, cmd); ++ argc--; argv++; ++ return NULL; ++ } ++ ++out: ++ *_argc = argc; ++ *_argv = 
argv; ++ ++ if (!cmd) ++ usage(prog); ++ ++ return cmd; ++} ++ ++int main(int argc, char *argv[]) ++{ ++ const char *prog = basename(argv[0]); ++ const struct rpmb_cmd *cmd; ++ int ret; ++ ++ cmd = parse_args(prog, &argc, &argv); ++ if (!cmd) ++ exit(EXIT_SUCCESS); ++ ++ ret = cmd->op(argc, argv); ++ if (ret == -EINVAL) ++ help(prog, cmd); ++ ++ if (ret) ++ exit(EXIT_FAILURE); ++ ++ exit(EXIT_SUCCESS); ++} +-- +2.17.1 + diff --git a/patches/0011-trusty-add-trusty-ipc-driver.trusty b/patches/0011-trusty-add-trusty-ipc-driver.trusty new file mode 100644 index 0000000000..c68f0546e3 --- /dev/null +++ b/patches/0011-trusty-add-trusty-ipc-driver.trusty @@ -0,0 +1,1825 @@ +From 6228a5f72f32e1ed9cbaf23f48a8fb9b6af1e934 Mon Sep 17 00:00:00 2001 +From: Michael Ryleev +Date: Wed, 7 Jan 2015 15:47:37 -0800 +Subject: [PATCH 11/63] trusty: add trusty-ipc driver + +Trusty IPC driver provides message passing interface +between non-secure side (Linux) and secure side running +Trusty. It is handling a set of trusty IPC virtio devices +instantiated and configured by trusty-virtio driver based +on device description retrieved from secure side. 
+ +Change-Id: I7249384380850dfb8795c0e0d5e39dfb907400c6 +Signed-off-by: Michael Ryleev +--- + drivers/trusty/Kconfig | 10 + + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-ipc.c | 1672 +++++++++++++++++++++++++++++ + include/linux/trusty/trusty_ipc.h | 88 ++ + 4 files changed, 1771 insertions(+) + create mode 100644 drivers/trusty/trusty-ipc.c + create mode 100644 include/linux/trusty/trusty_ipc.h + +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index 2255c0a9a815..052cd8e91ab0 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -39,4 +39,14 @@ config TRUSTY_VIRTIO + select VIRTIO + default y + ++config TRUSTY_VIRTIO_IPC ++ tristate "Trusty Virtio IPC driver" ++ depends on TRUSTY_VIRTIO ++ default y ++ help ++ This module adds support for communications with Trusty Services ++ ++ If you choose to build a module, it'll be called trusty-ipc. ++ Say N if unsure. ++ + endmenu +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index beb89a87f115..9ca451e50dee 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -10,3 +10,4 @@ obj-$(CONFIG_TRUSTY_FIQ_ARM64) += trusty-fiq-arm64.o trusty-fiq-arm64-glue.o + obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o + obj-$(CONFIG_TRUSTY) += trusty-mem.o + obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o ++obj-$(CONFIG_TRUSTY_VIRTIO_IPC) += trusty-ipc.o +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +new file mode 100644 +index 000000000000..06e026344e67 +--- /dev/null ++++ b/drivers/trusty/trusty-ipc.c +@@ -0,0 +1,1672 @@ ++/* ++ * Copyright (C) 2015 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include ++ ++#define MAX_DEVICES 4 ++ ++#define REPLY_TIMEOUT 5000 ++#define TXBUF_TIMEOUT 15000 ++ ++#define MAX_SRV_NAME_LEN 256 ++#define MAX_DEV_NAME_LEN 32 ++ ++#define DEFAULT_MSG_BUF_SIZE PAGE_SIZE ++#define DEFAULT_MSG_BUF_ALIGN PAGE_SIZE ++ ++#define TIPC_CTRL_ADDR 53 ++#define TIPC_ANY_ADDR 0xFFFFFFFF ++ ++#define TIPC_MIN_LOCAL_ADDR 1024 ++ ++#define TIPC_IOC_MAGIC 'r' ++#define TIPC_IOC_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, char *) ++#if defined(CONFIG_COMPAT) ++#define TIPC_IOC_CONNECT_COMPAT _IOW(TIPC_IOC_MAGIC, 0x80, \ ++ compat_uptr_t) ++#endif ++ ++struct tipc_virtio_dev; ++ ++struct tipc_dev_config { ++ u32 msg_buf_max_size; ++ u32 msg_buf_alignment; ++ char dev_name[MAX_DEV_NAME_LEN]; ++} __packed; ++ ++struct tipc_msg_hdr { ++ u32 src; ++ u32 dst; ++ u32 reserved; ++ u16 len; ++ u16 flags; ++ u8 data[0]; ++} __packed; ++ ++enum tipc_ctrl_msg_types { ++ TIPC_CTRL_MSGTYPE_GO_ONLINE = 1, ++ TIPC_CTRL_MSGTYPE_GO_OFFLINE, ++ TIPC_CTRL_MSGTYPE_CONN_REQ, ++ TIPC_CTRL_MSGTYPE_CONN_RSP, ++ TIPC_CTRL_MSGTYPE_DISC_REQ, ++}; ++ ++struct tipc_ctrl_msg { ++ u32 type; ++ u32 body_len; ++ u8 body[0]; ++} __packed; ++ ++struct tipc_conn_req_body { ++ char name[MAX_SRV_NAME_LEN]; ++} __packed; ++ ++struct tipc_conn_rsp_body { ++ u32 target; ++ u32 status; ++ u32 remote; ++ u32 max_msg_size; ++ u32 max_msg_cnt; ++} __packed; ++ ++struct tipc_disc_req_body { ++ u32 target; ++} __packed; ++ ++struct tipc_cdev_node { ++ struct cdev cdev; ++ struct device *dev; ++ unsigned int minor; ++}; ++ ++enum tipc_device_state { ++ VDS_OFFLINE 
= 0, ++ VDS_ONLINE, ++ VDS_DEAD, ++}; ++ ++struct tipc_virtio_dev { ++ struct kref refcount; ++ struct mutex lock; /* protects access to this device */ ++ struct virtio_device *vdev; ++ struct virtqueue *rxvq; ++ struct virtqueue *txvq; ++ uint msg_buf_cnt; ++ uint msg_buf_max_cnt; ++ size_t msg_buf_max_sz; ++ uint free_msg_buf_cnt; ++ struct list_head free_buf_list; ++ wait_queue_head_t sendq; ++ struct idr addr_idr; ++ enum tipc_device_state state; ++ struct tipc_cdev_node cdev_node; ++ char cdev_name[MAX_DEV_NAME_LEN]; ++}; ++ ++enum tipc_chan_state { ++ TIPC_DISCONNECTED = 0, ++ TIPC_CONNECTING, ++ TIPC_CONNECTED, ++ TIPC_STALE, ++}; ++ ++struct tipc_chan { ++ struct mutex lock; /* protects channel state */ ++ struct kref refcount; ++ enum tipc_chan_state state; ++ struct tipc_virtio_dev *vds; ++ const struct tipc_chan_ops *ops; ++ void *ops_arg; ++ u32 remote; ++ u32 local; ++ u32 max_msg_size; ++ u32 max_msg_cnt; ++ char srv_name[MAX_SRV_NAME_LEN]; ++}; ++ ++static struct class *tipc_class; ++static unsigned int tipc_major; ++ ++struct virtio_device *default_vdev; ++ ++static DEFINE_IDR(tipc_devices); ++static DEFINE_MUTEX(tipc_devices_lock); ++ ++static int _match_any(int id, void *p, void *data) ++{ ++ return id; ++} ++ ++static int _match_data(int id, void *p, void *data) ++{ ++ return (p == data); ++} ++ ++static void *_alloc_shareable_mem(size_t sz, phys_addr_t *ppa, gfp_t gfp) ++{ ++ return alloc_pages_exact(sz, gfp); ++} ++ ++static void _free_shareable_mem(size_t sz, void *va, phys_addr_t pa) ++{ ++ free_pages_exact(va, sz); ++} ++ ++static struct tipc_msg_buf *_alloc_msg_buf(size_t sz) ++{ ++ struct tipc_msg_buf *mb; ++ ++ /* allocate tracking structure */ ++ mb = kzalloc(sizeof(struct tipc_msg_buf), GFP_KERNEL); ++ if (!mb) ++ return NULL; ++ ++ /* allocate buffer that can be shared with secure world */ ++ mb->buf_va = _alloc_shareable_mem(sz, &mb->buf_pa, GFP_KERNEL); ++ if (!mb->buf_va) ++ goto err_alloc; ++ ++ mb->buf_sz = sz; ++ ++ return mb; ++ 
++err_alloc: ++ kfree(mb); ++ return NULL; ++} ++ ++static void _free_msg_buf(struct tipc_msg_buf *mb) ++{ ++ _free_shareable_mem(mb->buf_sz, mb->buf_va, mb->buf_pa); ++ kfree(mb); ++} ++ ++static void _free_msg_buf_list(struct list_head *list) ++{ ++ struct tipc_msg_buf *mb = NULL; ++ ++ mb = list_first_entry_or_null(list, struct tipc_msg_buf, node); ++ while (mb) { ++ list_del(&mb->node); ++ _free_msg_buf(mb); ++ mb = list_first_entry_or_null(list, struct tipc_msg_buf, node); ++ } ++} ++ ++static inline void mb_reset(struct tipc_msg_buf *mb) ++{ ++ mb->wpos = 0; ++ mb->rpos = 0; ++} ++ ++static void _free_chan(struct kref *kref) ++{ ++ struct tipc_chan *ch = container_of(kref, struct tipc_chan, refcount); ++ kfree(ch); ++} ++ ++static void _free_vds(struct kref *kref) ++{ ++ struct tipc_virtio_dev *vds = ++ container_of(kref, struct tipc_virtio_dev, refcount); ++ kfree(vds); ++} ++ ++static struct tipc_msg_buf *vds_alloc_msg_buf(struct tipc_virtio_dev *vds) ++{ ++ return _alloc_msg_buf(vds->msg_buf_max_sz); ++} ++ ++static void vds_free_msg_buf(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *mb) ++{ ++ _free_msg_buf(mb); ++} ++ ++static bool _put_txbuf_locked(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *mb) ++{ ++ list_add_tail(&mb->node, &vds->free_buf_list); ++ return vds->free_msg_buf_cnt++ == 0; ++} ++ ++static struct tipc_msg_buf *_get_txbuf_locked(struct tipc_virtio_dev *vds) ++{ ++ struct tipc_msg_buf *mb; ++ ++ if (vds->state != VDS_ONLINE) ++ return ERR_PTR(-ENODEV); ++ ++ if (vds->free_msg_buf_cnt) { ++ /* take it out of free list */ ++ mb = list_first_entry(&vds->free_buf_list, ++ struct tipc_msg_buf, node); ++ list_del(&mb->node); ++ vds->free_msg_buf_cnt--; ++ } else { ++ if (vds->msg_buf_cnt >= vds->msg_buf_max_cnt) ++ return ERR_PTR(-EAGAIN); ++ ++ /* try to allocate it */ ++ mb = _alloc_msg_buf(vds->msg_buf_max_sz); ++ if (!mb) ++ return ERR_PTR(-ENOMEM); ++ ++ vds->msg_buf_cnt++; ++ } ++ return mb; ++} ++ ++static struct tipc_msg_buf 
*_vds_get_txbuf(struct tipc_virtio_dev *vds) ++{ ++ struct tipc_msg_buf *mb; ++ ++ mutex_lock(&vds->lock); ++ mb = _get_txbuf_locked(vds); ++ mutex_unlock(&vds->lock); ++ ++ return mb; ++} ++ ++static void vds_put_txbuf(struct tipc_virtio_dev *vds, struct tipc_msg_buf *mb) ++{ ++ if (!vds) ++ return; ++ ++ mutex_lock(&vds->lock); ++ _put_txbuf_locked(vds, mb); ++ wake_up_interruptible(&vds->sendq); ++ mutex_unlock(&vds->lock); ++} ++ ++static struct tipc_msg_buf *vds_get_txbuf(struct tipc_virtio_dev *vds, ++ long timeout) ++{ ++ struct tipc_msg_buf *mb; ++ ++ if (!vds) ++ return ERR_PTR(-EINVAL); ++ ++ mb = _vds_get_txbuf(vds); ++ ++ if ((PTR_ERR(mb) == -EAGAIN) && timeout) { ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); ++ ++ timeout = msecs_to_jiffies(timeout); ++ add_wait_queue(&vds->sendq, &wait); ++ for (;;) { ++ timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, ++ timeout); ++ if (!timeout) { ++ mb = ERR_PTR(-ETIMEDOUT); ++ break; ++ } ++ ++ if (signal_pending(current)) { ++ mb = ERR_PTR(-ERESTARTSYS); ++ break; ++ } ++ ++ mb = _vds_get_txbuf(vds); ++ if (PTR_ERR(mb) != -EAGAIN) ++ break; ++ } ++ remove_wait_queue(&vds->sendq, &wait); ++ } ++ ++ if (IS_ERR(mb)) ++ return mb; ++ ++ BUG_ON(!mb); ++ ++ /* reset and reserve space for message header */ ++ mb_reset(mb); ++ mb_put_data(mb, sizeof(struct tipc_msg_hdr)); ++ ++ return mb; ++} ++ ++static int vds_queue_txbuf(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *mb) ++{ ++ int err; ++ struct scatterlist sg; ++ bool need_notify = false; ++ ++ if (!vds) ++ return -EINVAL; ++ ++ mutex_lock(&vds->lock); ++ if (vds->state == VDS_ONLINE) { ++ sg_init_one(&sg, mb->buf_va, mb->wpos); ++ err = virtqueue_add_outbuf(vds->txvq, &sg, 1, mb, GFP_KERNEL); ++ need_notify = virtqueue_kick_prepare(vds->txvq); ++ } else { ++ err = -ENODEV; ++ } ++ mutex_unlock(&vds->lock); ++ ++ if (need_notify) ++ virtqueue_notify(vds->txvq); ++ ++ return err; ++} ++ ++static int vds_add_channel(struct tipc_virtio_dev *vds, ++ struct 
tipc_chan *chan) ++{ ++ int ret; ++ ++ mutex_lock(&vds->lock); ++ if (vds->state == VDS_ONLINE) { ++ ret = idr_alloc(&vds->addr_idr, chan, ++ TIPC_MIN_LOCAL_ADDR, TIPC_ANY_ADDR - 1, ++ GFP_KERNEL); ++ if (ret > 0) { ++ chan->local = ret; ++ kref_get(&chan->refcount); ++ ret = 0; ++ } ++ } else { ++ ret = -EINVAL; ++ } ++ mutex_unlock(&vds->lock); ++ ++ return ret; ++} ++ ++static void vds_del_channel(struct tipc_virtio_dev *vds, ++ struct tipc_chan *chan) ++{ ++ mutex_lock(&vds->lock); ++ if (chan->local) { ++ idr_remove(&vds->addr_idr, chan->local); ++ chan->local = 0; ++ chan->remote = 0; ++ kref_put(&chan->refcount, _free_chan); ++ } ++ mutex_unlock(&vds->lock); ++} ++ ++static struct tipc_chan *vds_lookup_channel(struct tipc_virtio_dev *vds, ++ u32 addr) ++{ ++ int id; ++ struct tipc_chan *chan = NULL; ++ ++ mutex_lock(&vds->lock); ++ if (addr == TIPC_ANY_ADDR) { ++ id = idr_for_each(&vds->addr_idr, _match_any, NULL); ++ if (id > 0) ++ chan = idr_find(&vds->addr_idr, id); ++ } else { ++ chan = idr_find(&vds->addr_idr, addr); ++ } ++ if (chan) ++ kref_get(&chan->refcount); ++ mutex_unlock(&vds->lock); ++ ++ return chan; ++} ++ ++static struct tipc_chan *vds_create_channel(struct tipc_virtio_dev *vds, ++ const struct tipc_chan_ops *ops, ++ void *ops_arg) ++{ ++ int ret; ++ struct tipc_chan *chan = NULL; ++ ++ if (!vds) ++ return ERR_PTR(-ENOENT); ++ ++ if (!ops) ++ return ERR_PTR(-EINVAL); ++ ++ chan = kzalloc(sizeof(*chan), GFP_KERNEL); ++ if (!chan) ++ return ERR_PTR(-ENOMEM); ++ ++ kref_get(&vds->refcount); ++ chan->vds = vds; ++ chan->ops = ops; ++ chan->ops_arg = ops_arg; ++ mutex_init(&chan->lock); ++ kref_init(&chan->refcount); ++ chan->state = TIPC_DISCONNECTED; ++ ++ ret = vds_add_channel(vds, chan); ++ if (ret) { ++ kfree(chan); ++ kref_put(&vds->refcount, _free_vds); ++ return ERR_PTR(ret); ++ } ++ ++ return chan; ++} ++ ++static void fill_msg_hdr(struct tipc_msg_buf *mb, u32 src, u32 dst) ++{ ++ struct tipc_msg_hdr *hdr = mb_get_data(mb, 
sizeof(*hdr)); ++ ++ hdr->src = src; ++ hdr->dst = dst; ++ hdr->len = mb_avail_data(mb); ++ hdr->flags = 0; ++ hdr->reserved = 0; ++} ++ ++/*****************************************************************************/ ++ ++struct tipc_chan *tipc_create_channel(struct device *dev, ++ const struct tipc_chan_ops *ops, ++ void *ops_arg) ++{ ++ struct virtio_device *vd; ++ struct tipc_chan *chan; ++ struct tipc_virtio_dev *vds; ++ ++ mutex_lock(&tipc_devices_lock); ++ if (dev) { ++ vd = container_of(dev, struct virtio_device, dev); ++ } else { ++ vd = default_vdev; ++ if (!vd) { ++ mutex_unlock(&tipc_devices_lock); ++ return ERR_PTR(-ENOENT); ++ } ++ } ++ vds = vd->priv; ++ kref_get(&vds->refcount); ++ mutex_unlock(&tipc_devices_lock); ++ ++ chan = vds_create_channel(vds, ops, ops_arg); ++ kref_put(&vds->refcount, _free_vds); ++ return chan; ++} ++EXPORT_SYMBOL(tipc_create_channel); ++ ++struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan) ++{ ++ return vds_alloc_msg_buf(chan->vds); ++} ++EXPORT_SYMBOL(tipc_chan_get_rxbuf); ++ ++void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb) ++{ ++ vds_free_msg_buf(chan->vds, mb); ++} ++EXPORT_SYMBOL(tipc_chan_put_rxbuf); ++ ++struct tipc_msg_buf *tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, ++ long timeout) ++{ ++ return vds_get_txbuf(chan->vds, timeout); ++} ++EXPORT_SYMBOL(tipc_chan_get_txbuf_timeout); ++ ++void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb) ++{ ++ vds_put_txbuf(chan->vds, mb); ++} ++EXPORT_SYMBOL(tipc_chan_put_txbuf); ++ ++int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb) ++{ ++ int err; ++ ++ mutex_lock(&chan->lock); ++ switch (chan->state) { ++ case TIPC_CONNECTED: ++ fill_msg_hdr(mb, chan->local, chan->remote); ++ err = vds_queue_txbuf(chan->vds, mb); ++ if (err) { ++ /* this should never happen */ ++ pr_err("%s: failed to queue tx buffer (%d)\n", ++ __func__, err); ++ } ++ break; ++ case TIPC_DISCONNECTED: ++ case 
TIPC_CONNECTING: ++ err = -ENOTCONN; ++ break; ++ case TIPC_STALE: ++ err = -ESHUTDOWN; ++ break; ++ default: ++ err = -EBADFD; ++ pr_err("%s: unexpected channel state %d\n", ++ __func__, chan->state); ++ } ++ mutex_unlock(&chan->lock); ++ return err; ++} ++EXPORT_SYMBOL(tipc_chan_queue_msg); ++ ++ ++int tipc_chan_connect(struct tipc_chan *chan, const char *name) ++{ ++ int err; ++ struct tipc_ctrl_msg *msg; ++ struct tipc_conn_req_body *body; ++ struct tipc_msg_buf *txbuf; ++ ++ txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT); ++ if (IS_ERR(txbuf)) ++ return PTR_ERR(txbuf); ++ ++ /* reserve space for connection request control message */ ++ msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body)); ++ body = (struct tipc_conn_req_body *)msg->body; ++ ++ /* fill message */ ++ msg->type = TIPC_CTRL_MSGTYPE_CONN_REQ; ++ msg->body_len = sizeof(*body); ++ ++ strncpy(body->name, name, sizeof(body->name)); ++ body->name[sizeof(body->name)-1] = '\0'; ++ ++ mutex_lock(&chan->lock); ++ switch (chan->state) { ++ case TIPC_DISCONNECTED: ++ /* save service name we are connecting to */ ++ strcpy(chan->srv_name, body->name); ++ ++ fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR); ++ err = vds_queue_txbuf(chan->vds, txbuf); ++ if (err) { ++ /* this should never happen */ ++ pr_err("%s: failed to queue tx buffer (%d)\n", ++ __func__, err); ++ } else { ++ chan->state = TIPC_CONNECTING; ++ txbuf = NULL; /* prevents discarding buffer */ ++ } ++ break; ++ case TIPC_CONNECTED: ++ case TIPC_CONNECTING: ++ /* check if we are trying to connect to the same service */ ++ if (strcmp(chan->srv_name, body->name) == 0) ++ err = 0; ++ else ++ if (chan->state == TIPC_CONNECTING) ++ err = -EALREADY; /* in progress */ ++ else ++ err = -EISCONN; /* already connected */ ++ break; ++ ++ case TIPC_STALE: ++ err = -ESHUTDOWN; ++ break; ++ default: ++ err = -EBADFD; ++ pr_err("%s: unexpected channel state %d\n", ++ __func__, chan->state); ++ break; ++ } ++ mutex_unlock(&chan->lock); ++ ++ if (txbuf) ++ 
tipc_chan_put_txbuf(chan, txbuf); /* discard it */ ++ ++ return err; ++} ++EXPORT_SYMBOL(tipc_chan_connect); ++ ++int tipc_chan_shutdown(struct tipc_chan *chan) ++{ ++ int err; ++ struct tipc_ctrl_msg *msg; ++ struct tipc_disc_req_body *body; ++ struct tipc_msg_buf *txbuf = NULL; ++ ++ /* get tx buffer */ ++ txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT); ++ if (IS_ERR(txbuf)) ++ return PTR_ERR(txbuf); ++ ++ mutex_lock(&chan->lock); ++ if (chan->state == TIPC_CONNECTED || chan->state == TIPC_CONNECTING) { ++ /* reserve space for disconnect request control message */ ++ msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body)); ++ body = (struct tipc_disc_req_body *)msg->body; ++ ++ msg->type = TIPC_CTRL_MSGTYPE_DISC_REQ; ++ msg->body_len = sizeof(*body); ++ body->target = chan->remote; ++ ++ fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR); ++ err = vds_queue_txbuf(chan->vds, txbuf); ++ if (err) { ++ /* this should never happen */ ++ pr_err("%s: failed to queue tx buffer (%d)\n", ++ __func__, err); ++ } ++ } else { ++ err = -ENOTCONN; ++ } ++ chan->state = TIPC_STALE; ++ mutex_unlock(&chan->lock); ++ ++ if (err) { ++ /* release buffer */ ++ tipc_chan_put_txbuf(chan, txbuf); ++ } ++ ++ return err; ++} ++EXPORT_SYMBOL(tipc_chan_shutdown); ++ ++void tipc_chan_destroy(struct tipc_chan *chan) ++{ ++ mutex_lock(&chan->lock); ++ if (chan->vds) { ++ vds_del_channel(chan->vds, chan); ++ kref_put(&chan->vds->refcount, _free_vds); ++ chan->vds = NULL; ++ } ++ mutex_unlock(&chan->lock); ++ kref_put(&chan->refcount, _free_chan); ++} ++EXPORT_SYMBOL(tipc_chan_destroy); ++ ++/***************************************************************************/ ++ ++struct tipc_dn_chan { ++ int state; ++ struct mutex lock; /* protects rx_msg_queue list and channel state */ ++ struct tipc_chan *chan; ++ wait_queue_head_t readq; ++ struct completion reply_comp; ++ struct list_head rx_msg_queue; ++}; ++ ++static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout) ++{ ++ int ret; ++ ++ 
ret = wait_for_completion_interruptible_timeout(&dn->reply_comp, ++ msecs_to_jiffies(timeout)); ++ if (ret < 0) ++ return ret; ++ ++ mutex_lock(&dn->lock); ++ if (!ret) { ++ /* no reply from remote */ ++ dn->state = TIPC_STALE; ++ ret = -ETIMEDOUT; ++ } else { ++ /* got reply */ ++ if (dn->state == TIPC_CONNECTED) ++ ret = 0; ++ else if (dn->state == TIPC_DISCONNECTED) ++ if (!list_empty(&dn->rx_msg_queue)) ++ ret = 0; ++ else ++ ret = -ENOTCONN; ++ else ++ ret = -EIO; ++ } ++ mutex_unlock(&dn->lock); ++ ++ return ret; ++} ++ ++struct tipc_msg_buf *dn_handle_msg(void *data, struct tipc_msg_buf *rxbuf) ++{ ++ struct tipc_dn_chan *dn = data; ++ struct tipc_msg_buf *newbuf = rxbuf; ++ ++ mutex_lock(&dn->lock); ++ if (dn->state == TIPC_CONNECTED) { ++ /* get new buffer */ ++ newbuf = tipc_chan_get_rxbuf(dn->chan); ++ if (newbuf) { ++ /* queue an old buffer and return a new one */ ++ list_add_tail(&rxbuf->node, &dn->rx_msg_queue); ++ wake_up_interruptible(&dn->readq); ++ } else { ++ /* ++ * return an old buffer effectively discarding ++ * incoming message ++ */ ++ pr_err("%s: discard incoming message\n", __func__); ++ newbuf = rxbuf; ++ } ++ } ++ mutex_unlock(&dn->lock); ++ ++ return newbuf; ++} ++ ++static void dn_connected(struct tipc_dn_chan *dn) ++{ ++ mutex_lock(&dn->lock); ++ dn->state = TIPC_CONNECTED; ++ ++ /* complete all pending */ ++ complete(&dn->reply_comp); ++ ++ mutex_unlock(&dn->lock); ++} ++ ++static void dn_disconnected(struct tipc_dn_chan *dn) ++{ ++ mutex_lock(&dn->lock); ++ dn->state = TIPC_DISCONNECTED; ++ ++ /* complete all pending */ ++ complete(&dn->reply_comp); ++ ++ /* wakeup all readers */ ++ wake_up_interruptible_all(&dn->readq); ++ ++ mutex_unlock(&dn->lock); ++} ++ ++static void dn_shutdown(struct tipc_dn_chan *dn) ++{ ++ mutex_lock(&dn->lock); ++ ++ /* set state to STALE */ ++ dn->state = TIPC_STALE; ++ ++ /* complete all pending */ ++ complete(&dn->reply_comp); ++ ++ /* wakeup all readers */ ++ wake_up_interruptible_all(&dn->readq); ++ 
++ mutex_unlock(&dn->lock); ++} ++ ++static void dn_handle_event(void *data, int event) ++{ ++ struct tipc_dn_chan *dn = data; ++ ++ switch (event) { ++ case TIPC_CHANNEL_SHUTDOWN: ++ dn_shutdown(dn); ++ break; ++ ++ case TIPC_CHANNEL_DISCONNECTED: ++ dn_disconnected(dn); ++ break; ++ ++ case TIPC_CHANNEL_CONNECTED: ++ dn_connected(dn); ++ break; ++ ++ default: ++ pr_err("%s: unhandled event %d\n", __func__, event); ++ break; ++ } ++} ++ ++static struct tipc_chan_ops _dn_ops = { ++ .handle_msg = dn_handle_msg, ++ .handle_event = dn_handle_event, ++}; ++ ++#define cdev_to_cdn(c) container_of((c), struct tipc_cdev_node, cdev) ++#define cdn_to_vds(cdn) container_of((cdn), struct tipc_virtio_dev, cdev_node) ++ ++static struct tipc_virtio_dev *_dn_lookup_vds(struct tipc_cdev_node *cdn) ++{ ++ int ret; ++ struct tipc_virtio_dev *vds = NULL; ++ ++ mutex_lock(&tipc_devices_lock); ++ ret = idr_for_each(&tipc_devices, _match_data, cdn); ++ if (ret) { ++ vds = cdn_to_vds(cdn); ++ kref_get(&vds->refcount); ++ } ++ mutex_unlock(&tipc_devices_lock); ++ return vds; ++} ++ ++static int tipc_open(struct inode *inode, struct file *filp) ++{ ++ int ret; ++ struct tipc_virtio_dev *vds; ++ struct tipc_dn_chan *dn; ++ struct tipc_cdev_node *cdn = cdev_to_cdn(inode->i_cdev); ++ ++ vds = _dn_lookup_vds(cdn); ++ if (!vds) { ++ ret = -ENOENT; ++ goto err_vds_lookup; ++ } ++ ++ dn = kzalloc(sizeof(*dn), GFP_KERNEL); ++ if (!dn) { ++ ret = -ENOMEM; ++ goto err_alloc_chan; ++ } ++ ++ mutex_init(&dn->lock); ++ init_waitqueue_head(&dn->readq); ++ init_completion(&dn->reply_comp); ++ INIT_LIST_HEAD(&dn->rx_msg_queue); ++ ++ dn->state = TIPC_DISCONNECTED; ++ ++ dn->chan = vds_create_channel(vds, &_dn_ops, dn); ++ if (IS_ERR(dn->chan)) { ++ ret = PTR_ERR(dn->chan); ++ goto err_create_chan; ++ } ++ ++ filp->private_data = dn; ++ kref_put(&vds->refcount, _free_vds); ++ return 0; ++ ++err_create_chan: ++ kfree(dn); ++err_alloc_chan: ++ kref_put(&vds->refcount, _free_vds); ++err_vds_lookup: ++ return 
ret; ++} ++ ++ ++static int dn_connect_ioctl(struct tipc_dn_chan *dn, char __user *usr_name) ++{ ++ int err; ++ char name[MAX_SRV_NAME_LEN]; ++ ++ /* copy in service name from user space */ ++ err = strncpy_from_user(name, usr_name, sizeof(name)); ++ if (err < 0) { ++ pr_err("%s: copy_from_user (%p) failed (%d)\n", ++ __func__, usr_name, err); ++ return err; ++ } ++ name[sizeof(name)-1] = '\0'; ++ ++ /* send connect request */ ++ err = tipc_chan_connect(dn->chan, name); ++ if (err) ++ return err; ++ ++ /* and wait for reply */ ++ return dn_wait_for_reply(dn, REPLY_TIMEOUT); ++} ++ ++static long tipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ int ret; ++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case TIPC_IOC_CONNECT: ++ ret = dn_connect_ioctl(dn, (char __user *)arg); ++ break; ++ default: ++ pr_warn("%s: Unhandled ioctl cmd: 0x%x\n", ++ __func__, cmd); ++ ret = -EINVAL; ++ } ++ return ret; ++} ++ ++#if defined(CONFIG_COMPAT) ++static long tipc_compat_ioctl(struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ int ret; ++ struct tipc_dn_chan *dn = filp->private_data; ++ void __user *user_req = compat_ptr(arg); ++ ++ if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case TIPC_IOC_CONNECT_COMPAT: ++ ret = dn_connect_ioctl(dn, user_req); ++ break; ++ default: ++ pr_warn("%s: Unhandled ioctl cmd: 0x%x\n", ++ __func__, cmd); ++ ret = -EINVAL; ++ } ++ return ret; ++} ++#endif ++ ++static inline bool _got_rx(struct tipc_dn_chan *dn) ++{ ++ if (dn->state != TIPC_CONNECTED) ++ return true; ++ ++ if (!list_empty(&dn->rx_msg_queue)) ++ return true; ++ ++ return false; ++} ++ ++static ssize_t tipc_read_iter(struct kiocb *iocb, struct iov_iter *iter) ++{ ++ ssize_t ret; ++ size_t len; ++ struct tipc_msg_buf *mb; ++ struct file *filp = iocb->ki_filp; ++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ 
mutex_lock(&dn->lock); ++ ++ while (list_empty(&dn->rx_msg_queue)) { ++ if (dn->state != TIPC_CONNECTED) { ++ if (dn->state == TIPC_CONNECTING) ++ ret = -ENOTCONN; ++ else if (dn->state == TIPC_DISCONNECTED) ++ ret = -ENOTCONN; ++ else if (dn->state == TIPC_STALE) ++ ret = -ESHUTDOWN; ++ else ++ ret = -EBADFD; ++ goto out; ++ } ++ ++ mutex_unlock(&dn->lock); ++ ++ if (filp->f_flags & O_NONBLOCK) ++ return -EAGAIN; ++ ++ if (wait_event_interruptible(dn->readq, _got_rx(dn))) ++ return -ERESTARTSYS; ++ ++ mutex_lock(&dn->lock); ++ } ++ ++ mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node); ++ ++ len = mb_avail_data(mb); ++ if (len > iov_iter_count(iter)) { ++ ret = -EMSGSIZE; ++ goto out; ++ } ++ ++ if (copy_to_iter(mb_get_data(mb, len), len, iter) != len) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ ret = len; ++ list_del(&mb->node); ++ tipc_chan_put_rxbuf(dn->chan, mb); ++ ++out: ++ mutex_unlock(&dn->lock); ++ return ret; ++} ++ ++static ssize_t tipc_write_iter(struct kiocb *iocb, struct iov_iter *iter) ++{ ++ ssize_t ret; ++ size_t len; ++ long timeout = TXBUF_TIMEOUT; ++ struct tipc_msg_buf *txbuf = NULL; ++ struct file *filp = iocb->ki_filp; ++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ if (filp->f_flags & O_NONBLOCK) ++ timeout = 0; ++ ++ txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout); ++ if (IS_ERR(txbuf)) ++ return PTR_ERR(txbuf); ++ ++ /* message length */ ++ len = iov_iter_count(iter); ++ ++ /* check available space */ ++ if (len > mb_avail_space(txbuf)) { ++ ret = -EMSGSIZE; ++ goto err_out; ++ } ++ ++ /* copy in message data */ ++ if (copy_from_iter(mb_put_data(txbuf, len), len, iter) != len) { ++ ret = -EFAULT; ++ goto err_out; ++ } ++ ++ /* queue message */ ++ ret = tipc_chan_queue_msg(dn->chan, txbuf); ++ if (ret) ++ goto err_out; ++ ++ return len; ++ ++err_out: ++ tipc_chan_put_txbuf(dn->chan, txbuf); ++ return ret; ++} ++ ++static unsigned int tipc_poll(struct file *filp, poll_table *wait) ++{ ++ unsigned int mask = 0; 
++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ mutex_lock(&dn->lock); ++ ++ poll_wait(filp, &dn->readq, wait); ++ ++ /* Writes always succeed for now */ ++ mask |= POLLOUT | POLLWRNORM; ++ ++ if (!list_empty(&dn->rx_msg_queue)) ++ mask |= POLLIN | POLLRDNORM; ++ ++ if (dn->state != TIPC_CONNECTED) ++ mask |= POLLERR; ++ ++ mutex_unlock(&dn->lock); ++ return mask; ++} ++ ++ ++static int tipc_release(struct inode *inode, struct file *filp) ++{ ++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ dn_shutdown(dn); ++ ++ /* free all pending buffers */ ++ _free_msg_buf_list(&dn->rx_msg_queue); ++ ++ /* shutdown channel */ ++ tipc_chan_shutdown(dn->chan); ++ ++ /* and destroy it */ ++ tipc_chan_destroy(dn->chan); ++ ++ kfree(dn); ++ ++ return 0; ++} ++ ++static const struct file_operations tipc_fops = { ++ .open = tipc_open, ++ .release = tipc_release, ++ .unlocked_ioctl = tipc_ioctl, ++#if defined(CONFIG_COMPAT) ++ .compat_ioctl = tipc_compat_ioctl, ++#endif ++ .read_iter = tipc_read_iter, ++ .write_iter = tipc_write_iter, ++ .poll = tipc_poll, ++ .owner = THIS_MODULE, ++}; ++ ++/*****************************************************************************/ ++ ++static void chan_trigger_event(struct tipc_chan *chan, int event) ++{ ++ if (!event) ++ return; ++ ++ chan->ops->handle_event(chan->ops_arg, event); ++} ++ ++static void _cleanup_vq(struct virtqueue *vq) ++{ ++ struct tipc_msg_buf *mb; ++ ++ while ((mb = virtqueue_detach_unused_buf(vq)) != NULL) ++ _free_msg_buf(mb); ++} ++ ++static int _create_cdev_node(struct device *parent, ++ struct tipc_cdev_node *cdn, ++ const char *name) ++{ ++ int ret; ++ dev_t devt; ++ ++ if (!name) { ++ dev_dbg(parent, "%s: cdev name has to be provided\n", ++ __func__); ++ return -EINVAL; ++ } ++ ++ /* allocate minor */ ++ ret = idr_alloc(&tipc_devices, cdn, 0, MAX_DEVICES-1, GFP_KERNEL); ++ if (ret < 0) { ++ dev_dbg(parent, "%s: failed (%d) to get id\n", ++ __func__, ret); ++ return ret; ++ } ++ ++ cdn->minor = ret; ++ 
cdev_init(&cdn->cdev, &tipc_fops); ++ cdn->cdev.owner = THIS_MODULE; ++ ++ /* Add character device */ ++ devt = MKDEV(tipc_major, cdn->minor); ++ ret = cdev_add(&cdn->cdev, devt, 1); ++ if (ret) { ++ dev_dbg(parent, "%s: cdev_add failed (%d)\n", ++ __func__, ret); ++ goto err_add_cdev; ++ } ++ ++ /* Create a device node */ ++ cdn->dev = device_create(tipc_class, parent, ++ devt, NULL, "trusty-ipc-%s", name); ++ if (IS_ERR(cdn->dev)) { ++ ret = PTR_ERR(cdn->dev); ++ dev_dbg(parent, "%s: device_create failed: %d\n", ++ __func__, ret); ++ goto err_device_create; ++ } ++ ++ return 0; ++ ++err_device_create: ++ cdn->dev = NULL; ++ cdev_del(&cdn->cdev); ++err_add_cdev: ++ idr_remove(&tipc_devices, cdn->minor); ++ return ret; ++} ++ ++static void create_cdev_node(struct tipc_virtio_dev *vds, ++ struct tipc_cdev_node *cdn) ++{ ++ int err; ++ ++ mutex_lock(&tipc_devices_lock); ++ ++ if (!default_vdev) { ++ kref_get(&vds->refcount); ++ default_vdev = vds->vdev; ++ } ++ ++ if (vds->cdev_name[0] && !cdn->dev) { ++ kref_get(&vds->refcount); ++ err = _create_cdev_node(&vds->vdev->dev, cdn, vds->cdev_name); ++ if (err) { ++ dev_err(&vds->vdev->dev, ++ "failed (%d) to create cdev node\n", err); ++ kref_put(&vds->refcount, _free_vds); ++ } ++ } ++ mutex_unlock(&tipc_devices_lock); ++} ++ ++static void destroy_cdev_node(struct tipc_virtio_dev *vds, ++ struct tipc_cdev_node *cdn) ++{ ++ mutex_lock(&tipc_devices_lock); ++ if (cdn->dev) { ++ device_destroy(tipc_class, MKDEV(tipc_major, cdn->minor)); ++ cdev_del(&cdn->cdev); ++ idr_remove(&tipc_devices, cdn->minor); ++ cdn->dev = NULL; ++ kref_put(&vds->refcount, _free_vds); ++ } ++ ++ if (default_vdev == vds->vdev) { ++ default_vdev = NULL; ++ kref_put(&vds->refcount, _free_vds); ++ } ++ ++ mutex_unlock(&tipc_devices_lock); ++} ++ ++static void _go_online(struct tipc_virtio_dev *vds) ++{ ++ mutex_lock(&vds->lock); ++ if (vds->state == VDS_OFFLINE) ++ vds->state = VDS_ONLINE; ++ mutex_unlock(&vds->lock); ++ ++ create_cdev_node(vds, 
&vds->cdev_node); ++ ++ dev_info(&vds->vdev->dev, "is online\n"); ++} ++ ++static void _go_offline(struct tipc_virtio_dev *vds) ++{ ++ struct tipc_chan *chan; ++ ++ /* change state to OFFLINE */ ++ mutex_lock(&vds->lock); ++ if (vds->state != VDS_ONLINE) { ++ mutex_unlock(&vds->lock); ++ return; ++ } ++ vds->state = VDS_OFFLINE; ++ mutex_unlock(&vds->lock); ++ ++ /* wakeup all waiters */ ++ wake_up_interruptible_all(&vds->sendq); ++ ++ /* shutdown all channels */ ++ while ((chan = vds_lookup_channel(vds, TIPC_ANY_ADDR))) { ++ mutex_lock(&chan->lock); ++ chan->state = TIPC_STALE; ++ chan->remote = 0; ++ chan_trigger_event(chan, TIPC_CHANNEL_SHUTDOWN); ++ mutex_unlock(&chan->lock); ++ kref_put(&chan->refcount, _free_chan); ++ } ++ ++ /* shutdown device node */ ++ destroy_cdev_node(vds, &vds->cdev_node); ++ ++ dev_info(&vds->vdev->dev, "is offline\n"); ++} ++ ++static void _handle_conn_rsp(struct tipc_virtio_dev *vds, ++ struct tipc_conn_rsp_body *rsp, size_t len) ++{ ++ struct tipc_chan *chan; ++ ++ if (sizeof(*rsp) != len) { ++ dev_err(&vds->vdev->dev, "%s: Invalid response length %zd\n", ++ __func__, len); ++ return; ++ } ++ ++ dev_dbg(&vds->vdev->dev, ++ "%s: connection response: for addr 0x%x: " ++ "status %d remote addr 0x%x\n", ++ __func__, rsp->target, rsp->status, rsp->remote); ++ ++ /* Lookup channel */ ++ chan = vds_lookup_channel(vds, rsp->target); ++ if (chan) { ++ mutex_lock(&chan->lock); ++ if (chan->state == TIPC_CONNECTING) { ++ if (!rsp->status) { ++ chan->state = TIPC_CONNECTED; ++ chan->remote = rsp->remote; ++ chan->max_msg_cnt = rsp->max_msg_cnt; ++ chan->max_msg_size = rsp->max_msg_size; ++ chan_trigger_event(chan, ++ TIPC_CHANNEL_CONNECTED); ++ } else { ++ chan->state = TIPC_DISCONNECTED; ++ chan->remote = 0; ++ chan_trigger_event(chan, ++ TIPC_CHANNEL_DISCONNECTED); ++ } ++ } ++ mutex_unlock(&chan->lock); ++ kref_put(&chan->refcount, _free_chan); ++ } ++} ++ ++static void _handle_disc_req(struct tipc_virtio_dev *vds, ++ struct 
tipc_disc_req_body *req, size_t len) ++{ ++ struct tipc_chan *chan; ++ ++ if (sizeof(*req) != len) { ++ dev_err(&vds->vdev->dev, "%s: Invalid request length %zd\n", ++ __func__, len); ++ return; ++ } ++ ++ dev_dbg(&vds->vdev->dev, "%s: disconnect request: for addr 0x%x\n", ++ __func__, req->target); ++ ++ chan = vds_lookup_channel(vds, req->target); ++ if (chan) { ++ mutex_lock(&chan->lock); ++ if (chan->state == TIPC_CONNECTED || ++ chan->state == TIPC_CONNECTING) { ++ chan->state = TIPC_DISCONNECTED; ++ chan->remote = 0; ++ chan_trigger_event(chan, TIPC_CHANNEL_DISCONNECTED); ++ } ++ mutex_unlock(&chan->lock); ++ kref_put(&chan->refcount, _free_chan); ++ } ++} ++ ++static void _handle_ctrl_msg(struct tipc_virtio_dev *vds, ++ void *data, int len, u32 src) ++{ ++ struct tipc_ctrl_msg *msg = data; ++ ++ if ((len < sizeof(*msg)) || (sizeof(*msg) + msg->body_len != len)) { ++ dev_err(&vds->vdev->dev, ++ "%s: Invalid message length ( %d vs. %d)\n", ++ __func__, (int)(sizeof(*msg) + msg->body_len), len); ++ return; ++ } ++ ++ dev_dbg(&vds->vdev->dev, ++ "%s: Incoming ctrl message: src 0x%x type %d len %d\n", ++ __func__, src, msg->type, msg->body_len); ++ ++ switch (msg->type) { ++ case TIPC_CTRL_MSGTYPE_GO_ONLINE: ++ _go_online(vds); ++ break; ++ ++ case TIPC_CTRL_MSGTYPE_GO_OFFLINE: ++ _go_offline(vds); ++ break; ++ ++ case TIPC_CTRL_MSGTYPE_CONN_RSP: ++ _handle_conn_rsp(vds, (struct tipc_conn_rsp_body *)msg->body, ++ msg->body_len); ++ break; ++ ++ case TIPC_CTRL_MSGTYPE_DISC_REQ: ++ _handle_disc_req(vds, (struct tipc_disc_req_body *)msg->body, ++ msg->body_len); ++ break; ++ ++ default: ++ dev_warn(&vds->vdev->dev, ++ "%s: Unexpected message type: %d\n", ++ __func__, msg->type); ++ } ++} ++ ++static int _handle_rxbuf(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *rxbuf, size_t rxlen) ++{ ++ int err; ++ struct scatterlist sg; ++ struct tipc_msg_hdr *msg; ++ struct device *dev = &vds->vdev->dev; ++ ++ /* message sanity check */ ++ if (rxlen > rxbuf->buf_sz) { ++ 
dev_warn(dev, "inbound msg is too big: %zd\n", rxlen); ++ goto drop_it; ++ } ++ ++ if (rxlen < sizeof(*msg)) { ++ dev_warn(dev, "inbound msg is too short: %zd\n", rxlen); ++ goto drop_it; ++ } ++ ++ /* reset buffer and put data */ ++ mb_reset(rxbuf); ++ mb_put_data(rxbuf, rxlen); ++ ++ /* get message header */ ++ msg = mb_get_data(rxbuf, sizeof(*msg)); ++ if (mb_avail_data(rxbuf) != msg->len) { ++ dev_warn(dev, "inbound msg length mismatch: (%d vs. %d)\n", ++ (uint) mb_avail_data(rxbuf), (uint)msg->len); ++ goto drop_it; ++ } ++ ++ dev_dbg(dev, "From: %d, To: %d, Len: %d, Flags: 0x%x, Reserved: %d\n", ++ msg->src, msg->dst, msg->len, msg->flags, msg->reserved); ++ ++ /* message directed to control endpoint is a special case */ ++ if (msg->dst == TIPC_CTRL_ADDR) { ++ _handle_ctrl_msg(vds, msg->data, msg->len, msg->src); ++ } else { ++ struct tipc_chan *chan = NULL; ++ /* Lookup channel */ ++ chan = vds_lookup_channel(vds, msg->dst); ++ if (chan) { ++ /* handle it */ ++ rxbuf = chan->ops->handle_msg(chan->ops_arg, rxbuf); ++ BUG_ON(!rxbuf); ++ kref_put(&chan->refcount, _free_chan); ++ } ++ } ++ ++drop_it: ++ /* add the buffer back to the virtqueue */ ++ sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz); ++ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL); ++ if (err < 0) { ++ dev_err(dev, "failed to add a virtqueue buffer: %d\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static void _rxvq_cb(struct virtqueue *rxvq) ++{ ++ unsigned int len; ++ struct tipc_msg_buf *mb; ++ unsigned int msg_cnt = 0; ++ struct tipc_virtio_dev *vds = rxvq->vdev->priv; ++ ++ while ((mb = virtqueue_get_buf(rxvq, &len)) != NULL) { ++ if (_handle_rxbuf(vds, mb, len)) ++ break; ++ msg_cnt++; ++ } ++ ++ /* tell the other size that we added rx buffers */ ++ if (msg_cnt) ++ virtqueue_kick(rxvq); ++} ++ ++static void _txvq_cb(struct virtqueue *txvq) ++{ ++ unsigned int len; ++ struct tipc_msg_buf *mb; ++ bool need_wakeup = false; ++ struct tipc_virtio_dev *vds = 
txvq->vdev->priv; ++ ++ dev_dbg(&txvq->vdev->dev, "%s\n", __func__); ++ ++ /* detach all buffers */ ++ mutex_lock(&vds->lock); ++ while ((mb = virtqueue_get_buf(txvq, &len)) != NULL) ++ need_wakeup |= _put_txbuf_locked(vds, mb); ++ mutex_unlock(&vds->lock); ++ ++ if (need_wakeup) { ++ /* wake up potential senders waiting for a tx buffer */ ++ wake_up_interruptible_all(&vds->sendq); ++ } ++} ++ ++static int tipc_virtio_probe(struct virtio_device *vdev) ++{ ++ int err, i; ++ struct tipc_virtio_dev *vds; ++ struct tipc_dev_config config; ++ struct virtqueue *vqs[2]; ++ vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb}; ++ const char *vq_names[] = { "rx", "tx" }; ++ ++ dev_dbg(&vdev->dev, "%s:\n", __func__); ++ ++ vds = kzalloc(sizeof(*vds), GFP_KERNEL); ++ if (!vds) ++ return -ENOMEM; ++ ++ vds->vdev = vdev; ++ ++ mutex_init(&vds->lock); ++ kref_init(&vds->refcount); ++ init_waitqueue_head(&vds->sendq); ++ INIT_LIST_HEAD(&vds->free_buf_list); ++ idr_init(&vds->addr_idr); ++ ++ /* set default max message size and alignment */ ++ memset(&config, 0, sizeof(config)); ++ config.msg_buf_max_size = DEFAULT_MSG_BUF_SIZE; ++ config.msg_buf_alignment = DEFAULT_MSG_BUF_ALIGN; ++ ++ /* get configuration if present */ ++ vdev->config->get(vdev, 0, &config, sizeof(config)); ++ ++ /* copy dev name */ ++ strncpy(vds->cdev_name, config.dev_name, sizeof(vds->cdev_name)); ++ vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0'; ++ ++ /* find tx virtqueues (rx and tx and in this order) */ ++ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names); ++ if (err) ++ goto err_find_vqs; ++ ++ vds->rxvq = vqs[0]; ++ vds->txvq = vqs[1]; ++ ++ /* save max buffer size and count */ ++ vds->msg_buf_max_sz = config.msg_buf_max_size; ++ vds->msg_buf_max_cnt = virtqueue_get_vring_size(vds->txvq); ++ ++ /* set up the receive buffers */ ++ for (i = 0; i < virtqueue_get_vring_size(vds->rxvq); i++) { ++ struct scatterlist sg; ++ struct tipc_msg_buf *rxbuf; ++ ++ rxbuf = _alloc_msg_buf(vds->msg_buf_max_sz); ++ 
if (!rxbuf) { ++ dev_err(&vdev->dev, "failed to allocate rx buffer\n"); ++ err = -ENOMEM; ++ goto err_free_rx_buffers; ++ } ++ ++ sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz); ++ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL); ++ WARN_ON(err); /* sanity check; this can't really happen */ ++ } ++ ++ vdev->priv = vds; ++ vds->state = VDS_OFFLINE; ++ ++ dev_dbg(&vdev->dev, "%s: done\n", __func__); ++ return 0; ++ ++err_free_rx_buffers: ++ _cleanup_vq(vds->rxvq); ++err_find_vqs: ++ kref_put(&vds->refcount, _free_vds); ++ return err; ++} ++ ++static void tipc_virtio_remove(struct virtio_device *vdev) ++{ ++ struct tipc_virtio_dev *vds = vdev->priv; ++ ++ _go_offline(vds); ++ ++ mutex_lock(&vds->lock); ++ vds->state = VDS_DEAD; ++ vds->vdev = NULL; ++ mutex_unlock(&vds->lock); ++ ++ vdev->config->reset(vdev); ++ ++ idr_destroy(&vds->addr_idr); ++ ++ _cleanup_vq(vds->rxvq); ++ _cleanup_vq(vds->txvq); ++ _free_msg_buf_list(&vds->free_buf_list); ++ ++ vdev->config->del_vqs(vds->vdev); ++ ++ kref_put(&vds->refcount, _free_vds); ++} ++ ++static struct virtio_device_id tipc_virtio_id_table[] = { ++ { VIRTIO_ID_TRUSTY_IPC, VIRTIO_DEV_ANY_ID }, ++ { 0 }, ++}; ++ ++static unsigned int features[] = { ++ 0, ++}; ++ ++static struct virtio_driver virtio_tipc_driver = { ++ .feature_table = features, ++ .feature_table_size = ARRAY_SIZE(features), ++ .driver.name = KBUILD_MODNAME, ++ .driver.owner = THIS_MODULE, ++ .id_table = tipc_virtio_id_table, ++ .probe = tipc_virtio_probe, ++ .remove = tipc_virtio_remove, ++}; ++ ++static int __init tipc_init(void) ++{ ++ int ret; ++ dev_t dev; ++ ++ ret = alloc_chrdev_region(&dev, 0, MAX_DEVICES, KBUILD_MODNAME); ++ if (ret) { ++ pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret); ++ return ret; ++ } ++ ++ tipc_major = MAJOR(dev); ++ tipc_class = class_create(THIS_MODULE, KBUILD_MODNAME); ++ if (IS_ERR(tipc_class)) { ++ ret = PTR_ERR(tipc_class); ++ pr_err("%s: class_create failed: %d\n", __func__, ret); ++ goto 
err_class_create; ++ } ++ ++ ret = register_virtio_driver(&virtio_tipc_driver); ++ if (ret) { ++ pr_err("failed to register virtio driver: %d\n", ret); ++ goto err_register_virtio_drv; ++ } ++ ++ return 0; ++ ++err_register_virtio_drv: ++ class_destroy(tipc_class); ++ ++err_class_create: ++ unregister_chrdev_region(dev, MAX_DEVICES); ++ return ret; ++} ++ ++static void __exit tipc_exit(void) ++{ ++ unregister_virtio_driver(&virtio_tipc_driver); ++ class_destroy(tipc_class); ++ unregister_chrdev_region(MKDEV(tipc_major, 0), MAX_DEVICES); ++} ++ ++/* We need to init this early */ ++subsys_initcall(tipc_init); ++module_exit(tipc_exit); ++ ++MODULE_DEVICE_TABLE(tipc, tipc_virtio_id_table); ++MODULE_DESCRIPTION("Trusty IPC driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/include/linux/trusty/trusty_ipc.h b/include/linux/trusty/trusty_ipc.h +new file mode 100644 +index 000000000000..4ca15938a854 +--- /dev/null ++++ b/include/linux/trusty/trusty_ipc.h +@@ -0,0 +1,88 @@ ++/* ++ * Copyright (C) 2015 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++#ifndef __LINUX_TRUSTY_TRUSTY_IPC_H ++#define __LINUX_TRUSTY_TRUSTY_IPC_H ++ ++struct tipc_chan; ++ ++struct tipc_msg_buf { ++ void *buf_va; ++ phys_addr_t buf_pa; ++ size_t buf_sz; ++ size_t wpos; ++ size_t rpos; ++ struct list_head node; ++}; ++ ++enum tipc_chan_event { ++ TIPC_CHANNEL_CONNECTED = 1, ++ TIPC_CHANNEL_DISCONNECTED, ++ TIPC_CHANNEL_SHUTDOWN, ++}; ++ ++struct tipc_chan_ops { ++ void (*handle_event)(void *cb_arg, int event); ++ struct tipc_msg_buf *(*handle_msg)(void *cb_arg, ++ struct tipc_msg_buf *mb); ++}; ++ ++struct tipc_chan *tipc_create_channel(struct device *dev, ++ const struct tipc_chan_ops *ops, ++ void *cb_arg); ++ ++int tipc_chan_connect(struct tipc_chan *chan, const char *port); ++ ++int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb); ++ ++int tipc_chan_shutdown(struct tipc_chan *chan); ++ ++void tipc_chan_destroy(struct tipc_chan *chan); ++ ++struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan); ++ ++void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb); ++ ++struct tipc_msg_buf * ++tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, long timeout); ++ ++void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb); ++ ++static inline size_t mb_avail_space(struct tipc_msg_buf *mb) ++{ ++ return mb->buf_sz - mb->wpos; ++} ++ ++static inline size_t mb_avail_data(struct tipc_msg_buf *mb) ++{ ++ return mb->wpos - mb->rpos; ++} ++ ++static inline void *mb_put_data(struct tipc_msg_buf *mb, size_t len) ++{ ++ void *pos = (u8 *)mb->buf_va + mb->wpos; ++ BUG_ON(mb->wpos + len > mb->buf_sz); ++ mb->wpos += len; ++ return pos; ++} ++ ++static inline void *mb_get_data(struct tipc_msg_buf *mb, size_t len) ++{ ++ void *pos = (u8 *)mb->buf_va + mb->rpos; ++ BUG_ON(mb->rpos + len > mb->wpos); ++ mb->rpos += len; ++ return pos; ++} ++ ++#endif /* __LINUX_TRUSTY_TRUSTY_IPC_H */ ++ +-- +2.17.1 + diff --git a/patches/0011-usb-typec-Remove-the-callback-members-from-struc.usb-typec 
b/patches/0011-usb-typec-Remove-the-callback-members-from-struc.usb-typec new file mode 100644 index 0000000000..202bf684c7 --- /dev/null +++ b/patches/0011-usb-typec-Remove-the-callback-members-from-struc.usb-typec @@ -0,0 +1,196 @@ +From 1f49b1d8a1f51ea026a684bf0413399d0acb1902 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Tue, 1 Oct 2019 12:21:39 +0300 +Subject: [PATCH 11/18] usb: typec: Remove the callback members from struct + typec_capability + +There are no more users for them. + +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/class.c | 65 +++++++++++++-------------------------- + include/linux/usb/typec.h | 17 ---------- + 2 files changed, 22 insertions(+), 60 deletions(-) + +diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c +index 542be63795db..58e83fc54aa6 100644 +--- a/drivers/usb/typec/class.c ++++ b/drivers/usb/typec/class.c +@@ -58,7 +58,6 @@ struct typec_port { + struct typec_switch *sw; + struct typec_mux *mux; + +- const struct typec_capability *cap; + const struct typec_operations *ops; + }; + +@@ -970,19 +969,15 @@ preferred_role_store(struct device *dev, struct device_attribute *attr, + return -EINVAL; + } + +- if (port->ops && port->ops->try_role) { +- ret = port->ops->try_role(port, role); +- if (ret) +- return ret; +- } else if (port->cap && port->cap->try_role) { +- ret = port->cap->try_role(port->cap, role); +- if (ret) +- return ret; +- } else { ++ if (!port->ops || !port->ops->try_role) { + dev_dbg(dev, "Setting preferred role not supported\n"); + return -EOPNOTSUPP; + } + ++ ret = port->ops->try_role(port, role); ++ if (ret) ++ return ret; ++ + port->prefer_role = role; + return size; + } +@@ -1020,20 +1015,16 @@ static ssize_t data_role_store(struct device *dev, + goto unlock_and_ret; + } + +- if (port->ops && port->ops->dr_set) { +- ret = port->ops->dr_set(port, ret); +- if (ret) +- goto unlock_and_ret; +- } else if (port->cap && port->cap->dr_set) { +- ret = port->cap->dr_set(port->cap, ret); +- if 
(ret) +- goto unlock_and_ret; +- } else { ++ if (!port->ops || !port->ops->dr_set) { + dev_dbg(dev, "data role swapping not supported\n"); + ret = -EOPNOTSUPP; + goto unlock_and_ret; + } + ++ ret = port->ops->dr_set(port, ret); ++ if (ret) ++ goto unlock_and_ret; ++ + ret = size; + unlock_and_ret: + mutex_unlock(&port->port_type_lock); +@@ -1082,21 +1073,17 @@ static ssize_t power_role_store(struct device *dev, + goto unlock_and_ret; + } + +- if (port->ops && port->ops->pr_set) { +- ret = port->ops->pr_set(port, ret); +- if (ret) +- goto unlock_and_ret; +- } else if (port->cap && port->cap->pr_set) { +- ret = port->cap->pr_set(port->cap, ret); +- if (ret) +- goto unlock_and_ret; +- } else { ++ if (!port->ops || !port->ops->pr_set) { + dev_dbg(dev, "power role swapping not supported\n"); + ret = -EOPNOTSUPP; + goto unlock_and_ret; + } + ret = size; + ++ ret = port->ops->pr_set(port, ret); ++ if (ret) ++ goto unlock_and_ret; ++ + unlock_and_ret: + mutex_unlock(&port->port_type_lock); + return ret; +@@ -1124,7 +1111,7 @@ port_type_store(struct device *dev, struct device_attribute *attr, + enum typec_port_type type; + + if ((!port->ops || !port->ops->port_type_set) || +- !port->cap->port_type_set || port->fixed_role != TYPEC_PORT_DRP) { ++ port->fixed_role != TYPEC_PORT_DRP) { + dev_dbg(dev, "changing port type not supported\n"); + return -EOPNOTSUPP; + } +@@ -1141,10 +1128,7 @@ port_type_store(struct device *dev, struct device_attribute *attr, + goto unlock_and_ret; + } + +- if (port->ops && port->ops->port_type_set) +- ret = port->ops->port_type_set(port, type); +- else +- ret = port->cap->port_type_set(port->cap, type); ++ ret = port->ops->port_type_set(port, type); + if (ret) + goto unlock_and_ret; + +@@ -1204,19 +1188,15 @@ static ssize_t vconn_source_store(struct device *dev, + if (ret) + return ret; + +- if (port->ops && port->ops->vconn_set) { +- ret = port->ops->vconn_set(port, source); +- if (ret) +- return ret; +- } else if (port->cap && 
port->cap->vconn_set) { +- ret = port->cap->vconn_set(port->cap, (enum typec_role)source); +- if (ret) +- return ret; +- } else { ++ if (!port->ops || !port->ops->vconn_set) { + dev_dbg(dev, "VCONN swapping not supported\n"); + return -EOPNOTSUPP; + } + ++ ret = port->ops->vconn_set(port, source); ++ if (ret) ++ return ret; ++ + return size; + } + +@@ -1619,7 +1599,6 @@ struct typec_port *typec_register_port(struct device *parent, + mutex_init(&port->port_type_lock); + + port->id = id; +- port->cap = cap; + port->ops = cap->ops; + port->port_type = cap->type; + port->fixed_role = cap->type; +diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h +index 6c95a9ff43c6..e759668f8af9 100644 +--- a/include/linux/usb/typec.h ++++ b/include/linux/usb/typec.h +@@ -197,11 +197,6 @@ struct typec_operations { + * @fwnode: Optional fwnode of the port + * @driver_data: Private pointer for driver specific info + * @ops: Port operations vector +- * @try_role: Set data role preference for DRP port +- * @dr_set: Set Data Role +- * @pr_set: Set Power Role +- * @vconn_set: Set VCONN Role +- * @port_type_set: Set port type + * + * Static capabilities of a single USB Type-C port. + */ +@@ -219,18 +214,6 @@ struct typec_capability { + void *driver_data; + + const struct typec_operations *ops; +- +- int (*try_role)(const struct typec_capability *, +- int role); +- +- int (*dr_set)(const struct typec_capability *, +- enum typec_data_role); +- int (*pr_set)(const struct typec_capability *, +- enum typec_role); +- int (*vconn_set)(const struct typec_capability *, +- enum typec_role); +- int (*port_type_set)(const struct typec_capability *, +- enum typec_port_type); + }; + + /* Specific to try_role(). Indicates the user want's to clear the preference. 
*/ +-- +2.17.1 + diff --git a/patches/0011-vcpu-export-vcpu-create-interface-to-DM.acrn b/patches/0011-vcpu-export-vcpu-create-interface-to-DM.acrn new file mode 100644 index 0000000000..4f858a77ee --- /dev/null +++ b/patches/0011-vcpu-export-vcpu-create-interface-to-DM.acrn @@ -0,0 +1,90 @@ +From c9402384990478f439156514417de51ed0329053 Mon Sep 17 00:00:00 2001 +From: Yin Fengwei +Date: Fri, 31 Aug 2018 10:58:56 +0800 +Subject: [PATCH 011/150] vcpu: export vcpu create interface to DM + +Change-Id: I64a179c2452f67285f347bbaa60c702dec5f55de +Tracked-On: 218445 +Signed-off-by: Yin Fengwei +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/char/vhm/vhm_dev.c | 17 +++++++++++++++++ + include/linux/vhm/acrn_common.h | 5 +++++ + include/linux/vhm/acrn_hv_defs.h | 1 + + include/linux/vhm/vhm_ioctl_defs.h | 1 + + 4 files changed, 24 insertions(+) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index e12445e68c44..ab5f687f809f 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -214,6 +214,23 @@ static long vhm_dev_ioctl(struct file *filep, + return ret; + } + ++ case IC_CREATE_VCPU: { ++ struct acrn_create_vcpu cv; ++ ++ if (copy_from_user(&cv, (void *)ioctl_param, ++ sizeof(struct acrn_create_vcpu))) ++ return -EFAULT; ++ ++ ret = acrn_hypercall2(HC_CREATE_VCPU, vm->vmid, ++ virt_to_phys(&cv)); ++ if (ret < 0) { ++ pr_err("vhm: failed to create vcpu %ld!\n", cv.vcpuid); ++ return -EFAULT; ++ } ++ ++ return ret; ++ } ++ + case IC_ALLOC_MEMSEG: { + struct vm_memseg memseg; + +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 71b8c2606f57..fc64f4cc2cac 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -199,6 +199,11 @@ struct acrn_create_vm { + unsigned long vcpu_num; /* IN: VM vcpu number */ + } __attribute__((aligned(8))); + ++struct acrn_create_vcpu { ++ int vcpuid; /* IN: vcpu id */ ++ int 
pcpuid; /* IN: pcpu id */ ++} __attribute__((aligned(8))); ++ + struct acrn_set_ioreq_buffer { + long req_buf; /* IN: gpa of per VM request_buffer*/ + } __attribute__((aligned(8))); +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 3e43da56813d..329c38b961e5 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -73,6 +73,7 @@ + #define HC_RESUME_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03) + #define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04) + #define HC_QUERY_VMSTATE _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05) ++#define HC_CREATE_VCPU _HC_ID(HC_ID, HC_ID_VM_BASE + 0x06) + + /* IRQ and Interrupts */ + #define HC_ID_IRQ_BASE 0x100UL +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 8d03d38b788d..5ec2d10fc350 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -63,6 +63,7 @@ + #define IC_RESUME_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03) + #define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) + #define IC_QUERY_VMSTATE _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) ++#define IC_CREATE_VCPU _IC_ID(IC_ID, IC_ID_VM_BASE + 0x06) + + /* IRQ and Interrupts */ + #define IC_ID_IRQ_BASE 0x100UL +-- +2.17.1 + diff --git a/patches/0012-ASoC-Intel-Skylake-Update-interrupt-disabling-routin.audio b/patches/0012-ASoC-Intel-Skylake-Update-interrupt-disabling-routin.audio new file mode 100644 index 0000000000..4b44f16368 --- /dev/null +++ b/patches/0012-ASoC-Intel-Skylake-Update-interrupt-disabling-routin.audio @@ -0,0 +1,36 @@ +From 84b17dee231ae889087bf04346f639091aef30db Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Sat, 17 Aug 2019 21:38:14 +0200 +Subject: [PATCH 012/193] ASoC: Intel: Skylake: Update interrupt disabling + routine + +CNL disable interrupt routine correctly makes use of locked _update_bits +whereas SKL lagged behind and still invokes unlocked variants. Update +SKL equivalent to match its CNL brother. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-sst-ipc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c +index 91b5440c643d..4875a518dd54 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.c ++++ b/sound/soc/intel/skylake/skl-sst-ipc.c +@@ -582,11 +582,11 @@ void skl_ipc_op_int_enable(struct sst_dsp *ctx) + void skl_ipc_op_int_disable(struct sst_dsp *ctx) + { + /* disable IPC DONE interrupt */ +- sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_HIPCCTL, ++ sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL, + SKL_ADSP_REG_HIPCCTL_DONE, 0); + + /* Disable IPC BUSY interrupt */ +- sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_HIPCCTL, ++ sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL, + SKL_ADSP_REG_HIPCCTL_BUSY, 0); + + } +-- +2.17.1 + diff --git a/patches/0012-DEBUG-x86-debug-tsc-to-art-conversion.felipeb-5.4 b/patches/0012-DEBUG-x86-debug-tsc-to-art-conversion.felipeb-5.4 new file mode 100644 index 0000000000..f56a99af38 --- /dev/null +++ b/patches/0012-DEBUG-x86-debug-tsc-to-art-conversion.felipeb-5.4 @@ -0,0 +1,37 @@ +From 0039911b06b52eebe148dbc8c3318a90b1bad207 Mon Sep 17 00:00:00 2001 +From: Felipe Balbi +Date: Mon, 23 Sep 2019 12:42:01 +0300 +Subject: [PATCH 12/14] DEBUG: x86: debug tsc to art conversion + +Signed-off-by: Felipe Balbi +--- + arch/x86/kernel/tsc.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c +index 9df1faa9e8c7..5159cdcc9f04 100644 +--- a/arch/x86/kernel/tsc.c ++++ b/arch/x86/kernel/tsc.c +@@ -1257,9 +1257,19 @@ u64 get_art_ns_now(void) + { + struct system_counterval_t tsc_cycles; + u64 tsc_ns; ++ unsigned int eax; ++ unsigned int ebx; ++ unsigned int ecx; ++ unsigned int edx; + + get_tsc_ns(&tsc_cycles, &tsc_ns); + ++ /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */ ++ cpuid(ART_CPUID_LEAF, &eax, &ebx, &ecx, &edx); ++ ++ 
printk(KERN_INFO "====> tsc_ns %llu %llu\n", tsc_ns, ++ DIV_ROUND_UP_ULL(ecx * ebx, eax)); ++ + return tsc_ns; + } + EXPORT_SYMBOL(get_art_ns_now); +-- +2.17.1 + diff --git a/patches/0012-Platform-x86-Update-SoCWatch-driver-code-to-2.sep-socwatch b/patches/0012-Platform-x86-Update-SoCWatch-driver-code-to-2.sep-socwatch new file mode 100644 index 0000000000..78fa5cd6c7 --- /dev/null +++ b/patches/0012-Platform-x86-Update-SoCWatch-driver-code-to-2.sep-socwatch @@ -0,0 +1,16843 @@ +From 8e7f96ecb9bcad7372447fc5117e4a0a95f55aad Mon Sep 17 00:00:00 2001 +From: Faycal Benmlih +Date: Tue, 23 Apr 2019 14:23:37 -0500 +Subject: [PATCH 12/27] Platform/x86: Update SoCWatch driver code to 2.10 + +Signed-off-by: Faycal Benmlih +--- + drivers/platform/x86/socwatch/Makefile | 2 +- + .../platform/x86/socwatch/inc/sw_collector.h | 140 +- + .../platform/x86/socwatch/inc/sw_defines.h | 135 +- + .../platform/x86/socwatch/inc/sw_file_ops.h | 109 +- + .../x86/socwatch/inc/sw_hardware_io.h | 143 +- + .../platform/x86/socwatch/inc/sw_internal.h | 146 ++- + drivers/platform/x86/socwatch/inc/sw_ioctl.h | 404 +++--- + .../x86/socwatch/inc/sw_kernel_defines.h | 194 +-- + drivers/platform/x86/socwatch/inc/sw_list.h | 134 +- + .../platform/x86/socwatch/inc/sw_lock_defs.h | 166 +-- + drivers/platform/x86/socwatch/inc/sw_mem.h | 115 +- + .../x86/socwatch/inc/sw_ops_provider.h | 109 +- + .../x86/socwatch/inc/sw_output_buffer.h | 120 +- + .../socwatch/inc/sw_overhead_measurements.h | 219 ++-- + .../platform/x86/socwatch/inc/sw_structs.h | 358 ++--- + drivers/platform/x86/socwatch/inc/sw_telem.h | 120 +- + .../socwatch/inc/sw_trace_notifier_provider.h | 109 +- + .../x86/socwatch/inc/sw_tracepoint_handlers.h | 194 ++- + drivers/platform/x86/socwatch/inc/sw_types.h | 117 +- + .../platform/x86/socwatch/inc/sw_version.h | 117 +- + drivers/platform/x86/socwatch/sw_collector.c | 528 ++++---- + drivers/platform/x86/socwatch/sw_driver.c | 477 ++++--- + drivers/platform/x86/socwatch/sw_file_ops.c | 275 ++-- + 
.../platform/x86/socwatch/sw_hardware_io.c | 150 +-- + drivers/platform/x86/socwatch/sw_internal.c | 165 +-- + drivers/platform/x86/socwatch/sw_mem.c | 197 ++- + .../platform/x86/socwatch/sw_ops_provider.c | 1160 ++++++++--------- + .../platform/x86/socwatch/sw_output_buffer.c | 602 ++++++--- + drivers/platform/x86/socwatch/sw_reader.c | 162 ++- + drivers/platform/x86/socwatch/sw_telem.c | 1026 ++++++++++----- + .../x86/socwatch/sw_trace_notifier_provider.c | 490 ++++--- + .../x86/socwatch/sw_tracepoint_handlers.c | 250 ++-- + .../platform/x86/socwatchhv/inc/asm_helper.h | 324 +++-- + drivers/platform/x86/socwatchhv/inc/control.h | 211 ++- + .../platform/x86/socwatchhv/inc/pw_types.h | 111 +- + .../platform/x86/socwatchhv/inc/pw_version.h | 109 +- + .../platform/x86/socwatchhv/inc/sw_defines.h | 141 +- + .../platform/x86/socwatchhv/inc/sw_ioctl.h | 404 +++--- + .../platform/x86/socwatchhv/inc/sw_structs.h | 359 ++--- + .../platform/x86/socwatchhv/inc/sw_types.h | 117 +- + .../platform/x86/socwatchhv/inc/sw_version.h | 117 +- + .../platform/x86/socwatchhv/inc/swhv_acrn.h | 161 ++- + .../x86/socwatchhv/inc/swhv_acrn_sbuf.h | 107 +- + .../x86/socwatchhv/inc/swhv_defines.h | 128 +- + .../platform/x86/socwatchhv/inc/swhv_driver.h | 119 +- + .../platform/x86/socwatchhv/inc/swhv_ioctl.h | 205 ++- + .../x86/socwatchhv/inc/swhv_structs.h | 183 +-- + drivers/platform/x86/socwatchhv/swhv_acrn.c | 332 ++--- + drivers/platform/x86/socwatchhv/swhv_driver.c | 192 ++- + 49 files changed, 6341 insertions(+), 5612 deletions(-) + +diff --git a/drivers/platform/x86/socwatch/Makefile b/drivers/platform/x86/socwatch/Makefile +index 15ac18fcfdc0..073397d5aec0 100644 +--- a/drivers/platform/x86/socwatch/Makefile ++++ b/drivers/platform/x86/socwatch/Makefile +@@ -4,7 +4,7 @@ + + DRIVER_BASE=socwatch + DRIVER_MAJOR=2 +-DRIVER_MINOR=6 ++DRIVER_MINOR=10 + # basic name of driver + DRIVER_NAME=${DRIVER_BASE}${DRIVER_MAJOR}_${DRIVER_MINOR} + +diff --git 
a/drivers/platform/x86/socwatch/inc/sw_collector.h b/drivers/platform/x86/socwatch/inc/sw_collector.h +index 41430cbeddef..69a7a4833b1d 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_collector.h ++++ b/drivers/platform/x86/socwatch/inc/sw_collector.h +@@ -1,58 +1,58 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. 
+- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ + +-*/ + #ifndef __SW_COLLECTOR_H__ + + #include "sw_internal.h" +@@ -80,19 +80,18 @@ struct sw_hw_ops; + * @per_msg_payload_size: Data size + * @msg: Ptr to collected data + */ +-typedef struct sw_collector_data { ++struct sw_collector_data { + SW_LIST_ENTRY(list, sw_collector_data); +- struct cpumask cpumask; ++ struct cpumask cpumask; + struct sw_driver_interface_info *info; +- const struct sw_hw_ops **ops; +- size_t per_msg_payload_size; +- u64 last_update_jiffies; ++ const struct sw_hw_ops **ops; ++ size_t per_msg_payload_size; ++ u64 last_update_jiffies; + struct sw_driver_msg *msg; +-} sw_collector_data_t; +-#define GET_MSG_SLOT_FOR_CPU(msgs, cpu, size) \ +- ((struct sw_driver_msg *)&( \ +- ((char *)(msgs))[(cpu) * \ +- (sizeof(struct sw_driver_msg) + (size))])) ++}; ++ ++#define GET_MSG_SLOT_FOR_CPU(msgs, cpu, size) ((struct sw_driver_msg *) & \ ++ (((char *)(msgs))[(cpu) * (sizeof(struct sw_driver_msg) + (size))])) + + struct sw_collector_data *sw_alloc_collector_node(void); + void sw_free_collector_node(struct sw_collector_data *node); +@@ -103,21 +102,20 @@ int sw_write_collector_node(struct sw_collector_data *data); + void sw_init_collector_list(void *list_head); + void sw_destroy_collector_list(void *list_head); + int sw_handle_collector_list(void *list_head, +- int (*func)(struct sw_collector_data *data)); ++ int (*func)(struct sw_collector_data *data)); + int sw_handle_collector_list_on_cpu(void *list_head, +- int (*func)(struct sw_collector_data *data, +- int cpu), +- int cpu); ++ int (*func)(struct sw_collector_data *data, int cpu), ++ int cpu); + +-int sw_handle_driver_io_descriptor( +- char *dst_vals, int cpu, ++int sw_handle_driver_io_descriptor(char *dst_vals, ++ int cpu, + const struct sw_driver_io_descriptor *descriptor, + const struct sw_hw_ops *hw_ops); + int sw_init_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor); + int sw_reset_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor); + + int 
sw_add_driver_info(void *list_head, +- const struct sw_driver_interface_info *info); ++ const struct sw_driver_interface_info *info); + + void sw_handle_per_cpu_msg(void *info); + void sw_handle_per_cpu_msg_no_sched(void *info); +diff --git a/drivers/platform/x86/socwatch/inc/sw_defines.h b/drivers/platform/x86/socwatch/inc/sw_defines.h +index ab0f4911332d..a670904e4e39 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_defines.h ++++ b/drivers/platform/x86/socwatch/inc/sw_defines.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. 
+- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _PW_DEFINES_H_ + #define _PW_DEFINES_H_ 1 +@@ -63,9 +62,9 @@ + * Common to kernel and userspace. + * *************************************************** + */ +-#define PW_SUCCESS 0 +-#define PW_ERROR 1 +-#define PW_SUCCESS_NO_COLLECT 2 ++#define PW_SUCCESS 0 ++#define PW_ERROR 1 ++#define PW_SUCCESS_NO_COLLECT 2 + + /* + * Helper macro to convert 'u64' to 'unsigned long long' to avoid gcc warnings. 
+@@ -94,13 +93,10 @@ + /* + * Circularly decrement 'i'. + */ +-#define CIRCULAR_DEC(i, m) \ +- ({ \ +- int __tmp1 = (i); \ +- if (--__tmp1 < 0) \ +- __tmp1 = (m); \ +- __tmp1; \ +- }) ++#define CIRCULAR_DEC(i, m) ({ \ ++ int __tmp1 = (i); \ ++ if (--__tmp1 < 0) \ ++ __tmp1 = (m); __tmp1; }) + /* + * Retrieve size of an array. + */ +@@ -114,7 +110,7 @@ + * Assumes version numbers are 8b unsigned ints. + */ + #define SW_GET_SCU_FW_VERSION_MAJOR(ver) (((ver) >> 8) & 0xff) +-#define SW_GET_SCU_FW_VERSION_MINOR(ver) ((ver)&0xff) ++#define SW_GET_SCU_FW_VERSION_MINOR(ver) ((ver) & 0xff) + /* + * Max size of process name retrieved from kernel. + */ +@@ -152,5 +148,12 @@ typedef enum { + #define MAX_UNSIGNED_24_BIT_VALUE 0xFFFFFF + #define MAX_UNSIGNED_32_BIT_VALUE 0xFFFFFFFF + #define MAX_UNSIGNED_64_BIT_VALUE 0xFFFFFFFFFFFFFFFF ++/* ++ * TELEM BAR CONFIG ++ */ ++#define MAX_TELEM_BAR_CFG 3 ++#define TELEM_MCHBAR_CFG 0 ++#define TELEM_IPC1BAR_CFG 1 ++#define TELEM_SSRAMBAR_CFG 2 + + #endif /* _PW_DEFINES_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_file_ops.h b/drivers/platform/x86/socwatch/inc/sw_file_ops.h +index c3a30a17a7b0..bba7e5ddbb87 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_file_ops.h ++++ b/drivers/platform/x86/socwatch/inc/sw_file_ops.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. 
+- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + #ifndef __SW_FILE_OPS_H__ + #define __SW_FILE_OPS_H__ + +diff --git a/drivers/platform/x86/socwatch/inc/sw_hardware_io.h b/drivers/platform/x86/socwatch/inc/sw_hardware_io.h +index 5cc9ebe18cf1..9e207f2a473a 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_hardware_io.h ++++ b/drivers/platform/x86/socwatch/inc/sw_hardware_io.h +@@ -1,73 +1,72 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + #ifndef __SW_HARDWARE_IO_H__ + #define __SW_HARDWARE_IO_H__ + + #include "sw_structs.h" + +-typedef int (*sw_io_desc_init_func_t)( +- struct sw_driver_io_descriptor *descriptor); +-typedef void (*sw_hardware_op_func_t)( +- char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, ++typedef int (*sw_io_desc_init_func_t) ++ (struct sw_driver_io_descriptor *descriptor); ++typedef void (*sw_hardware_op_func_t) ++ (char *dst_vals, ++ int cpu, const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); +-typedef int (*sw_io_desc_print_func_t)( +- const struct sw_driver_io_descriptor *descriptor); +-typedef int (*sw_io_desc_reset_func_t)( +- const struct sw_driver_io_descriptor *descriptor); ++typedef int (*sw_io_desc_print_func_t) ++ (const struct sw_driver_io_descriptor *descriptor); ++typedef int (*sw_io_desc_reset_func_t) ++ (const struct sw_driver_io_descriptor *descriptor); + typedef bool (*sw_io_desc_available_func_t)(void); + typedef bool (*sw_hw_op_post_config_func_t)(void); + +@@ -87,13 +86,13 @@ typedef bool (*sw_hw_op_post_config_func_t)(void); + */ + struct sw_hw_ops { + const char *name; +- sw_io_desc_init_func_t init; +- sw_hardware_op_func_t read; +- sw_hardware_op_func_t write; +- sw_io_desc_print_func_t print; +- sw_io_desc_reset_func_t reset; +- sw_io_desc_available_func_t available; +- sw_hw_op_post_config_func_t post_config; ++ sw_io_desc_init_func_t init; ++ 
sw_hardware_op_func_t read; ++ sw_hardware_op_func_t write; ++ sw_io_desc_print_func_t print; ++ sw_io_desc_reset_func_t reset; ++ sw_io_desc_available_func_t available; ++ sw_hw_op_post_config_func_t post_config; + }; + + bool sw_is_valid_hw_op_id(int id); +@@ -102,7 +101,7 @@ const struct sw_hw_ops *sw_get_hw_ops_for(int id); + const char *sw_get_hw_op_abstract_name(const struct sw_hw_ops *op); + + int sw_for_each_hw_op(int (*func)(const struct sw_hw_ops *op, void *priv), +- void *priv, bool return_on_error); ++ void *priv, bool return_on_error); + + /** + * Add an operation to the list of providers. +diff --git a/drivers/platform/x86/socwatch/inc/sw_internal.h b/drivers/platform/x86/socwatch/inc/sw_internal.h +index c8d9da330756..3e027e4f63fc 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_internal.h ++++ b/drivers/platform/x86/socwatch/inc/sw_internal.h +@@ -1,58 +1,58 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +-*/ + #ifndef __SW_DATA_STRUCTS_H__ + #define __SW_DATA_STRUCTS_H__ + +@@ -66,27 +66,27 @@ + #include + #include + #include +-#include /* inode */ +-#include /* class_create */ +-#include /* cdev_alloc */ ++#include /* inode */ ++#include /* class_create */ ++#include /* cdev_alloc */ + #include /* vmalloc */ +-#include /* TASK_INTERRUPTIBLE */ +-#include /* wait_event_interruptible */ +-#include /* pci_get_bus_and_slot */ ++#include /* TASK_INTERRUPTIBLE */ ++#include /* wait_event_interruptible */ ++#include /* pci_get_bus_and_slot */ + #include /* LINUX_VERSION_CODE */ +-#include /* For SFI F/W version */ ++#include /* For SFI F/W version */ + #include + #include +-#include /* local_t */ ++#include /* local_t */ + #include /* "in_atomic" */ + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +-#include /* copy_to_user */ ++ #include /* copy_to_user */ + #else +-#include /* copy_to_user */ ++ #include /* copy_to_user */ + #endif /* LINUX_VERSION_CODE */ + + #ifdef CONFIG_X86_WANT_INTEL_MID +-#include ++ #include + #endif /* CONFIG_X86_WANT_INTEL_MID */ + /* + * End taken from sw_driver +@@ -101,6 +101,7 @@ + * ****************************************** + */ + #define GET_POLLED_CPU() (sw_max_num_cpus) ++#define CAS32(p, o, n) (cmpxchg((p), (o), (n)) == (o)) + + /* ****************************************** + * Function declarations. +@@ -109,15 +110,14 @@ + /* + * Output to user. 
+ */ +-unsigned long sw_copy_to_user(char __user *dst, +- char *src, size_t bytes_to_copy); ++unsigned long sw_copy_to_user(char __user *dst, char *src, size_t bytes_to_copy); + bool sw_check_output_buffer_params(void __user *buffer, size_t bytes_to_read, +- size_t buff_size); ++ size_t buff_size); + /* + * smp call function. + */ + void sw_schedule_work(const struct cpumask *mask, void (*work)(void *), +- void *data); ++ void *data); + /* + * Save IRQ flags and retrieve cpu number. + */ +@@ -134,5 +134,13 @@ int sw_set_module_scope_for_cpus(void); + * reset module scope for cpu frequencies. + */ + int sw_reset_module_scope_for_cpus(void); ++/* ++ * Setup p-unit/pmc telemetry ++ */ ++int sw_setup_telem(u64 addrs[3]); ++/* ++ * Tear down p-unit/pmc telemetry ++ */ ++void sw_destroy_telem(void); + + #endif /* __SW_DATA_STRUCTS_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_ioctl.h b/drivers/platform/x86/socwatch/inc/sw_ioctl.h +index 1f8e903a0e1c..43a1f69af9ab 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_ioctl.h ++++ b/drivers/platform/x86/socwatch/inc/sw_ioctl.h +@@ -1,71 +1,71 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +-*/ + #ifndef __SW_IOCTL_H__ + #define __SW_IOCTL_H__ 1 + + #if defined(__linux__) || defined(__QNX__) +-#if __KERNEL__ +-#include +-#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +-#include +-#include +-#endif /* COMPAT && x64 */ +-#else /* !__KERNEL__ */ +-#include +-#endif /* __KERNEL__ */ ++ #if __KERNEL__ ++ #include ++ #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++ #include ++ #include ++ #endif /* COMPAT && x64 */ ++ #else /* !__KERNEL__ */ ++ #include ++ #endif /* __KERNEL__ */ + #endif /* __linux__ */ + /* + * Ensure we pull in definition of 'DO_COUNT_DROPPED_SAMPLES'! +@@ -107,6 +107,9 @@ enum sw_ioctl_cmd { + sw_ioctl_cmd_avail_notify, + sw_ioctl_cmd_avail_collect, + sw_ioctl_cmd_topology_changes, ++ sw_ioctl_cmd_config_continuous, ++ sw_ioctl_cmd_read_continuous, ++ sw_ioctl_cmd_telem_bar, + }; + /* + * The actual IOCTL commands. +@@ -120,150 +123,136 @@ enum sw_ioctl_cmd { + * (similar to the file "read" and "write" calls). + */ + #ifdef SWW_MERGE /* Windows */ +-/* +- * Device type -- in the "User Defined" range." +- */ +-#define POWER_I_CONF_TYPE 40000 ++ /* ++ * Device type -- in the "User Defined" range." 
++ */ ++ #define POWER_I_CONF_TYPE 40000 + +-/* List assigned tracepoint id */ +-#define CSIR_TRACEPOINT_ID_MASK 1 +-#define DEVICE_STATE_TRACEPOINT_ID_MASK 2 +-#define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 +-#define RESET_TRACEPOINT_ID_MASK 4 +-#define DISPLAY_ON_TRACEPOINT_ID_MASK 5 ++ /* List assigned tracepoint id */ ++ #define CSIR_TRACEPOINT_ID_MASK 1 ++ #define DEVICE_STATE_TRACEPOINT_ID_MASK 2 ++ #define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 ++ #define RESET_TRACEPOINT_ID_MASK 4 ++ #define DISPLAY_ON_TRACEPOINT_ID_MASK 5 + +-#ifdef SWW_MERGE +-/* +- * TELEM BAR CONFIG +- */ +-#define MAX_TELEM_BAR_CFG 3 +-#define TELEM_MCHBAR_CFG 0 +-#define TELEM_IPC1BAR_CFG 1 +-#define TELEM_SSRAMBAR_CFG 2 +-#endif +- +-/* +- * The IOCTL function codes from 0x800 to 0xFFF are for customer use. +- */ +-#define PW_IOCTL_CONFIG \ ++ /* ++ * The IOCTL function codes from 0x800 to 0xFFF are for customer use. ++ */ ++ #define PW_IOCTL_CONFIG \ + CTL_CODE(POWER_I_CONF_TYPE, 0x900, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_START_COLLECTION \ ++ #define PW_IOCTL_START_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x901, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_STOP_COLLECTION \ ++ #define PW_IOCTL_STOP_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x902, METHOD_BUFFERED, FILE_ANY_ACCESS) + +-/* TODO: pause, resume, cancel not supported yet */ +-#define PW_IOCTL_PAUSE_COLLECTION \ ++ /* TODO: pause, resume, cancel not supported yet */ ++ #define PW_IOCTL_PAUSE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_RESUME_COLLECTION \ ++ #define PW_IOCTL_RESUME_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x904, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_CANCEL_COLLECTION \ ++ #define PW_IOCTL_CANCEL_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x905, METHOD_BUFFERED, FILE_ANY_ACCESS) + +-#define PW_IOCTL_GET_PROCESSOR_GROUP_TOPOLOGY \ ++ #define PW_IOCTL_GET_PROCESSOR_GROUP_TOPOLOGY \ + CTL_CODE(POWER_I_CONF_TYPE, 
0x906, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_TOPOLOGY \ ++ #define PW_IOCTL_TOPOLOGY \ + CTL_CODE(POWER_I_CONF_TYPE, 0x907, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ #define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ + CTL_CODE(POWER_I_CONF_TYPE, 0x908, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_IMMEDIATE_IO \ ++ #define PW_IOCTL_IMMEDIATE_IO \ + CTL_CODE(POWER_I_CONF_TYPE, 0x909, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_DRV_CLEANUP \ ++ #define PW_IOCTL_DRV_CLEANUP \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90A, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_SET_COLLECTION_EVENT \ ++ #define PW_IOCTL_SET_COLLECTION_EVENT \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90B, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_TRY_STOP_EVENT \ ++ #define PW_IOCTL_TRY_STOP_EVENT \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90C, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_SET_PCH_ACTIVE_INTERVAL \ ++ #define PW_IOCTL_SET_PCH_ACTIVE_INTERVAL \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90D, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_SET_TELEM_BAR \ ++ #define PW_IOCTL_SET_TELEM_BAR \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90E, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_METADATA \ ++ #define PW_IOCTL_METADATA \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90F, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_SET_GBE_INTERVAL \ ++ #define PW_IOCTL_SET_GBE_INTERVAL \ + CTL_CODE(POWER_I_CONF_TYPE, 0x910, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_ENABLE_COLLECTION \ ++ #define PW_IOCTL_ENABLE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x911, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_DISABLE_COLLECTION \ ++ #define PW_IOCTL_DISABLE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x912, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_DRIVER_BUILD_DATE \ +- CTL_CODE(POWER_I_CONF_TYPE, 0x913, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ #define PW_IOCTL_DRIVER_BUILD_DATE \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x913, 
METHOD_BUFFERED, FILE_ANY_ACCESS) ++ #define PW_IOCTL_CONFIG_CONTINUOUS \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x914, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ #define PW_IOCTL_READ_CONTINUOUS \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x915, METHOD_BUFFERED, FILE_ANY_ACCESS) + + #elif !defined(__APPLE__) +-#define PW_IOCTL_CONFIG \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ +- struct sw_driver_ioctl_arg *) +-#if DO_COUNT_DROPPED_SAMPLES +-#define PW_IOCTL_CMD \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ +- struct sw_driver_ioctl_arg *) +-#else +-#define PW_IOCTL_CMD \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ +- struct sw_driver_ioctl_arg *) +-#endif /* DO_COUNT_DROPPED_SAMPLES */ +-#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +-#define PW_IOCTL_IMMEDIATE_IO \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_SCU_FW_VERSION \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_READ_IMMEDIATE \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_DRIVER_VERSION \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ +- struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_CONFIG \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, struct sw_driver_ioctl_arg *) ++ #if DO_COUNT_DROPPED_SAMPLES ++ #define 
PW_IOCTL_CMD \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg *) ++ #else ++ #define PW_IOCTL_CMD \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg *) ++ #endif /* DO_COUNT_DROPPED_SAMPLES */ ++ #define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++ #define PW_IOCTL_IMMEDIATE_IO \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_SCU_FW_VERSION \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_READ_IMMEDIATE \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_DRIVER_VERSION \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_TOPOLOGY_CHANGES \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_CONFIG_CONTINUOUS \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config_continuous, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_READ_CONTINUOUS \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_continuous, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_SET_TELEM_BAR \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_telem_bar, struct sw_driver_ioctl_arg *) + #else /* __APPLE__ */ +-#define PW_IOCTL_CONFIG \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ +- struct sw_driver_ioctl_arg) +-#if DO_COUNT_DROPPED_SAMPLES +-#define PW_IOCTL_CMD \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ +- struct 
sw_driver_ioctl_arg) +-#else +-#define PW_IOCTL_CMD \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) +-#endif /* DO_COUNT_DROPPED_SAMPLES */ +-#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +-#define PW_IOCTL_IMMEDIATE_IO \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_SCU_FW_VERSION \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_READ_IMMEDIATE \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_DRIVER_VERSION \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ +- struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_CONFIG \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, struct sw_driver_ioctl_arg) ++ #if DO_COUNT_DROPPED_SAMPLES ++ #define PW_IOCTL_CMD \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) ++ #else ++ #define PW_IOCTL_CMD \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) ++ #endif /* DO_COUNT_DROPPED_SAMPLES */ ++ #define PW_IOCTL_POLL \ ++ _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++ #define PW_IOCTL_IMMEDIATE_IO \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_SCU_FW_VERSION \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, struct 
sw_driver_ioctl_arg) ++ #define PW_IOCTL_READ_IMMEDIATE \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_DRIVER_VERSION \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_TOPOLOGY_CHANGES \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_CONFIG_CONTINUOUS \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config_continuous, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_READ_CONTINUOUS \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_continuous, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_SET_TELEM_BAR \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_telem_bar, struct sw_driver_ioctl_arg *) + #endif /* __APPLE__ */ + + /* +@@ -273,31 +262,38 @@ enum sw_ioctl_cmd { + * and ONLY by the driver. 
+ */ + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +-#define PW_IOCTL_CONFIG32 \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, compat_uptr_t) ++ #define PW_IOCTL_CONFIG32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, compat_uptr_t) + #if DO_COUNT_DROPPED_SAMPLES +-#define PW_IOCTL_CMD32 \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) ++ #define PW_IOCTL_CMD32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) + #else +-#define PW_IOCTL_CMD32 \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) ++ #define PW_IOCTL_CMD32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) + #endif /* DO_COUNT_DROPPED_SAMPLES */ +-#define PW_IOCTL_POLL32 _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +-#define PW_IOCTL_IMMEDIATE_IO32 \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) +-#define PW_IOCTL_GET_SCU_FW_VERSION32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, compat_uptr_t) +-#define PW_IOCTL_READ_IMMEDIATE32 \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, compat_uptr_t) +-#define PW_IOCTL_GET_DRIVER_VERSION32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, compat_uptr_t) +-#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, compat_uptr_t) +-#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, compat_uptr_t) +-#define PW_IOCTL_GET_AVAILABLE_COLLECTORS32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) +-#define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) ++ #define PW_IOCTL_POLL32 \ ++ _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++ #define PW_IOCTL_IMMEDIATE_IO32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) ++ #define PW_IOCTL_GET_SCU_FW_VERSION32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, compat_uptr_t) 
++ #define PW_IOCTL_READ_IMMEDIATE32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, compat_uptr_t) ++ #define PW_IOCTL_GET_DRIVER_VERSION32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, compat_uptr_t) ++ #define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, compat_uptr_t) ++ #define PW_IOCTL_GET_AVAILABLE_NOTIFIERS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, compat_uptr_t) ++ #define PW_IOCTL_GET_AVAILABLE_COLLECTORS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) ++ #define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) ++ #define PW_IOCTL_CONFIG_CONTINUOUS32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config_continuous, compat_uptr_t) ++ #define PW_IOCTL_READ_CONTINUOUS32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_continuous, compat_uptr_t) ++ #define PW_IOCTL_SET_TELEM_BAR32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_telem_bar, compat_uptr_t) + #endif /* defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) */ + #endif /* __SW_IOCTL_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h b/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h +index 26328645b232..eaa730491a6e 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h ++++ b/drivers/platform/x86/socwatch/inc/sw_kernel_defines.h +@@ -1,74 +1,74 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. 
+- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +-*/ + #ifndef _SW_KERNEL_DEFINES_H_ + #define _SW_KERNEL_DEFINES_H_ 1 + + #include "sw_defines.h" + + #if defined(__APPLE__) +-#define likely(x) (x) +-#define unlikely(x) (x) ++ #define likely(x) (x) ++ #define unlikely(x) (x) + #endif /* __APPLE__ */ + + #if !defined(__APPLE__) +-#define CPU() (raw_smp_processor_id()) +-#define RAW_CPU() (raw_smp_processor_id()) ++ #define CPU() (raw_smp_processor_id()) ++ #define RAW_CPU() (raw_smp_processor_id()) + #else +-#define CPU() (cpu_number()) +-#define RAW_CPU() (cpu_number()) ++ #define CPU() (cpu_number()) ++ #define RAW_CPU() (cpu_number()) + #endif /* __APPLE__ */ + + #define TID() (current->pid) +@@ -77,7 +77,7 @@ + #define PKG(c) (cpu_data(c).phys_proc_id) + #define IT_REAL_INCR() (current->signal->it_real_incr.tv64) + +-#define ATOMIC_CAS(ptr, old_val, new_val) \ ++#define ATOMIC_CAS(ptr, old_val, new_val) \ + (cmpxchg((ptr), (old_val), (new_val)) == (old_val)) + + /* +@@ -98,19 +98,19 @@ + * and 'DO_TRACK_MEMORY_USAGE' to be TRUE. 
+ */ + #if DO_DRIVER_PROFILING +-#if !DO_OVERHEAD_MEASUREMENTS +-#undef DO_OVERHEAD_MEASUREMENTS +-#define DO_OVERHEAD_MEASUREMENTS 1 +-#endif /* DO_OVERHEAD_MEASUREMENTS */ +-#if !DO_TRACK_MEMORY_USAGE +-#undef DO_TRACK_MEMORY_USAGE +-#define DO_TRACK_MEMORY_USAGE 1 +-#endif /* DO_TRACK_MEMORY_USAGE */ ++ #if !DO_OVERHEAD_MEASUREMENTS ++ #undef DO_OVERHEAD_MEASUREMENTS ++ #define DO_OVERHEAD_MEASUREMENTS 1 ++ #endif /* DO_OVERHEAD_MEASUREMENTS */ ++ #if !DO_TRACK_MEMORY_USAGE ++ #undef DO_TRACK_MEMORY_USAGE ++ #define DO_TRACK_MEMORY_USAGE 1 ++ #endif /* DO_TRACK_MEMORY_USAGE */ + #endif /* DO_DRIVER_PROFILING */ + /* + * Should we allow debug output. + * Set to: "1" ==> 'OUTPUT' is enabled. +- * "0" ==> 'OUTPUT' is disabled. ++ * "0" ==> 'OUTPUT' is disabled. + */ + #define DO_DEBUG_OUTPUT 0 + /* +@@ -118,47 +118,47 @@ + * These are independent of the 'OUTPUT' macro + * (which controls debug messages). + * Set to '1' ==> Print driver error messages (to '/var/log/messages') +- * '0' ==> Do NOT print driver error messages ++ * '0' ==> Do NOT print driver error messages + */ + #define DO_PRINT_DRIVER_ERROR_MESSAGES 1 + /* + * Macros to control output printing. + */ + #if !defined(__APPLE__) +-#if DO_DEBUG_OUTPUT +-#define pw_pr_debug(...) printk(KERN_INFO __VA_ARGS__) +-#define pw_pr_warn(...) printk(KERN_WARNING __VA_ARGS__) ++ #if DO_DEBUG_OUTPUT ++ #define pw_pr_debug(...) pr_info(__VA_ARGS__) ++ #define pw_pr_warn(...) pr_warn(__VA_ARGS__) ++ #else ++ #define pw_pr_debug(...) ++ #define pw_pr_warn(...) ++ #endif ++ #define pw_pr_force(...) pr_info(__VA_ARGS__) + #else +-#define pw_pr_debug(...) +-#define pw_pr_warn(...) +-#endif +-#define pw_pr_force(...) printk(KERN_INFO __VA_ARGS__) +-#else +-#if DO_DEBUG_OUTPUT +-#define pw_pr_debug(...) IOLog(__VA_ARGS__) +-#define pw_pr_warn(...) IOLog(__VA_ARGS__) +-#else +-#define pw_pr_debug(...) +-#define pw_pr_warn(...) +-#endif +-#define pw_pr_force(...) 
IOLog(__VA_ARGS__) ++ #if DO_DEBUG_OUTPUT ++ #define pw_pr_debug(...) IOLog(__VA_ARGS__) ++ #define pw_pr_warn(...) IOLog(__VA_ARGS__) ++ #else ++ #define pw_pr_debug(...) ++ #define pw_pr_warn(...) ++ #endif ++ #define pw_pr_force(...) IOLog(__VA_ARGS__) + #endif /* __APPLE__ */ + + /* + * Macro for driver error messages. + */ + #if !defined(__APPLE__) +-#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) +-#define pw_pr_error(...) printk(KERN_ERR __VA_ARGS__) +-#else +-#define pw_pr_error(...) +-#endif +-#else +-#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) +-#define pw_pr_error(...) IOLog(__VA_ARGS__) ++ #if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) ++ #define pw_pr_error(...) pr_err(__VA_ARGS__) ++ #else ++ #define pw_pr_error(...) ++ #endif + #else +-#define pw_pr_error(...) +-#endif ++ #if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT) ++ #define pw_pr_error(...) IOLog(__VA_ARGS__) ++ #else ++ #define pw_pr_error(...) ++ #endif + #endif /* __APPLE__ */ + + #endif /* _SW_KERNEL_DEFINES_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_list.h b/drivers/platform/x86/socwatch/inc/sw_list.h +index 9c17e50ac5bf..9b632beefa84 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_list.h ++++ b/drivers/platform/x86/socwatch/inc/sw_list.h +@@ -1,76 +1,76 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. 
+- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + #ifndef __SW_LIST_H__ + #define __SW_LIST_H__ + + #include + +-#define SW_DEFINE_LIST_HEAD(name, dummy) struct list_head name +-#define SW_DECLARE_LIST_HEAD(name, dummy) extern struct list_head name +-#define SW_LIST_ENTRY(name, dummy) struct list_head name +-#define SW_LIST_HEAD_VAR(dummy) struct list_head +-#define SW_LIST_HEAD_INIT(head) INIT_LIST_HEAD(head) +-#define SW_LIST_ENTRY_INIT(node, field) INIT_LIST_HEAD(&node->field) +-#define SW_LIST_ADD(head, node, field) list_add_tail(&node->field, head) +-#define SW_LIST_GET_HEAD_ENTRY(head, type, field) \ ++#define SW_DEFINE_LIST_HEAD(name, dummy) struct list_head name ++#define SW_DECLARE_LIST_HEAD(name, dummy) extern struct list_head name ++#define SW_LIST_ENTRY(name, dummy) struct list_head name ++#define SW_LIST_HEAD_VAR(dummy) struct list_head ++#define SW_LIST_HEAD_INIT(head) INIT_LIST_HEAD(head) ++#define SW_LIST_ENTRY_INIT(node, field) INIT_LIST_HEAD(&node->field) ++#define SW_LIST_ADD(head, node, field) \ ++ list_add_tail(&node->field, head) ++#define SW_LIST_GET_HEAD_ENTRY(head, type, field) \ + list_first_entry(head, struct type, field) +-#define SW_LIST_UNLINK(node, field) list_del(&node->field) +-#define SW_LIST_FOR_EACH_ENTRY(node, head, field) \ ++#define SW_LIST_UNLINK(node, field) list_del(&node->field) ++#define SW_LIST_FOR_EACH_ENTRY(node, head, field) \ + list_for_each_entry(node, head, field) +-#define SW_LIST_EMPTY(head) list_empty(head) 
+-#define SW_LIST_HEAD_INITIALIZER(head) LIST_HEAD_INIT(head) ++#define SW_LIST_EMPTY(head) list_empty(head) ++#define SW_LIST_HEAD_INITIALIZER(head) LIST_HEAD_INIT(head) + + #endif /* __SW_LIST_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_lock_defs.h b/drivers/platform/x86/socwatch/inc/sw_lock_defs.h +index be44bfab01a7..42914f8998f7 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_lock_defs.h ++++ b/drivers/platform/x86/socwatch/inc/sw_lock_defs.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. 
+- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ + + /* + * Description: file containing locking routines +@@ -62,37 +61,44 @@ + #ifndef __SW_LOCK_DEFS_H__ + #define __SW_LOCK_DEFS_H__ + +-#define SW_DEFINE_SPINLOCK(s) DEFINE_SPINLOCK(s) +-#define SW_DECLARE_SPINLOCK(s) static spinlock_t s ++/* ++ * Spinlocks ++ */ ++#define SW_DEFINE_SPINLOCK(s) DEFINE_SPINLOCK(s) ++#define SW_DECLARE_SPINLOCK(s) static spinlock_t s ++ ++#define SW_INIT_SPINLOCK(s) spin_lock_init(&s) ++#define SW_DESTROY_SPINLOCK(s) /* NOP */ + +-#define SW_INIT_SPINLOCK(s) spin_lock_init(&s) +-#define SW_DESTROY_SPINLOCK(s) /* NOP */ ++#define LOCK(l) { \ ++ unsigned long l##_flags; \ ++ spin_lock_irqsave(&(l), l##_flags); + +-#define LOCK(l) \ +- { \ +- unsigned long _tmp_l_flags; \ +- spin_lock_irqsave(&(l), _tmp_l_flags); ++#define UNLOCK(l) \ ++ spin_unlock_irqrestore(&(l), l##_flags); \ ++} + +-#define UNLOCK(l) \ +- spin_unlock_irqrestore(&(l), _tmp_l_flags); \ +- } ++/* ++ * R/W locks ++ */ ++#define SW_DECLARE_RWLOCK(l) static rwlock_t l ++#define SW_INIT_RWLOCK(l) rwlock_init(&(l)) ++#define SW_DESTROY_RWLOCK(l) /* NOP */ + +-#define READ_LOCK(l) \ +- { \ +- unsigned long _tmp_l_flags; \ +- read_lock_irqsave(&(l), _tmp_l_flags); ++#define READ_LOCK(l) { \ ++ unsigned long l##_flags; \ ++ read_lock_irqsave(&(l), l##_flags); + +-#define READ_UNLOCK(l) \ +- read_unlock_irqrestore(&(l), _tmp_l_flags); \ +- } ++#define READ_UNLOCK(l) \ ++ read_unlock_irqrestore(&(l), l##_flags); \ ++} + +-#define WRITE_LOCK(l) \ +- { \ +- unsigned long _tmp_l_flags; \ +- write_lock_irqsave(&(l), _tmp_l_flags); ++#define WRITE_LOCK(l) { \ ++ unsigned long l##_flags; \ ++ write_lock_irqsave(&(l), l##_flags); + +-#define WRITE_UNLOCK(l) \ +- write_unlock_irqrestore(&(l), _tmp_l_flags); \ +- } ++#define WRITE_UNLOCK(l) \ ++ write_unlock_irqrestore(&(l), l##_flags); \ ++} + + #endif /* __SW_LOCK_DEFS_H__ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_mem.h b/drivers/platform/x86/socwatch/inc/sw_mem.h +index 0d6de7f3a21b..b8797fd1dab1 100644 +--- 
a/drivers/platform/x86/socwatch/inc/sw_mem.h ++++ b/drivers/platform/x86/socwatch/inc/sw_mem.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + /* + * Description: file containing memory management routines +@@ -64,13 +63,13 @@ + + #include "sw_types.h" + +-void *sw_kmalloc(size_t size, gfp_t flags); ++void *sw_kmalloc(size_t size, unsigned int flags); + void sw_kfree(const void *obj); + /* + * Allocate free pages. + */ +-unsigned long sw_allocate_pages(gfp_t flags, +- unsigned int alloc_size_in_bytes); ++unsigned long sw_allocate_pages(unsigned int flags, ++ unsigned int alloc_size_in_bytes); + /* + * Free up previously allocated pages. 
+ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_ops_provider.h b/drivers/platform/x86/socwatch/inc/sw_ops_provider.h +index bb841bf65cb6..69b1b70a3fdc 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_ops_provider.h ++++ b/drivers/platform/x86/socwatch/inc/sw_ops_provider.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. 
+- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + #ifndef __SW_OPS_PROVIDER_H__ + #define __SW_OPS_PROVIDER_H__ + +diff --git a/drivers/platform/x86/socwatch/inc/sw_output_buffer.h b/drivers/platform/x86/socwatch/inc/sw_output_buffer.h +index 8d6518222ce3..d7138a7aa866 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_output_buffer.h ++++ b/drivers/platform/x86/socwatch/inc/sw_output_buffer.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. 
When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ + + #ifndef _SW_OUTPUT_BUFFER_H_ + #define _SW_OUTPUT_BUFFER_H_ 1 +@@ -97,11 +96,10 @@ void sw_reset_per_cpu_buffers(void); + + void sw_count_samples_produced_dropped(void); + +-int sw_produce_polled_msg(struct sw_driver_msg *, enum sw_wakeup_action); + int sw_produce_generic_msg(struct sw_driver_msg *, enum sw_wakeup_action); + + bool sw_any_seg_full(u32 *val, bool is_flush_mode); +-size_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read); ++ssize_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read); + + unsigned int sw_get_output_buffer_size(void); + +@@ -133,4 +131,12 @@ void sw_cancel_reader(void); + */ + void sw_print_reader_stats(void); + ++/* ************************************************* ++ * For circular buffer (continuous profiling) ++ * ************************************************* ++ */ ++long initialize_circular_buffer(size_t size); ++void reset_circular_buffer(void); ++void destroy_circular_buffer(void); ++ + #endif /* _SW_OUTPUT_BUFFER_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h b/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h +index 4052555419a8..b665f5438a3f 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h ++++ b/drivers/platform/x86/socwatch/inc/sw_overhead_measurements.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. 
++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + /* + * Description: file containing overhead measurement +@@ -81,26 +80,26 @@ + #if DO_OVERHEAD_MEASUREMENTS + + #ifndef __get_cpu_var +-/* +- * Kernels >= 3.19 don't include a definition +- * of '__get_cpu_var'. Create one now. +- */ +-#define __get_cpu_var(var) (*this_cpu_ptr(&var)) ++ /* ++ * Kernels >= 3.19 don't include a definition ++ * of '__get_cpu_var'. Create one now. ++ */ ++ #define __get_cpu_var(var) (*this_cpu_ptr(&var)) + #endif /* __get_cpu_var */ + #ifndef __raw_get_cpu_var +-/* +- * Kernels >= 3.19 don't include a definition +- * of '__raw_get_cpu_var'. Create one now. +- */ +-#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&var)) ++ /* ++ * Kernels >= 3.19 don't include a definition ++ * of '__raw_get_cpu_var'. Create one now. 
++ */ ++ #define __raw_get_cpu_var(var) (*raw_cpu_ptr(&var)) + #endif /* __get_cpu_var */ + + extern u64 sw_timestamp(void); + + #define DECLARE_OVERHEAD_VARS(name) \ + static DEFINE_PER_CPU(u64, name##_elapsed_time); \ +- static DEFINE_PER_CPU(local_t, name##_num_iters) = LOCAL_INIT(0); \ +- \ ++ static DEFINE_PER_CPU(local_t, name##_num_iters) = \ ++ LOCAL_INIT(0); \ + static inline u64 get_my_cumulative_elapsed_time_##name(void) \ + { \ + return *(&__get_cpu_var(name##_elapsed_time)); \ +@@ -109,81 +108,77 @@ extern u64 sw_timestamp(void); + { \ + return local_read(&__get_cpu_var(name##_num_iters)); \ + } \ +- \ +- static inline u64 name##_get_cumulative_elapsed_time_for(int cpu) \ ++ static inline u64 name##_get_cumulative_elapsed_time_for( \ ++ int cpu) \ + { \ + return *(&per_cpu(name##_elapsed_time, cpu)); \ + } \ +- \ + static inline int name##_get_cumulative_num_iters_for(int cpu) \ + { \ + return local_read(&per_cpu(name##_num_iters, cpu)); \ + } \ +- \ +- static inline void name##_get_cumulative_overhead_params(u64 *time, \ +- int *iters) \ ++ static inline void name##_get_cumulative_overhead_params( \ ++ u64 *time, \ ++ int *iters) \ + { \ + int cpu = 0; \ +- *time = 0; \ +- *iters = 0; \ ++ *time = 0; *iters = 0; \ + for_each_online_cpu(cpu) { \ +- *iters += name##_get_cumulative_num_iters_for(cpu); \ +- *time += name##_get_cumulative_elapsed_time_for(cpu); \ ++ *iters += name##_get_cumulative_num_iters_for( \ ++ cpu); \ ++ *time += name##_get_cumulative_elapsed_time_for(\ ++ cpu); \ + } \ + return; \ + } \ +- \ + static inline void name##_print_cumulative_overhead_params( \ +- const char *str) \ ++ const char *str)\ + { \ + int num = 0; \ + u64 time = 0; \ + name##_get_cumulative_overhead_params(&time, &num); \ +- printk(KERN_INFO "%s: %d iters took %llu nano seconds!\n", \ +- str, num, time); \ ++ pw_pr_error("%s: %d iters took %llu nano seconds!\n", \ ++ str, num, time); \ + } + +-#define DO_PER_CPU_OVERHEAD_FUNC(func, ...) 
\ +- do { \ +- u64 *__v = &__raw_get_cpu_var(func##_elapsed_time); \ +- u64 tmp_1 = 0, tmp_2 = 0; \ +- local_inc(&__raw_get_cpu_var(func##_num_iters)); \ +- tmp_1 = sw_timestamp(); \ +- { \ +- func(__VA_ARGS__); \ +- } \ +- tmp_2 = sw_timestamp(); \ +- *(__v) += (tmp_2 - tmp_1); \ +- } while (0) +- +-#define DO_PER_CPU_OVERHEAD_FUNC_RET(type, func, ...) \ +- ({ \ +- type __ret; \ +- u64 *__v = &__raw_get_cpu_var(func##_elapsed_time); \ +- u64 tmp_1 = 0, tmp_2 = 0; \ +- local_inc(&__raw_get_cpu_var(func##_num_iters)); \ +- tmp_1 = sw_timestamp(); \ +- { \ +- __ret = func(__VA_ARGS__); \ +- } \ +- tmp_2 = sw_timestamp(); \ +- *(__v) += (tmp_2 - tmp_1); \ +- __ret; \ +- }) ++#define DO_PER_CPU_OVERHEAD_FUNC(func, ...) do { \ ++ u64 *__v = &__raw_get_cpu_var(func##_elapsed_time); \ ++ u64 tmp_1 = 0, tmp_2 = 0; \ ++ local_inc(&__raw_get_cpu_var(func##_num_iters)); \ ++ tmp_1 = sw_timestamp(); \ ++ { \ ++ func(__VA_ARGS__); \ ++ } \ ++ tmp_2 = sw_timestamp(); \ ++ *(__v) += (tmp_2 - tmp_1); \ ++} while (0) ++ ++#define DO_PER_CPU_OVERHEAD_FUNC_RET(type, func, ...) ({ \ ++ type __ret; \ ++ u64 *__v = &__raw_get_cpu_var(func##_elapsed_time); \ ++ u64 tmp_1 = 0, tmp_2 = 0; \ ++ local_inc(&__raw_get_cpu_var(func##_num_iters)); \ ++ tmp_1 = sw_timestamp(); \ ++ { \ ++ __ret = func(__VA_ARGS__); \ ++ } \ ++ tmp_2 = sw_timestamp(); \ ++ *(__v) += (tmp_2 - tmp_1); \ ++ __ret; \ ++}) + + #else /* !DO_OVERHEAD_MEASUREMENTS */ + #define DECLARE_OVERHEAD_VARS(name) \ + static inline void name##_print_cumulative_overhead_params( \ +- const char *str) \ +- { /* NOP */ \ +- } ++ const char *str)\ ++ { /* NOP */ } + + #define DO_PER_CPU_OVERHEAD_FUNC(func, ...) func(__VA_ARGS__) + #define DO_PER_CPU_OVERHEAD_FUNC_RET(type, func, ...) 
func(__VA_ARGS__) + + #endif /* DO_OVERHEAD_MEASUREMENTS */ + +-#define PRINT_CUMULATIVE_OVERHEAD_PARAMS(name, str) \ ++#define PRINT_CUMULATIVE_OVERHEAD_PARAMS(name, str) \ + name##_print_cumulative_overhead_params(str) + + #endif /* _PW_OVERHEAD_MEASUREMENTS_H_ */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_structs.h b/drivers/platform/x86/socwatch/inc/sw_structs.h +index de5ad2b6eb70..738edd35de24 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_structs.h ++++ b/drivers/platform/x86/socwatch/inc/sw_structs.h +@@ -1,58 +1,58 @@ +-/* ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. 
+- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+- +-*/ + #ifndef __SW_STRUCTS_H__ + #define __SW_STRUCTS_H__ 1 + +@@ -82,7 +82,7 @@ typedef struct sw_string_type { + char data[1]; + } sw_string_type_t; + #pragma pack(pop) +-#define SW_STRING_TYPE_HEADER_SIZE() \ ++#define SW_STRING_TYPE_HEADER_SIZE() \ + (sizeof(struct sw_string_type) - sizeof(char[1])) + + #pragma pack(push, 1) +@@ -91,21 +91,22 @@ struct sw_key_value_payload { + char data[1]; + }; + #pragma pack(pop) +-#define SW_KEY_VALUE_PAYLOAD_HEADER_SIZE() \ ++#define SW_KEY_VALUE_PAYLOAD_HEADER_SIZE() \ + (sizeof(struct sw_key_value_payload) - sizeof(char[1])) + + typedef enum sw_kernel_wakelock_type { +- SW_WAKE_LOCK = 0, /* A kernel wakelock was acquired */ +- SW_WAKE_UNLOCK = 1, /* A kernel wakelock was released */ +- SW_WAKE_LOCK_TIMEOUT = +- 2, /* A kernel wakelock was acquired with a timeout */ +- SW_WAKE_LOCK_INITIAL = 3, /* A kernel wakelock was acquired +- * before the +- * collection started +- */ +- SW_WAKE_UNLOCK_ALL = 4, /* All previously held kernel wakelocks were +- * released -- used in ACPI S3 notifications +- */ ++ /* A kernel wakelock was acquired */ ++ SW_WAKE_LOCK = 0, ++ /* A kernel wakelock was released */ ++ SW_WAKE_UNLOCK = 1, ++ /* A kernel wakelock was acquired with a timeout */ ++ SW_WAKE_LOCK_TIMEOUT = 2, ++ /* A kernel wakelock was acquired before the collection started*/ ++ SW_WAKE_LOCK_INITIAL = 3, ++ /* All previously held kernel wakelocks were ++ * released -- used in ACPI S3 notifications ++ */ ++ SW_WAKE_UNLOCK_ALL = 4, + } sw_kernel_wakelock_type_t; + + typedef enum sw_when_type { +@@ -118,18 +119,23 @@ typedef enum sw_when_type { + } sw_when_type_t; + + /** +- * trigger_bits is defined to use type pw_u8_t +- * that makes only upto 8 types possible ++ * trigger_bits is defined to use type pw_u8_t that makes only ++ * upto 8 types possible + */ +-#define SW_TRIGGER_BEGIN_MASK() (1U << SW_WHEN_TYPE_BEGIN) +-#define SW_TRIGGER_END_MASK() (1U << SW_WHEN_TYPE_END) +-#define SW_TRIGGER_POLL_MASK() (1U << SW_WHEN_TYPE_POLL) 
+-#define SW_TRIGGER_TRACEPOINT_MASK() (1U << SW_WHEN_TYPE_TRACEPOINT) +-#define SW_TRIGGER_NOTIFIER_MASK() (1U << SW_WHEN_TYPE_NOTIFIER) +-#define SW_GET_TRIGGER_MASK_VALUE(m) (1U << (m)) +-#define SW_TRIGGER_MASK_ALL() (0xFF) ++#define SW_TRIGGER_BEGIN_MASK() (1U << SW_WHEN_TYPE_BEGIN) ++#define SW_TRIGGER_END_MASK() (1U << SW_WHEN_TYPE_END) ++#define SW_TRIGGER_POLL_MASK() (1U << SW_WHEN_TYPE_POLL) ++#define SW_TRIGGER_TRACEPOINT_MASK() (1U << SW_WHEN_TYPE_TRACEPOINT) ++#define SW_TRIGGER_NOTIFIER_MASK() (1U << SW_WHEN_TYPE_NOTIFIER) ++#define SW_GET_TRIGGER_MASK_VALUE(m) (1U << (m)) ++#define SW_TRIGGER_MASK_ALL() (0xFF) ++ ++enum sw_io_cmd { ++ SW_IO_CMD_READ = 0, ++ SW_IO_CMD_WRITE, ++ SW_IO_CMD_MAX ++}; + +-enum sw_io_cmd { SW_IO_CMD_READ = 0, SW_IO_CMD_WRITE, SW_IO_CMD_MAX }; + + #pragma pack(push, 1) + struct sw_driver_msr_io_descriptor { +@@ -143,10 +149,8 @@ struct sw_driver_ipc_mmio_io_descriptor { + union { + #ifdef SWW_MERGE + #pragma warning(push) +-#pragma warning( \ +- disable : 4201) /* disable C4201: nonstandard extension used: +- * nameless struct/union +- */ ++/* disable C4201: nonstandard extension used: nameless struct/union */ ++#pragma warning(disable:4201) + #endif + struct { + pw_u16_t command; +@@ -156,13 +160,14 @@ struct sw_driver_ipc_mmio_io_descriptor { + #pragma warning(pop) /* enable C4201 */ + #endif + union { +- pw_u32_t ipc_command; /* (sub_command << 12) +- * | (command) +- */ ++ /* (sub_command << 12) | (command) */ ++ pw_u32_t ipc_command; + pw_u8_t is_gbe; /* Used only for GBE MMIO */ + }; + }; +- /* TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' */ ++ /* TODO: add a section for 'ctrl_address' and ++ * 'ctrl_remapped_address' ++ */ + union { + pw_u64_t data_address; /* Will be "io_remapped" */ + pw_u64_t data_remapped_address; +@@ -196,10 +201,12 @@ struct sw_driver_configdb_io_descriptor { + + #pragma pack(push, 1) + struct sw_driver_trace_args_io_descriptor { +- pw_u8_t num_args; /* Number of valid 
entries in the 'args' array, +- * below; 1 <= num_args <= 7 +- */ +- pw_u8_t args[7]; /* Max of 7 args can be recorded */ ++ /* Number of valid entries in the 'args' array, below; ++ * 1 <= num_args <= 7 ++ */ ++ pw_u8_t num_args; ++ /* Max of 7 args can be recorded */ ++ pw_u8_t args[7]; + }; + #pragma pack(pop) + +@@ -207,14 +214,14 @@ struct sw_driver_trace_args_io_descriptor { + /** + * struct - sw_driver_telem_io_descriptor - Telemetry Metric descriptor + * +- * @id: (Client & Driver) Telemetry ID of the counter to read. ++ * @id: (Client & Driver) Telemetry ID of the counter to read. + * @idx: (Driver only) index into telem array to read, or the row +- * of the telem_indirect table to lookup the telem array index. ++ * of the telem_indirect table to lookup the telem array index. + * @unit: Unit from which to collect: 0 = PMC, 1 = PUNIT +- * Values come from the telemetry_unit enum. ++ * Values come from the telemetry_unit enum. + * @scale_op: When there are multiple instances of a telem value (e.g. +- * module C-states) the operation to use when scaling the CPU ID +- * and adding it to the telemetry data ID. ++ * module C-states) the operation to use when scaling the CPU ID ++ * and adding it to the telemetry data ID. + * @scale_val: Amount to scale an ID (when scaling one.) + * + * Like all hardware mechanism descriptors, the client uses this to pass +@@ -225,13 +232,13 @@ struct sw_driver_trace_args_io_descriptor { + * the equation: ID = ID_value + (cpuid ) + * where is one of +, *, /, or %, and scaling_val is an integer + * value. 
This gives you: +- * Operation scale_op scale_val +- * Single instance of an ID * 0 +- * Sequentially increasing +- * CPU-specific values * 1 +- * Per module cpu-specific +- * values (2 cores/module) / 2 +- * Round Robin assignment % cpu_count ++ * Operation scale_op scale_val ++ * Single instance of an ID * 0 ++ * sequentially increasing ++ * CPU-specific values * 1 ++ * Per module cpu-specific ++ * values (2 cores/module) / 2 ++ * Round Robin assignment % cpu_count + * + * Note that scaling_value of 0 implies that no scaling should be + * applied. While (*, 1) is equivalent to (+, 0), the scaling value of 0 +@@ -243,21 +250,21 @@ struct sw_driver_trace_args_io_descriptor { + struct sw_driver_telem_io_descriptor { + union { + pw_u16_t id; +- pw_u8_t idx; ++ pw_u8_t idx; + }; +- pw_u8_t unit; +- pw_u8_t scale_op; +- pw_u16_t scale_val; ++ pw_u8_t unit; ++ pw_u8_t scale_op; ++ pw_u16_t scale_val; + }; + #pragma pack(pop) + enum telemetry_unit { TELEM_PUNIT = 0, TELEM_PMC, TELEM_UNIT_NONE }; +-#define TELEM_MAX_ID 0xFFFF /* Maximum value of a Telemtry event ID. */ +-#define TELEM_MAX_SCALE 0xFFFF /* Maximum ID scaling value. */ +-#define TELEM_OP_ADD '+' /* Addition operator */ +-#define TELEM_OP_MULT '*' /* Multiplication operator */ +-#define TELEM_OP_DIV '/' /* Division operator */ +-#define TELEM_OP_MOD '%' /* Modulus operator */ +-#define TELEM_OP_NONE 'X' /* No operator--Not a scaled ID */ ++#define TELEM_MAX_ID 0xFFFF /* Maximum value of a Telemtry event ID. */ ++#define TELEM_MAX_SCALE 0xFFFF /* Maximum ID scaling value. 
*/ ++#define TELEM_OP_ADD '+' /* Addition operator */ ++#define TELEM_OP_MULT '*' /* Multiplication operator */ ++#define TELEM_OP_DIV '/' /* Division operator */ ++#define TELEM_OP_MOD '%' /* Modulus operator */ ++#define TELEM_OP_NONE 'X' /* No operator--Not a scaled ID */ + + #pragma pack(push, 1) + struct sw_driver_mailbox_io_descriptor { +@@ -293,8 +300,8 @@ struct sw_driver_pch_mailbox_io_descriptor { + }; + union { + /* +- * Will be "io_remapped" +- */ ++ * Will be "io_remapped" ++ */ + pw_u64_t msg_full_sts_address; + pw_u64_t msg_full_sts_remapped_address; + }; +@@ -314,20 +321,17 @@ typedef struct sw_driver_io_descriptor { + pw_u16_t collection_type; + /* TODO: specify READ/WRITE */ + pw_s16_t collection_command; /* One of 'enum sw_io_cmd' */ +- pw_u16_t counter_size_in_bytes; /* The number of bytes to +- * READ or WRITE +- */ ++ pw_u16_t counter_size_in_bytes; /* The number of bytes to READ or WRITE */ + union { +- struct sw_driver_msr_io_descriptor msr_descriptor; +- struct sw_driver_ipc_mmio_io_descriptor ipc_descriptor; +- struct sw_driver_ipc_mmio_io_descriptor mmio_descriptor; +- struct sw_driver_pci_io_descriptor pci_descriptor; +- struct sw_driver_configdb_io_descriptor configdb_descriptor; +- struct sw_driver_trace_args_io_descriptor trace_args_descriptor; +- struct sw_driver_telem_io_descriptor telem_descriptor; +- struct sw_driver_pch_mailbox_io_descriptor +- pch_mailbox_descriptor; +- struct sw_driver_mailbox_io_descriptor mailbox_descriptor; ++ struct sw_driver_msr_io_descriptor msr_descriptor; ++ struct sw_driver_ipc_mmio_io_descriptor ipc_descriptor; ++ struct sw_driver_ipc_mmio_io_descriptor mmio_descriptor; ++ struct sw_driver_pci_io_descriptor pci_descriptor; ++ struct sw_driver_configdb_io_descriptor configdb_descriptor; ++ struct sw_driver_trace_args_io_descriptor trace_args_descriptor; ++ struct sw_driver_telem_io_descriptor telem_descriptor; ++ struct sw_driver_pch_mailbox_io_descriptor pch_mailbox_descriptor; ++ struct 
sw_driver_mailbox_io_descriptor mailbox_descriptor; + }; + pw_u64_t write_value; /* The value to WRITE */ + } sw_driver_io_descriptor_t; +@@ -345,48 +349,38 @@ typedef struct sw_driver_io_descriptor { + struct sw_driver_interface_info { + pw_u64_t tracepoint_id_mask; + pw_u64_t notifier_id_mask; +- pw_s16_t cpu_mask; /* On which CPU(s) should the driver +- * read the data? +- * Currently: -2 ==> read on ALL CPUs, +- * -1 ==> read on ANY CPU, +- * >= 0 ==> the specific CPU to read on +- */ ++ pw_s16_t cpu_mask; /* On which CPU(s) should the driver read the data? */ ++ /* Currently: -2 ==> read on ALL CPUs, */ ++ /* -1 ==> read on ANY CPU, */ ++ /* >= 0 ==> the specific CPU to read on */ + pw_s16_t plugin_id; /* Metric Plugin SID */ +- pw_s16_t metric_id; /* Domain-specific ID assigned by +- * each Metric Plugin +- */ ++ pw_s16_t metric_id; /* Domain-specific ID assigned by each Metric Plugin */ + pw_s16_t msg_id; /* Msg ID retrieved from the SoC Watch config file */ +- pw_u16_t num_io_descriptors; /* Number of descriptors in the array, +- * below. +- */ +- pw_u8_t trigger_bits; /* Mask of 'when bits' to fire this collector. */ ++ pw_u16_t num_io_descriptors; /* Number of descriptors in the array, below. */ ++ pw_u8_t trigger_bits; /* Mask of 'when bits' to fire this collector. */ + pw_u16_t sampling_interval_msec; /* Sampling interval, in msecs */ +- pw_u8_t descriptors[1]; /* Array of sw_driver_io_descriptor structs. */ ++ pw_u8_t descriptors[1]; /* Array of sw_driver_io_descriptor structs. 
*/ + }; + #pragma pack(pop) + +-#define SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ ++#define SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ + (sizeof(struct sw_driver_interface_info) - sizeof(pw_u8_t[1])) + + #pragma pack(push, 1) + struct sw_driver_interface_msg { +- pw_u16_t num_infos; /* Number of 'sw_driver_interface_info' +- * structs contained within the 'infos' variable, +- * below +- */ +- pw_u16_t min_polling_interval_msecs; /* Min time to wait before +- * polling; used exclusively +- * with the low overhead, +- * context-switch based +- * polling mode +- */ +- /* pw_u16_t infos_size_bytes; Size of data inlined +- * within the 'infos' variable, below ++ /* Number of 'sw_driver_interface_info' structs contained within ++ * the 'infos' variable, below ++ */ ++ pw_u16_t num_infos; ++ /* Min time to wait before polling; used exclusively ++ * with the low overhead, context-switch based ++ * polling mode + */ ++ pw_u16_t min_polling_interval_msecs; + pw_u8_t infos[1]; + }; + #pragma pack(pop) +-#define SW_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ ++#define SW_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ + (sizeof(struct sw_driver_interface_msg) - sizeof(pw_u8_t[1])) + + typedef enum sw_name_id_type { +@@ -403,7 +397,7 @@ struct sw_name_id_pair { + struct sw_string_type name; + }; + #pragma pack(pop) +-#define SW_NAME_ID_HEADER_SIZE() \ ++#define SW_NAME_ID_HEADER_SIZE() \ + (sizeof(struct sw_name_id_pair) - sizeof(struct sw_string_type)) + + #pragma pack(push, 1) +@@ -424,20 +418,23 @@ struct sw_name_info_msg { + typedef struct sw_driver_msg { + pw_u64_t tsc; + pw_u16_t cpuidx; +- pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ +- pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ +- pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ ++ /* Cannot have more than 256 plugins */ ++ pw_u8_t plugin_id; ++ /* Each plugin cannot handle more than 256 metrics */ ++ pw_u8_t metric_id; ++ /* Each metric cannot have more than 256 components 
*/ ++ pw_u8_t msg_id; + pw_u16_t payload_len; +- /* pw_u64_t p_payload; Ptr to payload */ ++ /* pw_u64_t p_payload; // Ptr to payload */ + union { +- pw_u64_t __dummy; /* Ensure size of struct is consistent +- * on x86, x64 +- */ +- char *p_payload; /* Ptr to payload (collected data values). */ ++ /* Ensure size of struct is consistent on x86, x64 */ ++ pw_u64_t __dummy; ++ /* Ptr to payload (collected data values). */ ++ char *p_payload; + }; + } sw_driver_msg_t; + #pragma pack(pop) +-#define SW_DRIVER_MSG_HEADER_SIZE() \ ++#define SW_DRIVER_MSG_HEADER_SIZE() \ + (sizeof(struct sw_driver_msg) - sizeof(pw_u64_t)) + + typedef enum sw_driver_collection_cmd { +@@ -501,6 +498,21 @@ enum sw_pm_mode { + + #define SW_PM_VALUE(mode, action) ((mode) << 16 | (action)) + ++#pragma pack(push, 1) ++/* ++ * Structure for continuous collection ++ */ ++struct sw_driver_continuous_collect { ++ /* Size of data that needs to be collected every second */ ++ pw_u32_t collection_size; ++ /* struct sw_driver_interface_msg for this collection */ ++ pw_u8_t payload[1]; ++}; ++#define SW_DRIVER_CONTINUOUS_COLLECT_HEADER_SIZE() \ ++ (sizeof(struct sw_driver_continuous_collect) - \ ++ sizeof(pw_u8_t[1])) ++#pragma pack(pop) ++ + /* + * Wrapper for ioctl arguments. + * EVERY ioctl MUST use this struct! 
+@@ -509,8 +521,8 @@ enum sw_pm_mode { + struct sw_driver_ioctl_arg { + pw_s32_t in_len; + pw_s32_t out_len; +- /* pw_u64_t p_in_arg; Pointer to input arg */ +- /* pw_u64_t p_out_arg; Pointer to output arg */ ++ /* pw_u64_t p_in_arg; // Pointer to input arg */ ++ /* pw_u64_t p_out_arg; // Pointer to output arg */ + char *in_arg; + char *out_arg; + }; +@@ -518,10 +530,14 @@ struct sw_driver_ioctl_arg { + + #pragma pack(push, 1) + typedef struct sw_driver_msg_interval { +- pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ +- pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ +- pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ +- pw_u16_t interval; /* collection interval */ ++ /* Cannot have more than 256 plugins */ ++ pw_u8_t plugin_id; ++ /* Each plugin cannot handle more than 256 metrics */ ++ pw_u8_t metric_id; ++ /* Each metric cannot have more than 256 components */ ++ pw_u8_t msg_id; ++ /* collection interval */ ++ pw_u16_t interval; + } sw_driver_msg_interval_t; + #pragma pack(pop) + +diff --git a/drivers/platform/x86/socwatch/inc/sw_telem.h b/drivers/platform/x86/socwatch/inc/sw_telem.h +index e324ff681b2e..3cfbb4ec515d 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_telem.h ++++ b/drivers/platform/x86/socwatch/inc/sw_telem.h +@@ -1,74 +1,76 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. 
+- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _SW_TELEM_H_ + #define _SW_TELEM_H_ 1 + +-#include "sw_structs.h" /* sw_driver_io_descriptor */ +-#include "sw_types.h" /* u8 and other types */ ++#include "sw_structs.h" /* sw_driver_io_descriptor */ ++#include "sw_types.h" /* u8 and other types */ + + int sw_telem_init_func(struct sw_driver_io_descriptor *descriptor); + void sw_read_telem_info(char *dst_vals, int cpu, + const struct sw_driver_io_descriptor *descriptor, + u16 counter_size_in_bytes); + void sw_write_telem_info(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes); + int sw_reset_telem(const struct sw_driver_io_descriptor *descriptor); + bool sw_telem_available(void); + bool sw_telem_post_config(void); + ++int setup_telem(u64 addrs[3]); ++void destroy_telem(void); ++ + #endif /* SW_TELEM_H */ +diff --git a/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h b/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h +index 3ec4930c9010..470f962858a8 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h ++++ b/drivers/platform/x86/socwatch/inc/sw_trace_notifier_provider.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. 
+- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + #ifndef __SW_TRACE_NOTIFIER_PROVIDER_H__ + #define __SW_TRACE_NOTIFIER_PROVIDER_H__ + +diff --git a/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h b/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h +index d8a54c099d36..70e5b83a72ca 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h ++++ b/drivers/platform/x86/socwatch/inc/sw_tracepoint_handlers.h +@@ -1,58 +1,58 @@ +-/* ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. 
+- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ + #ifndef __SW_TRACEPOINT_HANDLERS_H__ + #define __SW_TRACEPOINT_HANDLERS_H__ + +@@ -66,45 +66,41 @@ enum sw_trace_data_type { + }; + + struct sw_trace_notifier_name { +- const char * +- kernel_name; /* The tracepoint name; used by the kernel +- * to identify tracepoints +- */ +- const char * +- abstract_name; /* An abstract name used by plugins to +- * specify tracepoints-of-interest; +- * shared with Ring-3 +- */ ++ const char *kernel_name; /* The tracepoint name; used by the kernel ++ * to identify tracepoints ++ */ ++ const char *abstract_name; /* An abstract name used by plugins ++ * tospecify tracepoints-of-interest; ++ * shared with Ring-3 ++ */ + }; + + typedef struct sw_trace_notifier_data sw_trace_notifier_data_t; ++ + typedef int (*sw_trace_notifier_register_func)( +- struct sw_trace_notifier_data *node); ++ struct sw_trace_notifier_data *node); + typedef int (*sw_trace_notifier_unregister_func)( +- struct sw_trace_notifier_data *node); ++ struct sw_trace_notifier_data *node); + 
+ struct sw_trace_notifier_data { +- enum sw_trace_data_type type; /* Tracepoint or Notifier */ +- const struct sw_trace_notifier_name *name; /* Tracepoint name(s) */ +- sw_trace_notifier_register_func probe_register; /* probe register +- * function +- */ +- sw_trace_notifier_unregister_func probe_unregister; /* probe unregister +- * function +- */ ++ /* Tracepoint or Notifier */ ++ enum sw_trace_data_type type; ++ /* Tracepoint name(s) */ ++ const struct sw_trace_notifier_name *name; ++ /* probe register function */ ++ sw_trace_notifier_register_func probe_register; ++ /* probe unregister function */ ++ sw_trace_notifier_unregister_func probe_unregister; + struct tracepoint *tp; +- bool always_register; /* Set to TRUE if this tracepoint/notifier +- * must ALWAYS be registered, regardless +- * of whether the user has specified +- * anything to collect +- */ ++ bool always_register; /* Set to TRUE if this tracepoint/notifier must ++ * ALWAYS be registered, regardless of whether ++ * the user has specified anything to collect ++ */ + bool was_registered; +- SW_DEFINE_LIST_HEAD( +- list, +- sw_collector_data); /* List of 'sw_collector_data' +- * instances for this tracepoint +- * or notifier +- */ ++ /* List of 'sw_collector_data' instances for this ++ * tracepoint or notifier ++ */ ++ SW_DEFINE_LIST_HEAD(list, sw_collector_data); + }; + + struct sw_topology_node { +@@ -112,11 +108,9 @@ struct sw_topology_node { + + SW_LIST_ENTRY(list, sw_topology_node); + }; +-SW_DECLARE_LIST_HEAD( +- sw_topology_list, +- sw_topology_node); /* List of entries tracking +- * changes in CPU topology +- */ ++ ++/* List of entries tracking changes in CPU topology */ ++SW_DECLARE_LIST_HEAD(sw_topology_list, sw_topology_node); + extern size_t sw_num_topology_entries; /* Size of the 'sw_topology_list' */ + + int sw_extract_tracepoints(void); +@@ -137,19 +131,21 @@ void sw_reset_trace_notifier_lists(void); + + void sw_print_trace_notifier_overheads(void); + +-int 
sw_for_each_tracepoint_node(int (*func)(struct sw_trace_notifier_data *node, +- void *priv), +- void *priv, bool return_on_error); +-int sw_for_each_notifier_node(int (*func)(struct sw_trace_notifier_data *node, +- void *priv), +- void *priv, bool return_on_error); ++int sw_for_each_tracepoint_node( ++ int (*func)(struct sw_trace_notifier_data *node, void *priv), ++ void *priv, ++ bool return_on_error); ++int sw_for_each_notifier_node( ++ int (*func)(struct sw_trace_notifier_data *node, void *priv), ++ void *priv, ++ bool return_on_error); + + int sw_get_trace_notifier_id(struct sw_trace_notifier_data *node); + +-const char * +-sw_get_trace_notifier_kernel_name(struct sw_trace_notifier_data *node); +-const char * +-sw_get_trace_notifier_abstract_name(struct sw_trace_notifier_data *node); ++const char *sw_get_trace_notifier_kernel_name( ++ struct sw_trace_notifier_data *node); ++const char *sw_get_trace_notifier_abstract_name( ++ struct sw_trace_notifier_data *node); + + /* + * Clear out the topology list. +diff --git a/drivers/platform/x86/socwatch/inc/sw_types.h b/drivers/platform/x86/socwatch/inc/sw_types.h +index 156c92c8349a..e9af829c31c8 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_types.h ++++ b/drivers/platform/x86/socwatch/inc/sw_types.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. 
+- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _PW_TYPES_H_ + #define _PW_TYPES_H_ +@@ -68,7 +67,7 @@ + /* + * UNSIGNED types... + */ +-typedef uint8_t u8; ++typedef uint8_t u8; + typedef uint16_t u16; + typedef uint32_t u32; + typedef uint64_t u64; +@@ -87,13 +86,13 @@ typedef int64_t s64; + #include + #include /* Grab 'uint64_t' etc. */ + +-typedef uint8_t u8; ++typedef uint8_t u8; + typedef uint16_t u16; + typedef uint32_t u32; + typedef uint64_t u64; + /* +- * SIGNED types... +- */ ++* SIGNED types... ++*/ + typedef int8_t s8; + typedef int16_t s16; + typedef int32_t s32; +diff --git a/drivers/platform/x86/socwatch/inc/sw_version.h b/drivers/platform/x86/socwatch/inc/sw_version.h +index 5476b0d79ac5..b6fe1eecdd0e 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_version.h ++++ b/drivers/platform/x86/socwatch/inc/sw_version.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. 
++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef __SW_VERSION_H__ + #define __SW_VERSION_H__ 1 +@@ -61,14 +60,14 @@ + * SOCWatch driver version + */ + #define SW_DRIVER_VERSION_MAJOR 2 +-#define SW_DRIVER_VERSION_MINOR 6 +-#define SW_DRIVER_VERSION_OTHER 2 ++#define SW_DRIVER_VERSION_MINOR 10 ++#define SW_DRIVER_VERSION_OTHER 0 + + /* + * Every SOC Watch userspace component shares the same version number. + */ + #define SOCWATCH_VERSION_MAJOR 2 +-#define SOCWATCH_VERSION_MINOR 8 +-#define SOCWATCH_VERSION_OTHER 0 ++#define SOCWATCH_VERSION_MINOR 10 ++#define SOCWATCH_VERSION_OTHER 1 + + #endif /* __SW_VERSION_H__ */ +diff --git a/drivers/platform/x86/socwatch/sw_collector.c b/drivers/platform/x86/socwatch/sw_collector.c +index db855bab4fd8..ebc65666caf6 100644 +--- a/drivers/platform/x86/socwatch/sw_collector.c ++++ b/drivers/platform/x86/socwatch/sw_collector.c +@@ -1,86 +1,66 @@ +-/* ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of condiions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. 
+- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ +-#include "sw_internal.h" +-#include "sw_structs.h" + #include "sw_collector.h" ++#include "sw_hardware_io.h" ++#include "sw_internal.h" + #include "sw_kernel_defines.h" + #include "sw_mem.h" +-#include "sw_types.h" +-#include "sw_hardware_io.h" + #include "sw_output_buffer.h" ++#include "sw_structs.h" ++#include "sw_types.h" + +-/* ------------------------------------------------- +- * Local function declarations. 
+- * ------------------------------------------------- +- */ +-void sw_free_driver_interface_info_i(struct sw_driver_interface_info *info); +-const struct sw_hw_ops **sw_alloc_ops_i(pw_u16_t num_io_descriptors); +-void sw_free_ops_i(const struct sw_hw_ops **ops); +-struct sw_driver_interface_info * +-sw_copy_driver_interface_info_i(const struct sw_driver_interface_info *info); +-int sw_init_driver_interface_info_i(struct sw_driver_interface_info *info); +-int sw_reset_driver_interface_info_i(struct sw_driver_interface_info *info); +-int sw_init_ops_i(const struct sw_hw_ops **ops, +- const struct sw_driver_interface_info *info); +-sw_driver_msg_t * +-sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, +- size_t per_msg_payload_size); +-void sw_free_collector_msg_i(sw_driver_msg_t *msg); +-size_t sw_get_payload_size_i(const struct sw_driver_interface_info *info); +-void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action); + /* ------------------------------------------------- + * Variables. + * ------------------------------------------------- +@@ -94,9 +74,154 @@ const static struct sw_hw_ops *s_hw_ops; + * Driver interface info functions. + */ + ++/** ++ * sw_copy_driver_interface_info_i - Allocate and copy the passed-in "info". ++ * ++ * @info: Information about the metric and collection properties ++ * ++ * Returns: a pointer to the newly allocated sw_driver_interface_info, ++ * which is a copy of the version passed in via the info pointer. 
++ */ ++struct sw_driver_interface_info * ++sw_copy_driver_interface_info_i(const struct sw_driver_interface_info *info) ++{ ++ size_t size; ++ struct sw_driver_interface_info *node = NULL; ++ ++ if (!info) { ++ pw_pr_error("ERROR: NULL sw_driver_interface_info in alloc!\n"); ++ return node; ++ } ++ ++ size = SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() + ++ (info->num_io_descriptors * ++ sizeof(struct sw_driver_io_descriptor)); ++ node = (struct sw_driver_interface_info *)sw_kmalloc(size, GFP_KERNEL); ++ if (!node) { ++ pw_pr_error("ERROR allocating driver interface info!\n"); ++ return node; ++ } ++ memcpy((char *)node, (const char *)info, size); ++ ++ /* ++ * Do debug dump. ++ */ ++ pw_pr_debug( ++ "DRIVER info has plugin_ID = %d, metric_ID = %d, msg_ID = %d\n", ++ node->plugin_id, node->metric_id, node->msg_id); ++ ++ return node; ++} ++ ++int sw_init_driver_interface_info_i(struct sw_driver_interface_info *info) ++{ ++ /* ++ * Do any initialization here. ++ * For now, only IPC/MMIO descriptors need to be initialized. 
++ */ ++ int i = 0; ++ struct sw_driver_io_descriptor *descriptor = NULL; ++ ++ if (!info) { ++ pw_pr_error("ERROR: no info!\n"); ++ return -PW_ERROR; ++ } ++ for (i = 0, ++ descriptor = (struct sw_driver_io_descriptor *)info->descriptors; ++ i < info->num_io_descriptors; ++i, ++descriptor) { ++ if (sw_init_driver_io_descriptor(descriptor)) ++ return -PW_ERROR; ++ } ++ return PW_SUCCESS; ++} ++ ++int sw_init_ops_i(const struct sw_hw_ops **ops, ++ const struct sw_driver_interface_info *info) ++{ ++ int i = 0; ++ struct sw_driver_io_descriptor *descriptor = NULL; ++ ++ if (!ops || !info) ++ return -PW_ERROR; ++ ++ for (i = 0, ++ descriptor = (struct sw_driver_io_descriptor *)info->descriptors; ++ i < info->num_io_descriptors; ++i, ++descriptor) { ++ ops[i] = sw_get_hw_ops_for(descriptor->collection_type); ++ if (ops[i] == NULL) ++ return -PW_ERROR; ++ } ++ return PW_SUCCESS; ++} ++ ++size_t sw_get_payload_size_i(const struct sw_driver_interface_info *info) ++{ ++ size_t size = 0; ++ int i = 0; ++ ++ if (info) { ++ for (i = 0; ++ i < info->num_io_descriptors; ++ size += ((struct sw_driver_io_descriptor *) ++ info->descriptors)[i].counter_size_in_bytes, ++ ++i ++ ) ++ ; ++ } ++ return size; ++} ++ ++sw_driver_msg_t * ++sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, ++ size_t per_msg_payload_size) ++{ ++ size_t per_msg_size = 0, total_size = 0; ++ sw_driver_msg_t *msg = NULL; ++ ++ if (!info) ++ return NULL; ++ ++ per_msg_size = sizeof(struct sw_driver_msg) + per_msg_payload_size; ++ total_size = per_msg_size * num_possible_cpus(); ++ msg = (sw_driver_msg_t *)sw_kmalloc(total_size, GFP_KERNEL); ++ if (msg) { ++ int cpu = -1; ++ ++ memset(msg, 0, total_size); ++ for_each_possible_cpu(cpu) { ++ sw_driver_msg_t *__msg = GET_MSG_SLOT_FOR_CPU( ++ msg, cpu, per_msg_payload_size); ++ char *__payload = ++ (char *)__msg + sizeof(struct sw_driver_msg); ++ ++ __msg->cpuidx = (pw_u16_t)cpu; ++ __msg->plugin_id = (pw_u8_t)info->plugin_id; ++ __msg->metric_id = 
(pw_u8_t)info->metric_id; ++ __msg->msg_id = (pw_u8_t)info->msg_id; ++ __msg->payload_len = per_msg_payload_size; ++ __msg->p_payload = __payload; ++ pw_pr_debug( ++ "[%d]: per_msg_payload_size = %zx, msg = %p, payload = %p\n", ++ cpu, per_msg_payload_size, __msg, __payload); ++ } ++ } ++ return msg; ++} ++ ++const struct sw_hw_ops **sw_alloc_ops_i(pw_u16_t num_io_descriptors) ++{ ++ size_t size = num_io_descriptors * sizeof(struct sw_hw_ops *); ++ const struct sw_hw_ops **ops = sw_kmalloc(size, GFP_KERNEL); ++ ++ if (ops) ++ memset(ops, 0, size); ++ ++ return ops; ++} ++ + /** + * sw_add_driver_info() - Add a collector node to the list called at this +- * "when type". ++ * "when type". + * @head: The collector node list to add the new node to. + * @info: Driver information to add to the list. + * +@@ -179,8 +304,8 @@ int sw_add_driver_info(void *list_head, + pw_pr_debug("OK: set CPU = %d\n", node->info->cpu_mask); + } else if (node->info->cpu_mask == -1) { + /* +- * Collect data on ANY CPU. Leave empty as a flag +- * to signify user wishes to collect data on 'ANY' cpu. ++ * Collect data on ANY CPU. Leave empty as a flag to ++ * signify user wishes to collect data on 'ANY' cpu. + */ + pw_pr_debug("OK: set ANY CPU\n"); + } else { +@@ -195,90 +320,16 @@ int sw_add_driver_info(void *list_head, + return PW_SUCCESS; + } + +-const struct sw_hw_ops **sw_alloc_ops_i(pw_u16_t num_io_descriptors) +-{ +- size_t size = num_io_descriptors * sizeof(struct sw_hw_ops *); +- const struct sw_hw_ops **ops = sw_kmalloc(size, GFP_KERNEL); +- +- if (ops) { +- memset(ops, 0, size); +- } +- return ops; +-} +- + void sw_free_driver_interface_info_i(struct sw_driver_interface_info *info) + { +- if (info) { ++ if (info) + sw_kfree(info); +- } + } + + void sw_free_ops_i(const struct sw_hw_ops **ops) + { +- if (ops) { ++ if (ops) + sw_kfree(ops); +- } +-} +- +-/** +- * sw_copy_driver_interface_info_i - Allocate and copy the passed-in "info". 
+- * +- * @info: Information about the metric and collection properties +- * +- * Returns: a pointer to the newly allocated sw_driver_interface_info, +- * which is a copy of the version passed in via the info pointer. +- */ +-struct sw_driver_interface_info * +-sw_copy_driver_interface_info_i(const struct sw_driver_interface_info *info) +-{ +- size_t size; +- struct sw_driver_interface_info *node = NULL; +- +- if (!info) { +- pw_pr_error("ERROR: NULL sw_driver_interface_info in alloc!\n"); +- return node; +- } +- +- size = SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() + +- (info->num_io_descriptors * +- sizeof(struct sw_driver_io_descriptor)); +- node = (struct sw_driver_interface_info *)sw_kmalloc(size, GFP_KERNEL); +- if (!node) { +- pw_pr_error("ERROR allocating driver interface info!\n"); +- return node; +- } +- memcpy((char *)node, (const char *)info, size); +- +- /* +- * Do debug dump. +- */ +- pw_pr_debug("DRIVER info has plugin_ID = %d, metric_ID = %d, " +- "msg_ID = %d\n", +- node->plugin_id, node->metric_id, node->msg_id); +- +- return node; +-} +-int sw_init_driver_interface_info_i(struct sw_driver_interface_info *info) +-{ +- /* +- * Do any initialization here. +- * For now, only IPC/MMIO descriptors need to be initialized. 
+- */ +- int i = 0; +- struct sw_driver_io_descriptor *descriptor = NULL; +- +- if (!info) { +- pw_pr_error("ERROR: no info!\n"); +- return -PW_ERROR; +- } +- for (i = 0, +- descriptor = (struct sw_driver_io_descriptor *)info->descriptors; +- i < info->num_io_descriptors; ++i, ++descriptor) { +- if (sw_init_driver_io_descriptor(descriptor)) { +- return -PW_ERROR; +- } +- } +- return PW_SUCCESS; + } + + int sw_reset_driver_interface_info_i(struct sw_driver_interface_info *info) +@@ -297,42 +348,21 @@ int sw_reset_driver_interface_info_i(struct sw_driver_interface_info *info) + for (i = 0, + descriptor = (struct sw_driver_io_descriptor *)info->descriptors; + i < info->num_io_descriptors; ++i, ++descriptor) { +- if (sw_reset_driver_io_descriptor(descriptor)) { +- return -PW_ERROR; +- } +- } +- return PW_SUCCESS; +-} +-int sw_init_ops_i(const struct sw_hw_ops **ops, +- const struct sw_driver_interface_info *info) +-{ +- int i = 0; +- struct sw_driver_io_descriptor *descriptor = NULL; +- +- if (!ops || !info) { +- return -PW_ERROR; +- } +- for (i = 0, +- descriptor = (struct sw_driver_io_descriptor *)info->descriptors; +- i < info->num_io_descriptors; ++i, ++descriptor) { +- ops[i] = sw_get_hw_ops_for(descriptor->collection_type); +- if (ops[i] == NULL) { ++ if (sw_reset_driver_io_descriptor(descriptor)) + return -PW_ERROR; +- } + } + return PW_SUCCESS; + } + +-/* +- * If this descriptor's collector has an init function, call it passing in +- * this descriptor. That allows the collector to perform any initialization +- * or registration specific to this metric. +- */ ++/* If this descriptor's collector has an init function, call it passing in */ ++/* this descriptor. That allows the collector to perform any initialization */ ++/* or registration specific to this metric. 
*/ + int sw_init_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor) + { + sw_io_desc_init_func_t init_func = NULL; + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); ++ + if (ops == NULL) { + pw_pr_error("NULL ops found in init_driver_io_desc: type %d\n", + descriptor->collection_type); +@@ -343,10 +373,10 @@ int sw_init_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor) + if (init_func) { + int retval = (*init_func)(descriptor); + +- if (retval) { ++ if (retval) + pw_pr_error("(*init) return value for type %d: %d\n", + descriptor->collection_type, retval); +- } ++ + return retval; + } + return PW_SUCCESS; +@@ -354,14 +384,16 @@ int sw_init_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor) + + /* + * If this descriptor's collector has a finalize function, call it passing in +- * this descriptor. This allows the collector to perform any finalization +- * specific to this metric. ++ * this ++ * descriptor. This allows the collector to perform any finalization specific to ++ * this metric. 
+ */ + int sw_reset_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor) + { + sw_io_desc_reset_func_t reset_func = NULL; + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); ++ + if (ops == NULL) { + pw_pr_error("NULL ops found in reset_driver_io_desc: type %d\n", + descriptor->collection_type); +@@ -374,10 +406,10 @@ int sw_reset_driver_io_descriptor(struct sw_driver_io_descriptor *descriptor) + if (reset_func) { + int retval = (*reset_func)(descriptor); + +- if (retval) { ++ if (retval) + pw_pr_error("(*reset) return value for type %d: %d\n", + descriptor->collection_type, retval); +- } ++ + return retval; + } + return PW_SUCCESS; +@@ -418,63 +450,10 @@ int sw_handle_driver_io_descriptor( + return PW_SUCCESS; + } + +-sw_driver_msg_t * +-sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, +- size_t per_msg_payload_size) +-{ +- size_t per_msg_size = 0, total_size = 0; +- sw_driver_msg_t *msg = NULL; +- +- if (!info) { +- return NULL; +- } +- per_msg_size = sizeof(struct sw_driver_msg) + per_msg_payload_size; +- total_size = per_msg_size * num_possible_cpus(); +- msg = (sw_driver_msg_t *)sw_kmalloc(total_size, GFP_KERNEL); +- if (msg) { +- int cpu = -1; +- +- memset(msg, 0, total_size); +- for_each_possible_cpu(cpu) { +- sw_driver_msg_t *__msg = GET_MSG_SLOT_FOR_CPU( +- msg, cpu, per_msg_payload_size); +- char *__payload = +- (char *)__msg + sizeof(struct sw_driver_msg); +- __msg->cpuidx = (pw_u16_t)cpu; +- __msg->plugin_id = (pw_u8_t)info->plugin_id; +- __msg->metric_id = (pw_u8_t)info->metric_id; +- __msg->msg_id = (pw_u8_t)info->msg_id; +- __msg->payload_len = per_msg_payload_size; +- __msg->p_payload = __payload; +- pw_pr_debug( +- "[%d]: per_msg_payload_size = %zx, msg = %p, payload = %p\n", +- cpu, per_msg_payload_size, __msg, __payload); +- } +- } +- return msg; +-} +- + void sw_free_collector_msg_i(sw_driver_msg_t *msg) + { +- if (msg) { ++ if (msg) + sw_kfree(msg); +- } +-} +- +-size_t 
sw_get_payload_size_i(const struct sw_driver_interface_info *info) +-{ +- size_t size = 0; +- int i = 0; +- +- if (info) { +- for (i = 0; i < info->num_io_descriptors; +- size += +- ((struct sw_driver_io_descriptor *)info->descriptors)[i] +- .counter_size_in_bytes, +- ++i) +- ; +- } +- return size; + } + + void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action) +@@ -482,8 +461,8 @@ void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action) + /* + * Basic algo: + * For each descriptor in 'node->info->descriptors'; do: +- * 1. Perform H/W read; use 'descriptor->collection_type' +- * to determine type of read; use 'descriptor->counter_size_in_bytes' ++ * 1. Perform H/W read; use 'descriptor->collection_type' to ++ * determine type of read; use 'descriptor->counter_size_in_bytes' + * for read size. Use msg->p_payload[dst_idx] as dst address + * 2. Increment dst idx by 'descriptor->counter_size_in_bytes' + */ +@@ -507,14 +486,13 @@ void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action) + pw_pr_debug("NULL OPS!\n"); + continue; + } +- if (descriptors->collection_command == SW_IO_CMD_WRITE) { ++ if (descriptors->collection_command == SW_IO_CMD_WRITE) + wasAnyWrite = true; +- } ++ + if (sw_handle_driver_io_descriptor(dst_vals, cpu, descriptors, +- ops[i])) { ++ ops[i])) + pw_pr_error("ERROR reading descriptor with type %d\n", + descriptors->collection_type); +- } + } + + /* +@@ -524,12 +502,9 @@ void sw_handle_per_cpu_msg_i(void *info, enum sw_wakeup_action action) + * message. 
+ */ + if (likely(wasAnyWrite == false)) { +- if (sw_produce_generic_msg(msg, action)) { ++ if (sw_produce_generic_msg(msg, action)) + pw_pr_warn("WARNING: could NOT produce message!\n"); +- } + } +- +- return; + } + + /* +@@ -539,6 +514,7 @@ struct sw_collector_data *sw_alloc_collector_node(void) + { + struct sw_collector_data *node = (struct sw_collector_data *)sw_kmalloc( + sizeof(struct sw_collector_data), GFP_KERNEL); ++ + if (node) { + node->per_msg_payload_size = 0x0; + node->last_update_jiffies = 0x0; +@@ -552,9 +528,9 @@ struct sw_collector_data *sw_alloc_collector_node(void) + + void sw_free_collector_node(struct sw_collector_data *node) + { +- if (!node) { ++ if (node) + return; +- } ++ + if (node->info) { + sw_reset_driver_interface_info_i(node->info); + sw_free_driver_interface_info_i(node->info); +@@ -569,14 +545,13 @@ void sw_free_collector_node(struct sw_collector_data *node) + node->msg = NULL; + } + sw_kfree(node); +- return; + } + + int sw_handle_collector_node(struct sw_collector_data *node) + { +- if (!node || !node->info || !node->ops || !node->msg) { ++ if (!node || !node->info || !node->ops || !node->msg) + return -PW_ERROR; +- } ++ + pw_pr_debug("Calling SMP_CALL_FUNCTION_MANY!\n"); + sw_schedule_work(&node->cpumask, &sw_handle_per_cpu_msg, node); + return PW_SUCCESS; +@@ -584,9 +559,9 @@ int sw_handle_collector_node(struct sw_collector_data *node) + + int sw_handle_collector_node_on_cpu(struct sw_collector_data *node, int cpu) + { +- if (!node || !node->info || !node->ops || !node->msg) { ++ if (!node || !node->info || !node->ops || !node->msg) + return -PW_ERROR; +- } ++ + /* + * Check if this node indicates it should be scheduled + * on the given cpu. 
If so, clear all other CPUs from the +@@ -614,7 +589,10 @@ void sw_destroy_collector_list(void *list_head) + SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; + while (!SW_LIST_EMPTY(head)) { + struct sw_collector_data *curr = +- SW_LIST_GET_HEAD_ENTRY(head, sw_collector_data, list); ++ SW_LIST_GET_HEAD_ENTRY(head, ++ sw_collector_data, ++ list); ++ + BUG_ON(!curr->info); + SW_LIST_UNLINK(curr, list); + sw_free_collector_node(curr); +@@ -623,8 +601,8 @@ void sw_destroy_collector_list(void *list_head) + + /** + * sw_handle_collector_list - Iterate through the collector list, calling +- * func() upon each element. +- * @list_head: The collector list head. ++ * func() upon each element. ++ * @list_head: The collector list head. + * @func: The function to call for each collector. + * + * This function is called when one of the "when types" fires, since the +@@ -639,15 +617,15 @@ int sw_handle_collector_list(void *list_head, + int retVal = PW_SUCCESS; + struct sw_collector_data *curr = NULL; + +- if (!head || !func) { ++ if (!head || !func) + return -PW_ERROR; +- } ++ + SW_LIST_FOR_EACH_ENTRY(curr, head, list) + { + pw_pr_debug("HANDLING\n"); +- if ((*func)(curr)) { ++ if ((*func)(curr)) + retVal = -PW_ERROR; +- } ++ + } + return retVal; + } +@@ -660,15 +638,16 @@ int sw_handle_collector_list_on_cpu(void *list_head, + SW_LIST_HEAD_VAR(sw_collector_data) * head = list_head; + int retVal = PW_SUCCESS; + struct sw_collector_data *curr = NULL; +- if (!head || !func) { ++ ++ if (!head || !func) + return -PW_ERROR; +- } ++ + SW_LIST_FOR_EACH_ENTRY(curr, head, list) + { + pw_pr_debug("HANDLING\n"); +- if ((*func)(curr, cpu)) { ++ if ((*func)(curr, cpu)) + retVal = -PW_ERROR; +- } ++ + } + return retVal; + } +@@ -685,9 +664,9 @@ void sw_handle_per_cpu_msg_no_sched(void *info) + + void sw_handle_per_cpu_msg_on_cpu(int cpu, void *info) + { +- if (unlikely(cpu == RAW_CPU())) { ++ if (unlikely(cpu == RAW_CPU())) + sw_handle_per_cpu_msg_no_sched(info); +- } else { ++ else { + 
pw_pr_debug("[%d] is handling for %d\n", RAW_CPU(), cpu); + /* + * No need to disable preemption -- 'smp_call_function_single' +@@ -695,9 +674,8 @@ void sw_handle_per_cpu_msg_on_cpu(int cpu, void *info) + */ + smp_call_function_single( + cpu, &sw_handle_per_cpu_msg_no_sched, info, +- false /* false ==> do NOT wait for function +- * completion +- */); ++ false ++ /* false ==> do NOT wait for function completion */); + } + } + +diff --git a/drivers/platform/x86/socwatch/sw_driver.c b/drivers/platform/x86/socwatch/sw_driver.c +index 661a42555baa..0a8ad70b800b 100644 +--- a/drivers/platform/x86/socwatch/sw_driver.c ++++ b/drivers/platform/x86/socwatch/sw_driver.c +@@ -1,58 +1,58 @@ +-/* ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. 
+- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+- +-*/ + #define MOD_AUTHOR "Gautam Upadhyaya " + #define MOD_DESC "SoC Watch kernel module" + +@@ -103,21 +103,20 @@ int sw_collection_start_i(void); + int sw_collection_stop_i(void); + int sw_collection_poll_i(void); + size_t sw_get_payload_size_i(const struct sw_driver_interface_info *info); +-sw_driver_msg_t * +-sw_alloc_collector_msg_i(const struct sw_driver_interface_info *info, +- size_t per_msg_payload_size); ++sw_driver_msg_t *sw_alloc_collector_msg_i( ++ const struct sw_driver_interface_info *info, ++ size_t per_msg_payload_size); + static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, +- void *p_local_args); +-static long +-sw_set_driver_infos_i(struct sw_driver_interface_msg __user *remote_msg, +- int local_len); +-static long sw_handle_cmd_i(sw_driver_collection_cmd_t cmd, +- u64 __user *remote_out_args); ++ void *p_local_args); ++static long sw_set_driver_infos_i( ++ struct sw_driver_interface_msg __user *remote_msg, int local_len); ++static long sw_handle_cmd_i( ++ sw_driver_collection_cmd_t cmd, u64 __user* remote_out_args); + static void sw_do_extract_scu_fw_version(void); +-static long +-sw_get_available_name_id_mappings_i(enum sw_name_id_type type, +- struct sw_name_info_msg __user *remote_info, +- size_t local_len); ++static long sw_get_available_name_id_mappings_i( ++ enum sw_name_id_type type, ++ struct sw_name_info_msg __user* remote_info, ++ size_t local_len); + static enum sw_driver_collection_cmd sw_get_collection_cmd_i(void); + static bool sw_should_flush_buffer_i(void); + +@@ -132,9 +131,11 @@ static bool sw_should_flush_buffer_i(void); + * and is NOT exported. + */ + struct swa_internal_state { +- sw_driver_collection_cmd_t +- cmd; /* indicates which command was specified */ +- /* last e.g. START, STOP etc. */ ++ /* ++ * Indicates which command was specified ++ * last e.g. START, STOP etc. ++ */ ++ sw_driver_collection_cmd_t cmd; + /* + * Should we write to our per-cpu output buffers? + * YES if we're actively collecting. 
+@@ -154,13 +155,13 @@ struct swa_internal_state { + * ------------------------------------------------- + */ + static bool do_force_module_scope_for_cpu_frequencies; +-module_param(do_force_module_scope_for_cpu_frequencies, bool, S_IRUSR); ++module_param(do_force_module_scope_for_cpu_frequencies, bool, 0400); + MODULE_PARM_DESC( + do_force_module_scope_for_cpu_frequencies, + "Toggle module scope for cpu frequencies. Sets \"affected_cpus\" and \"related_cpus\" of cpufreq_policy."); + + static unsigned short sw_buffer_num_pages = 16; +-module_param(sw_buffer_num_pages, ushort, S_IRUSR); ++module_param(sw_buffer_num_pages, ushort, 0400); + MODULE_PARM_DESC( + sw_buffer_num_pages, + "Specify number of 4kB pages to use for each per-cpu buffer. MUST be a power of 2! Default value = 16 (64 kB)"); +@@ -204,7 +205,7 @@ DECLARE_OVERHEAD_VARS(sw_any_seg_full); + * Debugging ONLY! + */ + #if DO_DEBUG_OUTPUT +-static const char *s_when_type_names[] = { "BEGIN", "POLL", "NOTIFIER", ++static const char * const s_when_type_names[] = { "BEGIN", "POLL", "NOTIFIER", + "TRACEPOINT", "END" }; + #endif /* DO_DEBUG_OUTPUT */ + +@@ -256,9 +257,9 @@ int sw_print_collector_node_i(struct sw_collector_data *curr) + struct sw_driver_io_descriptor *descriptor = NULL; + struct sw_driver_interface_info *info = NULL; + +- if (!curr) { ++ if (!curr) + return -PW_ERROR; +- } ++ + info = curr->info; + descriptor = (struct sw_driver_io_descriptor *)info->descriptors; + pw_pr_debug( +@@ -268,13 +269,13 @@ int sw_print_collector_node_i(struct sw_collector_data *curr) + --num_descriptors, ++descriptor) { + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); +- if (ops == NULL) { ++ if (ops == NULL) + return -PW_ERROR; +- } ++ + print_func = ops->print; +- if (print_func && (*print_func)(descriptor)) { ++ if (print_func && (*print_func)(descriptor)) + return -PW_ERROR; +- } ++ + } + return PW_SUCCESS; + } +@@ -297,9 +298,9 @@ static int sw_reset_collector_node_i(struct 
sw_collector_data *coll) + int num_descriptors; + int retcode = PW_SUCCESS; + +- if (!coll) { ++ if (!coll) + return -PW_ERROR; +- } ++ + info = coll->info; + + descriptor = (struct sw_driver_io_descriptor *)info->descriptors; +@@ -310,9 +311,9 @@ static int sw_reset_collector_node_i(struct sw_collector_data *coll) + --num_descriptors, ++descriptor) { + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); +- if (ops && ops->reset && (*ops->reset)(descriptor)) { ++ if (ops && ops->reset && (*ops->reset)(descriptor)) + retcode = -PW_ERROR; +- } ++ + } + return retcode; + } +@@ -330,25 +331,25 @@ void sw_iterate_driver_info_lists_i(void) + + for (which = SW_WHEN_TYPE_BEGIN; which <= SW_WHEN_TYPE_END; ++which) { + pw_pr_debug("ITERATING list %s\n", s_when_type_names[which]); ++ /* Should NEVER happen! */ + if (sw_handle_collector_list( + &sw_collector_lists[which], +- &sw_print_collector_node_i)) { +- /* Should NEVER happen! */ ++ &sw_print_collector_node_i)) + pw_pr_error( + "WARNING: error occurred while printing values!\n"); +- } ++ + } + + if (sw_for_each_tracepoint_node(&sw_iterate_trace_notifier_list_i, NULL, +- false /*return-on-error*/)) { ++ false /*return-on-error*/)) + pw_pr_error( + "WARNING: error occurred while printing tracepoint values!\n"); +- } ++ + if (sw_for_each_notifier_node(&sw_iterate_trace_notifier_list_i, NULL, +- false /*return-on-error*/)) { ++ false /*return-on-error*/)) + pw_pr_error( + "WARNING: error occurred while printing notifier values!\n"); +- } ++ + } + + static void sw_reset_collectors_i(void) +@@ -358,10 +359,10 @@ static void sw_reset_collectors_i(void) + for (which = SW_WHEN_TYPE_BEGIN; which <= SW_WHEN_TYPE_END; ++which) { + pw_pr_debug("ITERATING list %s\n", s_when_type_names[which]); + if (sw_handle_collector_list(&sw_collector_lists[which], +- &sw_reset_collector_node_i)) { ++ &sw_reset_collector_node_i)) + pw_pr_error( + "WARNING: error occurred while resetting a collector!\n"); +- } ++ + } + } + 
+@@ -403,6 +404,9 @@ void sw_destroy_data_structures_i(void) + sw_destroy_per_cpu_buffers(); + sw_destroy_collector_lists_i(); + sw_remove_trace_notify(); ++ ++ /* Should already have been called from 'collection_stop' */ ++ sw_destroy_telem(); + } + + int sw_get_arch_details_i(void) +@@ -421,11 +425,11 @@ static int + sw_init_destroy_trace_notifier_lists_i(struct sw_trace_notifier_data *node, + void *is_init) + { +- if (is_init == INIT_FLAG) { ++ if (is_init == INIT_FLAG) + sw_init_collector_list(&node->list); +- } else { ++ else + sw_destroy_collector_list(&node->list); +- } ++ + node->was_registered = false; + + return PW_SUCCESS; +@@ -435,9 +439,9 @@ int sw_init_collector_lists_i(void) + { + int i = 0; + +- for (i = 0; i < NUM_COLLECTOR_MODES; ++i) { ++ for (i = 0; i < NUM_COLLECTOR_MODES; ++i) + sw_init_collector_list(&sw_collector_lists[i]); +- } ++ + sw_for_each_tracepoint_node(&sw_init_destroy_trace_notifier_lists_i, + INIT_FLAG, false /*return-on-error*/); + sw_for_each_notifier_node(&sw_init_destroy_trace_notifier_lists_i, +@@ -450,9 +454,9 @@ void sw_destroy_collector_lists_i(void) + { + int i = 0; + +- for (i = 0; i < NUM_COLLECTOR_MODES; ++i) { ++ for (i = 0; i < NUM_COLLECTOR_MODES; ++i) + sw_destroy_collector_list(&sw_collector_lists[i]); +- } ++ + sw_for_each_tracepoint_node(&sw_init_destroy_trace_notifier_lists_i, + DESTROY_FLAG, false /*return-on-error*/); + sw_for_each_notifier_node(&sw_init_destroy_trace_notifier_lists_i, +@@ -462,7 +466,6 @@ void sw_destroy_collector_lists_i(void) + /* + * Used for {READ,WRITE}_IMMEDIATE requests. 
+ */ +-typedef struct sw_immediate_request_info sw_immediate_request_info_t; + struct sw_immediate_request_info { + struct sw_driver_io_descriptor *local_descriptor; + char *dst_vals; +@@ -476,15 +479,13 @@ void sw_handle_immediate_request_i(void *request) + char *dst_vals = info->dst_vals; + const struct sw_hw_ops *ops = + sw_get_hw_ops_for(descriptor->collection_type); +- if (likely(ops != NULL)) { ++ if (likely(ops != NULL)) + *(info->retVal) = sw_handle_driver_io_descriptor( + dst_vals, RAW_CPU(), descriptor, ops); +- } else { ++ else + pw_pr_error( + "No operations found to satisfy collection type %u!\n", + descriptor->collection_type); +- } +- return; + } + + static int num_times_polled; +@@ -542,10 +543,10 @@ int sw_collection_stop_i(void) + /* + * Unregister any registered tracepoints and notifiers. + */ +- if (sw_unregister_trace_notifiers()) { ++ if (sw_unregister_trace_notifiers()) + pw_pr_warn( + "Warning: some trace_notifier probe functions could NOT be unregistered!\n"); +- } ++ + /* + * Handle 'STOP' snapshots, if any. + */ +@@ -564,7 +565,7 @@ int sw_collection_stop_i(void) + * close to the 'wake_up_interruptible', below. + */ + s_internal_state.drain_buffers = true; +- smp_mb(); ++ smp_mb(); /* order memory access */ + /* + * Wakeup any sleeping readers, and cleanup any + * timers in the reader subsys. +@@ -576,10 +577,10 @@ int sw_collection_stop_i(void) + */ + sw_count_samples_produced_dropped(); + #if DO_OVERHEAD_MEASUREMENTS +- pw_pr_force( +- "DEBUG: there were %llu samples produced and %llu samples dropped in buffer v5!\n", +- sw_num_samples_produced, sw_num_samples_dropped); +-#endif /* DO_OVERHEAD_MEASUREMENTS */ ++ pw_pr_force("DEBUG: there were %llu samples produced and %llu samples \ ++ dropped in buffer v5!\n", sw_num_samples_produced, ++ sw_num_samples_dropped); ++#endif // DO_OVERHEAD_MEASUREMENTS + /* + * DEBUG: iterate over collection lists. + */ +@@ -592,6 +593,14 @@ int sw_collection_stop_i(void) + * Clear out the collector lists. 
+ */ + sw_destroy_collector_lists_i(); ++ /* ++ * Free up circular buffer ++ */ ++ destroy_circular_buffer(); ++ /* ++ * Remove telemetry mappings ++ */ ++ sw_destroy_telem(); + pw_pr_debug("OK, STOPPED collection!\n"); + #if DO_OVERHEAD_MEASUREMENTS + pw_pr_force("There were %d poll ticks!\n", num_times_polled); +@@ -604,9 +613,9 @@ int sw_collection_poll_i(void) + /* + * Handle 'POLL' timer expirations. + */ +- if (SW_LIST_EMPTY(&sw_collector_lists[SW_WHEN_TYPE_POLL])) { ++ if (SW_LIST_EMPTY(&sw_collector_lists[SW_WHEN_TYPE_POLL])) + pw_pr_debug("DEBUG: EMPTY POLL LIST\n"); +- } ++ + ++num_times_polled; + return sw_handle_collector_list(&sw_collector_lists[SW_WHEN_TYPE_POLL], + &sw_handle_collector_node); +@@ -642,13 +651,13 @@ sw_add_trace_notifier_driver_info_i(struct sw_trace_notifier_data *node, + + static int sw_post_config_i(const struct sw_hw_ops *op, void *priv) + { +- if (!op->available || !(*op->available)()) { +- /* op not available */ ++ /* op not available */ ++ if (!op->available || !(*op->available)()) + return 0; +- } +- if (!op->post_config || (*op->post_config)()) { ++ ++ if (!op->post_config || (*op->post_config)()) + return 0; +- } ++ + return -EIO; + } + +@@ -753,19 +762,18 @@ sw_set_driver_infos_i(struct sw_driver_interface_msg __user *remote_msg, + } else { + if (sw_add_driver_info( + &sw_collector_lists[i], +- local_info)) { ++ local_info)) + pw_pr_error( + "WARNING: could NOT add driver info to list for 'when type' %d!\n", + i); +- } + } + } + } + } + if (sw_for_each_hw_op(&sw_post_config_i, NULL, +- false /*return-on-error*/)) { ++ false /*return-on-error*/)) + pw_pr_error("POST-CONFIG error!\n"); +- } ++ + vfree(local_msg); + memset(&s_internal_state, 0, sizeof(s_internal_state)); + /* +@@ -788,14 +796,14 @@ static long sw_handle_cmd_i(sw_driver_collection_cmd_t cmd, + } + switch (cmd) { + case SW_DRIVER_START_COLLECTION: +- if (sw_collection_start_i()) { ++ if (sw_collection_start_i()) + return -PW_ERROR; +- } ++ + break; + case 
SW_DRIVER_STOP_COLLECTION: +- if (sw_collection_stop_i()) { ++ if (sw_collection_stop_i()) + return -PW_ERROR; +- } ++ + break; + default: + pw_pr_error("WARNING: unsupported command %d\n", cmd); +@@ -844,9 +852,9 @@ static void sw_do_extract_scu_fw_version(void) + sw_scu_fw_major_minor = 0x0; + #ifdef SFI_SIG_OEMB + if (sfi_table_parse(SFI_SIG_OEMB, NULL, NULL, +- &sw_do_parse_sfi_oemb_table)) { ++ &sw_do_parse_sfi_oemb_table)) + pw_pr_force("WARNING: NO SFI information!\n"); +- } ++ + #endif /* SFI_SIG_OEMB */ + } + +@@ -899,18 +907,19 @@ sw_get_available_trace_notifiers_i(enum sw_name_id_type type, + { + long retVal = PW_SUCCESS; + +- if (type == SW_NAME_TYPE_TRACEPOINT) { ++ if (type == SW_NAME_TYPE_TRACEPOINT) + retVal = sw_for_each_tracepoint_node(&sw_gather_tracepoint_i, + local_info, + false /*return-on-error*/); +- } else { ++ else + retVal = sw_for_each_notifier_node(&sw_gather_notifier_i, + local_info, + false /*return-on-error*/); +- } ++ + pw_pr_debug( + "There are %u extracted traces/notifiers for a total of %u bytes!\n", + local_info->num_name_id_pairs, local_info->payload_len); ++ + return retVal; + } + +@@ -972,17 +981,17 @@ sw_get_available_name_id_mappings_i(enum sw_name_id_type type, + memset(buffer, 0, local_len); + local_info = (struct sw_name_info_msg *)buffer; + +- if (type == SW_NAME_TYPE_COLLECTOR) { ++ if (type == SW_NAME_TYPE_COLLECTOR) + retVal = sw_get_available_collectors_i(local_info); +- } else { ++ else + retVal = sw_get_available_trace_notifiers_i(type, local_info); +- } ++ + if (retVal == PW_SUCCESS) { + retVal = copy_to_user(remote_info, local_info, local_len); +- if (retVal) { ++ if (retVal) + pw_pr_error( + "ERROR: couldn't copy tracepoint info to user space!\n"); +- } ++ + } + vfree(buffer); + return retVal; +@@ -1001,7 +1010,7 @@ sw_get_topology_changes_i(struct sw_driver_topology_msg __user *remote_msg, + struct sw_driver_topology_change *dst = NULL; + size_t dst_idx = 0; + +- SW_LIST_HEAD_VAR(sw_topology_node) *head = 
(void *)&sw_topology_list; ++ SW_LIST_HEAD_VAR(sw_topology_node) * head = (void *)&sw_topology_list; + struct sw_topology_node *tnode = NULL; + + if (local_len < buffer_len) { +@@ -1030,14 +1039,74 @@ sw_get_topology_changes_i(struct sw_driver_topology_msg __user *remote_msg, + memcpy(&dst[dst_idx++], change, sizeof(*change)); + } + retVal = copy_to_user(remote_msg, local_msg, buffer_len); +- if (retVal) { ++ if (retVal) + pw_pr_error( + "ERROR: couldn't copy topology changes to user space!\n"); +- } ++ + vfree(buffer); + return retVal; + } + ++static long sw_read_continuous_i(char *remote_buffer, size_t local_len) ++{ ++ /* TODO: call 'consume_buffer' directly? */ ++ ssize_t val = sw_consume_data(0 /*mask, dummy*/, remote_buffer, ++ local_len); ++ if (val <= 0) ++ return val; ++ ++ return 0; ++} ++ ++static long sw_set_telem_cfgs_i(char *remote_cfg, size_t local_len) ++{ ++ u64 *local_cfg = vmalloc(local_len); ++ int retval = 0; ++ ++ if (!local_cfg) { ++ pw_pr_error("ERROR allocating space for local telem cfgs!\n"); ++ return -EFAULT; ++ } ++ if (copy_from_user(local_cfg, remote_cfg, local_len)) { ++ pw_pr_error("ERROR copying message from user space!\n"); ++ retval = -EFAULT; ++ goto done_set_telem_cfgs; ++ } ++ if (sw_setup_telem(local_cfg)) { ++ pw_pr_error("Couldn't setup telemetry\n"); ++ retval = -1; ++ } ++done_set_telem_cfgs: ++ vfree(local_cfg); ++ return retval; ++} ++ ++static long sw_set_continuous_i( ++ struct sw_driver_continuous_collect __user *remote_msg, ++ int local_len) ++{ ++ pw_u32_t buffer_size = 0; ++ long ret = get_user(buffer_size, &remote_msg->collection_size); ++ if (ret) ++ return ret; ++ ++ if (buffer_size == 0) { ++ pw_pr_error("Cannot allocate a zero length buffer!\n"); ++ return -EINVAL; ++ } ++ ret = initialize_circular_buffer(buffer_size); ++ if (ret) ++ return ret; ++ ++ ret = sw_set_driver_infos_i((struct sw_driver_interface_msg __user *) ++ remote_msg->payload, local_len); ++ if (ret) { ++ destroy_circular_buffer(); ++ return 
ret; ++ } ++ return 0; ++} ++ + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) + #define MATCH_IOCTL(num, pred) ((num) == (pred) || (num) == (pred##32)) + #else +@@ -1045,7 +1114,7 @@ sw_get_topology_changes_i(struct sw_driver_topology_msg __user *remote_msg, + #endif + + static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, +- void *p_local_args) ++ void *p_local_args) + { + struct sw_driver_ioctl_arg local_args; + int local_in_len, local_out_len; +@@ -1063,7 +1132,8 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + */ + if (_IOC_TYPE(ioctl_num) != APWR_IOCTL_MAGIC_NUM) { + pw_pr_error( +- "ERROR: requested IOCTL TYPE (%d) != APWR_IOCTL_MAGIC_NUM (%d)\n", ++ "ERROR: requested IOCTL TYPE (%d) != \ ++ APWR_IOCTL_MAGIC_NUM (%d)\n", + _IOC_TYPE(ioctl_num), APWR_IOCTL_MAGIC_NUM); + return -PW_ERROR; + } +@@ -1075,7 +1145,7 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + local_in_len = local_args.in_len; + local_out_len = local_args.out_len; + pw_pr_debug("GU: local_in_len = %d, local_out_len = %d\n", local_in_len, +- local_out_len); ++ local_out_len); + /* + * (3) Service individual IOCTL requests. 
+ */ +@@ -1085,17 +1155,22 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + (struct sw_driver_interface_msg __user *) + local_args.in_arg, + local_in_len); ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_CONFIG_CONTINUOUS)) { ++ pw_pr_debug("DEBUG: PW_IOCTL_CONFIG_CONTINUOUS\n"); ++ return sw_set_continuous_i( ++ (struct sw_driver_continuous_collect __user *) ++ local_args.in_arg, ++ local_in_len); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_CMD)) { + sw_driver_collection_cmd_t local_cmd; + + pw_pr_debug("PW_IOCTL_CMD\n"); +- if (get_user(local_cmd, (sw_driver_collection_cmd_t __user *) +- local_args.in_arg)) { +- pw_pr_error("ERROR: could NOT extract cmd value!\n"); +- return -PW_ERROR; +- } +- return sw_handle_cmd_i(local_cmd, +- (u64 __user *)local_args.out_arg); ++ if (get_user(local_cmd, ++ (sw_driver_collection_cmd_t __user *)local_args.in_arg)) { ++ pw_pr_error("ERROR: could NOT extract cmd value!\n"); ++ return -PW_ERROR; ++ } ++ return sw_handle_cmd_i(local_cmd, (u64 __user *)local_args.out_arg); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_POLL)) { + pw_pr_debug("PW_IOCTL_POLL\n"); + return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_collection_poll_i); +@@ -1146,15 +1221,15 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + goto ret_immediate_io; + } + local_descriptor = ((struct sw_driver_io_descriptor *) +- local_info->descriptors); ++ local_info->descriptors); + pw_pr_debug("Collection type after %d\n", +- local_descriptor->collection_type); ++ local_descriptor->collection_type); + /* + * Check cpu mask for correctness here. For now, we do NOT allow + * reading on ALL cpus. 
+ */ + if ((int)local_info->cpu_mask < -1 || +- (int)local_info->cpu_mask >= (int)sw_max_num_cpus) { ++ (int)local_info->cpu_mask >= (int)sw_max_num_cpus) { + pw_pr_error( + "ERROR: invalid cpu mask %d specified in immediate IO; valid values are: -1, [0 -- %d]!\n", + local_info->cpu_mask, sw_max_num_cpus - 1); +@@ -1169,7 +1244,7 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + local_descriptor->collection_type, + local_info->cpu_mask); + if (sw_is_valid_hw_op_id(local_descriptor->collection_type) == +- false) { ++ false) { + pw_pr_error( + "ERROR: invalid collection type %d specified for immediate IO\n", + (int)local_descriptor->collection_type); +@@ -1180,7 +1255,7 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + * Check collection cmd for correctness here + */ + if (local_descriptor->collection_command < SW_IO_CMD_READ || +- local_descriptor->collection_command > SW_IO_CMD_WRITE) { ++ local_descriptor->collection_command > SW_IO_CMD_WRITE) { + pw_pr_error( + "ERROR: invalid collection command %d specified for immediate IO\n", + local_descriptor->collection_command); +@@ -1215,7 +1290,7 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + default: /* IO on a particular CPU */ + cpumask_set_cpu(local_info->cpu_mask, &cpumask); + pw_pr_debug("[%d] setting for %d\n", RAW_CPU(), +- local_info->cpu_mask); ++ local_info->cpu_mask); + break; + } + sw_schedule_work(&cpumask, +@@ -1257,16 +1332,17 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + } + ret_immediate_io: + vfree(src_vals); +- if (dst_vals) { ++ if (dst_vals) + vfree(dst_vals); +- } ++ + return retVal; + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_SCU_FW_VERSION)) { + u32 local_data = (u32)sw_scu_fw_major_minor; + + if (put_user(local_data, (u32 __user *)local_args.out_arg)) { + pw_pr_error( +- "ERROR copying scu fw version to userspace!\n"); ++ "ERROR copying scu fw version to userspace!\n" ++ ); + return -PW_ERROR; + } + return 
PW_SUCCESS; +@@ -1275,44 +1351,51 @@ static long sw_unlocked_handle_ioctl_i(unsigned int ioctl_num, + (pw_u64_t)SW_DRIVER_VERSION_MAJOR << 32 | + (pw_u64_t)SW_DRIVER_VERSION_MINOR << 16 | + (pw_u64_t)SW_DRIVER_VERSION_OTHER; +- if (put_user(local_version, (u64 __user *)local_args.out_arg)) { ++ if (put_user(local_version, ++ (u64 __user *)local_args.out_arg)) { + pw_pr_error( +- "ERROR copying driver version to userspace!\n"); ++ "ERROR copying driver version to userspace!\n" ++ ); + return -PW_ERROR; + } + return PW_SUCCESS; +- } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_AVAILABLE_TRACEPOINTS)) { ++ } else if (MATCH_IOCTL(ioctl_num, ++ PW_IOCTL_GET_AVAILABLE_TRACEPOINTS)) { + pw_pr_debug("DEBUG: AVAIL tracepoints! local_out_len = %u\n", +- local_out_len); ++ local_out_len); + return sw_get_available_name_id_mappings_i( + SW_NAME_TYPE_TRACEPOINT, + (struct sw_name_info_msg __user *)local_args.out_arg, + local_out_len); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_AVAILABLE_NOTIFIERS)) { +- pw_pr_debug("DEBUG: AVAIL tracepoints! local_out_len = %u\n", +- local_out_len); ++ pw_pr_debug("DEBUG: AVAIL notifiers! local_out_len = %u\n", ++ local_out_len); + return sw_get_available_name_id_mappings_i( + SW_NAME_TYPE_NOTIFIER, + (struct sw_name_info_msg __user *)local_args.out_arg, + local_out_len); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_AVAILABLE_COLLECTORS)) { +- pw_pr_debug("DEBUG: AVAIL tracepoints! local_out_len = %u\n", +- local_out_len); ++ pw_pr_debug("DEBUG: AVAIL collectors! local_out_len = %u\n", ++ local_out_len); + return sw_get_available_name_id_mappings_i( + SW_NAME_TYPE_COLLECTOR, + (struct sw_name_info_msg __user *)local_args.out_arg, + local_out_len); + } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_GET_TOPOLOGY_CHANGES)) { + pw_pr_debug("DEBUG: TOPOLOGY changes! 
local_out_len = %u\n", +- local_out_len); ++ local_out_len); + return sw_get_topology_changes_i( + (struct sw_driver_topology_msg __user *) +- local_args.out_arg, +- local_out_len); +- } else { +- pw_pr_error("ERROR: invalid ioctl num: %u\n", +- _IOC_NR(ioctl_num)); ++ local_args.out_arg, local_out_len); ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_READ_CONTINUOUS)) { ++ pw_pr_debug("DEBUG: READ_CONTINUOUS!\n"); ++ return sw_read_continuous_i(local_args.out_arg, local_out_len); ++ } else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_SET_TELEM_BAR)) { ++ pw_pr_debug("DEBUG: got a request to set telem bar!\n"); ++ return sw_set_telem_cfgs_i(local_args.in_arg, local_in_len); + } ++ ++ pw_pr_error("ERROR: invalid ioctl num: %u\n", _IOC_NR(ioctl_num)); + return -PW_ERROR; + } + +@@ -1373,17 +1456,15 @@ int sw_load_driver_i(void) + if (sw_set_module_scope_for_cpus()) { + pw_pr_force("ERROR setting affected cpus\n"); + return -PW_ERROR; +- } else { +- pw_pr_debug("OK, setting worked\n"); + } ++ pw_pr_debug("OK, setting worked\n"); + } + if (sw_init_data_structures_i()) { + pw_pr_error("ERROR initializing data structures!\n"); + goto err_ret_init_data; + } +- if (sw_register_dev(&s_ops)) { ++ if (sw_register_dev(&s_ops)) + goto err_ret_register_dev; +- } + /* + * Retrieve a list of tracepoint structs to use when + * registering probe functions. 
+@@ -1399,7 +1480,7 @@ int sw_load_driver_i(void) + pw_pr_force("OK: LOADED SoC Watch Driver\n"); + #ifdef CONFIG_X86_WANT_INTEL_MID + pw_pr_force("SOC Identifier = %u, Stepping = %u\n", +- intel_mid_identify_cpu(), intel_mid_soc_stepping()); ++ intel_mid_identify_cpu(), intel_mid_soc_stepping()); + #endif /* CONFIG_X86_WANT_INTEL_MID */ + pw_pr_force("-----------------------------------------\n"); + return PW_SUCCESS; +@@ -1410,11 +1491,10 @@ int sw_load_driver_i(void) + sw_destroy_data_structures_i(); + err_ret_init_data: + if (do_force_module_scope_for_cpu_frequencies) { +- if (sw_reset_module_scope_for_cpus()) { ++ if (sw_reset_module_scope_for_cpus()) + pw_pr_force("ERROR resetting affected cpus\n"); +- } else { ++ else + pw_pr_debug("OK, resetting worked\n"); +- } + } + return -PW_ERROR; + } +@@ -1428,11 +1508,10 @@ void sw_unload_driver_i(void) + sw_destroy_data_structures_i(); + + if (do_force_module_scope_for_cpu_frequencies) { +- if (sw_reset_module_scope_for_cpus()) { ++ if (sw_reset_module_scope_for_cpus()) + pw_pr_force("ERROR resetting affected cpus\n"); +- } else { ++ else + pw_pr_debug("OK, resetting worked\n"); +- } + } + + pw_pr_force("-----------------------------------------\n"); +@@ -1443,24 +1522,22 @@ void sw_unload_driver_i(void) + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_collection_poll_i, "POLL"); + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_any_seg_full, "ANY_SEG_FULL"); + #if DO_TRACK_MEMORY_USAGE +- { +- /* +- * Dump memory stats. +- */ ++ /* ++ * Dump memory stats. 
++ */ ++ pw_pr_force( ++ "TOTAL # BYTES ALLOCED = %llu, CURR # BYTES ALLOCED = %llu, MAX # BYTES ALLOCED = %llu\n", ++ sw_get_total_bytes_alloced(), ++ sw_get_curr_bytes_alloced(), ++ sw_get_max_bytes_alloced()); ++ if (unlikely(sw_get_curr_bytes_alloced())) { + pw_pr_force( +- "TOTAL # BYTES ALLOCED = %llu, CURR # BYTES ALLOCED = %llu, MAX # BYTES ALLOCED = %llu\n", +- sw_get_total_bytes_alloced(), +- sw_get_curr_bytes_alloced(), +- sw_get_max_bytes_alloced()); +- if (unlikely(sw_get_curr_bytes_alloced())) { +- pw_pr_force( +- "***********************************************************************\n"); +- pw_pr_force( +- "WARNING: possible memory leak: there are %llu bytes still allocated!\n", +- sw_get_curr_bytes_alloced()); +- pw_pr_force( +- "***********************************************************************\n"); +- } ++ "***********************************************************************\n"); ++ pw_pr_force( ++ "WARNING: possible memory leak: there are %llu bytes still allocated!\n", ++ sw_get_curr_bytes_alloced()); ++ pw_pr_force( ++ "***********************************************************************\n"); + } + #endif /* DO_TRACK_MEMORY_USAGE */ + pw_pr_force("-----------------------------------------\n"); +diff --git a/drivers/platform/x86/socwatch/sw_file_ops.c b/drivers/platform/x86/socwatch/sw_file_ops.c +index ea84d252a4d3..199ae560801e 100644 +--- a/drivers/platform/x86/socwatch/sw_file_ops.c ++++ b/drivers/platform/x86/socwatch/sw_file_ops.c +@@ -1,70 +1,70 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. 
+- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +-*/ +-#include /* try_module_get */ +-#include /* inode */ +-#include /* class_create */ +-#include /* cdev_alloc */ ++#include /* try_module_get */ ++#include /* inode */ ++#include /* class_create */ ++#include /* cdev_alloc */ + #include /* LINUX_VERSION_CODE */ +-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +-#include /* copy_to_user */ ++#if KERNEL_VERSION(4, 12, 0) > LINUX_VERSION_CODE ++ #include /* copy_to_user */ + #else +-#include /* copy_to_user */ ++ #include /* copy_to_user */ + #endif /* LINUX_VERSION_CODE */ +-#include /* wait_event_interruptible */ +-#include /* TASK_INTERRUPTIBLE */ ++#include /* wait_event_interruptible */ ++#include /* TASK_INTERRUPTIBLE */ + + #include "sw_kernel_defines.h" + #include "sw_types.h" +@@ -84,56 +84,20 @@ + /* + * Check if we're currently collecting data. + */ +-#define IS_COLLECTING() \ +- ({ \ +- sw_driver_collection_cmd_t __cmd = GET_CMD(); \ +- bool __val = (__cmd == SW_DRIVER_START_COLLECTION || \ +- __cmd == SW_DRIVER_RESUME_COLLECTION); \ +- __val; \ +- }) ++#define IS_COLLECTING() ({ \ ++ sw_driver_collection_cmd_t __cmd = GET_CMD(); \ ++ bool __val = (__cmd == SW_DRIVER_START_COLLECTION || \ ++ __cmd == SW_DRIVER_RESUME_COLLECTION); \ ++ __val; }) ++ + /* + * Check if we're currently paused. 
+ */ +-#define IS_SLEEPING() \ +- ({ \ +- sw_driver_collection_cmd_t __cmd = GET_CMD(); \ +- bool __val = __cmd == SW_DRIVER_PAUSE_COLLECTION; \ +- __val; \ +- }) +-/* ------------------------------------------------- +- * Typedefs +- * ------------------------------------------------- +- */ +-typedef unsigned long sw_bits_t; +- +-/* ------------------------------------------------- +- * Local function declarations. +- * ------------------------------------------------- +- */ +-static int sw_device_open_i(struct inode *inode, struct file *file); +-static int sw_device_release_i(struct inode *inode, struct file *file); +-static ssize_t sw_device_read_i(struct file *file, char __user *buffer, +- size_t length, loff_t *offset); +-static long sw_device_unlocked_ioctl_i(struct file *filp, +- unsigned int ioctl_num, +- unsigned long ioctl_param); +-#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +-static long sw_device_compat_ioctl_i(struct file *file, unsigned int ioctl_num, +- unsigned long ioctl_param); +-#endif ++#define IS_SLEEPING() ({ \ ++ sw_driver_collection_cmd_t __cmd = GET_CMD(); \ ++ bool __val = __cmd == SW_DRIVER_PAUSE_COLLECTION; \ ++ __val; }) + +-/* +- * File operations exported by the driver. +- */ +-static struct file_operations s_fops = { +- .open = &sw_device_open_i, +- .read = &sw_device_read_i, +- .unlocked_ioctl = &sw_device_unlocked_ioctl_i, +-#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +- .compat_ioctl = &sw_device_compat_ioctl_i, +-#endif /* COMPAT && x64 */ +- .release = &sw_device_release_i, +-}; + /* + * Character device file MAJOR + * number -- we're now obtaining +@@ -155,7 +119,7 @@ static struct sw_file_ops *s_file_ops; + * concurent access into the same device. + */ + #define DEV_IS_OPEN 0 /* see if device is in use */ +-static volatile sw_bits_t dev_status; ++static volatile unsigned long dev_status; + + /* + * File operations. 
+@@ -168,14 +132,13 @@ static int sw_device_open_i(struct inode *inode, struct file *file) + /* + * We don't want to talk to two processes at the same time + */ +- if (test_and_set_bit(DEV_IS_OPEN, &dev_status)) { +- /* Device is busy */ +- return -EBUSY; +- } ++ if (test_and_set_bit(DEV_IS_OPEN, &dev_status)) ++ return -EBUSY; /* Device is busy */ ++ + + if (!try_module_get(THIS_MODULE)) { +- /* No such device */ +- return -ENODEV; ++ pw_pr_error("ERROR: Device not found!\n"); ++ return -ENODEV;/* No such device */ + } + pw_pr_debug("OK, allowed client open!\n"); + return PW_SUCCESS; +@@ -205,9 +168,9 @@ static int sw_device_release_i(struct inode *inode, struct file *file) + } + + static ssize_t sw_device_read_i(struct file *file, char __user *user_buffer, +- size_t length, loff_t *offset) ++ size_t length, loff_t *offset) + { +- size_t bytes_read = 0; ++ ssize_t bytes_read = 0; + u32 val = 0; + + if (!user_buffer) { +@@ -217,29 +180,26 @@ static ssize_t sw_device_read_i(struct file *file, char __user *user_buffer, + } + do { + val = SW_ALL_WRITES_DONE_MASK; +- if (wait_event_interruptible( +- sw_reader_queue, +- (sw_any_seg_full(&val, +- (*s_file_ops->should_flush)()) || +- (!IS_COLLECTING() && !IS_SLEEPING())))) { ++ if (wait_event_interruptible(sw_reader_queue, ++ (sw_any_seg_full(&val, (*s_file_ops->should_flush)()) || ++ (!IS_COLLECTING() && !IS_SLEEPING())))) { + pw_pr_error("wait_event_interruptible error\n"); + return -ERESTARTSYS; + } +- pw_pr_debug(KERN_INFO "After wait: val = %u\n", val); ++ pw_pr_debug("After wait: val = %u\n", val); + } while (val == SW_NO_DATA_AVAIL_MASK); + /* + * Are we done producing/consuming? + */ +- if (val == SW_ALL_WRITES_DONE_MASK) { ++ if (val == SW_ALL_WRITES_DONE_MASK) + return 0; /* "0" ==> EOF */ +- } ++ + /* + * Copy the buffer contents into userspace. 
+ */ +- bytes_read = sw_consume_data( +- val, user_buffer, +- length); /* 'read' returns # of bytes actually read */ +- if (unlikely(bytes_read == 0)) { ++ /* 'read' returns # of bytes actually read */ ++ bytes_read = sw_consume_data(val, user_buffer, length); ++ if (unlikely(bytes_read <= 0)) { + /* Cannot be EOF since that has already been checked above */ + return -EIO; + } +@@ -250,12 +210,11 @@ static ssize_t sw_device_read_i(struct file *file, char __user *user_buffer, + * (1) Handle 32b IOCTLs in 32b kernel-space. + * (2) Handle 64b IOCTLs in 64b kernel-space. + */ +-static long sw_device_unlocked_ioctl_i(struct file *filp, +- unsigned int ioctl_num, +- unsigned long ioctl_param) ++static long sw_device_unlocked_ioctl_i( ++ struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param) + { + struct sw_driver_ioctl_arg __user *remote_args = +- (struct sw_driver_ioctl_arg __user *)ioctl_param; ++ (struct sw_driver_ioctl_arg __user *)ioctl_param; + struct sw_driver_ioctl_arg local_args; + + if (copy_from_user(&local_args, remote_args, sizeof(local_args))) { +@@ -284,32 +243,46 @@ struct sw_driver_ioctl_arg32 { + /* + * Handle 32b IOCTLs in 64b kernel-space. 
+ */ +-static long sw_device_compat_ioctl_i(struct file *file, unsigned int ioctl_num, +- unsigned long ioctl_param) ++static long sw_device_compat_ioctl_i( ++ struct file *file, unsigned int ioctl_num, unsigned long ioctl_param) + { + struct sw_driver_ioctl_arg32 __user *remote_args32 = +- compat_ptr(ioctl_param); ++ compat_ptr(ioctl_param); + struct sw_driver_ioctl_arg local_args; + u32 data; + +- if (get_user(local_args.in_len, &remote_args32->in_len)) { ++ if (get_user(local_args.in_len, &remote_args32->in_len)) + return -PW_ERROR; +- } +- if (get_user(local_args.out_len, &remote_args32->out_len)) { ++ ++ if (get_user(local_args.out_len, &remote_args32->out_len)) + return -PW_ERROR; +- } +- if (get_user(data, &remote_args32->in_arg)) { ++ ++ if (get_user(data, &remote_args32->in_arg)) + return -PW_ERROR; +- } ++ + local_args.in_arg = (char *)(unsigned long)data; +- if (get_user(data, &remote_args32->out_arg)) { ++ if (get_user(data, &remote_args32->out_arg)) + return -PW_ERROR; +- } ++ + local_args.out_arg = (char *)(unsigned long)data; + return (*s_file_ops->ioctl_handler)(ioctl_num, &local_args); + } + #endif + ++/* ++ * File operations exported by the driver. ++ */ ++static const struct file_operations s_fops = { ++ .open = &sw_device_open_i, ++ .read = &sw_device_read_i, ++ .unlocked_ioctl = &sw_device_unlocked_ioctl_i, ++#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++ .compat_ioctl = &sw_device_compat_ioctl_i, ++#endif /* COMPAT && x64 */ ++ .release = &sw_device_release_i, ++}; ++ ++ + /* + * Device creation, deletion operations. 
+ */ +@@ -330,20 +303,20 @@ int sw_register_dev(struct sw_file_ops *ops) + ret = alloc_chrdev_region(&apwr_dev, 0, 1, PW_DEVICE_NAME); + apwr_dev_major_num = MAJOR(apwr_dev); + apwr_class = class_create(THIS_MODULE, "apwr"); +- if (IS_ERR(apwr_class)) { +- printk(KERN_ERR "Error registering apwr class\n"); +- } ++ if (IS_ERR(apwr_class)) ++ pw_pr_error("Error registering apwr class\n"); ++ + + device_create(apwr_class, NULL, apwr_dev, NULL, PW_DEVICE_NAME); + apwr_cdev = cdev_alloc(); + if (apwr_cdev == NULL) { +- printk("Error allocating character device\n"); ++ pw_pr_error("Error allocating character device\n"); + return ret; + } + apwr_cdev->owner = THIS_MODULE; + apwr_cdev->ops = &s_fops; +- if (cdev_add(apwr_cdev, apwr_dev, 1) < 0) { +- printk("Error registering device driver\n"); ++ if (cdev_add(apwr_cdev, apwr_dev, 1) < 0) { ++ pw_pr_error("Error registering device driver\n"); + return ret; + } + s_file_ops = ops; +diff --git a/drivers/platform/x86/socwatch/sw_hardware_io.c b/drivers/platform/x86/socwatch/sw_hardware_io.c +index 759288ac546e..ce5d05692165 100644 +--- a/drivers/platform/x86/socwatch/sw_hardware_io.c ++++ b/drivers/platform/x86/socwatch/sw_hardware_io.c +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. 
+- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #include "sw_types.h" + #include "sw_kernel_defines.h" +@@ -61,6 +60,7 @@ + #include "sw_internal.h" + #include "sw_hardware_io.h" + ++ + struct sw_ops_node { + const struct sw_hw_ops *op; + int id; +@@ -68,8 +68,8 @@ struct sw_ops_node { + SW_LIST_ENTRY(list, sw_ops_node); + }; + +-static SW_DEFINE_LIST_HEAD(s_ops, +-sw_in sw_ops_node) = SW_LIST_HEAD_INITIALIZER(s_ops); ++static SW_DEFINE_LIST_HEAD(s_ops, sw_ops_node) = ++ SW_LIST_HEAD_INITIALIZER(s_ops); + + static int s_op_idx = -1; + +@@ -81,12 +81,10 @@ int sw_get_hw_op_id(const struct sw_hw_ops *ops) + if (ops && ops->name) { + struct sw_ops_node *node = NULL; + +- SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) +- { ++ SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) { + if (node->op->name && +- !strcmp(node->op->name, ops->name)) { ++ !strcmp(node->op->name, ops->name)) + return node->id; +- } + } + } + return -1; +@@ -96,11 +94,9 @@ const struct sw_hw_ops *sw_get_hw_ops_for(int id) + { + struct sw_ops_node *node = NULL; + +- SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) +- { +- if (node->id == id) { ++ SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) { ++ if (node->id == id) + return node->op; +- } + } + return NULL; + } +@@ -109,36 +105,33 @@ bool sw_is_valid_hw_op_id(int id) + { + struct sw_ops_node *node = NULL; + +- SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) +- { +- if (node->id == id) { ++ SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) { ++ if (node->id == id) + return true; 
+- } + } + return false; + } + + const char *sw_get_hw_op_abstract_name(const struct sw_hw_ops *op) + { +- if (op) { ++ if (op) + return op->name; +- } ++ + return NULL; + } + + int sw_for_each_hw_op(int (*func)(const struct sw_hw_ops *op, void *priv), +- void *priv, bool return_on_error) { ++ void *priv, bool return_on_error) ++{ + int retval = PW_SUCCESS; + struct sw_ops_node *node = NULL; + + if (func) { +- SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) +- { ++ SW_LIST_FOR_EACH_ENTRY(node, &s_ops, list) { + if ((*func)(node->op, priv)) { + retval = -EIO; +- if (return_on_error) { ++ if (return_on_error) + break; +- } + } + } + } +@@ -150,12 +143,12 @@ int sw_register_hw_op(const struct sw_hw_ops *op) + struct sw_ops_node *node = NULL; + + if (!op) { +- pw_pr_error("NULL input node in \"sw_register_hw_op\""); ++ pw_pr_error("NULL input node in \"%s\"", __func__); + return -EIO; + } + node = sw_kmalloc(sizeof(struct sw_ops_node), GFP_KERNEL); + if (!node) { +- pw_pr_error("sw_kmalloc error in \"sw_register_hw_op\""); ++ pw_pr_error("sw_kmalloc error in \"%s\"", __func__); + return -ENOMEM; + } + node->op = op; +@@ -178,6 +171,7 @@ void sw_free_hw_ops(void) + while (!SW_LIST_EMPTY(&s_ops)) { + struct sw_ops_node *node = + SW_LIST_GET_HEAD_ENTRY(&s_ops, sw_ops_node, list); ++ + SW_LIST_UNLINK(node, list); + sw_kfree(node); + } +diff --git a/drivers/platform/x86/socwatch/sw_internal.c b/drivers/platform/x86/socwatch/sw_internal.c +index 04544b8fecb3..8ad36a989fd2 100644 +--- a/drivers/platform/x86/socwatch/sw_internal.c ++++ b/drivers/platform/x86/socwatch/sw_internal.c +@@ -1,65 +1,66 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ + #include "sw_hardware_io.h" + #include "sw_mem.h" + #include "sw_kernel_defines.h" ++#include "sw_telem.h" + #include "sw_internal.h" + +-bool sw_check_output_buffer_params(void __user *buffer, size_t bytes_to_read, +- size_t buff_size) ++bool sw_check_output_buffer_params( ++ void __user *buffer, size_t bytes_to_read, size_t buff_size) + { + if (!buffer) { + pw_pr_error("ERROR: NULL ptr in sw_consume_data!\n"); +@@ -67,7 +68,7 @@ bool sw_check_output_buffer_params(void __user *buffer, size_t bytes_to_read, + } + if (bytes_to_read != buff_size) { + pw_pr_error("Error: bytes_to_read = %zu, required to be %zu\n", +- bytes_to_read, buff_size); ++ bytes_to_read, buff_size); + return false; + } + return true; +@@ -78,29 +79,27 @@ unsigned long sw_copy_to_user(char __user *dst, char *src, size_t bytes_to_copy) + return copy_to_user(dst, src, bytes_to_copy); + } + +-void sw_schedule_work(const struct cpumask *mask, void (*work)(void *), +- void *data) ++void sw_schedule_work( ++ const struct cpumask *mask, void (*work)(void *), void *data) + { + /* + * Did the user ask us to run on 'ANY' CPU? + */ +- if (cpumask_empty(mask)) { ++ if (cpumask_empty(mask)) + (*work)(data); /* Call on current CPU */ +- } else { ++ else { + preempt_disable(); + { + /* + * Did the user ask to run on this CPU? + */ +- if (cpumask_test_cpu(RAW_CPU(), mask)) { ++ if (cpumask_test_cpu(RAW_CPU(), mask)) + (*work)(data); /* Call on current CPU */ +- } ++ + /* + * OK, now check other CPUs. 
+ */ +- smp_call_function_many( +- mask, work, data, +- true /* Wait for all funcs to complete */); ++ smp_call_function_many(mask, work, data, true); + } + preempt_enable(); + } +@@ -119,19 +118,19 @@ void sw_put_cpu(unsigned long flags) + } + + #ifndef CONFIG_NR_CPUS_PER_MODULE +-#define CONFIG_NR_CPUS_PER_MODULE 2 ++ #define CONFIG_NR_CPUS_PER_MODULE 2 + #endif /* CONFIG_NR_CPUS_PER_MODULE */ + + static void sw_get_cpu_sibling_mask(int cpu, struct cpumask *sibling_mask) + { + unsigned int base = +- (cpu / CONFIG_NR_CPUS_PER_MODULE) * CONFIG_NR_CPUS_PER_MODULE; ++ (cpu/CONFIG_NR_CPUS_PER_MODULE) * CONFIG_NR_CPUS_PER_MODULE; + unsigned int i; + + cpumask_clear(sibling_mask); +- for (i = base; i < (base + CONFIG_NR_CPUS_PER_MODULE); ++i) { ++ for (i = base; i < (base+CONFIG_NR_CPUS_PER_MODULE); ++i) + cpumask_set_cpu(i, sibling_mask); +- } ++ + } + + struct pw_cpufreq_node { +@@ -150,15 +149,14 @@ int sw_set_module_scope_for_cpus(void) + int cpu = 0; + + INIT_LIST_HEAD(&pw_cpufreq_policy_lists); +- + for_each_online_cpu(cpu) { + struct cpumask sibling_mask; + struct pw_cpufreq_node *node = NULL; + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + +- if (!policy) { ++ if (!policy) + continue; +- } ++ + /* + * Get siblings for this cpu. + */ +@@ -195,8 +193,8 @@ int sw_set_module_scope_for_cpus(void) + /* + * Ensure 'related_cpus' is a superset of 'cpus' + */ +- cpumask_or(policy->related_cpus, policy->related_cpus, +- policy->cpus); ++ cpumask_or(policy->related_cpus, ++ policy->related_cpus, policy->cpus); + /* + * Ensure 'cpus' only contains online cpus. 
+ */ +@@ -221,9 +219,10 @@ int sw_reset_module_scope_for_cpus(void) + list_first_entry(head, struct pw_cpufreq_node, list); + int cpu = node->cpu; + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); +- if (!policy) { ++ ++ if (!policy) + continue; +- } ++ + policy->shared_type = node->shared_type; + cpumask_copy(policy->related_cpus, &node->related_cpus); + cpumask_copy(policy->cpus, &node->cpus); +@@ -236,3 +235,13 @@ int sw_reset_module_scope_for_cpus(void) + } + return PW_SUCCESS; + } ++ ++int sw_setup_telem(u64 addrs[3]) ++{ ++ return setup_telem(addrs); ++} ++ ++void sw_destroy_telem(void) ++{ ++ destroy_telem(); ++} +diff --git a/drivers/platform/x86/socwatch/sw_mem.c b/drivers/platform/x86/socwatch/sw_mem.c +index ac7725387c78..c1e22611ba67 100644 +--- a/drivers/platform/x86/socwatch/sw_mem.c ++++ b/drivers/platform/x86/socwatch/sw_mem.c +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + #include + + #include "sw_kernel_defines.h" +@@ -79,18 +78,16 @@ static atomic_t pw_mem_should_panic = ATOMIC_INIT(0); + /* + * Macro to check if PANIC is on. + */ +-#define MEM_PANIC() \ +- do { \ +- atomic_set(&pw_mem_should_panic, 1); \ +- smp_mb(); \ ++#define MEM_PANIC() do { \ ++ atomic_set(&pw_mem_should_panic, 1); \ ++ smp_mb(); /* memory access ordering */ \ + } while (0) +-#define SHOULD_TRACE() \ +- ({ \ +- bool __tmp = false; \ +- smp_mb(); \ +- __tmp = (atomic_read(&pw_mem_should_panic) == 0); \ +- __tmp; \ +- }) ++ ++#define SHOULD_TRACE() ({ \ ++ bool __tmp = false; \ ++ smp_mb(); /* memory access ordering */ \ ++ __tmp = (atomic_read(&pw_mem_should_panic) == 0); \ ++ __tmp; }) + + #else /* if !DO_MEM_PANIC_ON_ALLOC_ERROR */ + +@@ -137,16 +134,18 @@ u64 sw_get_curr_bytes_alloced(void) + * Allocate free pages. + * TODO: add memory tracker? + */ +-unsigned long sw_allocate_pages(gfp_t flags, +- unsigned int alloc_size_in_bytes) ++unsigned long sw_allocate_pages( ++ unsigned int flags, unsigned int alloc_size_in_bytes) + { +- return __get_free_pages(flags, get_order(alloc_size_in_bytes)); ++ return __get_free_pages( ++ (gfp_t)flags, get_order(alloc_size_in_bytes)); + } + /* + * Free up previously allocated pages. + * TODO: add memory tracker? 
+ */ +-void sw_release_pages(unsigned long addr, unsigned int alloc_size_in_bytes) ++void sw_release_pages( ++ unsigned long addr, unsigned int alloc_size_in_bytes) + { + free_pages(addr, get_order(alloc_size_in_bytes)); + } +@@ -195,36 +194,28 @@ static SW_DEFINE_SPINLOCK(sw_kmalloc_lock); + */ + #define PW_MEM_MAGIC 0xdeadbeef + +-#define PW_ADD_MAGIC(x) \ +- ({ \ +- char *__tmp1 = (char *)(x); \ +- *((int *)__tmp1) = PW_MEM_MAGIC; \ +- __tmp1 += sizeof(int); \ +- __tmp1; \ +- }) +-#define PW_ADD_SIZE(x, s) \ +- ({ \ +- char *__tmp1 = (char *)(x); \ +- *((int *)__tmp1) = (s); \ +- __tmp1 += sizeof(int); \ +- __tmp1; \ +- }) ++#define PW_ADD_MAGIC(x) ({ \ ++ char *__tmp1 = (char *)(x); \ ++ *((int *)__tmp1) = PW_MEM_MAGIC; \ ++ __tmp1 += sizeof(int); __tmp1; }) ++ ++#define PW_ADD_SIZE(x, s) ({ \ ++ char *__tmp1 = (char *)(x); \ ++ *((int *)__tmp1) = (s); \ ++ __tmp1 += sizeof(int); __tmp1; }) ++ + #define PW_ADD_STAMP(x, s) PW_ADD_MAGIC(PW_ADD_SIZE((x), (s))) + +-#define PW_IS_MAGIC(x) \ +- ({ \ +- int *__tmp1 = (int *)((char *)(x) - sizeof(int)); \ +- *__tmp1 == PW_MEM_MAGIC; \ +- }) +-#define PW_REMOVE_STAMP(x) \ +- ({ \ +- char *__tmp1 = (char *)(x); \ +- __tmp1 -= sizeof(int) * 2; \ +- __tmp1; \ +- }) ++#define PW_IS_MAGIC(x) ({ \ ++ int *__tmp1 = (int *)((char *)(x) - sizeof(int)); \ ++ *__tmp1 == PW_MEM_MAGIC; }) ++#define PW_REMOVE_STAMP(x) ({ \ ++ char *__tmp1 = (char *)(x); \ ++ __tmp1 -= sizeof(int) * 2; __tmp1; }) ++ + #define PW_GET_SIZE(x) (*((int *)(x))) + +-void *sw_kmalloc(size_t size, gfp_t flags) ++void *sw_kmalloc(size_t size, unsigned int flags) + { + size_t act_size = 0; + void *retVal = NULL; +@@ -234,21 +225,20 @@ void *sw_kmalloc(size_t size, gfp_t flags) + * previously! + */ + { +- if (!SHOULD_TRACE()) { ++ if (!SHOULD_TRACE()) + return NULL; +- } + } + /* + * (1) Allocate requested block. 
+ */ + act_size = size + sizeof(int) * 2; +- retVal = kmalloc(act_size, flags); ++ retVal = kmalloc(act_size, (gfp_t)flags); + if (!retVal) { + /* + * Panic if we couldn't allocate + * requested memory. + */ +- printk(KERN_INFO "ERROR: could NOT allocate memory!\n"); ++ pw_pr_debug("ERROR: could NOT allocate memory!\n"); + MEM_PANIC(); + return NULL; + } +@@ -280,7 +270,7 @@ void sw_kfree(const void *obj) + * by us. + */ + if (!PW_IS_MAGIC(obj)) { +- printk(KERN_INFO "ERROR: %p is NOT a PW_MAGIC ptr!\n", obj); ++ pw_pr_debug("ERROR: %p is NOT a PW_MAGIC ptr!\n", obj); + return; + } + /* +@@ -307,12 +297,13 @@ void sw_kfree(const void *obj) + + #else /* !DO_TRACK_MEMORY_USAGE */ + +-void *sw_kmalloc(size_t size, gfp_t flags) ++void *sw_kmalloc(size_t size, unsigned int flags) + { + void *ret = NULL; + + if (SHOULD_TRACE()) { +- if (!(ret = kmalloc(size, flags))) { ++ ret = kmalloc(size, (gfp_t)flags); ++ if (!ret) { + /* + * Panic if we couldn't allocate + * requested memory. +diff --git a/drivers/platform/x86/socwatch/sw_ops_provider.c b/drivers/platform/x86/socwatch/sw_ops_provider.c +index 6e0c77204657..6277d7d550ca 100644 +--- a/drivers/platform/x86/socwatch/sw_ops_provider.c ++++ b/drivers/platform/x86/socwatch/sw_ops_provider.c +@@ -1,65 +1,65 @@ +-/* ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. 
+- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ + #include + #include + #include /* "pci_get_domain_bus_and_slot" */ + #include /* "udelay" */ + #include + #ifdef CONFIG_RPMSG_IPC +-#include ++ #include + #endif /* CONFIG_RPMSG_IPC */ + + #include "sw_types.h" +@@ -74,20 +74,20 @@ + /* + * Should we be doing 'direct' PCI reads and writes? + * '1' ==> YES, call "pci_{read,write}_config_dword()" directly +- * '0' ==> NO, Use the "intel_mid_msgbus_{read32,write32}_raw()" +- * API (defined in 'intel_mid_pcihelpers.c') ++ * '0' ==> NO, Use the "intel_mid_msgbus_{read32,write32}_raw()" API ++ * (defined in 'intel_mid_pcihelpers.c') + */ + #define DO_DIRECT_PCI_READ_WRITE 0 + #if !IS_ENABLED(CONFIG_ANDROID) || !defined(CONFIG_X86_WANT_INTEL_MID) +-/* +- * 'intel_mid_pcihelpers.h' is probably not present -- force +- * direct PCI calls in this case. +- */ +-#undef DO_DIRECT_PCI_READ_WRITE +-#define DO_DIRECT_PCI_READ_WRITE 1 ++ /* ++ * 'intel_mid_pcihelpers.h' is probably not present -- force ++ * direct PCI calls in this case. ++ */ ++ #undef DO_DIRECT_PCI_READ_WRITE ++ #define DO_DIRECT_PCI_READ_WRITE 1 + #endif + #if !DO_DIRECT_PCI_READ_WRITE +-#include ++ #include + #endif + + #define SW_PCI_MSG_CTRL_REG 0x000000D0 +@@ -97,7 +97,7 @@ + * NUM_RETRY & USEC_DELAY are used in PCH Mailbox (sw_read_pch_mailbox_info_i). + * Tested on KBL + SPT-LP. May need to revisit. 
+ */ +-#define NUM_RETRY 100 ++#define NUM_RETRY 100 + #define USEC_DELAY 100 + + #define EXTCNF_CTRL 0xF00 /* offset for hw semaphore. */ +@@ -117,23 +117,23 @@ + /* + * TODO: separate into H/W and S/W IO? + */ +-typedef enum sw_io_type { +- SW_IO_MSR = 0, +- SW_IO_IPC = 1, +- SW_IO_MMIO = 2, +- SW_IO_PCI = 3, +- SW_IO_CONFIGDB = 4, +- SW_IO_TRACE_ARGS = 5, +- SW_IO_WAKEUP = 6, +- SW_IO_SOCPERF = 7, +- SW_IO_PROC_NAME = 8, +- SW_IO_IRQ_NAME = 9, +- SW_IO_WAKELOCK = 10, +- SW_IO_TELEM = 11, +- SW_IO_PCH_MAILBOX = 12, +- SW_IO_MAILBOX = 13, +- SW_IO_MAX = 14, +-} sw_io_type_t; ++enum sw_io_type { ++ SW_IO_MSR = 0, ++ SW_IO_IPC = 1, ++ SW_IO_MMIO = 2, ++ SW_IO_PCI = 3, ++ SW_IO_CONFIGDB = 4, ++ SW_IO_TRACE_ARGS = 5, ++ SW_IO_WAKEUP = 6, ++ SW_IO_SOCPERF = 7, ++ SW_IO_PROC_NAME = 8, ++ SW_IO_IRQ_NAME = 9, ++ SW_IO_WAKELOCK = 10, ++ SW_IO_TELEM = 11, ++ SW_IO_PCH_MAILBOX = 12, ++ SW_IO_MAILBOX = 13, ++ SW_IO_MAX = 14, ++}; + + /* + * "io_remapped" values for HW and FW semaphores +@@ -141,7 +141,7 @@ typedef enum sw_io_type { + static struct { + volatile void __iomem *hw_semaphore; + volatile void __iomem *fw_semaphore; +-} s_gbe_semaphore = { NULL, NULL }; ++} s_gbe_semaphore = {NULL, NULL}; + + /* + * Function declarations. +@@ -149,92 +149,51 @@ static struct { + /* + * Exported by the SOCPERF driver. + */ +-extern void SOCPERF_Read_Data2(void *data_buffer); ++extern void __weak SOCPERF_Read_Data3(void *data_buffer); + + /* + * Init functions. 
+ */ +-int sw_ipc_mmio_descriptor_init_func_i( +- struct sw_driver_io_descriptor *descriptor); +-int sw_pch_mailbox_descriptor_init_func_i( +- struct sw_driver_io_descriptor *descriptor); +-int sw_mailbox_descriptor_init_func_i( +- struct sw_driver_io_descriptor *descriptor); ++int sw_ipc_mmio_descriptor_init_func_i(struct sw_driver_io_descriptor *descriptor); ++int sw_pch_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor *descriptor); ++int sw_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor *descriptor); + + /* + * Read functions. + */ +-void sw_read_msr_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_read_ipc_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_read_mmio_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_read_mailbox_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_read_pci_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_read_configdb_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_read_socperf_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); ++void sw_read_msr_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_read_ipc_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_read_mmio_info_i(char *dst_vals, int cpu, const struct 
sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_read_mailbox_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_read_pci_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_read_configdb_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_read_socperf_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); + + /* + * Write functions. + */ +-void sw_write_msr_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_write_ipc_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_write_mmio_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_write_mailbox_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_write_pci_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_write_configdb_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_write_trace_args_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_write_wakeup_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes); +-void sw_write_socperf_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 
counter_size_in_bytes); ++void sw_write_msr_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_write_ipc_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_write_mmio_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_write_mailbox_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_write_pci_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_write_configdb_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_write_trace_args_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_write_wakeup_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); ++void sw_write_socperf_info_i(char *dst_vals, int cpu, const struct sw_driver_io_descriptor *descriptor, u16 counter_size_in_bytes); + + /* + * Print functions. + */ +-int sw_print_msr_io_descriptor(const struct sw_driver_io_descriptor +- *descriptor); ++int sw_print_msr_io_descriptor(const struct sw_driver_io_descriptor *descriptor); + + /* + * Reset functions -- equal but opposite of init. 
+ */ +-int sw_ipc_mmio_descriptor_reset_func_i( +- const struct sw_driver_io_descriptor *descriptor); +-int sw_pch_mailbox_descriptor_reset_func_i( +- const struct sw_driver_io_descriptor *descriptor); +-int sw_mailbox_descriptor_reset_func_i( +- const struct sw_driver_io_descriptor *descriptor); ++int sw_ipc_mmio_descriptor_reset_func_i(const struct sw_driver_io_descriptor *descriptor); ++int sw_pch_mailbox_descriptor_reset_func_i(const struct sw_driver_io_descriptor *descriptor); ++int sw_mailbox_descriptor_reset_func_i(const struct sw_driver_io_descriptor *descriptor); + + /* + * Available functions. +@@ -245,100 +204,99 @@ bool sw_socperf_available_i(void); + * Helper functions. + */ + u32 sw_platform_configdb_read32(u32 address); +-u32 sw_platform_pci_read32(u32 bus, u32 device, u32 function, u32 ctrl_offset, +- u32 ctrl_value, u32 data_offset); +-u64 sw_platform_pci_read64(u32 bus, u32 device, u32 function, u32 ctrl_offset, +- u32 ctrl_value, u32 data_offset); +-bool sw_platform_pci_write32(u32 bus, u32 device, u32 function, +- u32 write_offset, u32 data_value); ++u32 sw_platform_pci_read32(u32 bus, u32 device, u32 function, u32 ctrl_offset, u32 ctrl_value, u32 data_offset); ++u64 sw_platform_pci_read64(u32 bus, u32 device, u32 function, u32 ctrl_offset, u32 ctrl_value, u32 data_offset); ++bool sw_platform_pci_write32(u32 bus, u32 device, u32 function, u32 write_offset, u32 data_value); + + /* + * Table of collector operations. 
+ */ + static const struct sw_hw_ops s_hw_ops[] = { +- [SW_IO_MSR] = { .name = "MSR", +- .init = NULL, +- .read = &sw_read_msr_info_i, +- .write = &sw_write_msr_info_i, +- .print = &sw_print_msr_io_descriptor, +- .reset = NULL, +- .available = NULL }, ++ [SW_IO_MSR] = { ++ .name = "MSR", ++ .init = NULL, ++ .read = &sw_read_msr_info_i, ++ .write = &sw_write_msr_info_i, ++ .print = &sw_print_msr_io_descriptor, ++ .reset = NULL, ++ .available = NULL ++ }, + [SW_IO_IPC] = { +- .name = "IPC", +- .init = &sw_ipc_mmio_descriptor_init_func_i, +- .read = &sw_read_ipc_info_i, +- .reset = &sw_ipc_mmio_descriptor_reset_func_i, +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "IPC", ++ .init = &sw_ipc_mmio_descriptor_init_func_i, ++ .read = &sw_read_ipc_info_i, ++ .reset = &sw_ipc_mmio_descriptor_reset_func_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_MMIO] = { +- .name = "MMIO", +- .init = &sw_ipc_mmio_descriptor_init_func_i, +- .read = &sw_read_mmio_info_i, +- .write = &sw_write_mmio_info_i, +- .reset = &sw_ipc_mmio_descriptor_reset_func_i, +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "MMIO", ++ .init = &sw_ipc_mmio_descriptor_init_func_i, ++ .read = &sw_read_mmio_info_i, ++ .write = &sw_write_mmio_info_i, ++ .reset = &sw_ipc_mmio_descriptor_reset_func_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_PCI] = { +- .name = "PCI", +- .read = &sw_read_pci_info_i, +- .write = &sw_write_pci_info_i, +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "PCI", ++ .read = &sw_read_pci_info_i, ++ .write = &sw_write_pci_info_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_CONFIGDB] = { +- .name = "CONFIGDB", +- .read = &sw_read_configdb_info_i, +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "CONFIGDB", ++ .read = &sw_read_configdb_info_i, ++ /* Other fields are don't care (will be set to 
NULL) */ ++ }, + [SW_IO_WAKEUP] = { +- .name = "WAKEUP", +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "WAKEUP", ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_SOCPERF] = { +- .name = "SOCPERF", +- .read = &sw_read_socperf_info_i, +- .available = &sw_socperf_available_i, +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "SOCPERF", ++ .read = &sw_read_socperf_info_i, ++ .available = &sw_socperf_available_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_PROC_NAME] = { +- .name = "PROC-NAME", +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "PROC-NAME", ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_IRQ_NAME] = { +- .name = "IRQ-NAME", +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "IRQ-NAME", ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_WAKELOCK] = { +- .name = "WAKELOCK", +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "WAKELOCK", ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_TELEM] = { +- .name = "TELEM", +- .init = &sw_telem_init_func, +- .read = &sw_read_telem_info, +- .reset = &sw_reset_telem, +- .available = &sw_telem_available, +- .post_config = &sw_telem_post_config, +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "TELEM", ++ .init = &sw_telem_init_func, ++ .read = &sw_read_telem_info, ++ .reset = &sw_reset_telem, ++ .available = &sw_telem_available, ++ .post_config = &sw_telem_post_config, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_PCH_MAILBOX] = { +- .name = "PCH-MAILBOX", +- .init = &sw_pch_mailbox_descriptor_init_func_i, +- .read = &sw_read_pch_mailbox_info_i, +- .reset = &sw_pch_mailbox_descriptor_reset_func_i, +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "PCH-MAILBOX", ++ .init = 
&sw_pch_mailbox_descriptor_init_func_i, ++ .read = &sw_read_pch_mailbox_info_i, ++ .reset = &sw_pch_mailbox_descriptor_reset_func_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_MAILBOX] = { +- .name = "MAILBOX", +- .init = &sw_mailbox_descriptor_init_func_i, +- .read = &sw_read_mailbox_info_i, +- .write = &sw_write_mailbox_info_i, +- .reset = &sw_mailbox_descriptor_reset_func_i, +- /* Other fields are don't care (will be set to NULL) */ +- }, ++ .name = "MAILBOX", ++ .init = &sw_mailbox_descriptor_init_func_i, ++ .read = &sw_read_mailbox_info_i, ++ .write = &sw_write_mailbox_info_i, ++ .reset = &sw_mailbox_descriptor_reset_func_i, ++ /* Other fields are don't care (will be set to NULL) */ ++ }, + [SW_IO_MAX] = { +- .name = NULL, +- /* Other fields are don't care (will be set to NULL) */ +- } ++ .name = NULL, ++ /* Other fields are don't care (will be set to NULL) */ ++ } + }; + + /* +@@ -351,79 +309,60 @@ int sw_ipc_mmio_descriptor_init_func_i( + struct sw_driver_ipc_mmio_io_descriptor *__ipc_mmio = NULL; + u64 data_address = 0; + +- if (!descriptor) { /* Should NEVER happen */ ++ if (!descriptor) /* Should NEVER happen */ + return -PW_ERROR; +- } +- if (descriptor->collection_type == SW_IO_IPC) { ++ ++ if (descriptor->collection_type == SW_IO_IPC) + __ipc_mmio = &descriptor->ipc_descriptor; +- } else { ++ else + __ipc_mmio = &descriptor->mmio_descriptor; +- } +- pw_pr_debug("cmd = %u, sub-cmd = %u, data_addr = 0x%llx\n", +- __ipc_mmio->command, __ipc_mmio->sub_command, +- __ipc_mmio->data_address); ++ ++ pw_pr_debug("cmd = %u, sub-cmd = %u, data_addr = 0x%llx\n" ++ __ipc_mmio->command, __ipc_mmio->sub_command, ++ __ipc_mmio->data_address); + data_address = __ipc_mmio->data_address; +- /* +- * if (__ipc_mmio->command || __ipc_mmio->sub_command) { +- * __ipc_mmio->ipc_command = +- * ((pw_u32_t)__ipc_mmio->sub_command << 12) +- * | (pw_u32_t)__ipc_mmio->command; +- * } +- */ +- if (data_address) { +- __ipc_mmio->data_remapped_address = 
+- (pw_u64_t)(unsigned long)ioremap_nocache( +- (unsigned long)data_address, ++ ++ if (!data_address) ++ return PW_SUCCESS; ++ ++ __ipc_mmio->data_remapped_address = ++ (pw_u64_t)(unsigned long)ioremap_nocache( ++ (unsigned long)data_address, ++ descriptor->counter_size_in_bytes); ++ if ((void *)(unsigned long)__ipc_mmio->data_remapped_address == NULL) ++ return -EIO; ++ ++ pw_pr_debug("mapped addr 0x%llx\n", __ipc_mmio->data_remapped_address); ++ if ((__ipc_mmio->is_gbe) && ++ (!s_gbe_semaphore.hw_semaphore || ++ !s_gbe_semaphore.fw_semaphore) && ++ (data_address >= GBE_CTRL_OFFSET)) { ++ ++ u64 hw_addr = (data_address - GBE_CTRL_OFFSET) + EXTCNF_CTRL; ++ u64 fw_addr = (data_address - GBE_CTRL_OFFSET) + FWSM_CTRL; ++ pw_pr_debug("Initializing GBE semaphore\n"); ++ ++ s_gbe_semaphore.hw_semaphore = ++ ioremap_nocache( ++ (unsigned long)hw_addr, ++ descriptor->counter_size_in_bytes); ++ s_gbe_semaphore.fw_semaphore = ++ ioremap_nocache( ++ (unsigned long)fw_addr, + descriptor->counter_size_in_bytes); +- if ((void *)(unsigned long)__ipc_mmio->data_remapped_address == +- NULL) { ++ if (s_gbe_semaphore.hw_semaphore == NULL || ++ s_gbe_semaphore.fw_semaphore == NULL) { ++ pw_pr_error( ++ "couldn't mmap hw/fw semaphores for GBE MMIO op!\n"); + return -EIO; + } +- pw_pr_debug("mapped addr 0x%llx\n", +- __ipc_mmio->data_remapped_address); +- if (__ipc_mmio->is_gbe) { +- if (!s_gbe_semaphore.hw_semaphore || +- !s_gbe_semaphore.fw_semaphore) { +- pw_pr_debug("Initializing GBE semaphore\n"); +- if (data_address >= GBE_CTRL_OFFSET) { +- u64 hw_addr = (data_address - +- GBE_CTRL_OFFSET) + +- EXTCNF_CTRL; +- u64 fw_addr = (data_address - +- GBE_CTRL_OFFSET) + +- FWSM_CTRL; +- s_gbe_semaphore.hw_semaphore = +- ioremap_nocache( +- (unsigned long)hw_addr, +- descriptor +- ->counter_size_in_bytes); +- s_gbe_semaphore.fw_semaphore = +- ioremap_nocache( +- (unsigned long)fw_addr, +- descriptor +- ->counter_size_in_bytes); +- if (s_gbe_semaphore.hw_semaphore == +- NULL || +- 
s_gbe_semaphore.fw_semaphore == +- NULL) { +- pw_pr_error( +- "couldn't mmap hw/fw semaphores for GBE MMIO op!\n"); +- return -EIO; +- } +- pw_pr_debug( +- "GBE has hw_sem = 0x%llx, fw_sem = 0x%llx, size = %u\n", +- (unsigned long long) +- s_gbe_semaphore +- .hw_semaphore, +- (unsigned long long) +- s_gbe_semaphore +- .fw_semaphore, +- descriptor +- ->counter_size_in_bytes); +- } +- } +- } ++ pw_pr_debug( ++ "GBE has hw_sem = 0x%llx, fw_sem = 0x%llx, size = %u\n", ++ (unsigned long long)s_gbe_semaphore.hw_semaphore, ++ (unsigned long long)s_gbe_semaphore.fw_semaphore, ++ descriptor->counter_size_in_bytes); + } ++ + return PW_SUCCESS; + } + +@@ -433,23 +372,23 @@ int sw_pch_mailbox_descriptor_init_func_i( + /* Perform any required 'io_remap' calls here */ + struct sw_driver_pch_mailbox_io_descriptor *__pch_mailbox = NULL; + +- if (!descriptor) { /* Should NEVER happen */ ++ if (!descriptor) /* Should NEVER happen */ + return -PW_ERROR; +- } ++ + __pch_mailbox = &descriptor->pch_mailbox_descriptor; + pw_pr_debug("pch_mailbox data_addr = 0x%llx\n", +- (unsigned long long)__pch_mailbox->data_address); ++ (unsigned long long)__pch_mailbox->data_address); + if (__pch_mailbox->mtpmc_address) { + __pch_mailbox->mtpmc_remapped_address = + (pw_u64_t)(unsigned long)ioremap_nocache( + (unsigned long)__pch_mailbox->mtpmc_address, + descriptor->counter_size_in_bytes); + if ((void *)(unsigned long) +- __pch_mailbox->mtpmc_remapped_address == NULL) { ++ __pch_mailbox->mtpmc_remapped_address == NULL) + return -PW_ERROR; +- } ++ + pw_pr_debug("mtpmc_mapped addr 0x%llx\n", +- __pch_mailbox->mtpmc_remapped_address); ++ __pch_mailbox->mtpmc_remapped_address); + } + if (__pch_mailbox->msg_full_sts_address) { + __pch_mailbox->msg_full_sts_remapped_address = +@@ -457,12 +396,12 @@ int sw_pch_mailbox_descriptor_init_func_i( + (unsigned long) + __pch_mailbox->msg_full_sts_address, + descriptor->counter_size_in_bytes); +- if ((void *)(unsigned long)__pch_mailbox +- 
->msg_full_sts_remapped_address == NULL) { ++ if ((void *)(unsigned long) ++ __pch_mailbox->msg_full_sts_remapped_address == NULL) + return -PW_ERROR; +- } ++ + pw_pr_debug("msg_full_sts_mapped addr 0x%llx\n", +- __pch_mailbox->msg_full_sts_address); ++ __pch_mailbox->msg_full_sts_address); + } + if (__pch_mailbox->mfpmc_address) { + __pch_mailbox->mfpmc_remapped_address = +@@ -470,24 +409,24 @@ int sw_pch_mailbox_descriptor_init_func_i( + (unsigned long)__pch_mailbox->mfpmc_address, + descriptor->counter_size_in_bytes); + if ((void *)(unsigned long) +- __pch_mailbox->mfpmc_remapped_address == NULL) { ++ __pch_mailbox->mfpmc_remapped_address == NULL) + return -PW_ERROR; +- } ++ + pw_pr_debug("mfpmc_mapped addr 0x%llx\n", +- __pch_mailbox->mfpmc_remapped_address); ++ __pch_mailbox->mfpmc_remapped_address); + } + return PW_SUCCESS; + } + +-int sw_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor +- *descriptor) ++int sw_mailbox_descriptor_init_func_i( ++ struct sw_driver_io_descriptor *descriptor) + { + /* Perform any required 'io_remap' calls here */ + struct sw_driver_mailbox_io_descriptor *__mailbox = NULL; + +- if (!descriptor) { /* Should NEVER happen */ ++ if (!descriptor) /* Should NEVER happen */ + return -PW_ERROR; +- } ++ + __mailbox = &descriptor->mailbox_descriptor; + + pw_pr_debug( +@@ -499,11 +438,10 @@ int sw_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor + if (__mailbox->interface_address) { + __mailbox->interface_remapped_address = + (pw_u64_t)(unsigned long)ioremap_nocache( +- (unsigned long) +- __mailbox->interface_address, ++ (unsigned long)__mailbox->interface_address, + descriptor->counter_size_in_bytes); +- if ((void *)(unsigned long)__mailbox +- ->interface_remapped_address == NULL) { ++ if ((void *)(unsigned long) ++ __mailbox->interface_remapped_address == NULL) { + pw_pr_error( + "Couldn't iomap interface_address = 0x%llx\n", + __mailbox->interface_address); +@@ -516,7 +454,7 @@ int 
sw_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor + (unsigned long)__mailbox->data_address, + descriptor->counter_size_in_bytes); + if ((void *)(unsigned long) +- __mailbox->data_remapped_address == NULL) { ++ __mailbox->data_remapped_address == NULL) { + pw_pr_error( + "Couldn't iomap data_address = 0x%llx\n", + __mailbox->data_address); +@@ -524,26 +462,29 @@ int sw_mailbox_descriptor_init_func_i(struct sw_driver_io_descriptor + } + } + pw_pr_debug("OK, mapped addr 0x%llx, 0x%llx\n", +- __mailbox->interface_remapped_address, +- __mailbox->data_remapped_address); ++ __mailbox->interface_remapped_address, ++ __mailbox->data_remapped_address); + } + return PW_SUCCESS; + } + +-void sw_read_msr_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptors, +- u16 counter_size_in_bytes) ++void sw_read_msr_info_i( ++ char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) + { + u64 address = descriptors->msr_descriptor.address; + u32 l = 0, h = 0; + +- if (likely(cpu == RAW_CPU())) { +- if (rdmsr_safe((unsigned long)address, &l, &h)) { +- pw_pr_warn("Failed to read MSR address = 0x%llx\n", address); +- } +- } else { +- if (rdmsr_safe_on_cpu(cpu, (unsigned long)address, &l, &h)) { +- pw_pr_warn("Failed to read MSR address = 0x%llx\n", address); ++ if (likely(cpu == RAW_CPU())) ++ rdmsr_safe((unsigned long)address, &l, &h); ++ else { ++ if (rdmsr_safe_on_cpu( ++ cpu, (unsigned long)address, &l, &h)) { ++ pw_pr_warn( ++ "Failed to read MSR address = 0x%llx\n", ++ address); ++ l = 0; h = 0; + } + } + switch (counter_size_in_bytes) { +@@ -552,27 +493,29 @@ void sw_read_msr_info_i(char *dst_vals, int cpu, + break; + case 8: + *((u64 *)dst_vals) = ((u64)h << 32) | l; ++ pw_pr_debug( ++ "read MSR value = %llu\n", *((u64 *)dst_vals)); + break; + default: + break; + } +- return; + } + + #ifdef CONFIG_RPMSG_IPC +-#define SW_DO_IPC(cmd, sub_cmd) rpmsg_send_generic_simple_command(cmd, sub_cmd) 
++ #define SW_DO_IPC(cmd, sub_cmd) rpmsg_send_generic_simple_command(cmd, sub_cmd) + #else +-#define SW_DO_IPC(cmd, sub_cmd) (-ENODEV) +-#endif /* CONFIG_RPMSG_IPC */ ++ #define SW_DO_IPC(cmd, sub_cmd) (-ENODEV) ++#endif // CONFIG_RPMSG_IPC + +-void sw_read_ipc_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptors, +- u16 counter_size_in_bytes) ++void sw_read_ipc_info_i( ++ char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) + { +- u16 cmd = descriptors->ipc_descriptor.command, +- sub_cmd = descriptors->ipc_descriptor.sub_command; +- unsigned long remapped_address = +- (unsigned long)descriptors->ipc_descriptor.data_remapped_address; ++ u16 cmd = descriptors->ipc_descriptor.command; ++ u16 sub_cmd = descriptors->ipc_descriptor.sub_command; ++ unsigned long remapped_address = (unsigned long) ++ descriptors->ipc_descriptor.data_remapped_address; + + if (cmd || sub_cmd) { + pw_pr_debug("EXECUTING IPC Cmd = %u, %u\n", cmd, sub_cmd); +@@ -583,23 +526,21 @@ void sw_read_ipc_info_i(char *dst_vals, int cpu, + } + + if (remapped_address) { +- /* memcpy(&value, (void *)remapped_address, counter_size_in_bytes); */ + pw_pr_debug("COPYING MMIO size %u\n", counter_size_in_bytes); + memcpy(dst_vals, (void *)remapped_address, +- counter_size_in_bytes); ++ counter_size_in_bytes); + } + pw_pr_debug("Value = %llu\n", *((u64 *)dst_vals)); + } + +-static void +-sw_read_gbe_mmio_info_i(char *dst_vals, +- const struct sw_driver_io_descriptor *descriptors, +- u16 counter_size_in_bytes) ++static void sw_read_gbe_mmio_info_i( ++ char *dst_vals, ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) + { + u32 hw_val = 0, fw_val = 0; +- unsigned long remapped_address = +- (unsigned long) +- descriptors->mmio_descriptor.data_remapped_address; ++ unsigned long remapped_address = (unsigned long) ++ descriptors->mmio_descriptor.data_remapped_address; + u64 write_value = 
descriptors->write_value; + + memset(dst_vals, 0, counter_size_in_bytes); +@@ -608,56 +549,52 @@ sw_read_gbe_mmio_info_i(char *dst_vals, + "hw_sem = 0x%llx, fw_sem = 0x%llx, addr = 0x%lx, dst_vals = 0x%lx, size = %u\n", + (unsigned long long)s_gbe_semaphore.hw_semaphore, + (unsigned long long)s_gbe_semaphore.fw_semaphore, +- remapped_address, (unsigned long)dst_vals, ++ remapped_address, ++ (unsigned long)dst_vals, + counter_size_in_bytes); + if (!s_gbe_semaphore.hw_semaphore || !s_gbe_semaphore.fw_semaphore || +- !remapped_address) { ++ !remapped_address) + return; +- } + + memcpy_fromio(&hw_val, s_gbe_semaphore.hw_semaphore, sizeof(hw_val)); + memcpy_fromio(&fw_val, s_gbe_semaphore.fw_semaphore, sizeof(fw_val)); +- pw_pr_debug("HW_VAL = 0x%lx, FW_VAL = 0x%lx\n", (unsigned long)hw_val, +- (unsigned long)fw_val); ++ pw_pr_debug("HW_VAL = 0x%lx, FW_VAL = 0x%lx\n", ++ (unsigned long)hw_val, (unsigned long)fw_val); + if (!IS_HW_SEMAPHORE_SET(hw_val) && !IS_FW_SEMAPHORE_SET(fw_val)) { + memcpy_toio((volatile void __iomem *)remapped_address, +- &write_value, +- 4 /* counter_size_in_bytes*/); +- memcpy_fromio(dst_vals, +- (volatile void __iomem *)remapped_address, +- counter_size_in_bytes); ++ &write_value, 4 /* counter_size_in_bytes*/); ++ memcpy_fromio(dst_vals, (volatile void __iomem *)remapped_address, ++ counter_size_in_bytes); + } + } + void sw_read_mmio_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptors, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) + { + unsigned long remapped_address = +- (unsigned long) +- descriptors->mmio_descriptor.data_remapped_address; +- if (descriptors->mmio_descriptor.is_gbe) { +- /* MMIO for GBE requires a mailbox-like operation */ +- sw_read_gbe_mmio_info_i(dst_vals, descriptors, +- counter_size_in_bytes); +- } else { +- if (remapped_address) { +- memcpy_fromio(dst_vals, +- (volatile void __iomem *)remapped_address, +- 
counter_size_in_bytes); +- } ++ (unsigned long)descriptors->mmio_descriptor.data_remapped_address; ++ ++ /* MMIO for GBE requires a mailbox-like operation */ ++ if (descriptors->mmio_descriptor.is_gbe) ++ sw_read_gbe_mmio_info_i(dst_vals, descriptors, counter_size_in_bytes); ++ else { ++ if (remapped_address) ++ memcpy_fromio(dst_vals, (volatile void __iomem *)remapped_address, ++ counter_size_in_bytes); + } + pw_pr_debug("Value = %llu\n", *((u64 *)dst_vals)); + } + +-void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor +- *descriptor, u16 counter_size_in_bytes) ++void sw_read_pch_mailbox_info_i( ++ char *dst_vals, int cpu, ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) + { + /* + * TODO: spinlock? + */ + const struct sw_driver_pch_mailbox_io_descriptor *pch_mailbox = +- &descriptor->pch_mailbox_descriptor; ++ &descriptor->pch_mailbox_descriptor; + u32 address = pch_mailbox->data_address; + u64 mtpmc_remapped_address = pch_mailbox->mtpmc_remapped_address; + u64 msg_full_sts_remapped_address = +@@ -665,33 +602,31 @@ void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, + u64 mfpmc_remapped_address = pch_mailbox->mfpmc_remapped_address; + + /* +- * write address of desired device counter to request +- * from PMC (shift and add 2 to format device offset) ++ * write address of desired device counter to request from PMC ++ * (shift and add 2 to format device offset) + */ + if (mtpmc_remapped_address) { + int iter = 0; + u32 written_val = 0; +- u32 write_value = +- (address << 16) + +- 2; /* shift and add 2 to format device offset */ ++ /* shift and add 2 to format device offset */ ++ u32 write_value = (address << 16) + 2; ++ + memcpy_toio( +- (volatile void __iomem *) +- (unsigned long)mtpmc_remapped_address, +- &write_value, 4 /*counter_size_in_bytes*/); ++ (volatile void __iomem *)(unsigned long)mtpmc_remapped_address, ++ &write_value, 4 /*counter_size_in_bytes*/); + /* + * Check if 
address has been written using a while loop in +- * order to wait for the PMC to consume that address +- * and to introduce sufficient delay so that the message full +- * status bit has time to flip. This should ensure +- * all is ready when begin the wait loop for it to turn 0, +- * which indicates the value is available to be read. ++ * order to wait for the PMC to consume that address and to ++ * introduce sufficient delay so that the message full ++ * status bit has time to flip. This should ensure all is ++ * ready when begin the wait loop for it to turn 0, which ++ * indicates the value is available to be read. + * (This fixes problem where values being read were huge.) + */ + do { + memcpy_fromio(&written_val, +- (volatile void __iomem *)(unsigned long) +- mtpmc_remapped_address, +- 4 /*counter_size_in_bytes*/); ++ (volatile void __iomem *)(unsigned long)mtpmc_remapped_address, ++ 4 /*counter_size_in_bytes*/); + pw_pr_debug( + "DEBUG: written_val = 0x%x, address = 0x%x\n", + written_val, address); +@@ -699,9 +634,10 @@ void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, + } while ((written_val >> 16) != address && ++iter < NUM_RETRY); + } + ++ + /* +- * wait for PMC to set status indicating that device counter +- * is available for read. ++ * wait for PMC to set status indicating that device ++ * counter is available for read. 
+ */ + if (msg_full_sts_remapped_address) { + u32 status_wait = 0; +@@ -709,32 +645,31 @@ void sw_read_pch_mailbox_info_i(char *dst_vals, int cpu, + + do { + memcpy_fromio(&status_wait, +- (volatile void __iomem *)(unsigned long) +- msg_full_sts_remapped_address, +- 4 /*counter_size_in_bytes*/); +- pw_pr_debug("DEBUG: status_wait = 0x%x\n", status_wait); ++ (volatile void __iomem*)(unsigned long) ++ msg_full_sts_remapped_address, ++ 4 /*counter_size_in_bytes*/); ++ pw_pr_debug("DEBUG: status_wait = 0x%x\n", ++ status_wait); + udelay(USEC_DELAY); + } while ((status_wait & 0x01000000) >> 24 && +- ++iter < NUM_RETRY); ++ ++iter < NUM_RETRY); + } + + /* + * read device counter + */ + if (mfpmc_remapped_address) { +- memcpy_fromio( +- dst_vals, +- (volatile void __iomem *) +- (unsigned long)mfpmc_remapped_address, ++ memcpy_fromio(dst_vals, ++ (volatile void __iomem*)(unsigned long)mfpmc_remapped_address, + 4 /*counter_size_in_bytes*/); + pw_pr_debug("DEBUG: read value = 0x%x\n", +- *((pw_u32_t *)dst_vals)); ++ *((pw_u32_t *)dst_vals)); + } + } + + void sw_read_mailbox_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) + { + /* + * TODO: spinlock? 
+@@ -742,156 +677,152 @@ void sw_read_mailbox_info_i(char *dst_vals, int cpu, + const struct sw_driver_mailbox_io_descriptor *mailbox = + &descriptor->mailbox_descriptor; + unsigned long interface_address = mailbox->interface_address; +- unsigned long interface_remapped_address = +- mailbox->interface_remapped_address; ++ unsigned long interface_remapped_address = mailbox->interface_remapped_address; + unsigned long data_address = mailbox->data_address; + size_t iter = 0; + + if (mailbox->is_msr_type) { + u64 command = 0; + +- if (rdmsrl_safe(interface_address, &command)) { +- pw_pr_warn("Failed to read MSR address = 0x%llx\n", +- interface_address); +- } ++ rdmsrl_safe(interface_address, &command); + command &= mailbox->command_mask; + command |= mailbox->command | (u64)0x1 << mailbox->run_busy_bit; + wrmsrl_safe(interface_address, command); + do { + udelay(1); +- if (rdmsrl_safe(interface_address, &command)) { +- pw_pr_warn("Failed to read MSR address = 0x%llx\n", +- interface_address); +- } ++ rdmsrl_safe(interface_address, &command); + } while ((command & ((u64)0x1 << mailbox->run_busy_bit)) && +- ++iter < MAX_MAILBOX_ITERS); ++ ++iter < MAX_MAILBOX_ITERS); + if (iter >= MAX_MAILBOX_ITERS) { + pw_pr_error("Couldn't write to BIOS mailbox\n"); +- command = 0; +- } else { +- if (rdmsrl_safe(data_address, &command)) { +- pw_pr_warn("Failed to read MSR address = 0x%llx\n", +- data_address); +- } ++ command = MAX_UNSIGNED_64_BIT_VALUE; ++ } else ++ rdmsrl_safe(data_address, &command); ++ switch (counter_size_in_bytes) { ++ case 4: ++ *((u32 *)dst_vals) = (u32)command; ++ break; ++ case 8: ++ *((u64 *)dst_vals) = command; ++ break; ++ default: ++ pw_pr_error("Invalid counter size %u, assuming 4 bytes!\n", counter_size_in_bytes); ++ *((u32 *)dst_vals) = (u32)command; ++ break; + } +- *((u64 *)dst_vals) = command; +- } else { ++ } else { + u32 command = 0; +- const size_t counter_size = +- 4; /* Always use 4 bytes, regardless of +- *'counter_size_in_bytes' +- */ ++ /* 
Always use 4 bytes, regardless of 'counter_size_in_bytes' */ ++ const size_t counter_size = 4; ++ + memcpy_fromio(&command, +- (volatile void __iomem *)(unsigned long) +- interface_remapped_address, +- sizeof(command)); ++ (volatile void __iomem *)(unsigned long)interface_remapped_address, ++ sizeof(command)); + command &= mailbox->command_mask; + command |= (u32)mailbox->command | +- (u32)0x1 << mailbox->run_busy_bit; +- memcpy_toio((volatile void __iomem *)(unsigned long) +- interface_remapped_address, +- &command, sizeof(command)); ++ (u32)0x1 << mailbox->run_busy_bit; ++ memcpy_toio((volatile void __iomem *)(unsigned long)interface_remapped_address, ++ &command, sizeof(command)); + do { + udelay(1); + memcpy_fromio(&command, +- (volatile void __iomem *)(unsigned long) +- interface_remapped_address, +- sizeof(command)); ++ (volatile void __iomem *)(unsigned long)interface_remapped_address, ++ sizeof(command)); + } while ((command & ((u32)0x1 << mailbox->run_busy_bit)) && +- ++iter < MAX_MAILBOX_ITERS); ++ ++iter < MAX_MAILBOX_ITERS); + if (iter >= MAX_MAILBOX_ITERS) { + pw_pr_error("Couldn't write to BIOS mailbox\n"); +- command = 0; +- } else { ++ command = MAX_UNSIGNED_32_BIT_VALUE; ++ } else + memcpy_fromio(&command, +- (volatile void __iomem *)(unsigned long) +- mailbox->data_remapped_address, +- counter_size); +- } ++ (volatile void __iomem *)(unsigned long)mailbox->data_remapped_address, ++ counter_size); ++ + *((u32 *)dst_vals) = command; + } + } + + void sw_read_pci_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptors, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) + { +- u32 bus = descriptors->pci_descriptor.bus, +- device = descriptors->pci_descriptor.device; +- u32 function = descriptors->pci_descriptor.function, +- offset = descriptors->pci_descriptor.offset; ++ u32 bus = descriptors->pci_descriptor.bus; ++ u32 device = 
descriptors->pci_descriptor.device; ++ u32 function = descriptors->pci_descriptor.function; ++ u32 offset = descriptors->pci_descriptor.offset; + u32 data32 = 0; + u64 data64 = 0; + + switch (counter_size_in_bytes) { + case 4: + data32 = sw_platform_pci_read32(bus, device, function, +- 0 /* CTRL-OFFSET */, +- 0 /* CTRL-DATA, don't care */, +- offset /* DATA-OFFSET */); ++ 0 /* CTRL-OFFSET */, 0 /* CTRL-DATA, don't care */, ++ offset /* DATA-OFFSET */); + *((u32 *)dst_vals) = data32; + break; + case 8: + data64 = sw_platform_pci_read64(bus, device, function, +- 0 /* CTRL-OFFSET */, +- 0 /* CTRL-DATA, don't care */, +- offset /* DATA-OFFSET */); ++ 0 /* CTRL-OFFSET */, 0 /* CTRL-DATA, don't care */, ++ offset /* DATA-OFFSET */); + *((u64 *)dst_vals) = data64; + break; + default: + pw_pr_error("ERROR: invalid read size = %u\n", +- counter_size_in_bytes); +- return; ++ counter_size_in_bytes); + } +- return; + } + void sw_read_configdb_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptors, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) + { +- { +- pw_u32_t address = descriptors->configdb_descriptor.address; +- u32 data = sw_platform_configdb_read32(address); ++ pw_u32_t address = descriptors->configdb_descriptor.address; ++ u32 data = sw_platform_configdb_read32(address); + +- pw_pr_debug( +- "ADDRESS = 0x%x, CPU = %d, dst_vals = %p, counter size = %u, data = %u\n", +- address, cpu, dst_vals, counter_size_in_bytes, data); +- /* +- * 'counter_size_in_bytes' is ignored, for now. +- */ +- *((u32 *)dst_vals) = data; +- } +- return; ++ pw_pr_debug( ++ "ADDRESS = 0x%x, CPU = %d, dst_vals = %p, counter size = %u, data = %u\n", ++ address, cpu, dst_vals, counter_size_in_bytes, data); ++ /* ++ * 'counter_size_in_bytes' is ignored, for now. 
++ */ ++ *((u32 *)dst_vals) = data; + } + void sw_read_socperf_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptors, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor *descriptors, ++ u16 counter_size_in_bytes) + { +-#if IS_ENABLED(CONFIG_INTEL_SOCPERF) + u64 *socperf_buffer = (u64 *)dst_vals; + + memset(socperf_buffer, 0, counter_size_in_bytes); +- SOCPERF_Read_Data2(socperf_buffer); +-#endif /* IS_ENABLED(CONFIG_INTEL_SOCPERF) */ +- return; ++ SOCPERF_Read_Data3(socperf_buffer); ++ + } + + /** + * Decide if the socperf interface is available for use +- * @returns true if available ++ * @returns true if available + */ + bool sw_socperf_available_i(void) + { + bool retVal = false; +-#if IS_ENABLED(CONFIG_INTEL_SOCPERF) +- retVal = true; +-#endif /* IS_ENABLED(CONFIG_INTEL_SOCPERF) */ ++ ++ /* The symbol below is weak. We return 1 if we have a definition ++ * for this socperf-driver-supplied symbol, or 0 if only the ++ * weak definition exists. This test will suffice to detect if ++ * the socperf driver is loaded. ++ */ ++ if (SOCPERF_Read_Data3 != 0) { ++ pw_pr_debug("INFO: SoCPerf support in ON!\n"); ++ retVal = true; ++ } else ++ pw_pr_debug("INFO: SoCPerf support is OFF!\n"); ++ + return retVal; + } + ++ + /** + * sw_platform_configdb_read32 - for reading PCI space through config registers +- * of the platform. ++ * of the platform. + * @address: An address in the PCI space + * + * Returns: the value read from address. 
+@@ -900,13 +831,10 @@ u32 sw_platform_configdb_read32(u32 address) + { + u32 read_value = 0; + #if DO_DIRECT_PCI_READ_WRITE +- read_value = +- sw_platform_pci_read32(0 /*bus*/, +- 0 /*device*/, +- 0 /*function*/, +- SW_PCI_MSG_CTRL_REG /*ctrl-offset*/, +- address /*ctrl-value*/, +- SW_PCI_MSG_DATA_REG /*data-offset*/); ++ read_value = sw_platform_pci_read32( ++ 0/*bus*/, 0/*device*/, 0/*function*/, ++ SW_PCI_MSG_CTRL_REG/*ctrl-offset*/, address/*ctrl-value*/, ++ SW_PCI_MSG_DATA_REG/*data-offset*/); + #else /* !DO_DIRECT_PCI_READ_WRITE */ + read_value = intel_mid_msgbus_read32_raw(address); + #endif /* if DO_DIRECT_PCI_READ_WRITE */ +@@ -914,43 +842,47 @@ u32 sw_platform_configdb_read32(u32 address) + return read_value; + } + +-u32 sw_platform_pci_read32(u32 bus, u32 device, u32 function, u32 write_offset, +- u32 write_value, u32 read_offset) ++u32 sw_platform_pci_read32(u32 bus, u32 device, u32 function, ++ u32 write_offset, u32 write_value, u32 read_offset) + { + u32 read_value = 0; +- struct pci_dev *pci_root = pci_get_domain_bus_and_slot( +- 0, bus, PCI_DEVFN(device, function)); /* 0, PCI_DEVFN(0, 0)); */ +- if (!pci_root) { ++ struct pci_dev *pci_root = ++ pci_get_domain_bus_and_slot(0, bus, ++ /* 0, PCI_DEVFN(0, 0)); */ ++ PCI_DEVFN(device, function)); ++ ++ if (!pci_root) + return 0; /* Application will verify the data */ +- } +- if (write_offset) { +- pci_write_config_dword( +- pci_root, write_offset, +- write_value); /* SW_PCI_MSG_CTRL_REG, address); */ +- } +- pci_read_config_dword( +- pci_root, read_offset, +- &read_value); /* SW_PCI_MSG_DATA_REG, &read_value); */ ++ ++ if (write_offset) ++ pci_write_config_dword(pci_root, ++ /* SW_PCI_MSG_CTRL_REG, address); */ ++ write_offset, write_value); ++ ++ pci_read_config_dword(pci_root, ++ /* SW_PCI_MSG_DATA_REG, &read_value); */ ++ read_offset, &read_value); + return read_value; + } + + u64 sw_platform_pci_read64(u32 bus, u32 device, u32 function, u32 write_offset, +- u32 write_value, u32 read_offset) ++ u32 
write_value, u32 read_offset) + { +- u32 lo = sw_platform_pci_read32(bus, device, function, +- 0 /* CTRL-OFFSET */, +- 0 /* CTRL-DATA, don't care */, +- read_offset /* DATA-OFFSET */); +- u32 hi = sw_platform_pci_read32(bus, device, function, +- 0 /* CTRL-OFFSET */, +- 0 /* CTRL-DATA, don't care */, +- read_offset + 4 /* DATA-OFFSET */); ++ u32 lo = sw_platform_pci_read32( ++ bus, device, function, 0 /* CTRL-OFFSET */, ++ 0 /* CTRL-DATA, don't care */, ++ read_offset /* DATA-OFFSET */); ++ u32 hi = sw_platform_pci_read32( ++ bus, device, function, 0 /* CTRL-OFFSET */, ++ 0 /* CTRL-DATA, don't care */, ++ read_offset + 4 /* DATA-OFFSET */); ++ + return ((u64)hi << 32) | lo; + } + + void sw_write_msr_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) + { + u64 write_value = descriptor->write_value; + u64 address = descriptor->msr_descriptor.address; +@@ -958,45 +890,43 @@ void sw_write_msr_info_i(char *dst_vals, int cpu, + pw_pr_debug( + "ADDRESS = 0x%llx, CPU = %d, counter size = %u, value = %llu\n", + address, cpu, counter_size_in_bytes, write_value); +- if (likely(cpu == RAW_CPU())) { ++ if (likely(cpu == RAW_CPU())) + wrmsrl_safe((unsigned long)address, write_value); +- } else { +- u32 l = write_value & 0xffffffff, +- h = (write_value >> 32) & 0xffffffff; ++ else { ++ u32 l = write_value & 0xffffffff; ++ u32 h = (write_value >> 32) & 0xffffffff; ++ + wrmsr_safe_on_cpu(cpu, (u32)address, l, h); + } +- return; + }; + + void sw_write_mmio_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) + { +- unsigned long remapped_address = +- (unsigned long) ++ unsigned long remapped_address = (unsigned long) + descriptor->mmio_descriptor.data_remapped_address; + u64 write_value = 
descriptor->write_value; + +- if (remapped_address) { +- memcpy_toio((volatile void __iomem *)remapped_address, +- &write_value, +- counter_size_in_bytes); +- } ++ if (remapped_address) ++ memcpy_toio((volatile void __iomem *)remapped_address, &write_value, ++ counter_size_in_bytes); ++ + pw_pr_debug("Value = %llu\n", *((u64 *)dst_vals)); + }; + + void sw_write_mailbox_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) + { + /* + * TODO: spinlock? + */ + const struct sw_driver_mailbox_io_descriptor *mailbox = +- &descriptor->mailbox_descriptor; ++ &descriptor->mailbox_descriptor; + unsigned long interface_address = mailbox->interface_address; + unsigned long interface_remapped_address = +- mailbox->interface_remapped_address; ++ mailbox->interface_remapped_address; + unsigned long data_address = mailbox->data_address; + u64 data = descriptor->write_value; + size_t iter = 0; +@@ -1004,82 +934,73 @@ void sw_write_mailbox_info_i(char *dst_vals, int cpu, + if (mailbox->is_msr_type) { + u64 command = 0; + +- if (rdmsrl_safe(interface_address, &command)) { +- pw_pr_warn("Failed to read MSR address = 0x%llx\n", +- interface_address); +- } ++ rdmsrl_safe(interface_address, &command); + command &= mailbox->command_mask; +- command |= mailbox->command | (u64)0x1 << mailbox->run_busy_bit; ++ command |= mailbox->command | ++ (u64)0x1 << mailbox->run_busy_bit; + wrmsrl_safe(data_address, data); + wrmsrl_safe(interface_address, command); + do { +- if (rdmsrl_safe(interface_address, &command)) { +- pw_pr_warn("Failed to read MSR address = 0x%llx\n", +- interface_address); +- } ++ rdmsrl_safe(interface_address, &command); + } while ((command & ((u64)0x1 << mailbox->run_busy_bit)) && +- ++iter < MAX_MAILBOX_ITERS); ++ ++iter < MAX_MAILBOX_ITERS); + } else { + u32 command = 0; + + memcpy_fromio(&command, +- (volatile void __iomem 
*)(unsigned long) +- interface_remapped_address, +- sizeof(command)); ++ (volatile void __iomem *)(unsigned long)interface_remapped_address, ++ sizeof(command)); + command &= mailbox->command_mask; + command |= (u32)mailbox->command | +- (u32)0x1 << mailbox->run_busy_bit; ++ (u32)0x1 << mailbox->run_busy_bit; + memcpy_toio((volatile void __iomem *)(unsigned long) +- mailbox->data_remapped_address, +- &data, sizeof(data)); +- memcpy_toio((volatile void __iomem *)(unsigned long) +- interface_remapped_address, +- &command, sizeof(command)); ++ mailbox->data_remapped_address, ++ &data, sizeof(data)); ++ memcpy_toio((volatile void __iomem *)(unsigned long)interface_remapped_address, ++ &command, sizeof(command)); + do { +- memcpy_fromio(&command, +- (volatile void __iomem *)(unsigned long) +- interface_remapped_address, +- sizeof(command)); ++ memcpy_fromio(&command, (volatile void __iomem *)(unsigned long) ++ interface_remapped_address, sizeof(command)); + } while ((command & ((u32)0x1 << mailbox->run_busy_bit)) && +- ++iter < MAX_MAILBOX_ITERS); ++ ++iter < MAX_MAILBOX_ITERS); + } + } + + void sw_write_pci_info_i(char *dst_vals, int cpu, +- const struct sw_driver_io_descriptor *descriptor, +- u16 counter_size_in_bytes) ++ const struct sw_driver_io_descriptor *descriptor, ++ u16 counter_size_in_bytes) + { +- u32 bus = descriptor->pci_descriptor.bus, +- device = descriptor->pci_descriptor.device; +- u32 function = descriptor->pci_descriptor.function, +- offset = descriptor->pci_descriptor.offset; ++ u32 bus = descriptor->pci_descriptor.bus; ++ u32 device = descriptor->pci_descriptor.device; ++ u32 function = descriptor->pci_descriptor.function; ++ u32 offset = descriptor->pci_descriptor.offset; + u32 write_value = (u32)descriptor->write_value; + /* + * 'counter_size_in_bytes' is ignored for now. 
+ */ + if (!sw_platform_pci_write32(bus, device, function, offset, +- write_value)) { +- pw_pr_error("ERROR writing to PCI B/D/F/O %u/%u/%u/%u\n", bus, +- device, function, offset); +- } else { +- pw_pr_debug( +- "OK, successfully wrote to PCI B/D/F/O %u/%u/%u/%u\n", ++ write_value)) ++ pw_pr_error("ERROR writing to PCI B/D/F/O %u/%u/%u/%u\n", + bus, device, function, offset); +- } +- return; ++ else ++ pw_pr_debug("OK, successfully wrote to PCI B/D/F/O %u/%u/%u/%u\n", ++ bus, device, function, offset); ++ + }; + + /* + * Write to PCI space via config registers. + */ + bool sw_platform_pci_write32(u32 bus, u32 device, u32 function, +- u32 write_offset, u32 data_value) ++ u32 write_offset, u32 data_value) + { +- struct pci_dev *pci_root = pci_get_domain_bus_and_slot( +- 0, bus, PCI_DEVFN(device, function)); /* 0, PCI_DEVFN(0, 0)); */ +- if (!pci_root) { ++ struct pci_dev *pci_root = ++ pci_get_domain_bus_and_slot(0, bus, ++ PCI_DEVFN(device, function));/* 0, PCI_DEVFN(0, 0)); */ ++ ++ if (!pci_root) + return false; +- } ++ + + pci_write_config_dword(pci_root, write_offset, data_value); + +@@ -1088,11 +1009,11 @@ bool sw_platform_pci_write32(u32 bus, u32 device, u32 function, + + int sw_print_msr_io_descriptor(const struct sw_driver_io_descriptor *descriptor) + { +- if (!descriptor) { ++ if (!descriptor) + return -PW_ERROR; +- } ++ + pw_pr_debug("MSR address = 0x%llx\n", +- descriptor->msr_descriptor.address); ++ descriptor->msr_descriptor.address); + return PW_SUCCESS; + } + +@@ -1102,34 +1023,33 @@ int sw_ipc_mmio_descriptor_reset_func_i( + /* Unmap previously mapped memory here */ + struct sw_driver_ipc_mmio_io_descriptor *__ipc_mmio = NULL; + +- if (!descriptor) { /* Should NEVER happen */ ++ if (!descriptor) /* Should NEVER happen */ + return -PW_ERROR; +- } +- if (descriptor->collection_type == SW_IO_IPC) { +- __ipc_mmio = +- (struct sw_driver_ipc_mmio_io_descriptor *)&descriptor +- ->ipc_descriptor; +- } else { +- __ipc_mmio = +- (struct 
sw_driver_ipc_mmio_io_descriptor *)&descriptor +- ->mmio_descriptor; +- } ++ ++ if (descriptor->collection_type == SW_IO_IPC) ++ __ipc_mmio = (struct sw_driver_ipc_mmio_io_descriptor *) ++ &descriptor->ipc_descriptor; ++ else ++ __ipc_mmio = (struct sw_driver_ipc_mmio_io_descriptor *) ++ &descriptor->mmio_descriptor; ++ + if (__ipc_mmio->data_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", +- __ipc_mmio->data_remapped_address); ++ __ipc_mmio->data_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) +- __ipc_mmio->data_remapped_address); ++ __ipc_mmio->data_remapped_address); + __ipc_mmio->data_remapped_address = 0; + } + /* Uninitialize the GBE, if it wasn't already done */ +- if (s_gbe_semaphore.hw_semaphore || s_gbe_semaphore.fw_semaphore) { ++ if (s_gbe_semaphore.hw_semaphore || ++ s_gbe_semaphore.fw_semaphore) { + pw_pr_debug("Uninitializing gbe!\n"); +- if (s_gbe_semaphore.hw_semaphore) { ++ if (s_gbe_semaphore.hw_semaphore) + iounmap(s_gbe_semaphore.hw_semaphore); +- } +- if (s_gbe_semaphore.fw_semaphore) { ++ ++ if (s_gbe_semaphore.fw_semaphore) + iounmap(s_gbe_semaphore.fw_semaphore); +- } ++ + memset(&s_gbe_semaphore, 0, sizeof(s_gbe_semaphore)); + } + return PW_SUCCESS; +@@ -1141,31 +1061,30 @@ int sw_pch_mailbox_descriptor_reset_func_i( + /* Unmap previously mapped memory here */ + struct sw_driver_pch_mailbox_io_descriptor *__pch_mailbox = NULL; + +- if (!descriptor) { /* Should NEVER happen */ ++ if (!descriptor) /* Should NEVER happen */ + return -PW_ERROR; +- } +- __pch_mailbox = +- (struct sw_driver_pch_mailbox_io_descriptor *)&descriptor +- ->pch_mailbox_descriptor; ++ ++ __pch_mailbox = (struct sw_driver_pch_mailbox_io_descriptor *) ++ &descriptor->pch_mailbox_descriptor; + if (__pch_mailbox->mtpmc_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", +- __pch_mailbox->mtpmc_remapped_address); ++ __pch_mailbox->mtpmc_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) +- 
__pch_mailbox->mtpmc_remapped_address); ++ __pch_mailbox->mtpmc_remapped_address); + __pch_mailbox->mtpmc_remapped_address = 0; + } + if (__pch_mailbox->msg_full_sts_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", +- __pch_mailbox->msg_full_sts_remapped_address); ++ __pch_mailbox->msg_full_sts_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) +- __pch_mailbox->msg_full_sts_remapped_address); ++ __pch_mailbox->msg_full_sts_remapped_address); + __pch_mailbox->msg_full_sts_remapped_address = 0; + } + if (__pch_mailbox->mfpmc_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", +- __pch_mailbox->mfpmc_remapped_address); ++ __pch_mailbox->mfpmc_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) +- __pch_mailbox->mfpmc_remapped_address); ++ __pch_mailbox->mfpmc_remapped_address); + __pch_mailbox->mfpmc_remapped_address = 0; + } + return PW_SUCCESS; +@@ -1177,24 +1096,24 @@ int sw_mailbox_descriptor_reset_func_i( + /* Unmap previously mapped memory here */ + struct sw_driver_mailbox_io_descriptor *__mailbox = NULL; + +- if (!descriptor) { /* Should NEVER happen */ ++ if (!descriptor) /* Should NEVER happen */ + return -PW_ERROR; +- } +- __mailbox = (struct sw_driver_mailbox_io_descriptor *)&descriptor +- ->mailbox_descriptor; ++ ++ __mailbox = (struct sw_driver_mailbox_io_descriptor *) ++ &descriptor->mailbox_descriptor; + if (!__mailbox->is_msr_type) { + if (__mailbox->interface_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", +- __mailbox->interface_remapped_address); ++ __mailbox->interface_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) +- __mailbox->interface_remapped_address); ++ __mailbox->interface_remapped_address); + __mailbox->interface_remapped_address = 0; + } + if (__mailbox->data_remapped_address) { + pw_pr_debug("unmapping addr 0x%llx\n", +- __mailbox->data_remapped_address); ++ __mailbox->data_remapped_address); + iounmap((volatile void __iomem *)(unsigned long) 
+- __mailbox->data_remapped_address); ++ __mailbox->data_remapped_address); + __mailbox->data_remapped_address = 0; + } + } +@@ -1202,19 +1121,18 @@ int sw_mailbox_descriptor_reset_func_i( + } + + #define NUM_HW_OPS SW_ARRAY_SIZE(s_hw_ops) +-#define FOR_EACH_HW_OP(idx, op) \ +- for (idx = 0; idx < NUM_HW_OPS && (op = &s_hw_ops[idx]); ++idx) ++#define FOR_EACH_HW_OP(idx, op) \ ++ for (idx = 0; idx < NUM_HW_OPS && (op = &s_hw_ops[idx]); ++idx) + + int sw_register_ops_providers(void) + { + size_t idx = 0; + const struct sw_hw_ops *op = NULL; + +- FOR_EACH_HW_OP(idx, op) +- { ++ FOR_EACH_HW_OP(idx, op) { + if (op->name && sw_register_hw_op(op)) { +- pw_pr_error("ERROR registering provider %s\n", +- op->name); ++ pw_pr_error( ++ "ERROR registering provider %s\n", op->name); + return -EIO; + } + } +diff --git a/drivers/platform/x86/socwatch/sw_output_buffer.c b/drivers/platform/x86/socwatch/sw_output_buffer.c +index a0c1c5fedd05..eaccc29f18ea 100644 +--- a/drivers/platform/x86/socwatch/sw_output_buffer.c ++++ b/drivers/platform/x86/socwatch/sw_output_buffer.c +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #include "sw_internal.h" + #include "sw_output_buffer.h" +@@ -84,25 +83,19 @@ + * How much space is available in a given segment? + */ + #define EMPTY_TSC ((u64)-1) +-#define SEG_IS_FULL(seg) \ +- ({ \ +- bool __full = false; \ +- smp_mb(); \ +- __full = ((seg)->is_full != EMPTY_TSC); \ +- __full; \ +- }) +-#define SEG_SET_FULL(seg, tsc) \ +- do { \ +- (seg)->is_full = (tsc); \ +- smp_mb(); \ +- } while (0) +-#define SEG_SET_EMPTY(seg) \ +- do { \ +- barrier(); \ +- (seg)->bytes_written = 0; \ +- SEG_SET_FULL(seg, EMPTY_TSC); \ +- /*smp_mb(); */ \ +- } while (0) ++#define SEG_IS_FULL(seg) ({bool __full = false; \ ++ smp_mb(); /* memory access ordering */\ ++ __full = ((seg)->is_full != EMPTY_TSC); \ ++ __full; }) ++#define SEG_SET_FULL(seg, tsc) do { \ ++ (seg)->is_full = (tsc); \ ++ smp_mb(); /* memory access ordering */\ ++} while (0) ++#define SEG_SET_EMPTY(seg) do { \ ++ barrier(); \ ++ (seg)->bytes_written = 0; \ ++ SEG_SET_FULL(seg, EMPTY_TSC); \ ++} while (0) + #define SPACE_AVAIL(seg) (SW_SEG_DATA_SIZE - (seg)->bytes_written) + #define SEG_IS_EMPTY(seg) (SPACE_AVAIL(seg) == SW_SEG_DATA_SIZE) + +@@ -111,9 +104,10 @@ + * Convenience macro: iterate over each segment in a per-cpu output buffer. 
+ */ + #define for_each_segment(i) for (i = 0; i < NUM_SEGS_PER_BUFFER; ++i) +-#define for_each_seg(buffer, seg) \ +- for (int i = 0; \ +- i < NUM_SEGS_PER_BUFFER && (seg = (buffer)->segments[i]); ++i) ++#define for_each_seg(buffer, seg) \ ++ for (int i = 0; \ ++ i < NUM_SEGS_PER_BUFFER && (seg = (buffer)->segments[i]);\ ++ ++i) + /* + * How many buffers are we using? + */ +@@ -127,17 +121,15 @@ + * Local data structures. + * ------------------------------------------------- + */ +-typedef struct sw_data_buffer sw_data_buffer_t; +-typedef struct sw_output_buffer sw_output_buffer_t; + struct sw_data_buffer { + u64 is_full; + u32 bytes_written; + char *buffer; +-} __attribute__((packed)); ++} __packed; + #define SW_SEG_HEADER_SIZE() (sizeof(struct sw_data_buffer) - sizeof(char *)) + + struct sw_output_buffer { +- sw_data_buffer_t buffers[NUM_SEGS_PER_BUFFER]; ++ struct sw_data_buffer buffers[NUM_SEGS_PER_BUFFER]; + int buff_index; + u32 produced_samples; + u32 dropped_samples; +@@ -146,6 +138,24 @@ struct sw_output_buffer { + unsigned long free_pages; + } ____cacheline_aligned_in_smp; + ++/* ************************************************* ++ * For circular buffer (continuous profiling) ++ * ************************************************* ++ */ ++static char *output_buffer; ++ ++struct buffer { ++ union { ++ char *data; ++ unsigned long free_pages; ++ }; ++ size_t read_index, write_index; ++ unsigned long size; ++}; ++SW_DECLARE_RWLOCK(sw_continuous_lock); ++ ++static struct buffer buffer; /* TODO: rename */ ++ + /* ------------------------------------------------- + * Function declarations. + * ------------------------------------------------- +@@ -163,7 +173,7 @@ DECLARE_OVERHEAD_VARS(sw_produce_generic_msg_i); + /* + * Per-cpu output buffers. + */ +-static sw_output_buffer_t *per_cpu_output_buffers; ++static struct sw_output_buffer *per_cpu_output_buffers; + /* + * Variables for book keeping. 
+ */ +@@ -183,26 +193,254 @@ unsigned long sw_buffer_alloc_size = (1 << 16); /* 64 KB */ + * ------------------------------------------------- + */ + ++/* ************************************************* ++ * For circular buffer (continuous profiling) ++ * ************************************************* ++ */ ++#define MIN(x, y) ((x) <= (y) ? (x) : (y)) ++ ++#define IS_BUFFER_EMPTY(buffer) \ ++ ((buffer).write_index == (buffer).read_index) ++#define IS_BUFFER_FULL(buffer) \ ++ ((buffer).write_index == \ ++ ((buffer).read_index + 1) & (buffer.size - 1)) ++ ++static inline size_t get_space_available(struct buffer *buffer) ++{ ++ size_t read = 0, write = 0; ++ ++ smp_mb(); /* memory access ordering */ ++ read = buffer->read_index; ++ write = buffer->write_index; ++ if (write < read) ++ return read - write; ++ ++ return (buffer->size - write) + read; ++} ++ ++static inline size_t get_data_available(struct buffer *buffer) ++{ ++ size_t read = 0, write = 0; ++ ++ smp_mb(); /* memory access ordering */ ++ read = buffer->read_index; ++ write = buffer->write_index; ++ if (read <= write) ++ return write - read; ++ ++ return (buffer->size - read) + write; ++} ++ ++static void copy_wraparound(const char *src, size_t src_size, size_t *index) ++{ ++ size_t buff_size_left = buffer.size - *index; ++ size_t to_write = MIN(buff_size_left, src_size); ++ size_t _index = *index; ++ ++ if (src_size < buff_size_left) { ++ memcpy(&buffer.data[_index], src, src_size); ++ _index += src_size; ++ } else { ++ memcpy(&buffer.data[_index], src, to_write); ++ _index = 0; ++ src += to_write; ++ to_write = src_size - to_write; ++ memcpy(&buffer.data[_index], src, to_write); ++ _index += to_write; ++ pw_pr_debug("DEBUG: wrap memcpy\n"); ++ } ++ *index = (*index + src_size) & (buffer.size - 1); ++} ++ ++static int enqueue_data(struct sw_driver_msg *msg, enum sw_wakeup_action action) ++{ ++ size_t size = SW_DRIVER_MSG_HEADER_SIZE() + msg->payload_len; ++ bool wrapped = false; ++ ++ msg->tsc = 0; ++ 
++ READ_LOCK(sw_continuous_lock); ++ while (true) { ++ size_t old_write_index = buffer.write_index; ++ size_t new_write_index = (old_write_index + size) & ++ (buffer.size - 1); ++ ++ if (get_space_available(&buffer) < size) ++ break; ++ ++ if (CAS32(&buffer.write_index, old_write_index, ++ new_write_index)) { ++ msg->tsc = sw_timestamp(); ++ wrapped = new_write_index <= old_write_index; ++ /* First copy header */ ++ copy_wraparound((const char *)msg, ++ SW_DRIVER_MSG_HEADER_SIZE(), &old_write_index); ++ /* Then copy payload */ ++ copy_wraparound((const char *)msg->p_payload, ++ msg->payload_len, &old_write_index); ++ break; ++ } ++ } ++ READ_UNLOCK(sw_continuous_lock); ++ if (!msg->tsc) ++ pw_pr_error("ERROR: couldn't enqueue data\n"); ++ if (wrapped) ++ pw_pr_debug("DEBUG: wrapped!\n"); ++ ++ return msg->tsc ? 0 : -1; ++} ++ ++/* ++ * Returns # of bytes successfully consumed on success ++ * 0 on EOF (no error condition) ++ */ ++static size_t consume_buffer(void *dest, size_t bytes_to_read) ++{ ++ size_t read_index = 0, write_index = 0, dst_index = 0; ++ size_t to_read = 0; ++ bool wrapped = false; ++ size_t read_size = bytes_to_read; ++ unsigned long bytes_not_copied = 0; ++ struct sw_driver_continuous_collect data = {0}; ++ ++ WRITE_LOCK(sw_continuous_lock); ++ smp_mb(); /* memory access ordering */ ++ read_index = buffer.read_index; ++ write_index = buffer.write_index; ++ /* EXE sends size as header + payload; we only want payload */ ++ read_size -= SW_DRIVER_CONTINUOUS_COLLECT_HEADER_SIZE(); ++ data.collection_size = to_read = ++ MIN(read_size, get_data_available(&buffer)); ++ pw_pr_debug( ++ "DEBUG: read = %zu, write = %zu, avail = %zu, to_read = %zu\n", ++ read_index, write_index, get_data_available(&buffer), to_read); ++ while (to_read) { ++ size_t curr_read = to_read; ++ ++ if (read_index + to_read > buffer.size) { ++ curr_read = buffer.size - read_index; ++ wrapped = true; ++ pw_pr_debug( ++ "DEBUG: read = %zu, to_read = %zu, curr_read = %zu, buffer.size = 
%lu, WRAPPED!\n", ++ read_index, to_read, curr_read, buffer.size); ++ } ++ memcpy(&output_buffer[dst_index], ++ &buffer.data[read_index], curr_read); ++ read_index = (read_index + curr_read) & (buffer.size - 1); ++ to_read -= curr_read; ++ dst_index += curr_read; ++ } ++ buffer.read_index = read_index; ++ smp_mb(); /* memory access ordering */ ++ pw_pr_debug("DEBUG: read at end of while = %zu\n", buffer.read_index); ++ WRITE_UNLOCK(sw_continuous_lock); ++ ++ /* ++ * Call 'copy_to_user' instead of 'sw_copy_to_user' since ++ * sw_copy_to_user expects to see a 'struct uio' while this ++ * is called from an IOCTL which does NOT have a 'struct uio' ++ */ ++ bytes_not_copied = ++ copy_to_user(dest, (char *)&data, ++ SW_DRIVER_CONTINUOUS_COLLECT_HEADER_SIZE()); ++ if (bytes_not_copied) ++ return 0; ++ ++ pw_pr_debug("DEBUG: collection size = %u\n", data.collection_size); ++ if (data.collection_size) { ++ bytes_not_copied = ++ copy_to_user(dest + ++ SW_DRIVER_CONTINUOUS_COLLECT_HEADER_SIZE(), ++ output_buffer, data.collection_size); ++ if (bytes_not_copied) ++ return 0; ++ ++ } ++ return data.collection_size; ++} ++ ++long initialize_circular_buffer(size_t size) ++{ ++ size_t alloc_size = size, read_size = size; ++ /* ++ * We require a power of two size ++ */ ++ pw_pr_debug("DEBUG: old alloc size = %zu\n", alloc_size); ++ if ((alloc_size & (alloc_size - 1)) != 0) ++ alloc_size = 1 << fls(alloc_size); ++ ++ pw_pr_debug("DEBUG: new alloc size = %zu\n", alloc_size); ++ /* Create double-sized buffer */ ++ alloc_size <<= 1; ++ pw_pr_debug("DEBUG: double alloc size = %zu\n", alloc_size); ++ memset(&buffer, 0, sizeof(buffer)); ++ buffer.free_pages = ++ sw_allocate_pages(GFP_KERNEL | __GFP_ZERO, alloc_size); ++ if (!buffer.free_pages) { ++ pw_pr_error("Couldn't allocate space for buffer!\n"); ++ return -ENOMEM; ++ } ++ buffer.read_index = buffer.write_index = 0; ++ buffer.size = alloc_size; ++ SW_INIT_RWLOCK(sw_continuous_lock); ++ /* ++ * Create temp output buffer ++ */ ++ 
output_buffer = vmalloc(read_size); ++ if (!output_buffer) { ++ pw_pr_error( ++ "Couldn't create temporary buffer for data output!\n"); ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++void reset_output_buffers(void) ++{ ++ buffer.read_index = buffer.write_index = 0; ++} ++ ++ ++void destroy_circular_buffer(void) ++{ ++ if (buffer.free_pages) { ++ sw_release_pages(buffer.free_pages, buffer.size); ++ buffer.free_pages = 0; ++ } ++ if (output_buffer) { ++ vfree(output_buffer); ++ output_buffer = NULL; ++ } ++ SW_DESTROY_RWLOCK(sw_continuous_lock); ++ pw_pr_debug("DEBUG: read = %zu, write = %zu\n", buffer.read_index, ++ buffer.write_index); ++} ++ ++/* ************************************************* ++ * For per-cpu buffers (non circular) ++ * ************************************************* ++ */ ++ + static char *reserve_seg_space_i(size_t size, int cpu, bool *should_wakeup, +- u64 *reservation_tsc) ++ u64 *reservation_tsc) + { +- sw_output_buffer_t *buffer = GET_OUTPUT_BUFFER(cpu); ++ struct sw_output_buffer *buffer = GET_OUTPUT_BUFFER(cpu); + int i = 0; + int buff_index = buffer->buff_index; + char *dst = NULL; + +- if (buff_index < 0 || buff_index >= NUM_SEGS_PER_BUFFER) { ++ if (buff_index < 0 || buff_index >= NUM_SEGS_PER_BUFFER) + goto prod_seg_done; +- } ++ + for_each_segment(i) { +- sw_data_buffer_t *seg = &buffer->buffers[buff_index]; ++ struct sw_data_buffer *seg = &buffer->buffers[buff_index]; + + if (SEG_IS_FULL(seg) == false) { + if (SPACE_AVAIL(seg) >= size) { + *reservation_tsc = sw_timestamp(); + dst = &seg->buffer[seg->bytes_written]; + seg->bytes_written += size; +- smp_mb(); ++ smp_mb(); /* memory access ordering */ + buffer->buff_index = buff_index; + buffer->produced_samples++; + goto prod_seg_done; +@@ -213,36 +451,38 @@ static char *reserve_seg_space_i(size_t size, int cpu, bool *should_wakeup, + *should_wakeup = true; + } + prod_seg_done: +- if (!dst) { ++ if (!dst) + buffer->dropped_samples++; +- } ++ + return dst; + }; + +-static int 
sw_produce_polled_msg_i(struct sw_driver_msg *msg, +- enum sw_wakeup_action action) ++#ifdef CONFIG_PREEMPT_COUNT ++static int produce_polled_msg(struct sw_driver_msg *msg, ++ enum sw_wakeup_action action) + { + int cpu = GET_POLLED_CPU(); + bool should_wakeup = false; + int retVal = PW_SUCCESS; + +- if (!msg) { ++ if (!msg) + return -PW_ERROR; +- } ++ + pw_pr_debug("POLLED! cpu = %d\n", cpu); + LOCK(sw_polled_lock); + { + size_t size = SW_DRIVER_MSG_HEADER_SIZE() + msg->payload_len; +- char *dst = reserve_seg_space_i(size, cpu, &should_wakeup, +- &msg->tsc); ++ char *dst = reserve_seg_space_i(size, cpu, ++ &should_wakeup, &msg->tsc); ++ + if (dst) { + /* + * Assign a special CPU number to this CPU. + * This is OK, because messages enqueued in this buffer + * are always CPU agnostic (otherwise they would + * be invoked from within a preempt_disable()d context +- * in 'sw_handle_collector_node_i()', which ensures they +- * will be enqueued within the ++ * in 'sw_handle_collector_node_i()', which ensures ++ * they will be enqueued within the + * 'sw_produce_generic_msg_on_cpu()' function). 
+ */ + msg->cpuidx = cpu; +@@ -255,14 +495,15 @@ static int sw_produce_polled_msg_i(struct sw_driver_msg *msg, + } + } + UNLOCK(sw_polled_lock); +- if (unlikely(should_wakeup)) { ++ if (unlikely(should_wakeup)) + sw_wakeup_reader(action); +- } ++ + return retVal; + }; ++#endif /* CONFIG_PREEMPT_COUNT */ + + static int sw_produce_generic_msg_i(struct sw_driver_msg *msg, +- enum sw_wakeup_action action) ++ enum sw_wakeup_action action) + { + int retval = PW_SUCCESS; + bool should_wakeup = false; +@@ -274,55 +515,51 @@ static int sw_produce_generic_msg_i(struct sw_driver_msg *msg, + return -PW_ERROR; + } + ++ /* Check if we need to use circular buffer */ ++ if (output_buffer) ++ return enqueue_data(msg, action); ++ + #ifdef CONFIG_PREEMPT_COUNT +- if (!in_atomic()) { +- return sw_produce_polled_msg(msg, action); +- } ++ if (!in_atomic()) ++ return produce_polled_msg(msg, action); + #endif + + cpu = sw_get_cpu(&flags); + { +- size_t size = msg->payload_len + SW_DRIVER_MSG_HEADER_SIZE(); ++ size_t size = msg->payload_len + ++ SW_DRIVER_MSG_HEADER_SIZE(); + char *dst = reserve_seg_space_i(size, cpu, &should_wakeup, + &msg->tsc); ++ + if (likely(dst)) { + memcpy(dst, msg, SW_DRIVER_MSG_HEADER_SIZE()); + dst += SW_DRIVER_MSG_HEADER_SIZE(); + memcpy(dst, msg->p_payload, msg->payload_len); +- } else { ++ } else + retval = -PW_ERROR; +- } + } + sw_put_cpu(flags); + +- if (unlikely(should_wakeup)) { ++ if (unlikely(should_wakeup)) + sw_wakeup_reader(action); +- } + + return retval; + }; + +-int sw_produce_polled_msg(struct sw_driver_msg *msg, +- enum sw_wakeup_action action) +-{ +- return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_produce_polled_msg_i, msg, +- action); +-}; +- + int sw_produce_generic_msg(struct sw_driver_msg *msg, +- enum sw_wakeup_action action) ++ enum sw_wakeup_action action) + { +- return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_produce_generic_msg_i, msg, +- action); ++ return DO_PER_CPU_OVERHEAD_FUNC_RET(int, sw_produce_generic_msg_i, ++ msg, action); + }; + + 
static int sw_init_per_cpu_buffers_i(unsigned long per_cpu_mem_size) + { + int cpu = -1; + +- per_cpu_output_buffers = (sw_output_buffer_t *)sw_kmalloc( +- sizeof(sw_output_buffer_t) * GET_NUM_OUTPUT_BUFFERS(), +- GFP_KERNEL | __GFP_ZERO); ++ per_cpu_output_buffers = ++ (struct sw_output_buffer *)sw_kmalloc(sizeof(struct sw_output_buffer) * ++ GET_NUM_OUTPUT_BUFFERS(), GFP_KERNEL | __GFP_ZERO); + if (per_cpu_output_buffers == NULL) { + pw_pr_error( + "ERROR allocating space for per-cpu output buffers!\n"); +@@ -330,17 +567,16 @@ static int sw_init_per_cpu_buffers_i(unsigned long per_cpu_mem_size) + return -PW_ERROR; + } + for_each_output_buffer(cpu) { +- sw_output_buffer_t *buffer = &per_cpu_output_buffers[cpu]; ++ struct sw_output_buffer *buffer = &per_cpu_output_buffers[cpu]; + char *buff = NULL; + int i = 0; + + buffer->mem_alloc_size = per_cpu_mem_size; +- buffer->free_pages = +- sw_allocate_pages(GFP_KERNEL | __GFP_ZERO, +- (unsigned int)per_cpu_mem_size); ++ buffer->free_pages = sw_allocate_pages(GFP_KERNEL | __GFP_ZERO, ++ (unsigned int)per_cpu_mem_size); + if (buffer->free_pages == 0) { + pw_pr_error("ERROR allocating pages for buffer [%d]!\n", +- cpu); ++ cpu); + sw_destroy_per_cpu_buffers(); + return -PW_ERROR; + } +@@ -351,8 +587,7 @@ static int sw_init_per_cpu_buffers_i(unsigned long per_cpu_mem_size) + } + } + pw_pr_debug("PER_CPU_MEM_SIZE = %lu, order = %u\n", +- (unsigned long)per_cpu_mem_size, +- get_order(per_cpu_mem_size)); ++ (unsigned long)per_cpu_mem_size, get_order(per_cpu_mem_size)); + return PW_SUCCESS; + }; + +@@ -364,30 +599,28 @@ int sw_init_per_cpu_buffers(void) + + if (GET_NUM_OUTPUT_BUFFERS() <= 0) { + pw_pr_error("ERROR: max # output buffers= %d\n", +- GET_NUM_OUTPUT_BUFFERS()); ++ GET_NUM_OUTPUT_BUFFERS()); + return -PW_ERROR; + } + + pw_pr_debug("DEBUG: sw_max_num_cpus = %d, num output buffers = %d\n", +- sw_max_num_cpus, GET_NUM_OUTPUT_BUFFERS()); ++ sw_max_num_cpus, GET_NUM_OUTPUT_BUFFERS()); + + /* +- * Try to allocate per-cpu 
buffers. If allocation fails, +- * decrease buffer size and retry. Stop trying if size +- * drops below 2KB (which means 1KB for each buffer). ++ * Try to allocate per-cpu buffers. If allocation fails, decrease ++ * buffer size and retry. Stop trying if size drops below 2KB ++ * (which means 1KB for each buffer). + */ + while (per_cpu_mem_size >= SW_MIN_OUTPUT_BUFFER_SIZE && +- sw_init_per_cpu_buffers_i(per_cpu_mem_size)) { +- pw_pr_debug( +- "WARNING: couldn't allocate per-cpu buffers with size %u -- trying smaller size!\n", ++ sw_init_per_cpu_buffers_i(per_cpu_mem_size)) { ++ pw_pr_debug("WARNING: couldn't allocate per-cpu buffers with size %u -- trying smaller size!\n", + per_cpu_mem_size); + sw_buffer_alloc_size >>= 1; + per_cpu_mem_size = sw_get_output_buffer_size(); + } + + if (unlikely(per_cpu_output_buffers == NULL)) { +- pw_pr_error( +- "ERROR: couldn't allocate space for per-cpu output buffers!\n"); ++ pw_pr_error("ERROR: couldn't allocate space for per-cpu output buffers!\n"); + return -PW_ERROR; + } + /* +@@ -396,7 +629,7 @@ int sw_init_per_cpu_buffers(void) + SW_INIT_SPINLOCK(sw_polled_lock); + + pw_pr_debug("OK, allocated per-cpu buffers with size = %lu\n", +- (unsigned long)per_cpu_mem_size); ++ (unsigned long)per_cpu_mem_size); + + if (sw_init_reader_queue()) { + pw_pr_error("ERROR initializing reader subsys\n"); +@@ -417,11 +650,12 @@ void sw_destroy_per_cpu_buffers(void) + + if (per_cpu_output_buffers != NULL) { + for_each_output_buffer(cpu) { +- sw_output_buffer_t *buffer = +- &per_cpu_output_buffers[cpu]; ++ struct sw_output_buffer *buffer = ++ &per_cpu_output_buffers[cpu]; ++ + if (buffer->free_pages != 0) { + sw_release_pages(buffer->free_pages, +- buffer->mem_alloc_size); ++ buffer->mem_alloc_size); + buffer->free_pages = 0; + } + } +@@ -435,14 +669,14 @@ void sw_reset_per_cpu_buffers(void) + int cpu = 0, i = 0; + + for_each_output_buffer(cpu) { +- sw_output_buffer_t *buffer = GET_OUTPUT_BUFFER(cpu); ++ struct sw_output_buffer *buffer = 
GET_OUTPUT_BUFFER(cpu); + + buffer->buff_index = buffer->dropped_samples = + buffer->produced_samples = 0; + buffer->last_seg_read = -1; + + for_each_segment(i) { +- sw_data_buffer_t *seg = &buffer->buffers[i]; ++ struct sw_data_buffer *seg = &buffer->buffers[i]; + + memset(seg->buffer, 0, SW_SEG_DATA_SIZE); + SEG_SET_EMPTY(seg); +@@ -451,6 +685,12 @@ void sw_reset_per_cpu_buffers(void) + sw_last_cpu_read = -1; + sw_last_mask = -1; + pw_pr_debug("OK, reset per-cpu output buffers!\n"); ++ /* ++ * Reset circular buffer if it has been allocated ++ */ ++ if (output_buffer) ++ buffer.read_index = buffer.write_index = 0; ++ + }; + + bool sw_any_seg_full(u32 *val, bool is_flush_mode) +@@ -458,34 +698,34 @@ bool sw_any_seg_full(u32 *val, bool is_flush_mode) + int num_visited = 0, i = 0; + + if (!val) { +- pw_pr_error("ERROR: NULL ptrs in sw_any_seg_full!\n"); ++ pw_pr_error("ERROR: NULL ptrs in %s!\n", __func__); + return false; + } + + *val = SW_NO_DATA_AVAIL_MASK; +- pw_pr_debug("Checking for full seg: val = %u, flush = %s\n", *val, +- GET_BOOL_STRING(is_flush_mode)); ++ pw_pr_debug("Checking for full seg: val = %u, flush = %s\n", ++ *val, GET_BOOL_STRING(is_flush_mode)); + for_each_output_buffer(num_visited) { + int min_seg = EMPTY_SEG, non_empty_seg = EMPTY_SEG; + u64 min_tsc = EMPTY_TSC; +- sw_output_buffer_t *buffer = NULL; ++ struct sw_output_buffer *buffer = NULL; + +- if (++sw_last_cpu_read >= GET_NUM_OUTPUT_BUFFERS()) { ++ if (++sw_last_cpu_read >= GET_NUM_OUTPUT_BUFFERS()) + sw_last_cpu_read = 0; +- } ++ + buffer = GET_OUTPUT_BUFFER(sw_last_cpu_read); + for_each_segment(i) { +- sw_data_buffer_t *seg = &buffer->buffers[i]; ++ struct sw_data_buffer *seg = &buffer->buffers[i]; + u64 seg_tsc = seg->is_full; + +- if (SEG_IS_EMPTY(seg)) { ++ if (SEG_IS_EMPTY(seg)) + continue; +- } ++ + non_empty_seg = i; + if (seg_tsc < min_tsc) { + /* +- * Can only happen if seg was full, +- * provided 'EMPTY_TSC' is set to "(u64)-1" ++ * Can only happen if seg was full, provided ++ 
* 'EMPTY_TSC' is set to "(u64)-1" + */ + min_tsc = seg_tsc; + min_seg = i; +@@ -493,11 +733,11 @@ bool sw_any_seg_full(u32 *val, bool is_flush_mode) + } + if (min_seg != EMPTY_SEG) { + *val = (sw_last_cpu_read & 0xffff) << 16 | +- (min_seg & 0xffff); ++ (min_seg & 0xffff); + return true; + } else if (is_flush_mode && non_empty_seg != EMPTY_SEG) { + *val = (sw_last_cpu_read & 0xffff) << 16 | +- (non_empty_seg & 0xffff); ++ (non_empty_seg & 0xffff); + return true; + } + } +@@ -518,31 +758,34 @@ bool sw_any_seg_full(u32 *val, bool is_flush_mode) + }; + + /* +- * Has semantics of 'copy_to_user()' -- returns # of bytes that could +- * NOT be copied (On success ==> returns 0). ++ * Returns: number of bytes consumed on SUCCESS, 0 on EOF, negative ++ * error code on FAILURE + */ +-size_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read) ++ssize_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read) + { + int which_cpu = -1, which_seg = -1; + unsigned long bytes_not_copied = 0; +- sw_output_buffer_t *buff = NULL; +- sw_data_buffer_t *seg = NULL; ++ struct sw_output_buffer *buff = NULL; ++ struct sw_data_buffer *seg = NULL; + size_t bytes_read = 0; + ++ /* Check if we need to use circular buffer */ ++ if (output_buffer) ++ return (ssize_t)consume_buffer(buffer, bytes_to_read); ++ + if (!sw_check_output_buffer_params(buffer, bytes_to_read, +- SW_SEG_DATA_SIZE)) { +- pw_pr_error("ERROR: invalid params to \"sw_consume_data\"!\n"); +- return -PW_ERROR; ++ SW_SEG_DATA_SIZE)) { ++ pw_pr_error("ERROR: invalid params to \"%s\"!\n", __func__); ++ return -EIO; + } + +- which_cpu = mask >> 16; +- which_seg = mask & 0xffff; ++ which_cpu = mask >> 16; which_seg = mask & 0xffff; + pw_pr_debug("CONSUME: cpu = %d, seg = %d\n", which_cpu, which_seg); + if (which_seg >= NUM_SEGS_PER_BUFFER) { + pw_pr_error( + "Error: which_seg (%d) >= NUM_SEGS_PER_BUFFER (%d)\n", + which_seg, NUM_SEGS_PER_BUFFER); +- return bytes_to_read; ++ return -EIO; + } + /* + * OK to 
access unlocked; either the segment is FULL, or no collection +@@ -552,16 +795,14 @@ size_t sw_consume_data(u32 mask, void __user *buffer, size_t bytes_to_read) + buff = GET_OUTPUT_BUFFER(which_cpu); + seg = &buff->buffers[which_seg]; + +- bytes_not_copied = sw_copy_to_user(buffer, seg->buffer, +- seg->bytes_written); /* dst, src */ ++ bytes_not_copied = sw_copy_to_user(buffer, ++ seg->buffer, seg->bytes_written); /* dst, src */ + +- /* bytes_not_copied = */ +- /* copy_to_user(buffer, seg->buffer, seg->bytes_written); dst,src */ +- if (likely(bytes_not_copied == 0)) { ++ if (likely(bytes_not_copied == 0)) + bytes_read = seg->bytes_written; +- } else { ++ else { + pw_pr_error("Warning: couldn't copy %lu bytes\n", +- bytes_not_copied); ++ bytes_not_copied); + bytes_read = 0; + } + SEG_SET_EMPTY(seg); +@@ -578,12 +819,11 @@ void sw_count_samples_produced_dropped(void) + int cpu = 0; + + sw_num_samples_produced = sw_num_samples_dropped = 0; +- +- if (per_cpu_output_buffers == NULL) { ++ if (per_cpu_output_buffers == NULL) + return; +- } ++ + for_each_output_buffer(cpu) { +- sw_output_buffer_t *buff = GET_OUTPUT_BUFFER(cpu); ++ struct sw_output_buffer *buff = GET_OUTPUT_BUFFER(cpu); + + sw_num_samples_dropped += buff->dropped_samples; + sw_num_samples_produced += buff->produced_samples; +@@ -593,6 +833,6 @@ void sw_count_samples_produced_dropped(void) + void sw_print_output_buffer_overheads(void) + { + PRINT_CUMULATIVE_OVERHEAD_PARAMS(sw_produce_generic_msg_i, +- "PRODUCE_GENERIC_MSG"); ++ "PRODUCE_GENERIC_MSG"); + sw_print_reader_stats(); + }; +diff --git a/drivers/platform/x86/socwatch/sw_reader.c b/drivers/platform/x86/socwatch/sw_reader.c +index 2e55ae1a54cc..ea039c6fe72a 100644 +--- a/drivers/platform/x86/socwatch/sw_reader.c ++++ b/drivers/platform/x86/socwatch/sw_reader.c +@@ -1,64 +1,63 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. 
+- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + #include "sw_internal.h" + #include "sw_output_buffer.h" + #include "sw_kernel_defines.h" + +-#define SW_BUFFER_CLEANUP_TIMER_DELAY_NSEC \ +- 1000000 /* delay buffer cleanup by 10^6 nsec i.e. 1 msec */ ++/* delay buffer cleanup by 10^6 nsec i.e. 1 msec */ ++#define SW_BUFFER_CLEANUP_TIMER_DELAY_NSEC 1000000 + + /* + * The alarm queue. +@@ -92,7 +91,8 @@ int sw_init_reader_queue(void) + /* + * Also init wakeup timer (used in low-overhead mode). 
+ */ +- hrtimer_init(&s_reader_wakeup_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(&s_reader_wakeup_timer, ++ CLOCK_MONOTONIC, HRTIMER_MODE_REL); + s_reader_wakeup_timer.function = &sw_wakeup_callback_i; + + return PW_SUCCESS; +@@ -109,29 +109,26 @@ void sw_destroy_reader_queue(void) + */ + void sw_wakeup_reader(enum sw_wakeup_action action) + { +- if (!waitqueue_active(&sw_reader_queue)) { +- return; +- } +- /* +- * Direct mode? +- */ +- switch (action) { +- case SW_WAKEUP_ACTION_DIRECT: +- wake_up_interruptible(&sw_reader_queue); +- break; +- case SW_WAKEUP_ACTION_TIMER: +- if (!hrtimer_active(&s_reader_wakeup_timer)) { +- ktime_t ktime = +- ns_to_ktime(SW_BUFFER_CLEANUP_TIMER_DELAY_NSEC); +- /* TODO: possible race here -- introduce locks? */ +- hrtimer_start(&s_reader_wakeup_timer, ktime, +- HRTIMER_MODE_REL); ++ if (waitqueue_active(&sw_reader_queue)) { /* direct mode */ ++ switch (action) { ++ case SW_WAKEUP_ACTION_DIRECT: ++ wake_up_interruptible(&sw_reader_queue); ++ break; ++ case SW_WAKEUP_ACTION_TIMER: ++ if (!hrtimer_active(&s_reader_wakeup_timer)) { ++ ktime_t ktime = ns_to_ktime( ++ SW_BUFFER_CLEANUP_TIMER_DELAY_NSEC); ++ /* TODO: possible race here -- ++ * introduce locks? ++ */ ++ hrtimer_start(&s_reader_wakeup_timer, ++ ktime, HRTIMER_MODE_REL); ++ } ++ break; ++ default: ++ break; + } +- break; +- default: +- break; + } +- return; + } + /* + * Wakeup client waiting for a full buffer, and +@@ -143,9 +140,9 @@ void sw_cancel_reader(void) + /* + * Cancel pending wakeup timer (used in low-overhead mode). 
+ */ +- if (hrtimer_active(&s_reader_wakeup_timer)) { ++ if (hrtimer_active(&s_reader_wakeup_timer)) + hrtimer_cancel(&s_reader_wakeup_timer); +- } ++ + /* + * There might be a reader thread blocked on a read: wake + * it up to give it a chance to respond to changed +@@ -157,7 +154,6 @@ void sw_cancel_reader(void) + void sw_print_reader_stats(void) + { + #if DO_OVERHEAD_MEASUREMENTS +- printk(KERN_INFO "# reader queue timer fires = %d\n", +- s_num_timer_fires); ++ pw_pr_debug("# reader queue timer fires = %d\n", s_num_timer_fires); + #endif /* OVERHEAD */ + } +diff --git a/drivers/platform/x86/socwatch/sw_telem.c b/drivers/platform/x86/socwatch/sw_telem.c +index eccb37df44d5..38bfd89d7a2b 100644 +--- a/drivers/platform/x86/socwatch/sw_telem.c ++++ b/drivers/platform/x86/socwatch/sw_telem.c +@@ -1,67 +1,67 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. 
+- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ + + #include + #include +-#include /* Definition of __weak */ +-#include /* LINUX_VERSION_CODE */ +-#include "sw_kernel_defines.h" /* pw_pr_debug */ +-#include "sw_mem.h" /* sw_kmalloc/free */ +-#include "sw_lock_defs.h" /* Various lock-related definitions */ +-#include "sw_telem.h" /* Signatures of fn's exported from here. */ ++#include /* Definition of __weak */ ++#include /* LINUX_VERSION_CODE */ ++#include /* 'udelay' */ ++#include "sw_kernel_defines.h" /* pw_pr_debug */ ++#include "sw_mem.h" /* sw_kmalloc/free */ ++#include "sw_lock_defs.h" /* Various lock-related definitions */ ++#include "sw_telem.h" /* Signatures of fn's exported from here. */ + + /* + * These functions and data structures are exported by the Telemetry +@@ -76,17 +76,17 @@ + * @telem_evtlog: The actual telemetry data. + */ + struct telemetry_evtlog { +- u32 telem_evtid; /* Event ID of a data item. */ +- u64 telem_evtlog; /* Counter data */ ++ u32 telem_evtid; /* Event ID of a data item. */ ++ u64 telem_evtlog; /* Counter data */ + }; + + struct telemetry_evtconfig { +- u32 *evtmap; /* Array of Event-IDs to Enable */ +- u8 num_evts; /* Number of Events (<29) in evtmap */ +- u8 period; /* Sampling period */ ++ u32 *evtmap; /* Array of Event-IDs to Enable */ ++ u8 num_evts; /* Number of Events (<29) in evtmap */ ++ u8 period; /* Sampling period */ + }; + +-#define MAX_TELEM_EVENTS 28 /* Max telem events per unit */ ++#define MAX_TELEM_EVENTS 28 /* Max telem events per unit */ + + /* The enable bit is set when programming events, but is returned + * cleared for queried events requests. +@@ -96,113 +96,599 @@ struct telemetry_evtconfig { + /* + * Sampling Period values. 
+ * The sampling period is encoded in an 7-bit value, where +- * Period = (Value * 16^Exponent) usec where: +- * bits[6:3] -> Value; +- * bits [0:2]-> Exponent; ++ * Period = (Value * 16^Exponent) usec where: ++ * bits[6:3] -> Value; ++ * bits [0:2]-> Exponent; + * Here are some of the calculated possible values: + * | Value Val+Exp | Value | Exponent | Period (usec) | Period (msec) | + * |-----------------+-------+----------+---------------+---------------| +- * | 0xA = 000 1+010 | 1 | 2 | 256 | 0.256 | +- * | 0x12= 001 0+010 | 2 | 2 | 512 | 0.512 | +- * | 0x22= 010 0+010 | 4 | 2 | 1024 | 1.024 | +- * | 0xB = 000 1+011 | 1 | 3 | 4096 | 4.096 | +- * | 0x13= 001 0+011 | 2 | 3 | 8192 | 8.192 | +- * | 0x1B= 001 1+011 | 3 | 3 | 12288 | 12.288 | +- * | 0x0C= 000 1+100 | 1 | 4 | 65536 | 65.536 | +- * | 0x0D= 000 1+101 | 1 | 5 | 1048576 | 1048.576 | ++ * | 0xA = 000 1+010 | 1 | 2 | 256 | 0.256 | ++ * | 0x12= 001 0+010 | 2 | 2 | 512 | 0.512 | ++ * | 0x22= 010 0+010 | 4 | 2 | 1024 | 1.024 | ++ * | 0xB = 000 1+011 | 1 | 3 | 4096 | 4.096 | ++ * | 0x13= 001 0+011 | 2 | 3 | 8192 | 8.192 | ++ * | 0x1B= 001 1+011 | 3 | 3 | 12288 | 12.288 | ++ * | 0x0C= 000 1+100 | 1 | 4 | 65536 | 65.536 | ++ * | 0x0D= 000 1+101 | 1 | 5 | 1048576 | 1048.576 | + */ +-#define TELEM_SAMPLING_1MS 0x22 /* Approximately 1 ms */ +-#define TELEM_SAMPLING_1S 0x0D /* Approximately 1 s */ ++#define TELEM_SAMPLING_1MS 0x22 /* Approximately 1 ms */ ++#define TELEM_SAMPLING_1S 0x0D /* Approximately 1 s */ + + /* These functions make up the main APIs of the telemetry driver. We + * define all of them with weak linkage so that we can still compile + * and load into kernels which don't have a telemetry driver. 
+ */ +-extern int __weak telemetry_raw_read_eventlog(enum telemetry_unit telem_unit, +- struct telemetry_evtlog *evtlog, +- int evcount); +-extern int __weak telemetry_reset(void); +-extern int __weak telemetry_reset_events(void); +-extern int __weak telemetry_get_sampling_period(u8 *punit_min, u8 *punit_max, +- u8 *pmc_min, u8 *pmc_max); +-extern int __weak telemetry_set_sampling_period(u8 punit_period, u8 pmc_period); + extern int __weak telemetry_get_eventconfig( + struct telemetry_evtconfig *punit_config, +- struct telemetry_evtconfig *pmc_config, int punit_len, int pmc_len); +-extern int __weak telemetry_add_events(u8 num_punit_evts, u8 num_pmc_evts, +- u32 *punit_evtmap, u32 *pmc_evtmap); ++ struct telemetry_evtconfig *pmc_config, ++ int punit_len, ++ int pmc_len); + +-extern int __weak +-telemetry_update_events(struct telemetry_evtconfig punit_config, +- struct telemetry_evtconfig pmc_config); ++extern int __weak telemetry_reset_events(void); + ++extern int __weak telemetry_set_sampling_period( ++ u8 punit_period, ++ u8 pmc_period); + /* +- * Some telemetry IDs have multiple instances, indexed by cpu ID. We +- * implement these by defining two types of IDs: 'regular' and 'scaled'. +- * For Telemetry IDs with a single instance (the majority of them), the +- * index into the system's telemetry table is stored in the +- * sw_driver_io_descriptor.idx. At read time, the driver gets the telemetry +- * "slot" from sw_driver_io_descriptor.idx, and reads that data. This case +- * is illustrated by telem_desc_A in the illustration below, where idx 2 +- * indicates that telem_data[2] contains the telem data for this descriptor. 
+- * +- * telem_desc_A telem_data +- * scale_op: X |..|[0] +- * idx : 2 -------------------- |..|[1] +- * \------->|..|[2] +- * Scaled_IDs |..|[3] +- * telem_desc_B CPU#0 1 2 3 ------>|..|[4] +- * scale_op: / [0]|.|.|.|.| / +- * idx : 1---->[1]|4|4|5|5| / +- * +----------/ +- * +- * Descriptors with scaled IDs contain a scale operation (scale_op) and +- * value. They use a 'scaled_ids' table, which is indexed by descriptor +- * number and CPU id, and stores the telem_data index. So in the +- * illustration above, CPU 0 reading from telem_desc_B would fetch row 1 +- * (from telem_desc_B.idx == 1), and column [0] yielding element 4, so +- * that's the telemetry ID it looks up in the telemetry data. +- * +- * The scaled_ids table is populated at telemetry ID initialization time ++ * Older kernels didn't have the p-unit/pmc ipc command interface ++ */ ++extern int __weak intel_punit_ipc_command( ++ u32 cmd, u32 para1, u32 para2, u32 *in, u32 *out); ++ ++extern int __weak intel_pmc_ipc_command( ++ u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen); ++/* ++ * Spinlock to guard updates to the 'iters' values. 
++ */ ++static SW_DEFINE_SPINLOCK(sw_telem_lock); ++ ++ ++/* ************************************************ ++ * Constants for P-unit/PMC telemetry interface ++ * *********************************************** ++ */ ++ ++#define PUNIT_MAILBOX_INTERFACE_OFFSET 0x7084 ++#define PUNIT_MAILBOX_DATA_OFFSET 0x7080 ++ ++#define PSS_TELEM_SSRAM_OFFSET 0x1A00 ++#define IOSS_TELEM_SSRAM_OFFSET 0x1B00 ++#define TELEM_SSRAM_SIZE 240 ++ ++#define PMC_IPC_CMD 0x0 ++ ++#define PMC_IPC_STATUS 0x04 ++ ++#define PMC_IPC_WRITE_BUFFER 0x80 ++#define PMC_IPC_READ_BUFFER 0x90 ++ ++#define PMC_IPC_PMC_TELEMETRY_COMMAND 0xEB ++ ++ ++#define TELEM_READ_TIMEOUT_TRIAL 10 ++#define TELEM_MAILBOX_STATUS_TIMEOUT 1000 ++ ++#define IPC_BIOS_PUNIT_CMD_BASE 0x00 ++ ++#define IPC_BIOS_PUNIT_CMD_READ_TELE_INFO \ ++ (IPC_BIOS_PUNIT_CMD_BASE + 0x09) ++#define IPC_BIOS_PUNIT_CMD_READ_TELE_EVENT_CTRL \ ++ (IPC_BIOS_PUNIT_CMD_BASE + 0x0c) ++#define IPC_BIOS_PUNIT_CMD_WRITE_TELE_EVENT_CTRL \ ++ (IPC_BIOS_PUNIT_CMD_BASE + 0x0d) ++#define IPC_BIOS_PUNIT_CMD_WRITE_TELE_EVENT \ ++ (IPC_BIOS_PUNIT_CMD_BASE + 0x11) ++ ++#define IOSS_TELEM_EVENT_WRITE 0x1 ++#define IOSS_TELEM_INFO_READ 0x2 ++#define IOSS_TELEM_EVENT_CTL_READ 0x7 ++#define IOSS_TELEM_EVENT_CTL_WRITE 0x8 ++ ++#define IOSS_TELEM_EVT_CTRL_WRITE_SIZE 0x4 ++#define IOSS_TELEM_READ_WORD 0x1 ++#define IOSS_TELEM_EVT_WRITE_SIZE 0x3 ++ ++#ifndef BIT ++ #define BIT(x) (1< 0 \ ++ || s_telemEventInfo[TELEM_PMC].idx > 0) ++ ++static u64 s_mchBarAddrs[3] = {0, 0, 0}; ++ ++static struct { ++ volatile u64 *ssram_virt_addr; ++ int idx, iters; ++ u32 events[MAX_TELEM_EVENTS]; ++ u64 data_buffer[MAX_TELEM_EVENTS]; ++} s_telemEventInfo[TELEM_UNIT_NONE] = { ++ [TELEM_PUNIT] = {NULL, 0, 0}, ++ [TELEM_PMC] = {NULL, 0, 0}, ++}; ++ ++static volatile u64 *s_punitInterfaceAddr; ++static volatile u64 *s_punitDataAddr; ++static volatile u64 *s_pmcIPCCmdAddr; ++static volatile u64 *s_pmcIPCStsAddr; ++static volatile u64 *s_pmcIPCWBufAddr; ++static volatile u64 
*s_pmcIPCRBufAddr; ++ ++/** ++ * setup_punit_mbox -- Setup P-Unit virtual mappings + * ++ * Returns: true if setup successfully + */ +-static unsigned char *sw_telem_scaled_ids; /* Allocate on demand */ +-static unsigned int sw_telem_rows_alloced; /* Rows currently allocated */ +-static unsigned int sw_telem_rows_avail; /* Available rows */ ++static bool setup_punit_mbox(void) ++{ ++ s_punitInterfaceAddr = ioremap_nocache( ++ (unsigned long)s_mchBarAddrs[TELEM_MCHBAR_CFG] + ++ PUNIT_MAILBOX_INTERFACE_OFFSET, 0x4); ++ s_punitDataAddr = ioremap_nocache( ++ (unsigned long)s_mchBarAddrs[TELEM_MCHBAR_CFG] + ++ PUNIT_MAILBOX_DATA_OFFSET, 0x4); ++ s_telemEventInfo[TELEM_PUNIT].ssram_virt_addr = ioremap_nocache( ++ (unsigned long) ++ s_mchBarAddrs[TELEM_SSRAMBAR_CFG] + ++ PSS_TELEM_SSRAM_OFFSET, TELEM_SSRAM_SIZE); ++ ++ return (s_punitInterfaceAddr && s_punitDataAddr && ++ s_telemEventInfo[TELEM_PUNIT].ssram_virt_addr); ++} + +-extern int sw_max_num_cpus; /* SoC Watch's copy of cpu count. */ ++/** ++ * destroy_punit_mbox -- Unmap p-unit virtual addresses ++ */ ++static void destroy_punit_mbox(void) ++{ ++ if (s_punitInterfaceAddr) { ++ iounmap(s_punitInterfaceAddr); ++ s_punitInterfaceAddr = NULL; ++ } ++ if (s_punitDataAddr) { ++ iounmap(s_punitDataAddr); ++ s_punitDataAddr = NULL; ++ } ++ if (s_telemEventInfo[TELEM_PUNIT].ssram_virt_addr) { ++ iounmap(s_telemEventInfo[TELEM_PUNIT].ssram_virt_addr); ++ s_telemEventInfo[TELEM_PUNIT].ssram_virt_addr = NULL; ++ } ++} + +-/* Macro for identifying telemetry IDs with either per-cpu, or per-module +- * instances. These IDs need to be 'scaled' as per scale_op and scale_val. 
++/** ++ * setup_pmc_mbox -- Setup PMC virtual mappings ++ * ++ * Returns: true if setup successfully + */ +-#define IS_SCALED_ID(td) ((td)->scale_op != TELEM_OP_NONE) +-/* +- * Event map that is populated with user-supplied IDs ++static bool setup_pmc_mbox(void) ++{ ++ s_pmcIPCCmdAddr = ioremap_nocache( ++ (unsigned long)s_mchBarAddrs[TELEM_IPC1BAR_CFG] + ++ PMC_IPC_CMD, 0x4); ++ s_pmcIPCStsAddr = ioremap_nocache( ++ (unsigned long)s_mchBarAddrs[TELEM_IPC1BAR_CFG] + ++ PMC_IPC_STATUS, 0x4); ++ s_pmcIPCWBufAddr = ioremap_nocache( ++ (unsigned long)s_mchBarAddrs[TELEM_IPC1BAR_CFG] + ++ PMC_IPC_WRITE_BUFFER, 0x4); ++ s_pmcIPCRBufAddr = ioremap_nocache( ++ (unsigned long)s_mchBarAddrs[TELEM_IPC1BAR_CFG] + ++ PMC_IPC_READ_BUFFER, 0x4); ++ s_telemEventInfo[TELEM_PMC].ssram_virt_addr = ioremap_nocache( ++ (unsigned long)s_mchBarAddrs[TELEM_SSRAMBAR_CFG] + ++ IOSS_TELEM_SSRAM_OFFSET, TELEM_SSRAM_SIZE); ++ ++ return (s_pmcIPCCmdAddr && s_pmcIPCStsAddr && ++ s_pmcIPCWBufAddr && s_pmcIPCRBufAddr && ++ s_telemEventInfo[TELEM_PMC].ssram_virt_addr); ++} ++ ++/** ++ * destroy_pmc_mbox -- Unmap PMC virtual addresses + */ +-static u32 s_event_map[2][MAX_TELEM_EVENTS]; +-/* +- * Index into event map(s) ++static void destroy_pmc_mbox(void) ++{ ++ if (s_pmcIPCCmdAddr) { ++ iounmap(s_pmcIPCCmdAddr); ++ s_pmcIPCCmdAddr = NULL; ++ } ++ if (s_pmcIPCStsAddr) { ++ iounmap(s_pmcIPCStsAddr); ++ s_pmcIPCStsAddr = NULL; ++ } ++ if (s_pmcIPCWBufAddr) { ++ iounmap(s_pmcIPCWBufAddr); ++ s_pmcIPCWBufAddr = NULL; ++ } ++ if (s_pmcIPCRBufAddr) { ++ iounmap(s_pmcIPCRBufAddr); ++ s_pmcIPCRBufAddr = NULL; ++ } ++ if (s_telemEventInfo[TELEM_PMC].ssram_virt_addr) { ++ iounmap(s_telemEventInfo[TELEM_PMC].ssram_virt_addr); ++ s_telemEventInfo[TELEM_PMC].ssram_virt_addr = NULL; ++ } ++} ++ ++/** ++ * setup_telem - Setup telemetry interface ++ * ++ * Returns: 0 if setup successfully, 1 otherwise + */ +-static size_t s_unit_idx[2] = { 0, 0 }; +-/* +- * Used to decide if telemetry values need refreshing ++int 
setup_telem(u64 addrs[3]) ++{ ++ /* ++ * Don't setup if already done so ++ */ ++ if (s_mchBarAddrs[TELEM_MCHBAR_CFG]) ++ return 0; ++ ++ memcpy(s_mchBarAddrs, addrs, sizeof(s_mchBarAddrs)); ++ /* ++ * Setup Punit ++ */ ++ if (!setup_punit_mbox()) { ++ pw_pr_error("Couldn't setup PUNIT mbox\n"); ++ return -1; ++ } ++ /* ++ * Setup PMC ++ */ ++ if (!setup_pmc_mbox()) { ++ pw_pr_error("Couldn't setup PMC mbox\n"); ++ return -1; ++ } ++ return 0; ++} ++ ++/** ++ * destroy_telem - Destroy telemetry interface + */ +-static size_t s_unit_iters[2] = { 0, 0 }; +-/* +- * Spinlock to guard updates to the 'iters' values. ++void destroy_telem(void) ++{ ++ destroy_punit_mbox(); ++ destroy_pmc_mbox(); ++ ++ memset(s_mchBarAddrs, 0, sizeof(s_mchBarAddrs)); ++} ++ ++/** ++ * get_or_set_id - Add ID to list of events if not previously added ++ * ++ * Returns: 0 if setup successfully, 1 otherwise + */ +-static SW_DEFINE_SPINLOCK(sw_telem_lock); +-/* +- * Macro to determine if socwatch telemetry system has been configured ++static int get_or_set_id(u32 *events, u32 *unit_idx, u32 id) ++{ ++ u32 i = 0; ++ ++ if (*unit_idx >= MAX_TELEM_EVENTS) ++ return -1; ++ ++ for (i = 0; i < *unit_idx; ++i) { ++ if (events[i] == id) ++ return i; ++ } ++ events[*unit_idx] = id; ++ return (*unit_idx)++; ++} ++ ++static int add_telem_id(enum telemetry_unit unit, u32 id) ++{ ++ return get_or_set_id( ++ s_telemEventInfo[unit].events, ++ &s_telemEventInfo[unit].idx, id); ++} ++ ++static void remove_telem_ids(void) ++{ ++ memset(s_telemEventInfo, 0, sizeof(s_telemEventInfo)); ++} ++ ++ ++static u64 read_telem_data(u64 *dst, volatile void *src, size_t num_events) ++{ ++ u32 i, timeout = 0; ++ u64 prev_timestamp = 0, next_timestamp = 0, start_time = 0, event_data; ++ ++ if (!dst) ++ return 0; ++ ++ do { ++ u64 *_src = (u64 *)src; ++ ++ prev_timestamp = *_src; ++ if (!prev_timestamp) ++ return 0; ++ ++ start_time = *(_src + 1); ++ ++ for (i = 0; i < num_events; ++i) { ++ event_data = *(_src + 2 + i); ++ dst[i] 
= event_data; ++ } ++ next_timestamp = *_src; ++ ++ if (!next_timestamp) ++ return 0; ++ ++ if (++timeout == TELEM_READ_TIMEOUT_TRIAL) ++ break; ++ ++ } while (prev_timestamp != next_timestamp); ++ return prev_timestamp == next_timestamp ? start_time : 0; ++} ++ ++/** ++ * @returns timestamp (1st entry of SSRAM) ++ */ ++static u64 flush_telem_to_buffer(enum telemetry_unit unit) ++{ ++ return read_telem_data(s_telemEventInfo[unit].data_buffer, ++ s_telemEventInfo[unit].ssram_virt_addr, ++ s_telemEventInfo[unit].idx); ++} ++ ++static void read_telem_from_buffer(u64 *dst, enum telemetry_unit unit) ++{ ++ memcpy(dst, s_telemEventInfo[unit].data_buffer, ++ s_telemEventInfo[unit].idx * sizeof(*dst)); ++} ++ ++static u64 read_event_from_buffer(enum telemetry_unit unit, int idx) ++{ ++ if (idx < 0 || idx >= MAX_TELEM_EVENTS) ++ return SW_TELEM_READ_FAIL_VALUE; ++ ++ return s_telemEventInfo[unit].data_buffer[idx]; ++} ++ ++static bool punit_start_telem(void) ++{ ++ u32 telem_info = 0, telem_ctrl = 0, i; ++ ++ /* Reset data buffer */ ++ memset(s_telemEventInfo[TELEM_PUNIT].data_buffer, 0, ++ sizeof(s_telemEventInfo[TELEM_PUNIT].data_buffer)); ++ ++ /* Read basic config */ ++ if (intel_punit_ipc_command(IPC_BIOS_PUNIT_CMD_READ_TELE_INFO, 0, 0, ++ NULL, &telem_info)) ++ pw_pr_warn("Could not execute P-unit IPC command to read telem info\n"); ++ ++ /* Debug info */ ++ pw_pr_debug("DEBUG: Read P-Unit telem_info = 0x%x\n", telem_info); ++ pw_pr_debug("## SOCWATCHDRV ## PUNIT Telemetry info has events = %u\n", ++ (telem_info & TELEM_INFO_SSRAMEVTS_MASK) >> ++ TELEM_INFO_SSRAMEVTS_SHIFT); ++ pw_pr_debug( ++ "## SOCWATCHDRV ## PUNIT Telemetry info has event_regs = %u\n", ++ telem_info & TELEM_INFO_SSRAMEVTS_MASK); ++ pw_pr_debug( ++ "## SOCWATCHDRV ## PUNIT Telemetry info has min_period = %u\n", ++ TELEM_MIN_PERIOD(telem_info)); ++ pw_pr_debug( ++ "## SOCWATCHDRV ## PUNIT Telemetry info has max_period = %u\n", ++ TELEM_MAX_PERIOD(telem_info)); ++ ++ /*TODO: check if #events or 
#event_regs is less than 28; exit if so */ ++ ++ /* Read control structure */ ++ if (intel_punit_ipc_command(IPC_BIOS_PUNIT_CMD_READ_TELE_EVENT_CTRL, ++ 0, 0, NULL, &telem_ctrl)) ++ pw_pr_warn("Could not execute P-unit IPC command to read telem ctrl structure\n"); ++ ++ /* Disable telem */ ++ TELEM_DISABLE(telem_ctrl); ++ if (intel_punit_ipc_command(IPC_BIOS_PUNIT_CMD_WRITE_TELE_EVENT_CTRL, ++ 0, 0, &telem_ctrl, NULL)) ++ pw_pr_warn("Could not execute P-unit IPC command to write telem ctrl structure\n"); ++ ++ /* Each event added requires a separate command */ ++ for (i = 0; i < s_telemEventInfo[TELEM_PUNIT].idx; ++i) { ++ u32 event = s_telemEventInfo[TELEM_PUNIT].events[i] | ++ TELEM_EVENT_ENABLE; ++ ++ pw_pr_debug("DEBUG: enabling PUNIT event 0x%x\n", ++ s_telemEventInfo[TELEM_PUNIT].events[i]); ++ if (intel_punit_ipc_command( ++ IPC_BIOS_PUNIT_CMD_WRITE_TELE_EVENT, i, 0, ++ &event, NULL)) ++ pw_pr_warn("Could not execute P-unit IPC command to write telem event\n"); ++ ++ } ++ ++ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl); ++ TELEM_ENABLE_SSRAM_EVT_TRACE(telem_ctrl); ++ TELEM_ENABLE_PERIODIC(telem_ctrl); ++ telem_ctrl |= TELEM_DEFAULT_SAMPLING_PERIOD; ++ ++ /* Enable telemetry via control structure */ ++ if (intel_punit_ipc_command(IPC_BIOS_PUNIT_CMD_WRITE_TELE_EVENT_CTRL, ++ 0, 0, &telem_ctrl, NULL)) ++ pw_pr_warn("Could not execute P-unit IPC command to write telem ctrl structure\n"); ++ ++ return true; ++} ++ ++static void punit_stop_telem(void) ++{ ++ u32 telem_ctrl = 0; ++ ++ if (intel_punit_ipc_command( ++ IPC_BIOS_PUNIT_CMD_READ_TELE_EVENT_CTRL, 0, 0, ++ NULL, &telem_ctrl)) ++ pw_pr_warn("Could not execute P-unit IPC command to read telem ctrl structure\n"); ++ ++ /* Disable telem */ ++ TELEM_DISABLE(telem_ctrl); ++ if (intel_punit_ipc_command( ++ IPC_BIOS_PUNIT_CMD_WRITE_TELE_EVENT_CTRL, 0, 0, ++ &telem_ctrl, NULL)) ++ pw_pr_warn("Could not execute P-unit IPC command to write telem ctrl structure\n"); ++} ++ ++static bool pmc_start_telem(void) ++{ ++ u32 
telem_info = 0, telem_ctrl = 0, i; ++ ++ /* Reset data buffer */ ++ memset(s_telemEventInfo[TELEM_PMC].data_buffer, ++ 0, sizeof(s_telemEventInfo[TELEM_PMC].data_buffer)); ++ ++ /* Read basic config */ ++ if (intel_pmc_ipc_command(PMC_IPC_PMC_TELEMETRY_COMMAND, ++ IOSS_TELEM_INFO_READ, NULL, 0, &telem_info, ++ IOSS_TELEM_READ_WORD)) ++ pw_pr_warn("Could not execute PMC IPC command to read telemetry info\n"); ++ ++ pw_pr_debug("DEBUG: Read PMC telem_info = 0x%x\n", telem_info); ++ pw_pr_debug("## SOCWATCHDRV ## PMC Telemetry info has events = %u\n", ++ (telem_info & TELEM_INFO_SSRAMEVTS_MASK) >> ++ TELEM_INFO_SSRAMEVTS_SHIFT); ++ pw_pr_debug("## SOCWATCHDRV ## PMC Telemetry info has event_regs = %u\n", ++ telem_info & TELEM_INFO_SSRAMEVTS_MASK); ++ pw_pr_debug("## SOCWATCHDRV ## PMC Telemetry info has min_period = %u\n", ++ TELEM_MIN_PERIOD(telem_info)); ++ pw_pr_debug("## SOCWATCHDRV ## PMC Telemetry info has max_period = %u\n", ++ TELEM_MAX_PERIOD(telem_info)); ++ ++ /*TODO: check if #events or #event_regs is less than 28; exit if so */ ++ ++ /* Read control structure */ ++ if (intel_pmc_ipc_command(PMC_IPC_PMC_TELEMETRY_COMMAND, ++ IOSS_TELEM_EVENT_CTL_READ, NULL, 0, &telem_ctrl, ++ IOSS_TELEM_READ_WORD)) ++ pw_pr_warn("Could not execute PMC IPC command to read telem control info\n"); ++ ++ /* Disable telemetry */ ++ TELEM_DISABLE(telem_ctrl); ++ if (intel_pmc_ipc_command(PMC_IPC_PMC_TELEMETRY_COMMAND, ++ IOSS_TELEM_EVENT_CTL_WRITE, (u8 *)&telem_ctrl, ++ IOSS_TELEM_EVT_CTRL_WRITE_SIZE, NULL, 0)) ++ pw_pr_warn("Could not execute PMC IPC command to read telem control info\n"); ++ ++ ++ /* Each event added requires a separate command */ ++ for (i = 0; i < s_telemEventInfo[TELEM_PMC].idx; ++i) { ++ u32 event = ++ s_telemEventInfo[TELEM_PMC].events[i] | ++ TELEM_EVENT_ENABLE; ++ ++ event <<= TELEM_IOSS_EVTID_SHIFT; ++ event |= i; /* Set the index register */ ++ pw_pr_debug("DEBUG: enabling PMC event 0x%x\n", ++ s_telemEventInfo[TELEM_PMC].events[i]); ++ if 
(intel_pmc_ipc_command(PMC_IPC_PMC_TELEMETRY_COMMAND, ++ IOSS_TELEM_EVENT_WRITE, (u8 *)&event, ++ IOSS_TELEM_EVT_WRITE_SIZE, NULL, 0)) ++ pw_pr_warn("Could not execute PMC IPC command to read telem control info\n"); ++ } ++ ++ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl); ++ TELEM_ENABLE_SSRAM_EVT_TRACE(telem_ctrl); ++ TELEM_ENABLE_PERIODIC(telem_ctrl); ++ telem_ctrl |= TELEM_DEFAULT_SAMPLING_PERIOD; ++ ++ /* Enable telemetry via control structure */ ++ if (intel_pmc_ipc_command(PMC_IPC_PMC_TELEMETRY_COMMAND, ++ IOSS_TELEM_EVENT_CTL_WRITE, (u8 *)&telem_ctrl, ++ IOSS_TELEM_EVT_CTRL_WRITE_SIZE, NULL, 0)) ++ pw_pr_warn("Could not execute PMC IPC command to read telem control info\n"); ++ ++ return true; ++} ++ ++static void pmc_stop_telem(void) ++{ ++ u32 telem_ctrl = 0; ++ ++ /* Read control structure */ ++ if (intel_pmc_ipc_command(PMC_IPC_PMC_TELEMETRY_COMMAND, ++ IOSS_TELEM_EVENT_CTL_READ, NULL, 0, &telem_ctrl, ++ IOSS_TELEM_READ_WORD)) ++ pw_pr_warn("Could not execute PMC IPC command to read telem control info\n"); ++ ++ /* Disable telemetry */ ++ TELEM_DISABLE(telem_ctrl); ++ if (intel_pmc_ipc_command(PMC_IPC_PMC_TELEMETRY_COMMAND, ++ IOSS_TELEM_EVENT_CTL_WRITE, (u8 *)&telem_ctrl, ++ IOSS_TELEM_EVT_CTRL_WRITE_SIZE, NULL, 0)) ++ pw_pr_warn("Could not execute PMC IPC command to read telem control info\n"); ++} ++ ++/** ++ * Configurs events + starts counters ++ * @returns 0 on success + */ +-#define SW_TELEM_CONFIGURED() (s_unit_idx[0] > 0 || s_unit_idx[1] > 0) ++static int start_telem(void) ++{ ++ if (s_telemEventInfo[TELEM_PUNIT].idx) { ++ if (punit_start_telem() == false) ++ return -1; ++ ++ /* Return value is don't care */ ++ flush_telem_to_buffer(TELEM_PUNIT); ++ } ++ ++ if (s_telemEventInfo[TELEM_PMC].idx) { ++ if (pmc_start_telem() == false) ++ return -1; ++ ++ flush_telem_to_buffer(TELEM_PMC); ++ } ++ pw_pr_debug("OK, bypass telem started\n"); ++ return 0; ++} ++ ++static void stop_telem(void) ++{ ++ if (s_telemEventInfo[TELEM_PUNIT].idx) { ++ punit_stop_telem(); 
++ s_telemEventInfo[TELEM_PUNIT].idx = 0; ++ } ++ if (s_telemEventInfo[TELEM_PMC].idx) { ++ pmc_stop_telem(); ++ s_telemEventInfo[TELEM_PMC].idx = 0; ++ } ++ pw_pr_debug("OK, bypass telem stopped\n"); ++} ++ ++int read_telem(u64 *dst, enum telemetry_unit unit, bool should_retry) ++{ ++ size_t num_iters = should_retry ? 10 : 0; ++ u64 timestamp = 0; ++ ++ do { ++ timestamp = flush_telem_to_buffer(unit); ++ } while (!timestamp && should_retry && num_iters--); ++ ++ if (timestamp) { ++ read_telem_from_buffer(dst, unit); ++ return 0; ++ } ++ return -1; ++} + + /** +- * telemetry_available - Determine if telemetry driver is present ++ * builtin_telemetry_available - Determine if telemetry driver is present + * + * Returns: 1 if telemetry driver is present, 0 if not. + */ +-static int telemetry_available(void) ++static int builtin_telemetry_available(void) + { + int retval = 0; + struct telemetry_evtconfig punit_evtconfig; +@@ -210,14 +696,15 @@ static int telemetry_available(void) + u32 punit_event_map[MAX_TELEM_EVENTS]; + u32 pmc_event_map[MAX_TELEM_EVENTS]; + ++ + /* The symbol below is weak. We return 1 if we have a definition + * for this telemetry-driver-supplied symbol, or 0 if only the + * weak definition exists. This test will suffice to detect if + * the telemetry driver is loaded. + */ +- if (telemetry_get_eventconfig == NULL) { ++ if (telemetry_get_eventconfig == 0) + return 0; +- } ++ + /* OK, the telemetry driver is loaded. But it's possible it + * hasn't been configured properly. To check that, retrieve + * the number of events currently configured. 
This should never +@@ -227,191 +714,114 @@ static int telemetry_available(void) + memset(&punit_evtconfig, 0, sizeof(punit_evtconfig)); + memset(&pmc_evtconfig, 0, sizeof(pmc_evtconfig)); + +- punit_evtconfig.evtmap = (u32 *)&punit_event_map; +- pmc_evtconfig.evtmap = (u32 *)&pmc_event_map; ++ punit_evtconfig.evtmap = (u32 *) &punit_event_map; ++ pmc_evtconfig.evtmap = (u32 *) &pmc_event_map; + + retval = telemetry_get_eventconfig(&punit_evtconfig, &pmc_evtconfig, +- MAX_TELEM_EVENTS, MAX_TELEM_EVENTS); +- return retval == 0 && punit_evtconfig.num_evts > 0 && +- pmc_evtconfig.num_evts > 0; +-} +- +-/** +- * sw_get_instance_row -- Get the address of a 'row' of instance IDs. +- * @rownum: The row number of the Instance ID table, whose address to return. +- * Returns: The address of the appropriate row, or NULL if rownum is bad. +- */ +-static unsigned char *sw_get_instance_row_addr(unsigned int rownum) +-{ +- if (rownum >= (sw_telem_rows_alloced - sw_telem_rows_avail)) { +- pw_pr_error("ERROR: Cannot retrieve row Instance ID row %d\n", +- rownum); +- return NULL; +- } +- return &sw_telem_scaled_ids[rownum * sw_max_num_cpus]; ++ MAX_TELEM_EVENTS, MAX_TELEM_EVENTS); ++ return (retval == 0 && punit_evtconfig.num_evts > 0 && ++ pmc_evtconfig.num_evts > 0); + } + + /** +- * sw_free_telem_scaled_id_table - Free the allocated slots. +- * Returns: Nothing ++ * was_telemetry_setup - Check if the P-unit and PMC addresses have been mapped + * +- * Admittedly, a more symmetrical function name would be nice. 
++ * Returns: true if successfully mapped + */ +-static void sw_telem_release_scaled_ids(void) ++static bool was_telemetry_setup(void) + { +- sw_telem_rows_alloced = 0; +- sw_telem_rows_avail = 0; +- if (sw_telem_scaled_ids) { +- sw_kfree(sw_telem_scaled_ids); +- } +- sw_telem_scaled_ids = NULL; ++ return s_punitInterfaceAddr && s_punitDataAddr && ++ s_telemEventInfo[TELEM_PUNIT].ssram_virt_addr /* P-unit */ && ++ s_pmcIPCCmdAddr && s_pmcIPCStsAddr && s_pmcIPCWBufAddr && ++ s_pmcIPCRBufAddr && s_telemEventInfo[TELEM_PMC].ssram_virt_addr; + } + ++ + /** + * sw_telem_init_func - Set up the telemetry unit to retrieve a data item +- * (e.g. counter). ++ * (e.g. counter). + * @descriptor: The IO descriptor containing the unit and ID +- * of the telemetry info to gather. ++ * of the telemetry info to gather. + * + * Because we don't (currently) control all of the counters, we + * economize by seeing if it's already being collected before allocate + * a slot for it. + * + * Returns: PW_SUCCESS if the telem collector can collect the requested data. +- * -PW_ERROR if the the addition of that item fails. ++ * -PW_ERROR if the the addition of that item fails. + */ + int sw_telem_init_func(struct sw_driver_io_descriptor *descriptor) + { + struct sw_driver_telem_io_descriptor *td = + &(descriptor->telem_descriptor); +- u8 unit = td->unit; /* Telemetry unit to use. */ ++ u8 unit = td->unit; /* Telemetry unit to use. */ + u32 id; /* Event ID we want telemetry to track. */ +- size_t idx; /* Index into telemetry data array of event ID to gather. */ +- const char *unit_str = unit == TELEM_PUNIT ? "PUNIT" : "PMC"; +- size_t *unit_idx = &s_unit_idx[unit]; + +- if (!telemetry_available()) { ++ if (!was_telemetry_setup()) + return -ENXIO; +- } + + id = (u32)(td->id); + +- /* Check if we've already added this ID */ +- for (idx = 0; idx < *unit_idx && idx < MAX_TELEM_EVENTS; ++idx) { +- if (s_event_map[unit][idx] == id) { +- /* Invariant: idx contains the +- * index of the new data item. 
+- */ +- /* Save the index for later fast lookup. */ +- td->idx = (u16)idx; +- return 0; +- } +- } +- +- if (*unit_idx >= MAX_TELEM_EVENTS) { +- pw_pr_error( +- "Too many events %s units requested; max of %u available!\n", +- unit_str, MAX_TELEM_EVENTS); +- return -E2BIG; ++ td->idx = add_telem_id(unit, id); ++ if (td->idx < 0) { ++ pw_pr_error("ERROR adding id 0x%x to unit %d\n", id, unit); ++ return -1; + } +- s_event_map[unit][(*unit_idx)++] = id; +- /* Invariant: idx contains the index of the new data item. */ +- /* Save the index for later fast lookup. */ +- td->idx = (u16)idx; +- pw_pr_debug( +- "OK, added id = 0x%x to unit %s at entry %zu; retrieved = 0x%x\n", +- id, unit_str, *unit_idx - 1, s_event_map[unit][*unit_idx - 1]); ++ pw_pr_debug("OK, added id 0x%x to unit %d at pos %d\n", ++ id, unit, td->idx); + + return 0; + } + ++ + /** + * sw_read_telem_info - Read a metric's data from the telemetry driver. +- * @dest: Destination (storage for the read data) +- * @cpu: Which CPU to read from (not used) +- * @descriptor: The descriptor containing the data ID to read ++ * @dest: Destination (storage for the read data) ++ * @cpu: Which CPU to read from (not used) ++ * @descriptor: The descriptor containing the data ID to read + * @data_size_in_bytes: The # of bytes in the result (always 8) + * +- * Returns: Nothing, but stores SW_TELEM_READ_FAIL_VALUE to dest +- * if the read fails. ++ * Returns: Nothing, but stores SW_TELEM_READ_FAIL_VALUE to dest if ++ * the read fails. 
+ */ + void sw_read_telem_info(char *dest, int cpu, +- const sw_driver_io_descriptor_t *descriptor, +- u16 data_size_in_bytes) ++ const sw_driver_io_descriptor_t *descriptor, ++ u16 data_size_in_bytes) + { +- int len; + u64 *data_dest = (u64 *)dest; +- int retry_count; + const struct sw_driver_telem_io_descriptor *td = + &(descriptor->telem_descriptor); +- unsigned int idx; + u8 unit = td->unit; + bool needs_refresh = false; + +-#define TELEM_PKT_SIZE 16 /* sizeof(struct telemetry_evtlog) + padding */ +- static struct telemetry_evtlog events[MAX_TELEM_EVENTS]; +- +- /* Get the event index */ +- if (IS_SCALED_ID(td)) { +- unsigned char *scaled_ids; +- +- scaled_ids = sw_get_instance_row_addr(td->idx); +- if (scaled_ids == NULL) { +- pw_pr_error( +- "Sw_read_telem_info_i: Illegal row index: *%p = %d", +- &td->idx, td->idx); +- *data_dest = SW_TELEM_READ_FAIL_VALUE; +- return; /* Don't set the dest/data buffer. */ +- } +- idx = scaled_ids[RAW_CPU()]; /* Get per-cpu entry */ +- } else { +- idx = td->idx; +- } +- + /* + * Check if we need to refresh the list of values + */ + LOCK(sw_telem_lock); + { +- if (s_unit_iters[unit] == 0) { ++ if (s_telemEventInfo[unit].iters == 0) + needs_refresh = true; +- } +- if (++s_unit_iters[unit] == s_unit_idx[unit]) { +- s_unit_iters[unit] = 0; +- } ++ ++ if (++s_telemEventInfo[unit].iters == ++ s_telemEventInfo[unit].idx) ++ s_telemEventInfo[unit].iters = 0; + } ++ + UNLOCK(sw_telem_lock); + +- /* +- * Because of the enormous overhead of reading telemetry data from +- * the current kernel driver, failure to read the data is not +- * unheard of. As such, 3 times, should the read fail. Once we +- * get a higher-performance read routine, we should be able to +- * eliminate this retry (or maybe decrease it.) 
+- */ +- retry_count = 3; +- while (needs_refresh && retry_count--) { +- len = telemetry_raw_read_eventlog( +- unit, events, sizeof(events) / TELEM_PKT_SIZE); +- +- if ((len < 0) || (len < idx)) { +- pw_pr_error( +- "sw_read_telem_info_i: read failed: len=%d\n", +- len); +- } else { +- break; ++ if (needs_refresh) { ++ u64 timestamp = flush_telem_to_buffer(unit); ++ ++ pw_pr_debug("DEBUG: unit %d refreshed, timestamp = %llu\n", ++ unit, timestamp); ++ if (!timestamp) { /* failure */ ++ *data_dest = SW_TELEM_READ_FAIL_VALUE; ++ return; + } +- } ++ } else ++ pw_pr_debug("DEBUG: unit %d NOT refreshed\n", unit); + +- if (retry_count) { +- /* TODO: Resolve if we should return something other than +- * SW_TELEM_READ_FAIL_VALUE, if the actual data +- * happens to be that. +- */ +- *data_dest = events[idx].telem_evtlog; +- } else { +- *data_dest = SW_TELEM_READ_FAIL_VALUE; +- } ++ *data_dest = read_event_from_buffer(unit, td->idx); + } + + /** +@@ -420,24 +830,24 @@ void sw_read_telem_info(char *dest, int cpu, + * + * Stop collecting anything extra, and give the driver back to + * debugfs. Because this driver increases the sampling rate, the +- * kernel's telemetry driver can't successfully reset the driver unless ++ * kernel's telemetry driver can't succesfully reset the driver unless + * we first drop the rate back down to a much slower rate. This is a + * temporary measure, since the reset operation will then reset the + * sampling interval to whatever the GMIN driver wants. + * +- * Return: PW_SUCCESS. 
++ * Returns: 0 + */ + int sw_reset_telem(const struct sw_driver_io_descriptor *descriptor) + { +- if (telemetry_available() && SW_TELEM_CONFIGURED()) { ++ if (IS_TELEM_CONFIGURED()) { ++ stop_telem(); ++ remove_telem_ids(); ++ /* Return control to 'builtin' telemetry driver */ + telemetry_set_sampling_period(TELEM_SAMPLING_1S, +- TELEM_SAMPLING_1S); ++ TELEM_SAMPLING_1S); + telemetry_reset_events(); +- sw_telem_release_scaled_ids(); +- memset(s_unit_idx, 0, sizeof(s_unit_idx)); +- memset(s_unit_iters, 0, sizeof(s_unit_iters)); + } +- return PW_SUCCESS; ++ return 0; + } + + /** +@@ -445,54 +855,18 @@ int sw_reset_telem(const struct sw_driver_io_descriptor *descriptor) + */ + bool sw_telem_available(void) + { +- return telemetry_available(); ++ /* ++ * Telemetry driver MUST be loaded; we perform this check because ++ * on some systems an error with the p-unit/pmc IPC interface causes ++ * kernel panics. ++ */ ++ return builtin_telemetry_available(); + }; + + bool sw_telem_post_config(void) + { +- bool retval = true; +- size_t i = 0; +- struct telemetry_evtconfig punit_evtconfig; +- struct telemetry_evtconfig pmc_evtconfig; +- +- if (!SW_TELEM_CONFIGURED()) { +- return true; +- } +- +- memset(&punit_evtconfig, 0, sizeof(punit_evtconfig)); +- memset(&pmc_evtconfig, 0, sizeof(pmc_evtconfig)); +- +- telemetry_set_sampling_period(TELEM_SAMPLING_1S, TELEM_SAMPLING_1S); +- +- punit_evtconfig.period = TELEM_SAMPLING_1S; +- pmc_evtconfig.period = TELEM_SAMPLING_1S; +- +- /* Punit */ +- punit_evtconfig.evtmap = (u32 *)&s_event_map[TELEM_PUNIT]; +- punit_evtconfig.num_evts = s_unit_idx[TELEM_PUNIT]; +- /* PMC */ +- pmc_evtconfig.evtmap = (u32 *)&s_event_map[TELEM_PMC]; +- pmc_evtconfig.num_evts = s_unit_idx[TELEM_PMC]; +- +- for (i = 0; i < punit_evtconfig.num_evts; ++i) { +- pw_pr_debug("PUNIT[%zu] = 0x%x\n", i, +- punit_evtconfig.evtmap[i]); +- } +- for (i = 0; i < pmc_evtconfig.num_evts; ++i) { +- pw_pr_debug("PMC[%zu] = 0x%x\n", i, pmc_evtconfig.evtmap[i]); +- } +- +- /* 
+- * OK, everything done. Now update +- */ +- if (telemetry_update_events(punit_evtconfig, pmc_evtconfig)) { +- pw_pr_error("telemetry_update_events error"); +- retval = false; +- } else { +- pw_pr_debug("OK, telemetry_update_events success\n"); +- } +- +- telemetry_set_sampling_period(TELEM_SAMPLING_1MS, TELEM_SAMPLING_1MS); ++ if (start_telem()) ++ return false; + +- return retval; ++ return true; + } +diff --git a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +index 153fa7010295..4fd32ff25565 100644 +--- a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c ++++ b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +@@ -1,64 +1,64 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. 
+- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ + +-*/ + #include /* "LINUX_VERSION_CODE" */ + #include +-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +-#include ++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE ++ #include + #else +-#include ++ #include + #endif + #include + #include +@@ -68,12 +68,12 @@ + #include + #include + #include +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++#if KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE + #include /* for the various APIC vector tracepoints + * (e.g. "thermal_apic", + * "local_timer" etc.) + */ +-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ ++#endif /* KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE */ + struct pool_workqueue; + struct cpu_workqueue_struct; + #include +@@ -129,12 +129,12 @@ struct cpu_workqueue_struct; + * helper macros. + */ + #if IS_ENABLED(CONFIG_TRACEPOINTS) +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++#if KERNEL_VERSION(2, 6, 35) > LINUX_VERSION_CODE + #define DO_REGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + WARN_ON(register_trace_##name(probe)) + #define DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + unregister_trace_##name(probe) +-#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) ++#elif KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE + #define DO_REGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ + WARN_ON(register_trace_##name(probe, NULL)) + #define DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, name, probe) \ +@@ -149,7 +149,7 @@ struct cpu_workqueue_struct; + #define DO_REGISTER_SW_TRACEPOINT_PROBE(...) /* NOP */ + #define DO_UNREGISTER_SW_TRACEPOINT_PROBE(...) /* NOP */ + #endif /* CONFIG_TRACEPOINTS */ +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++#if KERNEL_VERSION(2, 6, 35) > LINUX_VERSION_CODE + #define _DEFINE_PROBE_FUNCTION(name, ...) static void name(__VA_ARGS__) + #else + #define _DEFINE_PROBE_FUNCTION(name, ...) \ +@@ -161,10 +161,10 @@ struct cpu_workqueue_struct; + * Tracepoint probe function parameters. + * These tracepoint signatures depend on kernel version. 
+ */ +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) ++#if KERNEL_VERSION(2, 6, 36) > LINUX_VERSION_CODE + #define PROBE_TPS_PARAMS \ + sw_probe_power_start_i, unsigned int type, unsigned int state +-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++#elif KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE + #define PROBE_TPS_PARAMS \ + sw_probe_power_start_i, unsigned int type, unsigned int state, \ + unsigned int cpu_id +@@ -173,7 +173,7 @@ struct cpu_workqueue_struct; + sw_probe_cpu_idle_i, unsigned int state, unsigned int cpu_id + #endif + +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++#if KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE + #define PROBE_TPF_PARAMS \ + sw_probe_power_frequency_i, unsigned int type, unsigned int state + #else +@@ -181,7 +181,7 @@ struct cpu_workqueue_struct; + sw_probe_cpu_frequency_i, unsigned int new_freq, unsigned int cpu + #endif + +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++#if KERNEL_VERSION(2, 6, 35) > LINUX_VERSION_CODE + #define PROBE_SCHED_WAKEUP_PARAMS \ + sw_probe_sched_wakeup_i, struct rq *rq, struct task_struct *task, \ + int success +@@ -191,7 +191,7 @@ struct cpu_workqueue_struct; + #endif + + #if IS_ENABLED(CONFIG_ANDROID) +-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++#if KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE + #define PROBE_WAKE_LOCK_PARAMS sw_probe_wake_lock_i, struct wake_lock *lock + #define PROBE_WAKE_UNLOCK_PARAMS \ + sw_probe_wake_unlock_i, struct wake_unlock *unlock +@@ -204,7 +204,7 @@ struct cpu_workqueue_struct; + #endif /* version */ + #endif /* CONFIG_ANDROID */ + +-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) ++#if KERNEL_VERSION(2, 6, 35) >= LINUX_VERSION_CODE + #define PROBE_WORKQUEUE_PARAMS \ + sw_probe_workqueue_execution_i, struct task_struct *wq_thread, \ + struct work_struct *work +@@ -260,13 +260,13 @@ struct cpu_workqueue_struct; + * Use these macros if all tracepoint ID numbers + * ARE contiguous from 0 -- max tracepoint ID # + */ +-#if 0 ++/* #if 0 + 
#define IS_VALID_TRACE_NOTIFIER_ID(id) \ + ((id) >= 0 && (id) < SW_ARRAY_SIZE(s_trace_collector_lists)) + #define GET_COLLECTOR_TRACE_NODE(id) (&s_trace_collector_lists[id]) + #define FOR_EACH_trace_notifier_id(idx) \ + for (idx = 0; idx < SW_ARRAY_SIZE(s_trace_collector_lists); ++idx) +-#endif /* if 0 */ ++#endif */ + /* + * Use these macros if all tracepoint ID numbers + * are NOT contiguous from 0 -- max tracepoint ID # +@@ -315,52 +315,38 @@ int sw_unregister_trace_cpu_idle_i(struct sw_trace_notifier_data *node); + int sw_register_trace_cpu_frequency_i(struct sw_trace_notifier_data *node); + int sw_unregister_trace_cpu_frequency_i(struct sw_trace_notifier_data *node); + int sw_register_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node); +-int sw_unregister_trace_irq_handler_entry_i(struct sw_trace_notifier_data +- *node); ++int sw_unregister_trace_irq_handler_entry_i(struct sw_trace_notifier_data *node); + int sw_register_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node); +-int sw_unregister_trace_timer_expire_entry_i( +- struct sw_trace_notifier_data *node); +-int sw_register_trace_hrtimer_expire_entry_i( +- struct sw_trace_notifier_data *node); +-int sw_unregister_trace_hrtimer_expire_entry_i( +- struct sw_trace_notifier_data *node); ++int sw_unregister_trace_timer_expire_entry_i(struct sw_trace_notifier_data *node); ++int sw_register_trace_hrtimer_expire_entry_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_hrtimer_expire_entry_i(struct sw_trace_notifier_data *node); + int sw_register_trace_sched_wakeup_i(struct sw_trace_notifier_data *node); + int sw_unregister_trace_sched_wakeup_i(struct sw_trace_notifier_data *node); + int sw_register_trace_sched_process_fork_i(struct sw_trace_notifier_data *node); +-int sw_unregister_trace_sched_process_fork_i( +- struct sw_trace_notifier_data *node); ++int sw_unregister_trace_sched_process_fork_i(struct sw_trace_notifier_data *node); + int 
sw_register_trace_sched_process_exit_i(struct sw_trace_notifier_data *node); +-int sw_unregister_trace_sched_process_exit_i( +- struct sw_trace_notifier_data *node); +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) +-int sw_register_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node); +-int sw_unregister_trace_thermal_apic_entry_i( +- struct sw_trace_notifier_data *node); +-int sw_register_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node); +-int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data +- *node); +-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ ++int sw_unregister_trace_sched_process_exit_i(struct sw_trace_notifier_data *node); ++#if KERNEL_VERSION(3,14,0) <= LINUX_VERSION_CODE ++ int sw_register_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node); ++ int sw_unregister_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node); ++ int sw_register_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node); ++ int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node); ++#endif // KERNEL_VERSION(3,14,0) <= LINUX_VERSION_CODE + #if IS_ENABLED(CONFIG_ANDROID) +-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) +-int sw_register_trace_wake_lock_i(struct sw_trace_notifier_data *node); +-int sw_unregister_trace_wake_lock_i(struct sw_trace_notifier_data *node); +-int sw_register_trace_wake_unlock_i(struct sw_trace_notifier_data *node); +-int sw_unregister_trace_wake_unlock_i(struct sw_trace_notifier_data *node); +-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ +-int sw_register_trace_wakeup_source_activate_i( +- struct sw_trace_notifier_data *node); +-int sw_unregister_trace_wakeup_source_activate_i( +- struct sw_trace_notifier_data *node); +-int sw_register_trace_wakeup_source_deactivate_i( +- struct sw_trace_notifier_data *node); +-int sw_unregister_trace_wakeup_source_deactivate_i( +- struct sw_trace_notifier_data *node); +-#endif /* LINUX_VERSION_CODE 
< KERNEL_VERSION(3,4,0) */ +-#endif /* CONFIG_ANDROID */ +-int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data +- *node); +-int sw_unregister_trace_workqueue_execution_i( +- struct sw_trace_notifier_data *node); ++ #if KERNEL_VERSION(3,4,0) > LINUX_VERSION_CODE ++ int sw_register_trace_wake_lock_i(struct sw_trace_notifier_data *node); ++ int sw_unregister_trace_wake_lock_i(struct sw_trace_notifier_data *node); ++ int sw_register_trace_wake_unlock_i(struct sw_trace_notifier_data *node); ++ int sw_unregister_trace_wake_unlock_i(struct sw_trace_notifier_data *node); ++ #else // KERNEL_VERSION(3,4,0) > LINUX_VERSION_CODE ++ int sw_register_trace_wakeup_source_activate_i(struct sw_trace_notifier_data *node); ++ int sw_unregister_trace_wakeup_source_activate_i(struct sw_trace_notifier_data *node); ++ int sw_register_trace_wakeup_source_deactivate_i(struct sw_trace_notifier_data *node); ++ int sw_unregister_trace_wakeup_source_deactivate_i(struct sw_trace_notifier_data *node); ++ #endif // KERNEL_VERSION(3,4,0) > LINUX_VERSION_CODE ++#endif // IS_ENABLED(CONFIG_ANDROID) ++int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data *node); ++int sw_unregister_trace_workqueue_execution_i(struct sw_trace_notifier_data *node); + int sw_register_trace_sched_switch_i(struct sw_trace_notifier_data *node); + int sw_unregister_trace_sched_switch_i(struct sw_trace_notifier_data *node); + int sw_register_pm_notifier_i(struct sw_trace_notifier_data *node); +@@ -369,25 +355,20 @@ int sw_register_cpufreq_notifier_i(struct sw_trace_notifier_data *node); + int sw_unregister_cpufreq_notifier_i(struct sw_trace_notifier_data *node); + int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node); + int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node); +-void sw_handle_sched_wakeup_i(struct sw_collector_data *node, int source_cpu, +- int target_cpu); +-void sw_handle_timer_wakeup_helper_i(struct sw_collector_data *curr, +- 
struct sw_trace_notifier_data *node, +- pid_t tid); ++void sw_handle_sched_wakeup_i(struct sw_collector_data *node, int source_cpu, int target_cpu); ++void sw_handle_timer_wakeup_helper_i(struct sw_collector_data *curr, struct sw_trace_notifier_data *node, ++ pid_t tid); + void sw_handle_apic_timer_wakeup_i(struct sw_collector_data *node); +-void sw_handle_workqueue_wakeup_helper_i(int cpu, +- struct sw_collector_data *node); ++void sw_handle_workqueue_wakeup_helper_i(int cpu, struct sw_collector_data *node); + void sw_handle_sched_switch_helper_i(void); + void sw_tps_apic_i(int cpu); + void sw_tps_tps_i(int cpu); + void sw_tps_wakeup_i(int cpu); + void sw_tps_i(void); + void sw_tpf_i(int cpu, struct sw_trace_notifier_data *node); +-void sw_process_fork_exit_helper_i(struct sw_collector_data *node, +- struct task_struct *task, bool is_fork); +-void sw_produce_wakelock_msg_i(int cpu, struct sw_collector_data *node, +- const char *name, int type, u64 timeout, int pid, +- int tid, const char *proc_name); ++void sw_process_fork_exit_helper_i(struct sw_collector_data *node, struct task_struct *task, bool is_fork); ++void sw_produce_wakelock_msg_i(int cpu, struct sw_collector_data *node, const char *name, ++ int type, u64 timeout, int pid, int tid, const char *proc_name); + u64 sw_my_local_arch_irq_stats_cpu_i(void); + + /* +@@ -397,8 +378,7 @@ u64 sw_my_local_arch_irq_stats_cpu_i(void); + * The tracepoint handlers. 
+ */ + void sw_handle_trace_notifier_i(struct sw_trace_notifier_data *node); +-void sw_handle_trace_notifier_on_cpu_i(int cpu, +- struct sw_trace_notifier_data *node); ++void sw_handle_trace_notifier_on_cpu_i(int cpu, struct sw_trace_notifier_data *node); + void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node); + + /* ------------------------------------------------- +@@ -507,17 +487,17 @@ static const struct sw_trace_notifier_name s_trace_names[] = { + "PROCESS-FORK" }, + [SW_TRACE_ID_SCHED_PROCESS_EXIT] = { "sched_process_exit", + "PROCESS-EXIT" }, +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++#if KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE + [SW_TRACE_ID_THERMAL_APIC_ENTRY] = { "thermal_apic_entry", + "THERMAL-THROTTLE-ENTRY" }, + [SW_TRACE_ID_THERMAL_APIC_EXIT] = { "thermal_apic_exit", + "THERMAL-THROTTLE-EXIT" }, +-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ ++#endif /* KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE */ + #if IS_ENABLED(CONFIG_ANDROID) +-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++#if KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE + [SW_TRACE_ID_WAKE_LOCK] = { "wake_lock", "WAKE-LOCK" }, + [SW_TRACE_ID_WAKE_UNLOCK] = { "wake_unlock", "WAKE-UNLOCK" }, +-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ ++#else /* KERNEL_VERSION(3, 4, 0) <= LINUX_VERSION_CODE */ + [SW_TRACE_ID_WAKE_LOCK] = { "wakeup_source_activate", "WAKE-LOCK" }, + [SW_TRACE_ID_WAKE_UNLOCK] = { "wakeup_source_deactivate", + "WAKE-UNLOCK" }, +@@ -589,7 +569,7 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + &s_trace_names[SW_TRACE_ID_SCHED_PROCESS_EXIT], + &sw_register_trace_sched_process_exit_i, + &sw_unregister_trace_sched_process_exit_i, NULL }, +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++#if KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE + /* + * For thermal throttling. + * We probably only need one of either 'entry' or 'exit'. 
Use +@@ -606,10 +586,10 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + &s_trace_names[SW_TRACE_ID_THERMAL_APIC_EXIT], + &sw_register_trace_thermal_apic_exit_i, + &sw_unregister_trace_thermal_apic_exit_i, NULL }, +-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ ++#endif /* KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE */ + /* Wakelocks have multiple tracepoints, depending on kernel version */ + #if IS_ENABLED(CONFIG_ANDROID) +-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++#if KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE + { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_WAKE_LOCK], + &sw_register_trace_wake_lock_i, &sw_unregister_trace_wake_lock_i, + NULL }, +@@ -617,7 +597,7 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + &s_trace_names[SW_TRACE_ID_WAKE_UNLOCK], + &sw_register_trace_wake_unlock_i, &sw_unregister_trace_wake_unlock_i, + NULL }, +-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ ++#else /* KERNEL_VERSION(3, 4, 0) <= LINUX_VERSION_CODE */ + { SW_TRACE_COLLECTOR_TRACEPOINT, &s_trace_names[SW_TRACE_ID_WAKE_LOCK], + &sw_register_trace_wakeup_source_activate_i, + &sw_unregister_trace_wakeup_source_activate_i, NULL }, +@@ -625,7 +605,7 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + &s_trace_names[SW_TRACE_ID_WAKE_UNLOCK], + &sw_register_trace_wakeup_source_deactivate_i, + &sw_unregister_trace_wakeup_source_deactivate_i, NULL }, +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) */ ++#endif /* KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE */ + #endif /* CONFIG_ANDROID */ + { SW_TRACE_COLLECTOR_TRACEPOINT, + &s_trace_names[SW_TRACE_ID_WORKQUEUE_EXECUTE_START], +@@ -746,11 +726,11 @@ u64 sw_my_local_arch_irq_stats_cpu_i(void) + sum += stats->apic_timer_irqs; + sum += stats->irq_spurious_count; + #endif +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34) ++#if KERNEL_VERSION(2, 6, 34) <= LINUX_VERSION_CODE + sum += stats->x86_platform_ipis; + #endif 
/* 2,6,34 */ + sum += stats->apic_perf_irqs; +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) ++#if KERNEL_VERSION(3, 5, 0) <= LINUX_VERSION_CODE + sum += stats->apic_irq_work_irqs; + #endif /* 3,5,0 */ + #ifdef CONFIG_SMP +@@ -765,9 +745,9 @@ u64 sw_my_local_arch_irq_stats_cpu_i(void) + #else + sum += stats->__softirq_pending; + #ifdef CONFIG_SMP +- for (i = 0; i < NR_IPI; ++i) { ++ for (i = 0; i < NR_IPI; ++i) + sum += stats->ipi_irqs[i]; +- } ++ + #endif + #ifdef CONFIG_X86_MCE + sum += stats->mce_exception_count; +@@ -786,9 +766,9 @@ void sw_handle_trace_notifier_i(struct sw_trace_notifier_data *node) + { + struct sw_collector_data *curr = NULL; + +- if (!node) { ++ if (!node) + return; +- } ++ + list_for_each_entry(curr, &node->list, list) { + pw_pr_debug("DEBUG: handling message\n"); + sw_handle_per_cpu_msg(curr); +@@ -803,21 +783,21 @@ void sw_handle_trace_notifier_on_cpu_i(int cpu, + { + struct sw_collector_data *curr = NULL; + +- if (!node) { ++ if (!node) + return; +- } +- list_for_each_entry(curr, &node->list, list) { ++ ++ list_for_each_entry(curr, &node->list, list) + sw_handle_per_cpu_msg_on_cpu(cpu, curr); +- } ++ + }; + + void sw_handle_reset_messages_i(struct sw_trace_notifier_data *node) + { + struct sw_collector_data *curr = NULL; + +- if (!node) { ++ if (!node) + return; +- } ++ + list_for_each_entry(curr, &node->list, list) { + pw_pr_debug("Handling message of unknown cpumask on cpu %d\n", + RAW_CPU()); +@@ -852,9 +832,9 @@ static void sw_handle_timer_wakeup_i(struct sw_collector_data *node, pid_t pid, + dst_vals += sizeof(pid); + *((int *)dst_vals) = tid; + +- if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) + pw_pr_warn("WARNING: could NOT produce message!\n"); +- } ++ + pw_pr_debug("HANDLED timer expire for %d, %d\n", pid, tid); + }; + +@@ -867,18 +847,16 @@ void sw_handle_timer_wakeup_helper_i(struct sw_collector_data *curr, + { + pid_t pid = -1; + +- if (tid == 0) { 
++ if (tid == 0) + pid = 0; +- } else { ++ else { + struct task_struct *task = + pid_task(find_pid_ns(tid, &init_pid_ns), PIDTYPE_PID); +- if (likely(task)) { ++ if (likely(task)) + pid = task->tgid; +- } + } +- list_for_each_entry(curr, &node->list, list) { ++ list_for_each_entry(curr, &node->list, list) + sw_handle_timer_wakeup_i(curr, pid, tid); +- } + }; + + /* +@@ -904,9 +882,9 @@ void sw_handle_sched_wakeup_i(struct sw_collector_data *node, int source_cpu, + dst_vals += sizeof(source_cpu); + *((int *)dst_vals) = target_cpu; + +- if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_NONE)) { ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_NONE)) + pw_pr_warn("WARNING: could NOT produce message!\n"); +- } ++ + }; + + /* +@@ -926,9 +904,9 @@ void sw_handle_apic_timer_wakeup_i(struct sw_collector_data *node) + /* msg TSC assigned when msg is written to buffer */ + msg->cpuidx = cpu; + +- if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) + pw_pr_warn("WARNING: could NOT produce message!\n"); +- } ++ + pw_pr_debug("HANDLED APIC timer wakeup for cpu = %d\n", cpu); + }; + +@@ -948,9 +926,8 @@ void sw_handle_workqueue_wakeup_helper_i(int cpu, + /* + * Workqueue wakeup ==> empty message. 
+ */ +- if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) + pw_pr_error("WARNING: could NOT produce message!\n"); +- } + }; + + /* +@@ -964,9 +941,9 @@ void sw_handle_sched_switch_helper_i(void) + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_SWITCH); + pw_pr_debug("SCHED SWITCH NODE = %p\n", node); + } +- if (!node) { ++ if (!node) + return; +- } ++ + preempt_disable(); + { + struct sw_collector_data *curr; +@@ -980,15 +957,15 @@ void sw_handle_sched_switch_helper_i(void) + struct cpumask *mask = &curr->cpumask; + u16 timeout = curr->info->sampling_interval_msec; + +- if (!timeout) { ++ if (!timeout) + timeout = sw_min_polling_interval_msecs; +- } ++ + /* Has there been enough time since the last + * collection point? + */ +- if (delta_msecs < timeout) { ++ if (delta_msecs < timeout) + continue; +- } ++ + /* Update timestamp and handle message */ + if (cpumask_test_cpu( + RAW_CPU(), +@@ -1052,6 +1029,7 @@ void sw_tps_apic_i(int cpu) + if (local_apic_timer_fired && + SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { + struct sw_collector_data *curr = NULL; ++ + list_for_each_entry(curr, &apic_timer_node->list, + list) { + sw_handle_apic_timer_wakeup_i(curr); +@@ -1118,17 +1096,16 @@ void sw_tpf_i(int cpu, struct sw_trace_notifier_data *node) + #if IS_ENABLED(CONFIG_TRACEPOINTS) + DEFINE_PROBE_FUNCTION(PROBE_TPS_PARAMS) + { +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) +- if (state == PWR_EVENT_EXIT) { ++#if KERNEL_VERSION(2, 6, 38) <= LINUX_VERSION_CODE ++ if (state == PWR_EVENT_EXIT) + return; +- } + #endif + DO_PER_CPU_OVERHEAD_FUNC(sw_tps_i); + }; + + DEFINE_PROBE_FUNCTION(PROBE_TPF_PARAMS) + { +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++#if KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE + int cpu = RAW_CPU(); + #endif /* version < 2.6.38 */ + static struct sw_trace_notifier_data *node; +@@ -1160,9 +1137,9 @@ static void sw_handle_irq_wakeup_i(struct sw_collector_data *node, int irq) + 
*/ + *((int *)dst_vals) = irq; + +- if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) + pw_pr_warn("WARNING: could NOT produce message!\n"); +- } ++ + }; + + /* +@@ -1179,12 +1156,12 @@ DEFINE_PROBE_FUNCTION(PROBE_IRQ_PARAMS) + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_IRQ_HANDLER_ENTRY); + pw_pr_debug("NODE = %p\n", node); + } +- if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) + return; +- } +- list_for_each_entry(curr, &node->list, list) { ++ ++ list_for_each_entry(curr, &node->list, list) + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_irq_wakeup_i, curr, irq); +- } ++ + }; + + /* +@@ -1203,9 +1180,9 @@ DEFINE_PROBE_FUNCTION(PROBE_TIMER_ARGS) + pw_pr_debug("NODE = %p\n", node); + } + +- if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) + return; +- } ++ + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_timer_wakeup_helper_i, curr, node, + tid); + }; +@@ -1226,9 +1203,9 @@ DEFINE_PROBE_FUNCTION(PROBE_HRTIMER_PARAMS) + pw_pr_debug("NODE = %p\n", node); + } + +- if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) + return; +- } ++ + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_timer_wakeup_helper_i, curr, node, + tid); + }; +@@ -1244,9 +1221,9 @@ DEFINE_PROBE_FUNCTION(PROBE_SCHED_WAKEUP_PARAMS) + /* + * "Self-sched" samples are "don't care". + */ +- if (target_cpu == source_cpu) { ++ if (target_cpu == source_cpu) + return; +- } ++ + if (unlikely(node == NULL)) { + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_WAKEUP); + pw_pr_debug("NODE = %p\n", node); +@@ -1255,9 +1232,9 @@ DEFINE_PROBE_FUNCTION(PROBE_SCHED_WAKEUP_PARAMS) + * Unlike other wakeup sources, we check the per-cpu flag + * of the TARGET cpu to decide if we should produce a sample. 
+ */ +- if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(target_cpu)) { ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(target_cpu)) + return; +- } ++ + list_for_each_entry(curr, &node->list, list) { + /* sw_handle_sched_wakeup_i(curr, source_cpu, target_cpu); */ + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_sched_wakeup_i, curr, +@@ -1292,13 +1269,13 @@ void sw_process_fork_exit_helper_i(struct sw_collector_data *node, + dst_vals += sizeof(pid); + *((int *)dst_vals) = tid; + dst_vals += sizeof(tid); +- if (is_fork) { ++ if (is_fork) + memcpy(dst_vals, name, SW_MAX_PROC_NAME_SIZE); +- } + +- if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) + pw_pr_warn("WARNING: could NOT produce message!\n"); +- } ++ + pw_pr_debug( + "HANDLED process %s event for task: pid = %d, tid = %d, name = %s\n", + is_fork ? "FORK" : "EXIT", pid, tid, name); +@@ -1313,9 +1290,9 @@ DEFINE_PROBE_FUNCTION(PROBE_PROCESS_FORK_PARAMS) + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_PROCESS_FORK); + pw_pr_debug("NODE = %p\n", node); + } +- if (!node) { ++ if (!node) + return; +- } ++ + list_for_each_entry(curr, &node->list, list) { + DO_PER_CPU_OVERHEAD_FUNC(sw_process_fork_exit_helper_i, curr, + child, true /* true ==> fork */); +@@ -1334,16 +1311,16 @@ DEFINE_PROBE_FUNCTION(PROBE_SCHED_PROCESS_EXIT_PARAMS) + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_SCHED_PROCESS_EXIT); + pw_pr_debug("NODE = %p\n", node); + } +- if (!node) { ++ if (!node) + return; +- } ++ + list_for_each_entry(curr, &node->list, list) { + DO_PER_CPU_OVERHEAD_FUNC(sw_process_fork_exit_helper_i, curr, + task, false /* false ==> exit */); + } + }; + +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++#if KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE + /* + * 10. 
THERMAL_APIC entry + */ +@@ -1373,7 +1350,7 @@ DEFINE_PROBE_FUNCTION(PROBE_THERMAL_APIC_EXIT_PARAMS) + } + DO_PER_CPU_OVERHEAD_FUNC(sw_tpf_i, (int)cpu, node); + }; +-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ ++#endif /* KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE */ + + #if IS_ENABLED(CONFIG_ANDROID) + /* +@@ -1412,9 +1389,9 @@ void sw_produce_wakelock_msg_i(int cpu, struct sw_collector_data *node, + strncpy(dst_vals, proc_name, SW_MAX_PROC_NAME_SIZE); + dst_vals += SW_MAX_PROC_NAME_SIZE; + +- if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) + pw_pr_warn("WARNING: could NOT produce message!\n"); +- } ++ + }; + + /* +@@ -1427,9 +1404,9 @@ void sw_handle_wakelock_i(int cpu, struct sw_trace_notifier_data *node, + const char *proc_name = NAME(); + struct sw_collector_data *curr = NULL; + +- if (!node) { ++ if (!node) + return; +- } ++ + + list_for_each_entry(curr, &node->list, list) { + sw_produce_wakelock_msg_i(cpu, curr, name, type, timeout, pid, +@@ -1443,7 +1420,7 @@ DEFINE_PROBE_FUNCTION(PROBE_WAKE_LOCK_PARAMS) + static struct sw_trace_notifier_data *node; + enum sw_kernel_wakelock_type type = SW_WAKE_LOCK; + u64 timeout = 0; +-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++#if KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE + const char *name = lock->name; + #endif + +@@ -1451,7 +1428,7 @@ DEFINE_PROBE_FUNCTION(PROBE_WAKE_LOCK_PARAMS) + node = GET_COLLECTOR_TRACE_NODE(SW_TRACE_ID_WAKE_LOCK); + pw_pr_debug("NODE = %p\n", node); + } +-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++#if KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE + /* + * Was this wakelock acquired with a timeout i.e. + * is this an auto expire wakelock? 
+@@ -1460,7 +1437,7 @@ DEFINE_PROBE_FUNCTION(PROBE_WAKE_LOCK_PARAMS) + type = SW_WAKE_LOCK_TIMEOUT; + timeout = jiffies_to_msecs(lock->expires - jiffies); + } +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ ++#endif /* KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE */ + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_wakelock_i, cpu, node, name, + (int)type, timeout); + }; +@@ -1473,7 +1450,7 @@ DEFINE_PROBE_FUNCTION(PROBE_WAKE_UNLOCK_PARAMS) + int cpu = RAW_CPU(); + static struct sw_trace_notifier_data *node; + enum sw_kernel_wakelock_type type = SW_WAKE_UNLOCK; +-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++#if KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE + const char *name = lock->name; + #endif + +@@ -1501,13 +1478,13 @@ DEFINE_PROBE_FUNCTION(PROBE_WORKQUEUE_PARAMS) + pw_pr_debug("NODE = %p\n", node); + } + +- if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) { ++ if (!node || !SHOULD_PRODUCE_WAKEUP_SAMPLE(cpu)) + return; +- } +- list_for_each_entry(curr, &node->list, list) { ++ ++ list_for_each_entry(curr, &node->list, list) + DO_PER_CPU_OVERHEAD_FUNC(sw_handle_workqueue_wakeup_helper_i, + cpu, curr); +- } ++ + }; + + /* +@@ -1542,9 +1519,9 @@ static void sw_send_pm_notification_i(int value) + msg->payload_len = sizeof(value); + msg->p_payload = buffer + sizeof(*msg); + *((int *)msg->p_payload) = value; +- if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) { ++ if (sw_produce_generic_msg(msg, SW_WAKEUP_ACTION_DIRECT)) + pw_pr_error("couldn't produce generic message!\n"); +- } ++ + vfree(buffer); + } + +@@ -1580,23 +1557,23 @@ static void sw_probe_pm_helper_i(int id, int both_id, bool is_enter, + /* + * Exitting HIBERNATION/SUSPEND + */ +- if (sw_is_reset_i() && reset_node) { ++ if (sw_is_reset_i() && reset_node) + sw_handle_reset_messages_i(reset_node); +- } ++ + } +- if (node) { ++ if (node) + sw_handle_trace_notifier_i(node); +- } +- if (both_node) { ++ ++ if (both_node) + sw_handle_trace_notifier_i(both_node); +- } ++ + /* Send the suspend-resume 
notification */ + sw_send_pm_notification_i(SW_PM_VALUE(mode, action)); + } + + static bool sw_is_suspend_via_firmware(void) + { +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) ++#if KERNEL_VERSION(4, 4, 0) <= LINUX_VERSION_CODE + /* 'pm_suspend_via_firmware' only available in kernel >= 4.4 */ + return pm_suspend_via_firmware(); + #endif +@@ -1666,7 +1643,7 @@ static void sw_store_topology_change_i(enum cpu_action type, + ++sw_num_topology_entries; + } + +-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) ++#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE + int sw_probe_hotplug_notifier_i(struct notifier_block *block, + unsigned long action, void *pcpu) + { +@@ -1750,7 +1727,7 @@ static void sw_probe_cpuhp_helper_i(unsigned int cpu, enum cpu_action action) + + static int sw_probe_cpu_offline_i(unsigned int cpu) + { +- printk(KERN_INFO "DEBUG: offline notification for cpu %u at %llu\n", ++ pw_pr_debug("DEBUG: offline notification for cpu %u at %llu\n", + cpu, sw_tscval()); + sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_OFFLINE); + return 0; +@@ -1758,13 +1735,13 @@ static int sw_probe_cpu_offline_i(unsigned int cpu) + + static int sw_probe_cpu_online_i(unsigned int cpu) + { +- printk(KERN_INFO "DEBUG: online notification for cpu %u at %llu\n", cpu, ++ pw_pr_debug("DEBUG: online notification for cpu %u at %llu\n", cpu, + sw_tscval()); + sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_ONLINE_PREPARE); + sw_probe_cpuhp_helper_i(cpu, SW_CPU_ACTION_ONLINE); + return 0; + } +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */ ++#endif /* KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE */ + + /* + * 2. 
CPUFREQ notifier +@@ -1798,23 +1775,23 @@ static int sw_probe_cpufreq_notifier_i(struct notifier_block *block, + */ + int sw_register_trace_cpu_idle_i(struct sw_trace_notifier_data *node) + { +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++#if KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE + DO_REGISTER_SW_TRACEPOINT_PROBE(node, power_start, + sw_probe_power_start_i); + #else /* kernel version >= 2.6.38 */ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, cpu_idle, sw_probe_cpu_idle_i); +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ ++#endif /* KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE */ + return PW_SUCCESS; + }; + + int sw_unregister_trace_cpu_idle_i(struct sw_trace_notifier_data *node) + { +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++#if KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, power_start, + sw_probe_power_start_i); + #else /* kernel version >= 2.6.38 */ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, cpu_idle, sw_probe_cpu_idle_i); +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ ++#endif /* KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE */ + return PW_SUCCESS; + }; + +@@ -1823,25 +1800,25 @@ int sw_unregister_trace_cpu_idle_i(struct sw_trace_notifier_data *node) + */ + int sw_register_trace_cpu_frequency_i(struct sw_trace_notifier_data *node) + { +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++#if KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE + DO_REGISTER_SW_TRACEPOINT_PROBE(node, power_frequency, + sw_probe_power_frequency_i); + #else /* kernel version >= 2.6.38 */ + DO_REGISTER_SW_TRACEPOINT_PROBE(node, cpu_frequency, + sw_probe_cpu_frequency_i); +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ ++#endif /* KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE */ + return PW_SUCCESS; + }; + + int sw_unregister_trace_cpu_frequency_i(struct sw_trace_notifier_data *node) + { +-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) ++#if KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE + 
DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, power_frequency, + sw_probe_power_frequency_i); + #else /* kernel version >= 2.6.38 */ + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, cpu_frequency, + sw_probe_cpu_frequency_i); +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ ++#endif /* KERNEL_VERSION(2, 6, 38) > LINUX_VERSION_CODE */ + return PW_SUCCESS; + }; + +@@ -1955,7 +1932,7 @@ int sw_unregister_trace_sched_process_exit_i(struct sw_trace_notifier_data + /* + * 10. THERMAL_APIC entry + */ +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) ++#if KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE + int sw_register_trace_thermal_apic_entry_i(struct sw_trace_notifier_data *node) + { + DO_REGISTER_SW_TRACEPOINT_PROBE(node, thermal_apic_entry, +@@ -1987,13 +1964,13 @@ int sw_unregister_trace_thermal_apic_exit_i(struct sw_trace_notifier_data *node) + sw_probe_thermal_apic_exit_i); + return PW_SUCCESS; + }; +-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */ ++#endif /* KERNEL_VERSION(3, 14, 0) <= LINUX_VERSION_CODE */ + + /* + * 11. WAKE lock / WAKEUP source activate. 
+ */ + #if IS_ENABLED(CONFIG_ANDROID) +-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++#if KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE + int sw_register_trace_wake_lock_i(struct sw_trace_notifier_data *node) + { + DO_REGISTER_SW_TRACEPOINT_PROBE(node, wake_lock, sw_probe_wake_lock_i); +@@ -2006,7 +1983,7 @@ int sw_unregister_trace_wake_lock_i(struct sw_trace_notifier_data *node) + sw_probe_wake_lock_i); + return PW_SUCCESS; + }; +-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ ++#else /* KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE */ + int sw_register_trace_wakeup_source_activate_i( + struct sw_trace_notifier_data *node) + { +@@ -2022,12 +1999,12 @@ int sw_unregister_trace_wakeup_source_activate_i( + sw_probe_wakeup_source_activate_i); + return PW_SUCCESS; + }; +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ ++#endif /* KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE */ + + /* + * 11. WAKE unlock / WAKEUP source deactivate. + */ +-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++#if KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE + int sw_register_trace_wake_unlock_i(struct sw_trace_notifier_data *node) + { + DO_REGISTER_SW_TRACEPOINT_PROBE(node, wake_unlock, +@@ -2042,7 +2019,7 @@ int sw_unregister_trace_wake_unlock_i(struct sw_trace_notifier_data *node) + return PW_SUCCESS; + }; + +-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) */ ++#else /* KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE */ + int sw_register_trace_wakeup_source_deactivate_i( + struct sw_trace_notifier_data *node) + { +@@ -2058,7 +2035,7 @@ int sw_unregister_trace_wakeup_source_deactivate_i( + sw_probe_wakeup_source_deactivate_i); + return PW_SUCCESS; + }; +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ ++#endif /* KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE */ + #endif /* CONFIG_ANDROID */ + + /* +@@ -2066,7 +2043,7 @@ int sw_unregister_trace_wakeup_source_deactivate_i( + */ + int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data *node) + { 
+-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) ++#if KERNEL_VERSION(2, 6, 35) >= LINUX_VERSION_CODE + DO_REGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execution, + sw_probe_workqueue_execution_i); + #else +@@ -2079,7 +2056,7 @@ int sw_register_trace_workqueue_execution_i(struct sw_trace_notifier_data *node) + int sw_unregister_trace_workqueue_execution_i( + struct sw_trace_notifier_data *node) + { +-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35) ++#if KERNEL_VERSION(2, 6, 35) >= LINUX_VERSION_CODE + DO_UNREGISTER_SW_TRACEPOINT_PROBE(node, workqueue_execution, + sw_probe_workqueue_execution_i); + #else +@@ -2102,9 +2079,10 @@ int sw_register_trace_sched_switch_i(struct sw_trace_notifier_data *node) + */ + { + int cpu = 0; +- for_each_present_cpu(cpu) { ++ ++ for_each_present_cpu(cpu) + *(&per_cpu(sw_pcpu_polling_jiff, cpu)) = jiffies; +- } ++ + } + DO_REGISTER_SW_TRACEPOINT_PROBE(node, sched_switch, + sw_probe_sched_switch_i); +@@ -2162,7 +2140,7 @@ int sw_unregister_cpufreq_notifier_i(struct sw_trace_notifier_data *node) + return PW_SUCCESS; + }; + +-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) ++#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE + /* + * 3. CPU hot plug notifier. 
+ */ +@@ -2182,7 +2160,7 @@ int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + return PW_SUCCESS; + }; + +-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) */ ++#else /* KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE */ + static int sw_cpuhp_state = -1; + int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + { +@@ -2199,18 +2177,18 @@ int sw_register_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + + int sw_unregister_hotcpu_notifier_i(struct sw_trace_notifier_data *node) + { +- if (sw_cpuhp_state >= 0) { ++ if (sw_cpuhp_state >= 0) + cpuhp_remove_state_nocalls((enum cpuhp_state)sw_cpuhp_state); +- } ++ + return 0; + }; +-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */ ++#endif /* KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE */ + + /* + * Tracepoint extraction routines. + * Required for newer kernels (>=3.15) + */ +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) ++#if KERNEL_VERSION(3, 15, 0) <= LINUX_VERSION_CODE + static void sw_extract_tracepoint_callback(struct tracepoint *tp, void *priv) + { + struct sw_trace_notifier_data *node = NULL; +@@ -2239,7 +2217,7 @@ static void sw_extract_tracepoint_callback(struct tracepoint *tp, void *priv) + } + } + }; +-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) */ ++#endif /* KERNEL_VERSION(3, 15, 0) <= LINUX_VERSION_CODE */ + #endif /* CONFIG_TRACEPOINTS */ + + /* +@@ -2248,7 +2226,7 @@ static void sw_extract_tracepoint_callback(struct tracepoint *tp, void *priv) + */ + int sw_extract_trace_notifier_providers(void) + { +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) && \ ++#if KERNEL_VERSION(3, 15, 0) <= LINUX_VERSION_CODE && \ + IS_ENABLED(CONFIG_TRACEPOINTS) + int numCallbacks = 0; + +@@ -2257,11 +2235,10 @@ int sw_extract_trace_notifier_providers(void) + /* + * Did we get the complete list? 
+ */ +- if (numCallbacks != NUM_VALID_TRACEPOINTS) { +- printk(KERN_WARNING +- "WARNING: Could NOT find tracepoint structs for some tracepoints!\n"); +- } +-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) */ ++ if (numCallbacks != NUM_VALID_TRACEPOINTS) ++ pw_pr_warn( ++ "WARNING : Could NOT find tracepoint structs for some tracepoints !\n"); ++#endif /* KERNEL_VERSION(3, 15, 0) <= LINUX_VERSION_CODE */ + return PW_SUCCESS; + }; + +@@ -2274,9 +2251,8 @@ void sw_reset_trace_notifier_providers(void) + { + int cpu = 0; + +- for_each_online_cpu(cpu) { ++ for_each_online_cpu(cpu) + RESET_VALID_WAKEUP_EVENT_COUNTER(cpu); +- } + } + /* + * Reset the wakeup event flag. Not strictly required if we +@@ -2319,7 +2295,7 @@ int sw_add_trace_notifier_providers(void) + FOR_EACH_TRACEPOINT_NODE(i, node) + { + if (sw_register_trace_notify_provider(node)) { +- pw_pr_error("ERROR: couldn't add a trace provider!\n"); ++ pw_pr_error("ERROR : couldn't add a trace provider!\n"); + return -EIO; + } + } +@@ -2327,11 +2303,10 @@ int sw_add_trace_notifier_providers(void) + { + if (sw_register_trace_notify_provider(node)) { + pw_pr_error( +- "ERROR: couldn't add a notifier provider!\n"); ++ "ERROR: couldn't add a notifier provider !\n"); + return -EIO; + } + } +-#if IS_ENABLED(CONFIG_TRACEPOINTS) + /* + * Add the cpu hot plug notifier. 
+ */ +@@ -2339,11 +2314,10 @@ int sw_add_trace_notifier_providers(void) + if (sw_register_trace_notify_provider( + &s_hotplug_notifier_data)) { + pw_pr_error( +- "ERROR: couldn't add cpu notifier provider!\n"); ++ "ERROR : couldn't add cpu notifier provider!\n"); + return -EIO; + } + } +-#endif /* CONFIG_TRACEPOINTS */ + return PW_SUCCESS; + } + +diff --git a/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c b/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c +index 8154f6b516c8..b03155c89c14 100644 +--- a/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c ++++ b/drivers/platform/x86/socwatch/sw_tracepoint_handlers.c +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. 
+- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ + #include "sw_structs.h" + #include "sw_kernel_defines.h" + #include "sw_types.h" +@@ -71,15 +70,16 @@ struct sw_trace_list_node { + SW_LIST_ENTRY(list, sw_trace_list_node); + }; + static SW_DEFINE_LIST_HEAD(s_trace_list, sw_trace_list_node) = +- SW_LIST_HEAD_INITIALIZER(s_trace_list); ++ SW_LIST_HEAD_INITIALIZER(s_trace_list); + static SW_DEFINE_LIST_HEAD(s_notifier_list, sw_trace_list_node) = +- SW_LIST_HEAD_INITIALIZER(s_notifier_list); ++ SW_LIST_HEAD_INITIALIZER(s_notifier_list); + static int s_trace_idx = -1, s_notifier_idx = -1; + + SW_DEFINE_LIST_HEAD(sw_topology_list, sw_topology_node) = +- SW_LIST_HEAD_INITIALIZER(sw_topology_list); ++ SW_LIST_HEAD_INITIALIZER(sw_topology_list); + size_t sw_num_topology_entries; + ++ + /* ------------------------------------------------- + * Function definitions. + * ------------------------------------------------- +@@ -99,46 +99,45 @@ void sw_print_trace_notifier_overheads(void) + sw_print_trace_notifier_provider_overheads(); + } + +-static int sw_for_each_node_i(void *list_head, +- int (*func)(struct sw_trace_notifier_data *node, +- void *priv), +- void *priv, bool return_on_error) ++static int sw_for_each_node_i( ++ void *list_head, ++ int (*func)(struct sw_trace_notifier_data *node, void *priv), ++ void *priv, bool return_on_error) + { + SW_LIST_HEAD_VAR(sw_trace_list_node) * head = list_head; + int retval = PW_SUCCESS; + struct sw_trace_list_node *lnode = NULL; + +- SW_LIST_FOR_EACH_ENTRY(lnode, head, list) +- { ++ SW_LIST_FOR_EACH_ENTRY(lnode, head, list) { + if ((*func)(lnode->data, priv)) { + retval = -EIO; +- if (return_on_error) { ++ if (return_on_error) + break; +- } + } + } + return retval; + } + +-int sw_for_each_tracepoint_node(int (*func)(struct sw_trace_notifier_data *node, +- void *priv), +- void *priv, bool return_on_error) ++int sw_for_each_tracepoint_node( ++ int (*func)(struct sw_trace_notifier_data *node, void *priv), ++ void *priv, bool return_on_error) + { +- if (func) { +- return 
sw_for_each_node_i(&s_trace_list, func, priv, +- return_on_error); +- } ++ if (func) ++ return sw_for_each_node_i(&s_trace_list, ++ func, priv, return_on_error); ++ + return PW_SUCCESS; + } + +-int sw_for_each_notifier_node(int (*func)(struct sw_trace_notifier_data *node, +- void *priv), +- void *priv, bool return_on_error) ++int sw_for_each_notifier_node( ++ int (*func)(struct sw_trace_notifier_data *node, void *priv), ++ void *priv, bool return_on_error) + { +- if (func) { +- return sw_for_each_node_i(&s_notifier_list, func, priv, +- return_on_error); +- } ++ ++ if (func) ++ return sw_for_each_node_i(&s_notifier_list, ++ func, priv, return_on_error); ++ + return PW_SUCCESS; + } + +@@ -156,7 +155,7 @@ int sw_get_trace_notifier_id(struct sw_trace_notifier_data *tnode) + return -EIO; + } + if (!(tnode->type == SW_TRACE_COLLECTOR_TRACEPOINT || +- tnode->type == SW_TRACE_COLLECTOR_NOTIFIER)) { ++ tnode->type == SW_TRACE_COLLECTOR_NOTIFIER)) { + pw_pr_error( + "ERROR: cannot get ID for invalid trace/notifier data!\n"); + return -EIO; +@@ -166,43 +165,43 @@ int sw_get_trace_notifier_id(struct sw_trace_notifier_data *tnode) + "ERROR: cannot get ID for trace/notifier data without valid name!\n"); + return -EIO; + } +-#ifdef LINUX_VERSION_CODE +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) && \ ++ ++#if defined(LINUX_VERSION_CODE) ++#if KERNEL_VERSION(3, 15, 0) <= LINUX_VERSION_CODE && \ + defined(CONFIG_TRACEPOINTS) ++ + if (tnode->type == SW_TRACE_COLLECTOR_TRACEPOINT && +- tnode->name->kernel_name && !tnode->tp) { ++ tnode->name->kernel_name && !tnode->tp) { + /* No tracepoint structure found so no ID possible */ + return -EIO; + } + #endif + #endif +- if (tnode->type == SW_TRACE_COLLECTOR_NOTIFIER) { ++ if (tnode->type == SW_TRACE_COLLECTOR_NOTIFIER) + head = (void *)&s_notifier_list; +- } +- SW_LIST_FOR_EACH_ENTRY(lnode, head, list) +- { ++ ++ SW_LIST_FOR_EACH_ENTRY(lnode, head, list) { + struct sw_trace_notifier_data *data = lnode->data; + +- if 
(!strcmp(data->name->abstract_name, +- tnode->name->abstract_name)) { ++ if (!strcmp( ++ data->name->abstract_name, tnode->name->abstract_name)) + return lnode->id; +- } + } + return -1; + } + /* + * Retrieve the "kernel" name for this tracepoint/notifier. + */ +-const char * +-sw_get_trace_notifier_kernel_name(struct sw_trace_notifier_data *node) ++const char *sw_get_trace_notifier_kernel_name( ++ struct sw_trace_notifier_data *node) + { + return node->name->kernel_name; + }; + /* + * Retrieve the "abstract" name for this tracepoint/notifier. + */ +-const char * +-sw_get_trace_notifier_abstract_name(struct sw_trace_notifier_data *node) ++const char *sw_get_trace_notifier_abstract_name( ++ struct sw_trace_notifier_data *node) + { + return node->name->abstract_name; + }; +@@ -220,12 +219,14 @@ int sw_register_trace_notify_provider(struct sw_trace_notifier_data *data) + return -EIO; + } + if (!(data->type == SW_TRACE_COLLECTOR_TRACEPOINT || +- data->type == SW_TRACE_COLLECTOR_NOTIFIER)) { +- pw_pr_error("ERROR: cannot add invalid trace/notifier data!\n"); ++ data->type == SW_TRACE_COLLECTOR_NOTIFIER)) { ++ pw_pr_error( ++ "ERROR: cannot add invalid trace/notifier data!\n"); + return -EIO; + } + /* +- * Kernel name is allowed to be NULL, but abstract name MUST be present! ++ * Kernel name is allowed to be NULL, but abstract name ++ * MUST be present! + */ + if (!data->name || !data->name->abstract_name) { + pw_pr_error( +@@ -264,6 +265,7 @@ static void sw_free_trace_notifier_list_i(void *list_head) + while (!SW_LIST_EMPTY(head)) { + struct sw_trace_list_node *lnode = + SW_LIST_GET_HEAD_ENTRY(head, sw_trace_list_node, list); ++ + SW_LIST_UNLINK(lnode, list); + sw_kfree(lnode); + } +@@ -291,53 +293,53 @@ void sw_remove_trace_notify(void) + #define REG_FLAG (void *)1 + #define UNREG_FLAG (void *)2 + static int sw_reg_unreg_node_i(struct sw_trace_notifier_data *node, +- void *is_reg) ++ void *is_reg) + { + if (is_reg == REG_FLAG) { + /* + * Do we have anything to collect? 
+ * Update: or were we asked to always register? + */ +- if (SW_LIST_EMPTY(&node->list) && !node->always_register) { ++ if (SW_LIST_EMPTY(&node->list) && !node->always_register) + return PW_SUCCESS; +- } ++ + /* +- * Sanity: ensure we have a register AND an +- * unregister function before proceeding! ++ * Sanity: ensure we have a register AND an unregister function ++ * before proceeding! + */ + if (node->probe_register == NULL || +- node->probe_unregister == NULL) { ++ node->probe_unregister == NULL) { + pw_pr_debug( + "WARNING: invalid trace/notifier register/unregister function for %s\n", +- sw_get_trace_notifier_kernel_name(node)); ++ sw_get_trace_notifier_kernel_name(node)); + /* +- * Don't flag this as an error -- +- * some socwatch trace providers don't have a +- * register/unregister function ++ * Don't flag this as an error -- some socwatch ++ * trace providers don't have a register/unregister ++ * function + */ + return PW_SUCCESS; + } +- if ((*node->probe_register)(node)) { ++ if ((*node->probe_register)(node)) + return -EIO; +- } ++ + node->was_registered = true; + return PW_SUCCESS; + } else if (is_reg == UNREG_FLAG) { + if (node->was_registered) { + /* +- * No need to check for validity of probe +- * unregister function -- 'sw_register_notifiers_i()' ++ * No need to check for validity of probe unregister ++ * function -- 'sw_register_notifiers_i()' + * would already have done so! + */ + WARN_ON((*node->probe_unregister)(node)); + node->was_registered = false; + pw_pr_debug("OK, unregistered trace/notifier for %s\n", +- sw_get_trace_notifier_kernel_name(node)); ++ sw_get_trace_notifier_kernel_name(node)); + } + return PW_SUCCESS; + } + pw_pr_error("ERROR: invalid reg/unreg flag value 0x%lx\n", +- (unsigned long)is_reg); ++ (unsigned long)is_reg); + return -EIO; + } + /* +@@ -348,16 +350,16 @@ int sw_register_trace_notifiers(void) + /* + * First, the tracepoints. 
+ */ +- if (sw_for_each_tracepoint_node(&sw_reg_unreg_node_i, REG_FLAG, +- true /* return on error */)) { ++ if (sw_for_each_tracepoint_node(&sw_reg_unreg_node_i, ++ REG_FLAG, true /* return on error */)) { + pw_pr_error("ERROR registering some tracepoints\n"); + return -EIO; + } + /* + * And then the notifiers. + */ +- if (sw_for_each_notifier_node(&sw_reg_unreg_node_i, REG_FLAG, +- true /* return on error */)) { ++ if (sw_for_each_notifier_node(&sw_reg_unreg_node_i, ++ REG_FLAG, true /* return on error */)) { + pw_pr_error("ERROR registering some tracepoints\n"); + return -EIO; + } +@@ -372,15 +374,16 @@ int sw_unregister_trace_notifiers(void) + * First, the notifiers. + */ + if (sw_for_each_notifier_node(&sw_reg_unreg_node_i, UNREG_FLAG, +- true /* return on error */)) { ++ true /* return on error */)) { + pw_pr_error("ERROR registering some tracepoints\n"); + return -EIO; + } + /* + * And then the tracepoints. + */ +- if (sw_for_each_tracepoint_node(&sw_reg_unreg_node_i, UNREG_FLAG, +- true /* return on error */)) { ++ if (sw_for_each_tracepoint_node( ++ &sw_reg_unreg_node_i, ++ UNREG_FLAG, true /* return on error */)) { + pw_pr_error("ERROR registering some tracepoints\n"); + return -EIO; + } +@@ -393,10 +396,11 @@ void sw_clear_topology_list(void) + while (!SW_LIST_EMPTY(head)) { + struct sw_topology_node *lnode = + SW_LIST_GET_HEAD_ENTRY(head, sw_topology_node, list); ++ + pw_pr_debug("Clearing topology node for cpu %d\n", +- lnode->change.cpu); ++ lnode->change.cpu); + SW_LIST_UNLINK(lnode, list); + sw_kfree(lnode); + } +- sw_num_topology_entries = 0; ++ sw_num_topology_entries = 0; + } +diff --git a/drivers/platform/x86/socwatchhv/inc/asm_helper.h b/drivers/platform/x86/socwatchhv/inc/asm_helper.h +index 10e95190e4f0..ee5585e45686 100644 +--- a/drivers/platform/x86/socwatchhv/inc/asm_helper.h ++++ b/drivers/platform/x86/socwatchhv/inc/asm_helper.h +@@ -1,65 +1,64 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. 
When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ + + #ifndef _ASM_HELPER_H_ + #define _ASM_HELPER_H_ + + #include + +-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) ++#if KERNEL_VERSION(4, 1, 0) > LINUX_VERSION_CODE + + #include + #include +@@ -68,91 +67,150 @@ + + #ifdef CONFIG_AS_CFI + +-#define CFI_STARTPROC (.cfi_startproc) +-#define CFI_ENDPROC (.cfi_endproc) ++#define CFI_STARTPROC (.cfi_startproc) ++#define CFI_ENDPROC (.cfi_endproc) + #define CFI_ADJUST_CFA_OFFSET (.cfi_adjust_cfa_offset) +-#define CFI_REL_OFFSET (.cfi_rel_offset) +-#define CFI_RESTORE (.cfi_restore) ++#define CFI_REL_OFFSET (.cfi_rel_offset) ++#define CFI_RESTORE (.cfi_restore) + + #else + +-.macro cfi_ignore a = 0, b = 0, c = 0, d = 0.endm ++.macro cfi_ignore a = 0, b = 0, c = 0, d = 0 ++.endm + +-#define CFI_STARTPROC cfi_ignore +-#define CFI_ENDPROC cfi_ignore +-#define CFI_ADJUST_CFA_OFFSET cfi_ignore +-#define CFI_REL_OFFSET cfi_ignore +-#define CFI_RESTORE cfi_ignore ++#define CFI_STARTPROC cfi_ignore ++#define CFI_ENDPROC cfi_ignore ++#define CFI_ADJUST_CFA_OFFSET cfi_ignore ++#define CFI_REL_OFFSET cfi_ignore ++#define CFI_RESTORE cfi_ignore + #endif + + #ifdef CONFIG_X86_64 +-.macro SAVE_C_REGS_HELPER +- offset = 0 rax = 1 rcx = 1 r8910 = 1 r11 = 1.if \r11 movq % r11, +- 6 * 8 +\offset(% rsp) CFI_REL_OFFSET r11, \offset.endif.if \r8910 movq +- % r10, +- 7 * 8 +\offset(% rsp) CFI_REL_OFFSET r10, \offset movq % r9, +- 8 * 8 +\offset(% rsp) CFI_REL_OFFSET r9, \offset movq % r8, +- 9 * 8 +\offset(% rsp) CFI_REL_OFFSET r8, \offset.endif.if \rax movq +- % rax, +- 10 * 8 +\offset(% rsp) CFI_REL_OFFSET rax, \offset.endif.if \rcx movq +- % rcx, +- 11 * 8 +\offset(% rsp) CFI_REL_OFFSET rcx, \offset.endif movq % rdx, +- 12 * 8 +\offset(% rsp) CFI_REL_OFFSET rdx, \offset movq % rsi, +- 13 * 8 +\offset(% rsp) CFI_REL_OFFSET rsi, \offset movq % rdi, +- 14 * 8 +\offset(% rsp) CFI_REL_OFFSET rdi, \offset.endm.macro +- SAVE_C_REGS offset = +- 0 SAVE_C_REGS_HELPER \offset +- , +- 1, 1, 1, 1.endm.macro SAVE_EXTRA_REGS offset = 0 movq % 
r15, +- 0 * 8 +\offset(% rsp) CFI_REL_OFFSET r15, \offset movq % r14, +- 1 * 8 +\offset(% rsp) CFI_REL_OFFSET r14, \offset movq % r13, +- 2 * 8 +\offset(% rsp) CFI_REL_OFFSET r13, \offset movq % r12, +- 3 * 8 +\offset(% rsp) CFI_REL_OFFSET r12, \offset movq % rbp, +- 4 * 8 +\offset(% rsp) CFI_REL_OFFSET rbp, \offset movq % rbx, +- 5 * 8 +\offset(% rsp) CFI_REL_OFFSET rbx, \offset.endm +- +- .macro +- RESTORE_EXTRA_REGS offset = +- 0 movq 0 * 8 +\offset( +- % rsp), +- % r15 CFI_RESTORE r15 movq 1 * 8 +\offset(% rsp), +- % r14 CFI_RESTORE r14 movq 2 * 8 +\offset(% rsp), +- % r13 CFI_RESTORE r13 movq 3 * 8 +\offset(% rsp), +- % r12 CFI_RESTORE r12 movq 4 * 8 +\offset(% rsp), +- % rbp CFI_RESTORE rbp movq 5 * 8 +\offset(% rsp), +- % rbx CFI_RESTORE rbx.endm.macro RESTORE_C_REGS_HELPER rstor_rax = 1, +- rstor_rcx = 1, rstor_r11 = 1, +- rstor_r8910 = 1, rstor_rdx = 1.if \rstor_r11 movq 6 * 8(% rsp), +- % r11 CFI_RESTORE r11.endif.if \rstor_r8910 movq 7 * 8(% rsp), +- % r10 CFI_RESTORE r10 movq 8 * 8(% rsp), +- % r9 CFI_RESTORE r9 movq 9 * 8(% rsp), +- % r8 CFI_RESTORE r8.endif.if \rstor_rax movq 10 * 8(% rsp), +- % rax CFI_RESTORE rax.endif.if \rstor_rcx movq 11 * 8(% rsp), +- % rcx CFI_RESTORE rcx.endif.if \rstor_rdx movq 12 * 8(% rsp), +- % rdx CFI_RESTORE rdx.endif movq 13 * 8(% rsp), +- % rsi CFI_RESTORE rsi movq 14 * 8(% rsp), +- % rdi CFI_RESTORE rdi.endm.macro RESTORE_C_REGS RESTORE_C_REGS_HELPER 1, +- 1, 1, 1, +- 1.endm +- +- .macro ALLOC_PT_GPREGS_ON_STACK addskip = 0 subq $15 * +- 8 +\addskip, +- % rsp CFI_ADJUST_CFA_OFFSET +- 15 * 8 +\addskip.endm +- +- .macro REMOVE_PT_GPREGS_FROM_STACK +- addskip = 0 addq $15 * 8 +\addskip, +- % rsp CFI_ADJUST_CFA_OFFSET - +- (15 * 8 +\addskip) +- .endm +- +- .macro SAVE_ALL ALLOC_PT_GPREGS_ON_STACK SAVE_C_REGS +- SAVE_EXTRA_REGS +- .endm +- +- .macro RESTORE_ALL RESTORE_EXTRA_REGS RESTORE_C_REGS +- REMOVE_PT_GPREGS_FROM_STACK.endm +-#endif /*CONFIG_X86_64 */ ++ .macro SAVE_C_REGS_HELPER offset = 0 rax = 1 rcx = 1 r8910 = 1 
r11 = 1 ++ .if \r11 ++ movq % r11, 6*8+\offset(%rsp) ++ CFI_REL_OFFSET r11, \offset ++ .endif ++ .if \r8910 ++ movq % r10, 7*8+\offset(%rsp) ++ CFI_REL_OFFSET r10, \offset ++ ++ movq % r9, 8*8+\offset(%rsp) ++ CFI_REL_OFFSET r9, \offset ++ ++ movq % r8, 9*8+\offset(%rsp) ++ CFI_REL_OFFSET r8, \offset ++ .endif ++ .if \rax ++ movq % rax, 10*8+\offset(%rsp) ++ CFI_REL_OFFSET rax, \offset ++ .endif ++ .if \rcx ++ movq % rcx, 11*8+\offset(%rsp) ++ CFI_REL_OFFSET rcx, \offset ++ .endif ++ movq % rdx, 12*8+\offset(%rsp) ++ CFI_REL_OFFSET rdx, \offset ++ ++ movq % rsi, 13*8+\offset(%rsp) ++ CFI_REL_OFFSET rsi, \offset ++ ++ movq % rdi, 14*8+\offset(%rsp) ++ CFI_REL_OFFSET rdi, \offset ++ .endm ++ .macro SAVE_C_REGS offset = 0 ++ SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 ++ .endm ++ .macro SAVE_EXTRA_REGS offset = 0 ++ movq % r15, 0*8+\offset(%rsp) ++ CFI_REL_OFFSET r15, \offset ++ ++ movq % r14, 1*8+\offset(%rsp) ++ CFI_REL_OFFSET r14, \offset ++ ++ movq % r13, 2*8+\offset(%rsp) ++ CFI_REL_OFFSET r13, \offset ++ ++ movq % r12, 3*8+\offset(%rsp) ++ CFI_REL_OFFSET r12, \offset ++ ++ movq % rbp, 4*8+\offset(%rsp) ++ CFI_REL_OFFSET rbp, \offset ++ ++ movq % rbx, 5*8+\offset(%rsp) ++ CFI_REL_OFFSET rbx, \offset ++ .endm ++ ++ .macro RESTORE_EXTRA_REGS offset = 0 ++ movq 0*8+\offset(%rsp), % r15 ++ CFI_RESTORE r15 ++ movq 1*8+\offset(%rsp), % r14 ++ CFI_RESTORE r14 ++ movq 2*8+\offset(%rsp), % r13 ++ CFI_RESTORE r13 ++ movq 3*8+\offset(%rsp), % r12 ++ CFI_RESTORE r12 ++ movq 4*8+\offset(%rsp), % rbp ++ CFI_RESTORE rbp ++ movq 5*8+\offset(%rsp), % rbx ++ CFI_RESTORE rbx ++ .endm ++ .macro RESTORE_C_REGS_HELPER rstor_rax = 1, rstor_rcx = 1, rstor_r11 = 1, rstor_r8910 = 1, rstor_rdx = 1 ++ .if \rstor_r11 ++ movq 6*8(%rsp), % r11 ++ CFI_RESTORE r11 ++ .endif ++ .if \rstor_r8910 ++ movq 7*8(%rsp), % r10 ++ CFI_RESTORE r10 ++ movq 8*8(%rsp), % r9 ++ CFI_RESTORE r9 ++ movq 9*8(%rsp), % r8 ++ CFI_RESTORE r8 ++ .endif ++ .if \rstor_rax ++ movq 10*8(%rsp), % rax ++ CFI_RESTORE rax ++ .endif 
++ .if \rstor_rcx ++ movq 11*8(%rsp), % rcx ++ CFI_RESTORE rcx ++ .endif ++ .if \rstor_rdx ++ movq 12*8(%rsp), % rdx ++ CFI_RESTORE rdx ++ .endif ++ movq 13*8(%rsp), % rsi ++ CFI_RESTORE rsi ++ movq 14*8(%rsp), % rdi ++ CFI_RESTORE rdi ++ .endm ++ .macro RESTORE_C_REGS ++ RESTORE_C_REGS_HELPER 1, 1, 1, 1, 1 ++ .endm ++ ++ .macro ALLOC_PT_GPREGS_ON_STACK addskip = 0 ++ subq $15*8+\addskip, % rsp ++ CFI_ADJUST_CFA_OFFSET 15*8+\addskip ++ .endm ++ ++ .macro REMOVE_PT_GPREGS_FROM_STACK addskip = 0 ++ addq $15*8+\addskip, % rsp ++ CFI_ADJUST_CFA_OFFSET - (15*8+\addskip) ++ .endm ++ ++ .macro SAVE_ALL ++ ALLOC_PT_GPREGS_ON_STACK ++ SAVE_C_REGS ++ SAVE_EXTRA_REGS ++ .endm ++ ++ .macro RESTORE_ALL ++ RESTORE_EXTRA_REGS ++ RESTORE_C_REGS ++ REMOVE_PT_GPREGS_FROM_STACK ++ .endm ++#endif /* CONFIG_X86_64 */ + #endif + + #endif +diff --git a/drivers/platform/x86/socwatchhv/inc/control.h b/drivers/platform/x86/socwatchhv/inc/control.h +index 7403150dd679..a1629c3aa2d7 100644 +--- a/drivers/platform/x86/socwatchhv/inc/control.h ++++ b/drivers/platform/x86/socwatchhv/inc/control.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _CONTROL_H_ + #define _CONTROL_H_ +@@ -73,9 +72,9 @@ typedef void *pvoid; + /* + * These routines have macros defined in asm/system.h + */ +-#define SYS_Local_Irq_Enable() local_irq_enable() +-#define SYS_Local_Irq_Disable() local_irq_disable() +-#define SYS_Local_Irq_Save(flags) local_irq_save(flags) ++#define SYS_Local_Irq_Enable() local_irq_enable() ++#define SYS_Local_Irq_Disable() local_irq_disable() ++#define SYS_Local_Irq_Save(flags) local_irq_save(flags) + #define SYS_Local_Irq_Restore(flags) local_irq_restore(flags) + + /* +@@ -86,7 +85,7 @@ typedef void *pvoid; + * CPU number of the processor being executed on + * + */ +-#define CONTROL_THIS_CPU() smp_processor_id() ++#define CONTROL_THIS_CPU() smp_processor_id() + + /**************************************************************************** + ** Interface definitions +@@ -96,99 +95,93 @@ typedef void *pvoid; + * Execution Control Functions + */ + +-extern void CONTROL_Invoke_Cpu(s32 cpuid, void (*func)(pvoid), pvoid ctx); ++extern void ++CONTROL_Invoke_Cpu( ++ s32 cpuid, ++ void (*func)(pvoid), ++ pvoid ctx ++); + + /* + * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) + * +- * @param func - function to be invoked by each core in the system +- * @param ctx - pointer to the parameter block for each function +- * invocation +- * @param blocking - Wait for invoked function to complete +- * @param exclude - exclude the current 
core from executing the code ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * @param blocking - Wait for invoked function to complete ++ * @param exclude - exclude the current core from executing the code + * + * @returns none + * +- * @brief Service routine to handle all kinds of parallel invoke on +- * all CPU calls ++ * @brief Service routine to handle all kinds of parallel invoke on all CPU calls + * + * Special Notes: +- * Invoke the function provided in parallel in either a +- * blocking/non-blocking mode. +- * The current core may be excluded if desired. +- * NOTE - Do not call this function directly from source code. +- * Use the aliases +- * CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), +- * CONTROL_Invoke_Parallel_XS(). ++ * Invoke the function provided in parallel in either a blocking/non-blocking mode. ++ * The current core may be excluded if desired. ++ * NOTE - Do not call this function directly from source code. Use the aliases ++ * CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), CONTROL_Invoke_Parallel_XS(). + * + */ +-extern void CONTROL_Invoke_Parallel_Service(void (*func)(pvoid), pvoid ctx, +- s32 blocking, s32 exclude); ++extern void ++CONTROL_Invoke_Parallel_Service( ++ void (*func)(pvoid), ++ pvoid ctx, ++ s32 blocking, ++ s32 exclude ++); + + /* + * @fn VOID CONTROL_Invoke_Parallel(func, ctx) + * +- * @param func - function to be invoked by each core in the system +- * @param ctx - pointer to the parameter block for each function +- * invocation ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * +- * @brief Invoke the named function in parallel. Wait for all the +- * functions to complete. ++ * @brief Invoke the named function in parallel. Wait for all the functions to complete. 
+ * + * Special Notes: +- * Invoke the function named in parallel, including the CPU +- * that the control is being invoked on +- * +- * Macro built on the service routine ++ * Invoke the function named in parallel, including the CPU that the control is ++ * being invoked on ++ * Macro built on the service routine + * + */ +-#define CONTROL_Invoke_Parallel(a, b) \ +- CONTROL_Invoke_Parallel_Service((a), (b), TRUE, FALSE) ++#define CONTROL_Invoke_Parallel(a, b) CONTROL_Invoke_Parallel_Service((a), (b), TRUE, FALSE) + + /* + * @fn VOID CONTROL_Invoke_Parallel_NB(func, ctx) + * +- * @param func - function to be invoked by each core in the system +- * @param ctx - pointer to the parameter block for each function +- * invocation ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * +- * @brief Invoke the named function in parallel. DO NOT Wait for all +- * the functions to complete. ++ * @brief Invoke the named function in parallel. DO NOT Wait for all the functions to complete. 
+ * + * Special Notes: +- * Invoke the function named in parallel, including the CPU +- * that the control is being invoked on +- * +- * Macro built on the service routine ++ * Invoke the function named in parallel, including the CPU that the control is ++ * being invoked on ++ * Macro built on the service routine + * + */ +-#define CONTROL_Invoke_Parallel_NB(a, b) \ +- CONTROL_Invoke_Parallel_Service((a), (b), FALSE, FALSE) ++#define CONTROL_Invoke_Parallel_NB(a, b) CONTROL_Invoke_Parallel_Service((a), (b), FALSE, FALSE) + + /* + * @fn VOID CONTROL_Invoke_Parallel_XS(func, ctx) + * +- * @param func - function to be invoked by each core in the system +- * @param ctx - pointer to the parameter block for each function +- * invocation ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * +- * @brief Invoke the named function in parallel. Wait for all +- * the functions to complete. ++ * @brief Invoke the named function in parallel. Wait for all the functions to complete. + * + * Special Notes: +- * Invoke the function named in parallel, excluding the CPU +- * that the control is being invoked on +- * +- * Macro built on the service routine ++ * Invoke the function named in parallel, excluding the CPU that the control is ++ * being invoked on ++ * Macro built on the service routine + * + */ +-#define CONTROL_Invoke_Parallel_XS(a, b) \ +- CONTROL_Invoke_Parallel_Service((a), (b), TRUE, TRUE) ++#define CONTROL_Invoke_Parallel_XS(a, b) CONTROL_Invoke_Parallel_Service((a), (b), TRUE, TRUE) ++ + + #endif +diff --git a/drivers/platform/x86/socwatchhv/inc/pw_types.h b/drivers/platform/x86/socwatchhv/inc/pw_types.h +index 8b56e5c265dc..b5047b3ed04b 100644 +--- a/drivers/platform/x86/socwatchhv/inc/pw_types.h ++++ b/drivers/platform/x86/socwatchhv/inc/pw_types.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. 
When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _PW_TYPES_H_ + #define _PW_TYPES_H_ +@@ -68,7 +67,7 @@ + /* + * UNSIGNED types... + */ +-typedef uint8_t u8; ++typedef uint8_t u8; + typedef uint16_t u16; + typedef uint32_t u32; + typedef uint64_t u64; +diff --git a/drivers/platform/x86/socwatchhv/inc/pw_version.h b/drivers/platform/x86/socwatchhv/inc/pw_version.h +index 7f1a40d82d71..714096691f6e 100644 +--- a/drivers/platform/x86/socwatchhv/inc/pw_version.h ++++ b/drivers/platform/x86/socwatchhv/inc/pw_version.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _PW_VERSION_H_ + #define _PW_VERSION_H_ 1 +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_defines.h b/drivers/platform/x86/socwatchhv/inc/sw_defines.h +index f0ef6baceb3f..a670904e4e39 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_defines.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_defines.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _PW_DEFINES_H_ + #define _PW_DEFINES_H_ 1 +@@ -63,17 +62,17 @@ + * Common to kernel and userspace. + * *************************************************** + */ +-#define PW_SUCCESS 0 +-#define PW_ERROR 1 +-#define PW_SUCCESS_NO_COLLECT 2 ++#define PW_SUCCESS 0 ++#define PW_ERROR 1 ++#define PW_SUCCESS_NO_COLLECT 2 + + /* + * Helper macro to convert 'u64' to 'unsigned long long' to avoid gcc warnings. + */ + #define TO_ULL(x) (unsigned long long)(x) + /* +- * Convert an arg to 'long long' +- */ ++* Convert an arg to 'long long' ++*/ + #define TO_LL(x) (long long)(x) + /* + * Convert an arg to 'unsigned long' +@@ -94,13 +93,10 @@ + /* + * Circularly decrement 'i'. 
+ */ +-#define CIRCULAR_DEC(i, m) \ +- ({ \ +- int __tmp1 = (i); \ +- if (--__tmp1 < 0) \ +- __tmp1 = (m); \ +- __tmp1; \ +- }) ++#define CIRCULAR_DEC(i, m) ({ \ ++ int __tmp1 = (i); \ ++ if (--__tmp1 < 0) \ ++ __tmp1 = (m); __tmp1; }) + /* + * Retrieve size of an array. + */ +@@ -114,7 +110,7 @@ + * Assumes version numbers are 8b unsigned ints. + */ + #define SW_GET_SCU_FW_VERSION_MAJOR(ver) (((ver) >> 8) & 0xff) +-#define SW_GET_SCU_FW_VERSION_MINOR(ver) ((ver)&0xff) ++#define SW_GET_SCU_FW_VERSION_MINOR(ver) ((ver) & 0xff) + /* + * Max size of process name retrieved from kernel. + */ +@@ -136,7 +132,7 @@ + #define SW_MAX_KERNEL_WAKELOCK_NAME_SIZE 100 + + /* Data value read when a telemetry data read fails. */ +-#define SW_TELEM_READ_FAIL_VALUE 0xF00DF00DF00DF00D ++#define SW_TELEM_READ_FAIL_VALUE 0xF00DF00DF00DF00DUL + + #ifdef SWW_MERGE + typedef enum { +@@ -152,5 +148,12 @@ typedef enum { + #define MAX_UNSIGNED_24_BIT_VALUE 0xFFFFFF + #define MAX_UNSIGNED_32_BIT_VALUE 0xFFFFFFFF + #define MAX_UNSIGNED_64_BIT_VALUE 0xFFFFFFFFFFFFFFFF ++/* ++ * TELEM BAR CONFIG ++ */ ++#define MAX_TELEM_BAR_CFG 3 ++#define TELEM_MCHBAR_CFG 0 ++#define TELEM_IPC1BAR_CFG 1 ++#define TELEM_SSRAMBAR_CFG 2 + + #endif /* _PW_DEFINES_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h b/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h +index 1f8e903a0e1c..43a1f69af9ab 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_ioctl.h +@@ -1,71 +1,71 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. 
+- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +-*/ + #ifndef __SW_IOCTL_H__ + #define __SW_IOCTL_H__ 1 + + #if defined(__linux__) || defined(__QNX__) +-#if __KERNEL__ +-#include +-#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +-#include +-#include +-#endif /* COMPAT && x64 */ +-#else /* !__KERNEL__ */ +-#include +-#endif /* __KERNEL__ */ ++ #if __KERNEL__ ++ #include ++ #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) ++ #include ++ #include ++ #endif /* COMPAT && x64 */ ++ #else /* !__KERNEL__ */ ++ #include ++ #endif /* __KERNEL__ */ + #endif /* __linux__ */ + /* + * Ensure we pull in definition of 'DO_COUNT_DROPPED_SAMPLES'! +@@ -107,6 +107,9 @@ enum sw_ioctl_cmd { + sw_ioctl_cmd_avail_notify, + sw_ioctl_cmd_avail_collect, + sw_ioctl_cmd_topology_changes, ++ sw_ioctl_cmd_config_continuous, ++ sw_ioctl_cmd_read_continuous, ++ sw_ioctl_cmd_telem_bar, + }; + /* + * The actual IOCTL commands. +@@ -120,150 +123,136 @@ enum sw_ioctl_cmd { + * (similar to the file "read" and "write" calls). + */ + #ifdef SWW_MERGE /* Windows */ +-/* +- * Device type -- in the "User Defined" range." +- */ +-#define POWER_I_CONF_TYPE 40000 ++ /* ++ * Device type -- in the "User Defined" range." 
++ */ ++ #define POWER_I_CONF_TYPE 40000 + +-/* List assigned tracepoint id */ +-#define CSIR_TRACEPOINT_ID_MASK 1 +-#define DEVICE_STATE_TRACEPOINT_ID_MASK 2 +-#define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 +-#define RESET_TRACEPOINT_ID_MASK 4 +-#define DISPLAY_ON_TRACEPOINT_ID_MASK 5 ++ /* List assigned tracepoint id */ ++ #define CSIR_TRACEPOINT_ID_MASK 1 ++ #define DEVICE_STATE_TRACEPOINT_ID_MASK 2 ++ #define CSIR_SEPARATE_TRACEPOINT_ID_MASK 3 ++ #define RESET_TRACEPOINT_ID_MASK 4 ++ #define DISPLAY_ON_TRACEPOINT_ID_MASK 5 + +-#ifdef SWW_MERGE +-/* +- * TELEM BAR CONFIG +- */ +-#define MAX_TELEM_BAR_CFG 3 +-#define TELEM_MCHBAR_CFG 0 +-#define TELEM_IPC1BAR_CFG 1 +-#define TELEM_SSRAMBAR_CFG 2 +-#endif +- +-/* +- * The IOCTL function codes from 0x800 to 0xFFF are for customer use. +- */ +-#define PW_IOCTL_CONFIG \ ++ /* ++ * The IOCTL function codes from 0x800 to 0xFFF are for customer use. ++ */ ++ #define PW_IOCTL_CONFIG \ + CTL_CODE(POWER_I_CONF_TYPE, 0x900, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_START_COLLECTION \ ++ #define PW_IOCTL_START_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x901, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_STOP_COLLECTION \ ++ #define PW_IOCTL_STOP_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x902, METHOD_BUFFERED, FILE_ANY_ACCESS) + +-/* TODO: pause, resume, cancel not supported yet */ +-#define PW_IOCTL_PAUSE_COLLECTION \ ++ /* TODO: pause, resume, cancel not supported yet */ ++ #define PW_IOCTL_PAUSE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_RESUME_COLLECTION \ ++ #define PW_IOCTL_RESUME_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x904, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_CANCEL_COLLECTION \ ++ #define PW_IOCTL_CANCEL_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x905, METHOD_BUFFERED, FILE_ANY_ACCESS) + +-#define PW_IOCTL_GET_PROCESSOR_GROUP_TOPOLOGY \ ++ #define PW_IOCTL_GET_PROCESSOR_GROUP_TOPOLOGY \ + CTL_CODE(POWER_I_CONF_TYPE, 
0x906, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_TOPOLOGY \ ++ #define PW_IOCTL_TOPOLOGY \ + CTL_CODE(POWER_I_CONF_TYPE, 0x907, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ #define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ + CTL_CODE(POWER_I_CONF_TYPE, 0x908, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_IMMEDIATE_IO \ ++ #define PW_IOCTL_IMMEDIATE_IO \ + CTL_CODE(POWER_I_CONF_TYPE, 0x909, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_DRV_CLEANUP \ ++ #define PW_IOCTL_DRV_CLEANUP \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90A, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_SET_COLLECTION_EVENT \ ++ #define PW_IOCTL_SET_COLLECTION_EVENT \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90B, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_TRY_STOP_EVENT \ ++ #define PW_IOCTL_TRY_STOP_EVENT \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90C, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_SET_PCH_ACTIVE_INTERVAL \ ++ #define PW_IOCTL_SET_PCH_ACTIVE_INTERVAL \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90D, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_SET_TELEM_BAR \ ++ #define PW_IOCTL_SET_TELEM_BAR \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90E, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_METADATA \ ++ #define PW_IOCTL_METADATA \ + CTL_CODE(POWER_I_CONF_TYPE, 0x90F, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_SET_GBE_INTERVAL \ ++ #define PW_IOCTL_SET_GBE_INTERVAL \ + CTL_CODE(POWER_I_CONF_TYPE, 0x910, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_ENABLE_COLLECTION \ ++ #define PW_IOCTL_ENABLE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x911, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_DISABLE_COLLECTION \ ++ #define PW_IOCTL_DISABLE_COLLECTION \ + CTL_CODE(POWER_I_CONF_TYPE, 0x912, METHOD_BUFFERED, FILE_ANY_ACCESS) +-#define PW_IOCTL_DRIVER_BUILD_DATE \ +- CTL_CODE(POWER_I_CONF_TYPE, 0x913, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ #define PW_IOCTL_DRIVER_BUILD_DATE \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x913, 
METHOD_BUFFERED, FILE_ANY_ACCESS) ++ #define PW_IOCTL_CONFIG_CONTINUOUS \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x914, METHOD_BUFFERED, FILE_ANY_ACCESS) ++ #define PW_IOCTL_READ_CONTINUOUS \ ++ CTL_CODE(POWER_I_CONF_TYPE, 0x915, METHOD_BUFFERED, FILE_ANY_ACCESS) + + #elif !defined(__APPLE__) +-#define PW_IOCTL_CONFIG \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ +- struct sw_driver_ioctl_arg *) +-#if DO_COUNT_DROPPED_SAMPLES +-#define PW_IOCTL_CMD \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ +- struct sw_driver_ioctl_arg *) +-#else +-#define PW_IOCTL_CMD \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ +- struct sw_driver_ioctl_arg *) +-#endif /* DO_COUNT_DROPPED_SAMPLES */ +-#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +-#define PW_IOCTL_IMMEDIATE_IO \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_SCU_FW_VERSION \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_READ_IMMEDIATE \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_DRIVER_VERSION \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ +- struct sw_driver_ioctl_arg *) +-#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ +- struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_CONFIG \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, struct sw_driver_ioctl_arg *) ++ #if DO_COUNT_DROPPED_SAMPLES ++ #define 
PW_IOCTL_CMD \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg *) ++ #else ++ #define PW_IOCTL_CMD \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg *) ++ #endif /* DO_COUNT_DROPPED_SAMPLES */ ++ #define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++ #define PW_IOCTL_IMMEDIATE_IO \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_SCU_FW_VERSION \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_READ_IMMEDIATE \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_DRIVER_VERSION \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_GET_TOPOLOGY_CHANGES \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_CONFIG_CONTINUOUS \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config_continuous, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_READ_CONTINUOUS \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_continuous, struct sw_driver_ioctl_arg *) ++ #define PW_IOCTL_SET_TELEM_BAR \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_telem_bar, struct sw_driver_ioctl_arg *) + #else /* __APPLE__ */ +-#define PW_IOCTL_CONFIG \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, \ +- struct sw_driver_ioctl_arg) +-#if DO_COUNT_DROPPED_SAMPLES +-#define PW_IOCTL_CMD \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, \ +- struct 
sw_driver_ioctl_arg) +-#else +-#define PW_IOCTL_CMD \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) +-#endif /* DO_COUNT_DROPPED_SAMPLES */ +-#define PW_IOCTL_POLL _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +-#define PW_IOCTL_IMMEDIATE_IO \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_SCU_FW_VERSION \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_READ_IMMEDIATE \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_DRIVER_VERSION \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, \ +- struct sw_driver_ioctl_arg) +-#define PW_IOCTL_GET_TOPOLOGY_CHANGES \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, \ +- struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_CONFIG \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, struct sw_driver_ioctl_arg) ++ #if DO_COUNT_DROPPED_SAMPLES ++ #define PW_IOCTL_CMD \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) ++ #else ++ #define PW_IOCTL_CMD \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, struct sw_driver_ioctl_arg) ++ #endif /* DO_COUNT_DROPPED_SAMPLES */ ++ #define PW_IOCTL_POLL \ ++ _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++ #define PW_IOCTL_IMMEDIATE_IO \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_SCU_FW_VERSION \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, struct 
sw_driver_ioctl_arg) ++ #define PW_IOCTL_READ_IMMEDIATE \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_DRIVER_VERSION \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_AVAILABLE_NOTIFIERS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_AVAILABLE_COLLECTORS \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_GET_TOPOLOGY_CHANGES \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_CONFIG_CONTINUOUS \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config_continuous, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_READ_CONTINUOUS \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_continuous, struct sw_driver_ioctl_arg) ++ #define PW_IOCTL_SET_TELEM_BAR \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_telem_bar, struct sw_driver_ioctl_arg *) + #endif /* __APPLE__ */ + + /* +@@ -273,31 +262,38 @@ enum sw_ioctl_cmd { + * and ONLY by the driver. 
+ */ + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +-#define PW_IOCTL_CONFIG32 \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, compat_uptr_t) ++ #define PW_IOCTL_CONFIG32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config, compat_uptr_t) + #if DO_COUNT_DROPPED_SAMPLES +-#define PW_IOCTL_CMD32 \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) ++ #define PW_IOCTL_CMD32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) + #else +-#define PW_IOCTL_CMD32 \ +- _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) ++ #define PW_IOCTL_CMD32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_cmd, compat_uptr_t) + #endif /* DO_COUNT_DROPPED_SAMPLES */ +-#define PW_IOCTL_POLL32 _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) +-#define PW_IOCTL_IMMEDIATE_IO32 \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) +-#define PW_IOCTL_GET_SCU_FW_VERSION32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, compat_uptr_t) +-#define PW_IOCTL_READ_IMMEDIATE32 \ +- _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, compat_uptr_t) +-#define PW_IOCTL_GET_DRIVER_VERSION32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, compat_uptr_t) +-#define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, compat_uptr_t) +-#define PW_IOCTL_GET_AVAILABLE_NOTIFIERS32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, compat_uptr_t) +-#define PW_IOCTL_GET_AVAILABLE_COLLECTORS32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) +-#define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ +- _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) ++ #define PW_IOCTL_POLL32 \ ++ _IO(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_poll) ++ #define PW_IOCTL_IMMEDIATE_IO32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_immediate_io, compat_uptr_t) ++ #define PW_IOCTL_GET_SCU_FW_VERSION32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_scu_version, compat_uptr_t) 
++ #define PW_IOCTL_READ_IMMEDIATE32 \ ++ _IOWR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_immediate, compat_uptr_t) ++ #define PW_IOCTL_GET_DRIVER_VERSION32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_driver_version, compat_uptr_t) ++ #define PW_IOCTL_GET_AVAILABLE_TRACEPOINTS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_trace, compat_uptr_t) ++ #define PW_IOCTL_GET_AVAILABLE_NOTIFIERS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_notify, compat_uptr_t) ++ #define PW_IOCTL_GET_AVAILABLE_COLLECTORS32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_avail_collect, compat_uptr_t) ++ #define PW_IOCTL_GET_TOPOLOGY_CHANGES32 \ ++ _IOR(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_topology_changes, compat_uptr_t) ++ #define PW_IOCTL_CONFIG_CONTINUOUS32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_config_continuous, compat_uptr_t) ++ #define PW_IOCTL_READ_CONTINUOUS32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_read_continuous, compat_uptr_t) ++ #define PW_IOCTL_SET_TELEM_BAR32 \ ++ _IOW(APWR_IOCTL_MAGIC_NUM, sw_ioctl_cmd_telem_bar, compat_uptr_t) + #endif /* defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) */ + #endif /* __SW_IOCTL_H__ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_structs.h b/drivers/platform/x86/socwatchhv/inc/sw_structs.h +index baac8520e7fd..738edd35de24 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_structs.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_structs.h +@@ -1,58 +1,58 @@ +-/* ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. 
+- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ + #ifndef __SW_STRUCTS_H__ + #define __SW_STRUCTS_H__ 1 + +@@ -82,7 +82,7 @@ typedef struct sw_string_type { + char data[1]; + } sw_string_type_t; + #pragma pack(pop) +-#define SW_STRING_TYPE_HEADER_SIZE() \ ++#define SW_STRING_TYPE_HEADER_SIZE() \ + (sizeof(struct sw_string_type) - sizeof(char[1])) + + #pragma pack(push, 1) +@@ -91,19 +91,22 @@ struct sw_key_value_payload { + char data[1]; + }; + #pragma pack(pop) +-#define SW_KEY_VALUE_PAYLOAD_HEADER_SIZE() \ ++#define SW_KEY_VALUE_PAYLOAD_HEADER_SIZE() \ + (sizeof(struct sw_key_value_payload) - sizeof(char[1])) + + typedef enum sw_kernel_wakelock_type { +- SW_WAKE_LOCK = 0, /* A kernel wakelock was acquired */ +- SW_WAKE_UNLOCK = 1, /* A kernel wakelock was released */ +- SW_WAKE_LOCK_TIMEOUT = +- 2, /* A kernel wakelock was acquired with a timeout */ +- SW_WAKE_LOCK_INITIAL = 3, /* A kernel wakelock was acquired +- * before the collection started +- */ +- SW_WAKE_UNLOCK_ALL = 4, /* All previously held kernel wakelocks were */ +- /* 
released -- used in ACPI S3 notifications */ ++ /* A kernel wakelock was acquired */ ++ SW_WAKE_LOCK = 0, ++ /* A kernel wakelock was released */ ++ SW_WAKE_UNLOCK = 1, ++ /* A kernel wakelock was acquired with a timeout */ ++ SW_WAKE_LOCK_TIMEOUT = 2, ++ /* A kernel wakelock was acquired before the collection started*/ ++ SW_WAKE_LOCK_INITIAL = 3, ++ /* All previously held kernel wakelocks were ++ * released -- used in ACPI S3 notifications ++ */ ++ SW_WAKE_UNLOCK_ALL = 4, + } sw_kernel_wakelock_type_t; + + typedef enum sw_when_type { +@@ -116,18 +119,23 @@ typedef enum sw_when_type { + } sw_when_type_t; + + /** +- * trigger_bits is defined to use type pw_u8_t that makes only up +- * to 8 types possible ++ * trigger_bits is defined to use type pw_u8_t that makes only ++ * upto 8 types possible + */ +-#define SW_TRIGGER_BEGIN_MASK() (1U << SW_WHEN_TYPE_BEGIN) +-#define SW_TRIGGER_END_MASK() (1U << SW_WHEN_TYPE_END) +-#define SW_TRIGGER_POLL_MASK() (1U << SW_WHEN_TYPE_POLL) +-#define SW_TRIGGER_TRACEPOINT_MASK() (1U << SW_WHEN_TYPE_TRACEPOINT) +-#define SW_TRIGGER_NOTIFIER_MASK() (1U << SW_WHEN_TYPE_NOTIFIER) +-#define SW_GET_TRIGGER_MASK_VALUE(m) (1U << (m)) +-#define SW_TRIGGER_MASK_ALL() (0xFF) ++#define SW_TRIGGER_BEGIN_MASK() (1U << SW_WHEN_TYPE_BEGIN) ++#define SW_TRIGGER_END_MASK() (1U << SW_WHEN_TYPE_END) ++#define SW_TRIGGER_POLL_MASK() (1U << SW_WHEN_TYPE_POLL) ++#define SW_TRIGGER_TRACEPOINT_MASK() (1U << SW_WHEN_TYPE_TRACEPOINT) ++#define SW_TRIGGER_NOTIFIER_MASK() (1U << SW_WHEN_TYPE_NOTIFIER) ++#define SW_GET_TRIGGER_MASK_VALUE(m) (1U << (m)) ++#define SW_TRIGGER_MASK_ALL() (0xFF) ++ ++enum sw_io_cmd { ++ SW_IO_CMD_READ = 0, ++ SW_IO_CMD_WRITE, ++ SW_IO_CMD_MAX ++}; + +-enum sw_io_cmd { SW_IO_CMD_READ = 0, SW_IO_CMD_WRITE, SW_IO_CMD_MAX }; + + #pragma pack(push, 1) + struct sw_driver_msr_io_descriptor { +@@ -141,10 +149,8 @@ struct sw_driver_ipc_mmio_io_descriptor { + union { + #ifdef SWW_MERGE + #pragma warning(push) +-#pragma warning( \ +- disable : 
4201) /* disable C4201: nonstandard extension used: +- * nameless struct/union +- */ ++/* disable C4201: nonstandard extension used: nameless struct/union */ ++#pragma warning(disable:4201) + #endif + struct { + pw_u16_t command; +@@ -154,13 +160,14 @@ struct sw_driver_ipc_mmio_io_descriptor { + #pragma warning(pop) /* enable C4201 */ + #endif + union { +- pw_u32_t ipc_command; /* (sub_command << 12) +- * | (command) +- */ ++ /* (sub_command << 12) | (command) */ ++ pw_u32_t ipc_command; + pw_u8_t is_gbe; /* Used only for GBE MMIO */ + }; + }; +- /* TODO: add a section for 'ctrl_address' and 'ctrl_remapped_address' */ ++ /* TODO: add a section for 'ctrl_address' and ++ * 'ctrl_remapped_address' ++ */ + union { + pw_u64_t data_address; /* Will be "io_remapped" */ + pw_u64_t data_remapped_address; +@@ -194,10 +201,12 @@ struct sw_driver_configdb_io_descriptor { + + #pragma pack(push, 1) + struct sw_driver_trace_args_io_descriptor { +- pw_u8_t num_args; /* Number of valid entries in the 'args' array, +- * below; 1 <= num_args <= 7 +- */ +- pw_u8_t args[7]; /* Max of 7 args can be recorded */ ++ /* Number of valid entries in the 'args' array, below; ++ * 1 <= num_args <= 7 ++ */ ++ pw_u8_t num_args; ++ /* Max of 7 args can be recorded */ ++ pw_u8_t args[7]; + }; + #pragma pack(pop) + +@@ -205,14 +214,14 @@ struct sw_driver_trace_args_io_descriptor { + /** + * struct - sw_driver_telem_io_descriptor - Telemetry Metric descriptor + * +- * @id: (Client & Driver) Telemetry ID of the counter to read. ++ * @id: (Client & Driver) Telemetry ID of the counter to read. + * @idx: (Driver only) index into telem array to read, or the row +- * of the telem_indirect table to lookup the telem array index. ++ * of the telem_indirect table to lookup the telem array index. + * @unit: Unit from which to collect: 0 = PMC, 1 = PUNIT +- * Values come from the telemetry_unit enum. ++ * Values come from the telemetry_unit enum. 
+ * @scale_op: When there are multiple instances of a telem value (e.g. +- * module C-states) the operation to use when scaling the CPU ID +- * and adding it to the telemetry data ID. ++ * module C-states) the operation to use when scaling the CPU ID ++ * and adding it to the telemetry data ID. + * @scale_val: Amount to scale an ID (when scaling one.) + * + * Like all hardware mechanism descriptors, the client uses this to pass +@@ -223,13 +232,13 @@ struct sw_driver_trace_args_io_descriptor { + * the equation: ID = ID_value + (cpuid ) + * where is one of +, *, /, or %, and scaling_val is an integer + * value. This gives you: +- * Operation scale_op scale_val +- * Single instance of an ID * 0 +- * Sequentially increasing +- * CPU-specific values * 1 +- * Per module cpu-specific +- * values (2 cores/module) / 2 +- * Round Robin assignment % cpu_count ++ * Operation scale_op scale_val ++ * Single instance of an ID * 0 ++ * sequentially increasing ++ * CPU-specific values * 1 ++ * Per module cpu-specific ++ * values (2 cores/module) / 2 ++ * Round Robin assignment % cpu_count + * + * Note that scaling_value of 0 implies that no scaling should be + * applied. While (*, 1) is equivalent to (+, 0), the scaling value of 0 +@@ -241,21 +250,21 @@ struct sw_driver_trace_args_io_descriptor { + struct sw_driver_telem_io_descriptor { + union { + pw_u16_t id; +- pw_u8_t idx; ++ pw_u8_t idx; + }; +- pw_u8_t unit; +- pw_u8_t scale_op; +- pw_u16_t scale_val; ++ pw_u8_t unit; ++ pw_u8_t scale_op; ++ pw_u16_t scale_val; + }; + #pragma pack(pop) + enum telemetry_unit { TELEM_PUNIT = 0, TELEM_PMC, TELEM_UNIT_NONE }; +-#define TELEM_MAX_ID 0xFFFF /* Maximum value of a Telemtry event ID. */ +-#define TELEM_MAX_SCALE 0xFFFF /* Maximum ID scaling value. 
*/ +-#define TELEM_OP_ADD '+' /* Addition operator */ +-#define TELEM_OP_MULT '*' /* Multiplication operator */ +-#define TELEM_OP_DIV '/' /* Division operator */ +-#define TELEM_OP_MOD '%' /* Modulus operator */ +-#define TELEM_OP_NONE 'X' /* No operator--Not a scaled ID */ ++#define TELEM_MAX_ID 0xFFFF /* Maximum value of a Telemtry event ID. */ ++#define TELEM_MAX_SCALE 0xFFFF /* Maximum ID scaling value. */ ++#define TELEM_OP_ADD '+' /* Addition operator */ ++#define TELEM_OP_MULT '*' /* Multiplication operator */ ++#define TELEM_OP_DIV '/' /* Division operator */ ++#define TELEM_OP_MOD '%' /* Modulus operator */ ++#define TELEM_OP_NONE 'X' /* No operator--Not a scaled ID */ + + #pragma pack(push, 1) + struct sw_driver_mailbox_io_descriptor { +@@ -291,8 +300,8 @@ struct sw_driver_pch_mailbox_io_descriptor { + }; + union { + /* +- * Will be "io_remapped" +- */ ++ * Will be "io_remapped" ++ */ + pw_u64_t msg_full_sts_address; + pw_u64_t msg_full_sts_remapped_address; + }; +@@ -312,20 +321,17 @@ typedef struct sw_driver_io_descriptor { + pw_u16_t collection_type; + /* TODO: specify READ/WRITE */ + pw_s16_t collection_command; /* One of 'enum sw_io_cmd' */ +- pw_u16_t counter_size_in_bytes; /* The number of bytes to +- * READ or WRITE +- */ ++ pw_u16_t counter_size_in_bytes; /* The number of bytes to READ or WRITE */ + union { +- struct sw_driver_msr_io_descriptor msr_descriptor; +- struct sw_driver_ipc_mmio_io_descriptor ipc_descriptor; +- struct sw_driver_ipc_mmio_io_descriptor mmio_descriptor; +- struct sw_driver_pci_io_descriptor pci_descriptor; +- struct sw_driver_configdb_io_descriptor configdb_descriptor; +- struct sw_driver_trace_args_io_descriptor trace_args_descriptor; +- struct sw_driver_telem_io_descriptor telem_descriptor; +- struct sw_driver_pch_mailbox_io_descriptor +- pch_mailbox_descriptor; +- struct sw_driver_mailbox_io_descriptor mailbox_descriptor; ++ struct sw_driver_msr_io_descriptor msr_descriptor; ++ struct sw_driver_ipc_mmio_io_descriptor 
ipc_descriptor; ++ struct sw_driver_ipc_mmio_io_descriptor mmio_descriptor; ++ struct sw_driver_pci_io_descriptor pci_descriptor; ++ struct sw_driver_configdb_io_descriptor configdb_descriptor; ++ struct sw_driver_trace_args_io_descriptor trace_args_descriptor; ++ struct sw_driver_telem_io_descriptor telem_descriptor; ++ struct sw_driver_pch_mailbox_io_descriptor pch_mailbox_descriptor; ++ struct sw_driver_mailbox_io_descriptor mailbox_descriptor; + }; + pw_u64_t write_value; /* The value to WRITE */ + } sw_driver_io_descriptor_t; +@@ -343,49 +349,38 @@ typedef struct sw_driver_io_descriptor { + struct sw_driver_interface_info { + pw_u64_t tracepoint_id_mask; + pw_u64_t notifier_id_mask; +- pw_s16_t cpu_mask; /* On which CPU(s) should the driver +- * read the data? +- * Currently: -2 ==> read on ALL CPUs, +- * -1 ==> read on ANY CPU, +- * >= 0 ==> the specific CPU to read on +- */ ++ pw_s16_t cpu_mask; /* On which CPU(s) should the driver read the data? */ ++ /* Currently: -2 ==> read on ALL CPUs, */ ++ /* -1 ==> read on ANY CPU, */ ++ /* >= 0 ==> the specific CPU to read on */ + pw_s16_t plugin_id; /* Metric Plugin SID */ +- pw_s16_t metric_id; /* Domain-specific ID assigned by each +- * Metric Plugin +- */ ++ pw_s16_t metric_id; /* Domain-specific ID assigned by each Metric Plugin */ + pw_s16_t msg_id; /* Msg ID retrieved from the SoC Watch config file */ +- pw_u16_t num_io_descriptors; /* Number of descriptors in the array, +- * below. +- */ +- pw_u8_t trigger_bits; /* Mask of 'when bits' to fire this collector. */ ++ pw_u16_t num_io_descriptors; /* Number of descriptors in the array, below. */ ++ pw_u8_t trigger_bits; /* Mask of 'when bits' to fire this collector. */ + pw_u16_t sampling_interval_msec; /* Sampling interval, in msecs */ +- pw_u8_t descriptors[1]; /* Array of sw_driver_io_descriptor structs. */ ++ pw_u8_t descriptors[1]; /* Array of sw_driver_io_descriptor structs. 
*/ + }; + #pragma pack(pop) + +-#define SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ ++#define SW_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ + (sizeof(struct sw_driver_interface_info) - sizeof(pw_u8_t[1])) + + #pragma pack(push, 1) + struct sw_driver_interface_msg { +- pw_u16_t num_infos; /* Number of 'sw_driver_interface_info' +- * structs contained within the 'infos' variable, +- * below +- */ +- pw_u16_t min_polling_interval_msecs; /* Min time to wait before +- * polling; used exclusively +- * with the low overhead, +- * context-switch based +- * polling mode +- */ +- /* pw_u16_t infos_size_bytes; +- * Size of data inlined within the +- * 'infos' variable, below +- */ ++ /* Number of 'sw_driver_interface_info' structs contained within ++ * the 'infos' variable, below ++ */ ++ pw_u16_t num_infos; ++ /* Min time to wait before polling; used exclusively ++ * with the low overhead, context-switch based ++ * polling mode ++ */ ++ pw_u16_t min_polling_interval_msecs; + pw_u8_t infos[1]; + }; + #pragma pack(pop) +-#define SW_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ ++#define SW_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ + (sizeof(struct sw_driver_interface_msg) - sizeof(pw_u8_t[1])) + + typedef enum sw_name_id_type { +@@ -402,7 +397,7 @@ struct sw_name_id_pair { + struct sw_string_type name; + }; + #pragma pack(pop) +-#define SW_NAME_ID_HEADER_SIZE() \ ++#define SW_NAME_ID_HEADER_SIZE() \ + (sizeof(struct sw_name_id_pair) - sizeof(struct sw_string_type)) + + #pragma pack(push, 1) +@@ -423,20 +418,23 @@ struct sw_name_info_msg { + typedef struct sw_driver_msg { + pw_u64_t tsc; + pw_u16_t cpuidx; +- pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ +- pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ +- pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ ++ /* Cannot have more than 256 plugins */ ++ pw_u8_t plugin_id; ++ /* Each plugin cannot handle more than 256 metrics */ ++ pw_u8_t metric_id; ++ /* Each metric cannot have more than 256 
components */ ++ pw_u8_t msg_id; + pw_u16_t payload_len; +- /* pw_u64_t p_payload; Ptr to payload */ ++ /* pw_u64_t p_payload; // Ptr to payload */ + union { +- pw_u64_t __dummy; /* Ensure size of struct is +- * consistent on x86, x64 +- */ +- char *p_payload; /* Ptr to payload (collected data values). */ ++ /* Ensure size of struct is consistent on x86, x64 */ ++ pw_u64_t __dummy; ++ /* Ptr to payload (collected data values). */ ++ char *p_payload; + }; + } sw_driver_msg_t; + #pragma pack(pop) +-#define SW_DRIVER_MSG_HEADER_SIZE() \ ++#define SW_DRIVER_MSG_HEADER_SIZE() \ + (sizeof(struct sw_driver_msg) - sizeof(pw_u64_t)) + + typedef enum sw_driver_collection_cmd { +@@ -500,6 +498,21 @@ enum sw_pm_mode { + + #define SW_PM_VALUE(mode, action) ((mode) << 16 | (action)) + ++#pragma pack(push, 1) ++/* ++ * Structure for continuous collection ++ */ ++struct sw_driver_continuous_collect { ++ /* Size of data that needs to be collected every second */ ++ pw_u32_t collection_size; ++ /* struct sw_driver_interface_msg for this collection */ ++ pw_u8_t payload[1]; ++}; ++#define SW_DRIVER_CONTINUOUS_COLLECT_HEADER_SIZE() \ ++ (sizeof(struct sw_driver_continuous_collect) - \ ++ sizeof(pw_u8_t[1])) ++#pragma pack(pop) ++ + /* + * Wrapper for ioctl arguments. + * EVERY ioctl MUST use this struct! 
+@@ -508,8 +521,8 @@ enum sw_pm_mode { + struct sw_driver_ioctl_arg { + pw_s32_t in_len; + pw_s32_t out_len; +- /* pw_u64_t p_in_arg; Pointer to input arg */ +- /* pw_u64_t p_out_arg; Pointer to output arg */ ++ /* pw_u64_t p_in_arg; // Pointer to input arg */ ++ /* pw_u64_t p_out_arg; // Pointer to output arg */ + char *in_arg; + char *out_arg; + }; +@@ -517,10 +530,14 @@ struct sw_driver_ioctl_arg { + + #pragma pack(push, 1) + typedef struct sw_driver_msg_interval { +- pw_u8_t plugin_id; /* Cannot have more than 256 plugins */ +- pw_u8_t metric_id; /* Each plugin cannot handle more than 256 metrics */ +- pw_u8_t msg_id; /* Each metric cannot have more than 256 components */ +- pw_u16_t interval; /* collection interval */ ++ /* Cannot have more than 256 plugins */ ++ pw_u8_t plugin_id; ++ /* Each plugin cannot handle more than 256 metrics */ ++ pw_u8_t metric_id; ++ /* Each metric cannot have more than 256 components */ ++ pw_u8_t msg_id; ++ /* collection interval */ ++ pw_u16_t interval; + } sw_driver_msg_interval_t; + #pragma pack(pop) + +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_types.h b/drivers/platform/x86/socwatchhv/inc/sw_types.h +index 156c92c8349a..e9af829c31c8 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_types.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_types.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. 
+- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _PW_TYPES_H_ + #define _PW_TYPES_H_ +@@ -68,7 +67,7 @@ + /* + * UNSIGNED types... + */ +-typedef uint8_t u8; ++typedef uint8_t u8; + typedef uint16_t u16; + typedef uint32_t u32; + typedef uint64_t u64; +@@ -87,13 +86,13 @@ typedef int64_t s64; + #include + #include /* Grab 'uint64_t' etc. */ + +-typedef uint8_t u8; ++typedef uint8_t u8; + typedef uint16_t u16; + typedef uint32_t u32; + typedef uint64_t u64; + /* +- * SIGNED types... +- */ ++* SIGNED types... ++*/ + typedef int8_t s8; + typedef int16_t s16; + typedef int32_t s32; +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_version.h b/drivers/platform/x86/socwatchhv/inc/sw_version.h +index 5476b0d79ac5..b6fe1eecdd0e 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_version.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_version.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. 
++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef __SW_VERSION_H__ + #define __SW_VERSION_H__ 1 +@@ -61,14 +60,14 @@ + * SOCWatch driver version + */ + #define SW_DRIVER_VERSION_MAJOR 2 +-#define SW_DRIVER_VERSION_MINOR 6 +-#define SW_DRIVER_VERSION_OTHER 2 ++#define SW_DRIVER_VERSION_MINOR 10 ++#define SW_DRIVER_VERSION_OTHER 0 + + /* + * Every SOC Watch userspace component shares the same version number. + */ + #define SOCWATCH_VERSION_MAJOR 2 +-#define SOCWATCH_VERSION_MINOR 8 +-#define SOCWATCH_VERSION_OTHER 0 ++#define SOCWATCH_VERSION_MINOR 10 ++#define SOCWATCH_VERSION_OTHER 1 + + #endif /* __SW_VERSION_H__ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h b/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h +index 2bcc97a84bbc..cfacb3bba7c8 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_acrn.h +@@ -1,3 +1,58 @@ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ + #ifndef _SWHV_ACRN_H_ + #define _SWHV_ACRN_H_ 1 + +@@ -18,30 +73,27 @@ + #include "swhv_defines.h" + #include "pw_version.h" + +-#define SW_DEFINE_LIST_HEAD(name, dummy) struct list_head name +-#define SW_DECLARE_LIST_HEAD(name, dummy) extern struct list_head name +-#define SW_LIST_ENTRY(name, dummy) struct list_head name +-#define SW_LIST_HEAD_VAR(dummy) struct list_head +-#define SW_LIST_HEAD_INIT(head) INIT_LIST_HEAD(head) +-#define SW_LIST_ENTRY_INIT(node, field) INIT_LIST_HEAD(&node->field) +-#define SW_LIST_ADD(head, node, field) list_add_tail(&node->field, head) +-#define SW_LIST_GET_HEAD_ENTRY(head, type, field) \ +- list_first_entry(head, struct type, field) +-#define SW_LIST_UNLINK(node, field) list_del(&node->field) +-#define SW_LIST_FOR_EACH_ENTRY(node, head, field) \ +- list_for_each_entry(node, head, field) +-#define SW_LIST_EMPTY(head) list_empty(head) +-#define SW_LIST_HEAD_INITIALIZER(head) LIST_HEAD_INIT(head) ++#define SW_DEFINE_LIST_HEAD(name, dummy) struct list_head name ++#define SW_DECLARE_LIST_HEAD(name, dummy) extern struct list_head name ++#define SW_LIST_ENTRY(name, dummy) struct list_head name ++#define SW_LIST_HEAD_VAR(dummy) struct list_head ++#define SW_LIST_HEAD_INIT(head) INIT_LIST_HEAD(head) ++#define SW_LIST_ENTRY_INIT(node, field) INIT_LIST_HEAD(&node->field) ++#define SW_LIST_ADD(head, node, field) list_add_tail(&node->field, head) ++#define SW_LIST_GET_HEAD_ENTRY(head, type, 
field) list_first_entry(head, struct type, field) ++#define SW_LIST_UNLINK(node, field) list_del(&node->field) ++#define SW_LIST_FOR_EACH_ENTRY(node, head, field) list_for_each_entry(node, head, field) ++#define SW_LIST_EMPTY(head) list_empty(head) ++#define SW_LIST_HEAD_INITIALIZER(head) LIST_HEAD_INIT(head) + + int device_open_i(struct inode *inode, struct file *file); + + ssize_t device_read_i(struct file *file, /* see include/linux/fs.h */ +- char __user *buffer, /* buffer to be filled with data */ +- size_t length, /* length of the buffer */ +- loff_t *offset); ++ char __user *buffer, /* buffer to be filled with data */ ++ size_t length, /* length of the buffer */ ++ loff_t *offset); + +-long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, +- int local_len); ++long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, int local_len); + long swhv_start(void); + long swhv_stop(void); + long swhv_get_cpu_count(u32 __user *remote_args); +@@ -55,63 +107,60 @@ long swhv_msr_read(u32 __user *remote_in_args, u64 __user *remote_args); + long swhv_collection_poll(void); + + enum MSR_CMD_TYPE { +- MSR_OP_NONE = 0, +- MSR_OP_READ, +- MSR_OP_WRITE, +- MSR_OP_READ_CLEAR ++ MSR_OP_NONE = 0, ++ MSR_OP_READ, ++ MSR_OP_WRITE, ++ MSR_OP_READ_CLEAR + }; + +-enum MSR_CMD_STATUS { MSR_OP_READY = 0, MSR_OP_REQUESTED, MSR_OP_HANDLED }; ++enum MSR_CMD_STATUS { ++ MSR_OP_READY = 0, ++ MSR_OP_REQUESTED, ++ MSR_OP_HANDLED ++}; + + struct profiling_msr_op { +- /* value to write or location to write into */ +- uint64_t value; +- /* MSR address to read/write; last entry will have value of -1 */ +- uint32_t msr_id; +- /* parameter; usage depends on operation */ +- uint16_t param; +- uint8_t msr_op_type; +- uint8_t reg_type; ++ /* value to write or location to write into */ ++ uint64_t value; ++ /* MSR address to read/write; last entry will have value of -1 */ ++ uint32_t msr_id; ++ /* parameter; usage depends on operation */ ++ uint16_t param; ++ uint8_t 
msr_op_type; ++ uint8_t reg_type; + }; + + #define MAX_MSR_LIST_NUM 15 + struct profiling_msr_ops_list { +- int32_t collector_id; +- uint32_t num_entries; +- int32_t msr_op_state; /* enum value from 'MSR_CMD_STATUS' */ +- struct profiling_msr_op entries[MAX_MSR_LIST_NUM]; ++ int32_t collector_id; ++ uint32_t num_entries; ++ int32_t msr_op_state; /* enum value from 'MSR_CMD_STATUS' */ ++ struct profiling_msr_op entries[MAX_MSR_LIST_NUM]; + }; + + #define COLLECTOR_SOCWATCH 1 + + struct profiling_control { +- int32_t collector_id; +- int32_t reserved; +- uint64_t switches; ++ int32_t collector_id; ++ int32_t reserved; ++ uint64_t switches; + }; + + /** +- * struct - sw_collector_data +- * Information about the collector to be invoked at collection time. +- * +- * The collector_lists array holds linked lists of collectors to +- * be exercised at specific points in time during the collection +- * (e.g. begin, poll, end, etc.). At a trigger time, the driver walks +- * that time's list of nodes, and exercises the collectors on that list. ++ * struct - swhv_acrn_msr_collector_data ++ * Information about the MSR collector to be invoked at collection time. + * + * @list: List/link implementation +- * @cpumask: Collect if cpu matches mask +- * @info: Ptr to metric info +- * @ops: Ptr to collector's operations +- * @last_update_jiffies: Indicates when this node was last exercised. 
++ * @cpu_mask: Collect if cpu matches mask ++ * @sample_id: ID of the metric requesting these operations ++ * @msr_ops_list: Ptr to list of MSR read/write operations + * @per_msg_payload_size: Data size +- * @msg: Ptr to collected data + */ + typedef struct swhv_acrn_msr_collector_data { +- SW_LIST_ENTRY(list, swhv_acrn_msr_collector_data); +- pw_s16_t cpu_mask; +- pw_s16_t sample_id; +- struct profiling_msr_ops_list *msr_ops_list; +- size_t per_msg_payload_size; ++ SW_LIST_ENTRY(list, swhv_acrn_msr_collector_data); ++ pw_s16_t cpu_mask; ++ pw_s16_t sample_id; ++ struct profiling_msr_ops_list *msr_ops_list; ++ size_t per_msg_payload_size; + } swhv_acrn_msr_collector_data_t; + #endif /* _SWHV_ACRN_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h b/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h +index 5f62c2d43e11..4bbaafb72923 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_acrn_sbuf.h +@@ -1,3 +1,58 @@ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ + #ifndef _SWHV_ACRN_SBUF_H_ + #define _SWHV_ACRN_SBUF_H_ 1 + +@@ -8,11 +63,11 @@ + */ + static inline bool sbuf_is_empty(struct shared_buf *sbuf) + { +- return (sbuf->head == sbuf->tail); ++ return (sbuf->head == sbuf->tail); + } + +-static inline uint32_t sbuf_next_ptr(uint32_t pos, uint32_t span, +- uint32_t scope) ++static inline uint32_t sbuf_next_ptr(uint32_t pos, ++ uint32_t span, uint32_t scope) + { + pos += span; + pos = (pos >= scope) ? (pos - scope) : pos; +@@ -28,9 +83,9 @@ inline uint32_t sbuf_available_space(struct shared_buf *sbuf) + uint32_t remaining_space; + /* + * if tail isn't wrapped around +- * subtract difference of tail and head from size ++ * subtract difference of tail and head from size + * otherwise +- * difference between head and tail ++ * difference between head and tail + */ + if (sbuf->tail >= sbuf->head) + remaining_space = sbuf->size - (sbuf->tail - sbuf->head); +@@ -53,8 +108,8 @@ int sbuf_get_variable(struct shared_buf *sbuf, void **data, uint32_t size) + * 2. check if enough ('size' bytes) data to be read is present. + * 3. Continue if buffer has enough data + * 4. Copy data from buffer +- * 4a. copy data in 2 parts if there is a wrap-around +- * 4b. Otherwise do a simple copy ++ * 4a. copy data in 2 parts if there is a wrap-around ++ * 4b. Otherwise do a simple copy + */ + const void *from; + uint32_t current_data_size, offset = 0, next_head; +@@ -113,34 +168,33 @@ int sbuf_get_wrapper(struct shared_buf *sbuf, uint8_t **data) + uint32_t payload_size, sample_size, _size; + + /* +- * Assumption: A partial variable sample will not be written +- * to the buffer. ++ * Assumption: A partial variable sample will not be written to ++ * the buffer. 
+ * do while buf isn't empty + * Read header from the buffer +- * write to data +- * get size of payload +- * check if the size of 'data' is enough for the +- * variable sample to be read to ++ * write to data ++ * get size of payload ++ * check if the size of 'data' is enough for the variable ++ * sample to be read to + * Read the payload +- * Keep reading ele_size chunks till available and write to data +- * if the last chunk is less than ele_size, do a partial copy to +- * data ++ * Keep reading ele_size chunks till available and write to data ++ * if the last chunk is less than ele_size, do a partial ++ * copy to data + * + * + */ + if ((sbuf == NULL) || (data == NULL)) + return -EINVAL; + +- if (sbuf_is_empty(sbuf)) { +- /* no data available */ ++ if (sbuf_is_empty(sbuf)) /* no data available */ + return 0; +- } ++ + + sample_offset = 0; + + header = vmalloc(sizeof(ACRN_MSG_HEADER_SIZE)); + memset(header, 0, sizeof(ACRN_MSG_HEADER_SIZE)); +- /*read header */ ++ /* read header */ + sbuf_get(sbuf, (uint8_t *)header); + + payload_size = header->payload_size; +@@ -149,7 +203,7 @@ int sbuf_get_wrapper(struct shared_buf *sbuf, uint8_t **data) + + sample = vmalloc(sample_size); + +- /*copy header */ ++ /* copy header */ + memcpy((void *)sample, (void *)header, ACRN_MSG_HEADER_SIZE); + + sample_offset += ACRN_MSG_HEADER_SIZE; +@@ -165,16 +219,17 @@ int sbuf_get_wrapper(struct shared_buf *sbuf, uint8_t **data) + "error: payload has to be multiple of 32\n"); + return 0; + /* +- * This code can be enabled when support for variable +- * sized samples needs to be added. ++ * This code can be enabled when support for ++ * variable sized samples needs to be added. 
+ */ +-#if 0 ++/* #if 0 + chunk = malloc(sbuf->ele_size); + sbuf_get(sbuf, chunk); +- memcpys((void *)(sample + sample_offset), _size, chunk); ++ memcpys((void *)(sample + sample_offset), ++ _size, chunk); + _size -= _size; + free(chunk); +-#endif ++#endif */ + } + } + +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_defines.h b/drivers/platform/x86/socwatchhv/inc/swhv_defines.h +index 2f51a5d760f6..b70cef1616a9 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_defines.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_defines.h +@@ -1,70 +1,68 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. 
+- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _SWHV_DEFINES_H_ +-#define _SWHV_DEFINES_H_ ++#define _SWHV_DEFINES_H_ + + /* *************************************************** + * Common to kernel and userspace. 
+ * *************************************************** + */ +-#define PW_SUCCESS 0 +-#define PW_ERROR 1 +-#define PW_SUCCESS_NO_COLLECT 2 ++#define PW_SUCCESS 0 ++#define PW_ERROR 1 ++#define PW_SUCCESS_NO_COLLECT 2 + + /* + * Start off with none of the OS'es are defined +@@ -81,8 +79,8 @@ + + /* + * Make sure one (and only one) of the OS'es gets defined here +- * +- * Unfortunately entirex defines _WIN32 so we need to check for linux ++ */ ++/* Unfortunately entirex defines _WIN32 so we need to check for linux + * first. The definition of these flags is one and only one + * _OS_xxx is allowed to be defined. + */ +@@ -101,11 +99,11 @@ + * as well as one (and only one) pointer__ size + */ + #if defined(_M_IX86) || defined(__i386__) +-#define SWDRV_IA32 ++ #define SWDRV_IA32 + #elif defined(_M_AMD64) || defined(__x86_64__) +-#define SWDRV_EM64T ++ #define SWDRV_EM64T + #else +-#error "Unknown architecture for compilation" ++ #error "Unknown architecture for compilation" + #endif + + #endif /* _SWHV_DEFINES_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_driver.h b/drivers/platform/x86/socwatchhv/inc/swhv_driver.h +index f2f9f662b311..d1940649ec01 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_driver.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_driver.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. 
+- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + + #ifndef _SWHV_DRIVER_H_ + #define _SWHV_DRIVER_H_ 1 +@@ -75,7 +74,7 @@ extern void SYS_Perfvec_Handler(void); + extern short SYS_Get_cs(void); + + #if defined(SWDRV_IA32) && (SOCWATCH_IDT_IRQ) +-extern void *SYS_Get_IDT_Base_HWR(void); /* IDT base from hardware IDTR */ ++extern void *SYS_Get_IDT_Base_HWR(void); /* / IDT base from hardware IDTR */ + + #define SYS_Get_IDT_Base SYS_Get_IDT_Base_HWR + #endif /* defined(SWDRV_IA32) && (SOCWATCH_IDT_IRQ) */ +@@ -94,10 +93,10 @@ typedef struct gate_struct64 gate_struct_t; + #define CPU() (raw_smp_processor_id()) + #define GET_BOOL_STRING(b) ((b) ? "TRUE" : "FALSE") + +-#define _STRINGIFY(x) #x +-#define STRINGIFY(x) _STRINGIFY(x) +-#define _STRINGIFY_W(x) (L#x) +-#define STRINGIFY_W(x) _STRINGIFY_W(x) ++#define _STRINGIFY(x) #x ++#define STRINGIFY(x) _STRINGIFY(x) ++#define _STRINGIFY_W(x) L#x ++#define STRINGIFY_W(x) _STRINGIFY_W(x) + + /* + * 64bit Compare-and-swap. +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h b/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h +index 0d2a368c12ca..0807177f3bf1 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_ioctl.h +@@ -1,72 +1,72 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +-*/ + #ifndef __SWHV_IOCTL_H__ + #define __SWHV_IOCTL_H__ + + #include "pw_types.h" + + #if defined(__linux__) || defined(__QNX__) +-#if __KERNEL__ +-#include +-#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) +-#include +-#endif /* COMPAT && x64 */ +-#else /* !__KERNEL__ */ +-#include +-#endif /* __KERNEL__ */ ++ #if __KERNEL__ ++ #include ++ #if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) ++ #include ++ #endif /* COMPAT && x64 */ ++ #else /* !__KERNEL__ */ ++ #include ++ #endif /* __KERNEL__ */ + #endif /* __linux__ */ + /* + * Path to the Hypervisor driver device file. +@@ -80,20 +80,20 @@ + * are delivered to the correct + * driver. 
+ */ +-#define SP_IOC_MAGIC 99 ++#define SP_IOC_MAGIC 99 + /* + * CONSTANTS that define the various operations. + * TODO: convert to enum? + */ +-#define SWHVDRV_OPERATION_CONFIGURE 1 /* configure a collection */ +-#define SWHVDRV_OPERATION_CMD 2 /* control a collection */ +-#define SWHVDRV_OPERATION_VERSION 3 /* retrieve driver version info */ +-#define SWHVDRV_OPERATION_CLOCK 4 /* retrieve STM clock */ +-#define SWHVDRV_OPERATION_TOPOLOGY 5 /* retrieve CPU topology */ +-#define SWHVDRV_OPERATION_CPUCOUNT 6 /* retrieve CPU count */ ++#define SWHVDRV_OPERATION_CONFIGURE 1 /* configure a collection */ ++#define SWHVDRV_OPERATION_CMD 2 /* control a collection */ ++#define SWHVDRV_OPERATION_VERSION 3 /* retrieve driver version info */ ++#define SWHVDRV_OPERATION_CLOCK 4 /* retrieve STM clock */ ++#define SWHVDRV_OPERATION_TOPOLOGY 5 /* retrieve CPU topology */ ++#define SWHVDRV_OPERATION_CPUCOUNT 6 /* retrieve CPU count */ + #define SWHVDRV_OPERATION_HYPERVISOR_TYPE 7 /* retrieve hypervisor type */ +-#define SWHVDRV_OPERATION_MSR_READ 8 /* retrieve MSR value */ +-#define SWHVDRV_OPERATION_POLL 9 /* Polling tick */ ++#define SWHVDRV_OPERATION_MSR_READ 8 /* retrieve MSR value */ ++#define SWHVDRV_OPERATION_POLL 9 /* Polling tick */ + + enum swhv_ioctl_cmd { + swhv_ioctl_cmd_none = 0, +@@ -116,49 +116,46 @@ enum swhv_ioctl_cmd { + * Where "Read" and "Write" are from the user's perspective + * (similar to the file "read" and "write" calls). 
+ */ +-#define SWHVDRV_IOCTL_CONFIGURE \ +- _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CONFIGURE, \ +- struct spdrv_ioctl_arg *) +-#define SWHVDRV_IOCTL_CMD \ ++#define SWHVDRV_IOCTL_CONFIGURE \ ++ _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CONFIGURE, struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_CMD \ + _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CMD, struct spdrv_ioctl_arg *) +-#define SWHVDRV_IOCTL_VERSION \ ++#define SWHVDRV_IOCTL_VERSION \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_VERSION, struct spdrv_ioctl_arg *) +-#define SWHVDRV_IOCTL_CLOCK \ ++#define SWHVDRV_IOCTL_CLOCK \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CLOCK, struct spdrv_ioctl_arg *) +-#define SWHVDRV_IOCTL_TOPOLOGY \ ++#define SWHVDRV_IOCTL_TOPOLOGY \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_TOPOLOGY, struct spdrv_ioctl_arg *) +-#define SWHVDRV_IOCTL_CPUCOUNT \ ++#define SWHVDRV_IOCTL_CPUCOUNT \ + _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CPUCOUNT, struct spdrv_ioctl_arg *) +-#define SWHVDRV_IOCTL_HYPERVISOR_TYPE \ +- _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_HYPERVISOR_TYPE, \ +- struct spdrv_ioctl_arg *) +-#define SWHVDRV_IOCTL_MSR_READ \ +- _IOWR(SP_IOC_MAGIC, SWHVDRV_OPERATION_MSR_READ, \ +- struct spdrv_ioctl_arg *) +-#define SWHVDRV_IOCTL_POLL \ ++#define SWHVDRV_IOCTL_HYPERVISOR_TYPE \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_HYPERVISOR_TYPE, struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_MSR_READ \ ++ _IOWR(SP_IOC_MAGIC, SWHVDRV_OPERATION_MSR_READ, struct spdrv_ioctl_arg *) ++#define SWHVDRV_IOCTL_POLL \ + _IO(SP_IOC_MAGIC, SWHVDRV_OPERATION_POLL, struct spdrv_ioctl_arg *) + + #if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) +-#include ++ #include + +-#define SWHVDRV_IOCTL_CONFIGURE32 \ +- _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CONFIGURE, compat_uptr_t) +-#define SWHVDRV_IOCTL_CMD32 \ ++ #define SWHVDRV_IOCTL_CONFIGURE32 \ ++ _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CONFIGURE, compat_uptr_t) ++ #define SWHVDRV_IOCTL_CMD32 \ + _IOW(SP_IOC_MAGIC, SWHVDRV_OPERATION_CMD, compat_uptr_t) +-#define 
SWHVDRV_IOCTL_VERSION32 \ +- _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_VERSION, compat_uptr_t) +-#define SWHVDRV_IOCTL_CLOCK32 \ +- _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CLOCK, compat_uptr_t) +-#define SWHVDRV_IOCTL_TOPOLOGY32 \ +- _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_TOPOLOGY, compat_uptr_t) +-#define SWHVDRV_IOCTL_CPUCOUNT32 \ +- _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CPUCOUNT, compat_uptr_t) +-#define SWHVDRV_IOCTL_HYPERVISOR_TYPE32 \ +- _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_HYPERVISOR_TYPE, compat_uptr_t) +-#define SWHVDRV_IOCTL_MSR_READ32 \ +- _IOWR(SP_IOC_MAGIC, SWHVDRV_OPERATION_MSR_READ, compat_uptr_t) +-#define SWHVDRV_IOCTL_POLL32 \ +- _IO(SP_IOC_MAGIC, SWHVDRV_OPERATION_POLL, compat_uptr_t) ++ #define SWHVDRV_IOCTL_VERSION32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_VERSION, compat_uptr_t) ++ #define SWHVDRV_IOCTL_CLOCK32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CLOCK, compat_uptr_t) ++ #define SWHVDRV_IOCTL_TOPOLOGY32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_TOPOLOGY, compat_uptr_t) ++ #define SWHVDRV_IOCTL_CPUCOUNT32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_CPUCOUNT, compat_uptr_t) ++ #define SWHVDRV_IOCTL_HYPERVISOR_TYPE32 \ ++ _IOR(SP_IOC_MAGIC, SWHVDRV_OPERATION_HYPERVISOR_TYPE, compat_uptr_t) ++ #define SWHVDRV_IOCTL_MSR_READ32 \ ++ _IOWR(SP_IOC_MAGIC, SWHVDRV_OPERATION_MSR_READ, compat_uptr_t) ++ #define SWHVDRV_IOCTL_POLL32 \ ++ _IO(SP_IOC_MAGIC, SWHVDRV_OPERATION_POLL, compat_uptr_t) + #endif /* COMPAT && x64 */ + + #endif /* __SWHV_IOCTL_H__ */ +diff --git a/drivers/platform/x86/socwatchhv/inc/swhv_structs.h b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h +index 0393a95e4875..3922d39601f4 100644 +--- a/drivers/platform/x86/socwatchhv/inc/swhv_structs.h ++++ b/drivers/platform/x86/socwatchhv/inc/swhv_structs.h +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. 
+- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. +- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ + + #ifndef _SWHV_STRUCTS_H_ + #define _SWHV_STRUCTS_H_ 1 +@@ -100,7 +99,11 @@ enum swhv_collector_type { + SWHV_COLLECTOR_TYPE_MSR, + }; + +-enum swhv_io_cmd { SWHV_IO_CMD_READ = 0, SWHV_IO_CMD_WRITE, SWHV_IO_CMD_MAX }; ++enum swhv_io_cmd { ++ SWHV_IO_CMD_READ = 0, ++ SWHV_IO_CMD_WRITE, ++ SWHV_IO_CMD_MAX ++}; + + #pragma pack(push, 1) + struct swhv_driver_msr_io_descriptor { +@@ -117,11 +120,12 @@ struct swhv_driver_switch_io_descriptor { + + #pragma pack(push, 1) + typedef struct swhv_driver_io_descriptor { +- pw_u16_t collection_type; /* One of 'enum swhv_collector_type' */ +- pw_s16_t collection_command; /* One of 'enum swhv_io_cmd' */ +- pw_u16_t counter_size_in_bytes; /* The number of bytes to +- * READ or WRITE +- */ ++ /* One of 'enum swhv_collector_type' */ ++ pw_u16_t collection_type; ++ /* One of 'enum swhv_io_cmd' */ ++ pw_s16_t collection_command; ++ /* The number of bytes to READ or WRITE */ ++ pw_u16_t counter_size_in_bytes; + union { + struct swhv_driver_msr_io_descriptor msr_descriptor; + struct swhv_driver_switch_io_descriptor switch_descriptor; +@@ -132,47 +136,42 @@ typedef struct swhv_driver_io_descriptor { + + #pragma pack(push, 1) + struct swhv_driver_interface_info { +- pw_s16_t cpu_mask; /* On which CPU(s) should the driver +- * read the data? +- */ +- /* Currently: -2 ==> read on ALL CPUs, +- * -1 ==> read on ANY CPU, +- * >= 0 ==> the specific CPU to read on +- */ +- pw_s16_t sample_id; /* Sample ID, used to map it back +- * to Metric Plugin, Metric and Msg ID combo +- */ +- pw_u16_t num_io_descriptors; /* Number of descriptors in the array, +- * below. +- */ +- pw_u8_t descriptors[1]; /* Array of swhv_driver_io_descriptor +- * structs. +- */ ++ /* On which CPU(s) should the driver read the data? 
++ * Currently: -2 ==> read on ALL CPUs, ++ * -1 ==> read on ANY CPU, ++ * >= 0 ==> the specific CPU to read on ++ */ ++ pw_s16_t cpu_mask; ++ /* Sample ID, used to map it back to Metric Plugin, ++ * Metric and Msg ID combo ++ */ ++ pw_s16_t sample_id; ++ /* Number of descriptors in the array, below. */ ++ pw_u16_t num_io_descriptors; ++ /* Array of swhv_driver_io_descriptor structs. */ ++ pw_u8_t descriptors[1]; + }; + #pragma pack(pop) +-#define SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ ++#define SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE() \ + (sizeof(struct swhv_driver_interface_info) - sizeof(pw_u8_t[1])) + + #pragma pack(push, 1) + struct swhv_driver_interface_msg { +- pw_u16_t num_infos; /* Number of 'swhv_driver_interface_info' +- * structs contained within the 'infos' variable, +- * below +- */ +- /* pw_u16_t infos_size_bytes; Size of data inlined within ++ /* Number of 'swhv_driver_interface_info' structs contained within + * the 'infos' variable, below + */ ++ pw_u16_t num_infos; + pw_u8_t infos[1]; + }; + #pragma pack(pop) +-#define SWHV_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ ++#define SWHV_DRIVER_INTERFACE_MSG_HEADER_SIZE() \ + (sizeof(struct swhv_driver_interface_msg) - sizeof(pw_u8_t[1])) + + /* + * ACRN specific structs, copied from the ACRN profiling service + * DO NOT modify these below stucts + */ +-#define SBUF_HEAD_SIZE 64 /* bytes */ ++#define SBUF_HEAD_SIZE 64 /* bytes */ + + typedef enum PROFILING_SOCWATCH_FEATURE { + SOCWATCH_COMMAND = 0, +@@ -205,7 +204,7 @@ struct vm_switch_trace { + #define VM_SWITCH_TRACE_SIZE ((uint64_t)sizeof(struct vm_switch_trace)) + + #define CONFIG_MAX_VCPUS_PER_VM 8 +-#define CONFIG_MAX_VM_NUM 6 ++#define CONFIG_MAX_VM_NUM 6 + + struct profiling_vcpu_pcpu_map { + int16_t vcpu_id; +@@ -236,16 +235,18 @@ typedef struct vm_switch_trace vmswitch_trace_t; + * ACRN specific constants shared between the driver and user-mode + */ + /* Per CPU buffer size */ +-#define ACRN_BUF_SIZE ((4 * 1024 * 1024) - SBUF_HEAD_SIZE /* 64 
bytes */) ++#define ACRN_BUF_SIZE \ ++ ((4 * 1024 * 1024) - SBUF_HEAD_SIZE /* 64 bytes */) + /* Size of buffer at which data should be transferred to user-mode */ +-#define ACRN_BUF_TRANSFER_SIZE (ACRN_BUF_SIZE / 2) ++#define ACRN_BUF_TRANSFER_SIZE (ACRN_BUF_SIZE / 2) + /* + * The ACRN 'sbuf' buffers consist of fixed size elements. + * This is how they are intended to be used, though SoCWatch only uses it to + * allocate the correct buffer size. + */ +-#define ACRN_BUF_ELEMENT_SIZE 32 /* byte */ +-#define ACRN_BUF_ELEMENT_NUM (ACRN_BUF_SIZE / ACRN_BUF_ELEMENT_SIZE) +-#define ACRN_BUF_FILLED_SIZE(sbuf) (sbuf->size - sbuf_available_space(sbuf)) ++#define ACRN_BUF_ELEMENT_SIZE 32 /* byte */ ++#define ACRN_BUF_ELEMENT_NUM (ACRN_BUF_SIZE / ACRN_BUF_ELEMENT_SIZE) ++#define ACRN_BUF_FILLED_SIZE(sbuf) \ ++ (sbuf->size - sbuf_available_space(sbuf)) + + #endif /* _SWHV_STRUCTS_H_ */ +diff --git a/drivers/platform/x86/socwatchhv/swhv_acrn.c b/drivers/platform/x86/socwatchhv/swhv_acrn.c +index 962db47cec45..2506388cb1a7 100644 +--- a/drivers/platform/x86/socwatchhv/swhv_acrn.c ++++ b/drivers/platform/x86/socwatchhv/swhv_acrn.c +@@ -1,3 +1,58 @@ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ + #include + #include + #include +@@ -34,9 +89,8 @@ + static int pcpu_num; + bool flush_mode; + +-/* TODO is this needed? +- * module_param(nr_cpus, int, S_IRUSR | S_IWUSR); +- */ ++/* TODO is this needed? 
*/ ++/* module_param(nr_cpus, int, S_IRUSR | S_IWUSR); */ + + static struct shared_buf **sbuf_per_cpu; + +@@ -51,31 +105,32 @@ bool buffer_not_ready(int *cpu); + struct swhv_acrn_msr_collector_data *swhv_alloc_msr_collector_node(void) + { + struct swhv_acrn_msr_collector_data *node = +- (struct swhv_acrn_msr_collector_data *)kmalloc( +- sizeof(struct swhv_acrn_msr_collector_data), +- GFP_KERNEL); ++ (struct swhv_acrn_msr_collector_data *) ++ kmalloc(sizeof(struct swhv_acrn_msr_collector_data), ++ GFP_KERNEL); ++ + if (node) { + node->per_msg_payload_size = 0x0; + node->sample_id = 0x0; +- node->msr_ops_list = kmalloc( +- pcpu_num * sizeof(struct profiling_msr_ops_list), +- GFP_KERNEL); +- memset(node->msr_ops_list, 0, +- pcpu_num * sizeof(struct profiling_msr_ops_list)); ++ node->msr_ops_list = ++ kmalloc_array(pcpu_num, ++ sizeof(struct profiling_msr_ops_list), ++ GFP_KERNEL); ++ memset(node->msr_ops_list, 0, pcpu_num * ++ sizeof(struct profiling_msr_ops_list)); + SW_LIST_ENTRY_INIT(node, list); + } + return node; + } +-struct swhv_acrn_msr_collector_data * +-swhv_add_driver_msr_info(void *list_head, +- const struct swhv_driver_interface_info *info) ++struct swhv_acrn_msr_collector_data *swhv_add_driver_msr_info(void *list_head, ++ const struct swhv_driver_interface_info *info) + { + int cpu; + + SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; + +- struct swhv_acrn_msr_collector_data *node = +- swhv_alloc_msr_collector_node(); ++ struct swhv_acrn_msr_collector_data *node = swhv_alloc_msr_collector_node(); ++ + if (!node) { + pw_pr_error("ERROR allocating MSR collector node!\n"); + return NULL; +@@ -83,8 +138,7 @@ swhv_add_driver_msr_info(void *list_head, + + node->sample_id = info->sample_id; + node->cpu_mask = info->cpu_mask; +- foreach_cpu(cpu, pcpu_num) +- { ++ foreach_cpu(cpu, pcpu_num) { + node->msr_ops_list[cpu].collector_id = COLLECTOR_SOCWATCH; + node->msr_ops_list[cpu].msr_op_state = MSR_OP_REQUESTED; + } +@@ -93,6 +147,7 @@ 
swhv_add_driver_msr_info(void *list_head, + return node; + } + ++ + int swhv_add_driver_msr_io_desc(struct swhv_acrn_msr_collector_data *node, + struct swhv_driver_io_descriptor *info) + { +@@ -102,17 +157,14 @@ int swhv_add_driver_msr_io_desc(struct swhv_acrn_msr_collector_data *node, + + /* Confirm this is an MSR IO descriptor */ + if (info->collection_type != SWHV_COLLECTOR_TYPE_MSR) { +- pw_pr_error( +- "ERROR trying to configure MSR collector with other data!\n"); ++ pw_pr_error("ERROR trying to configure MSR collector with other data!\n"); + return -EINVAL; + } + +- foreach_cpu(cpu, pcpu_num) +- { ++ foreach_cpu(cpu, pcpu_num) { + num_entries = node->msr_ops_list[cpu].num_entries; + if (num_entries >= MAX_MSR_LIST_NUM) { +- pw_pr_error( +- "ERROR trying to add too many MSRs to collect!\n"); ++ pw_pr_error("ERROR trying to add too many MSRs to collect!\n"); + return -PW_ERROR; + } + +@@ -121,25 +173,23 @@ int swhv_add_driver_msr_io_desc(struct swhv_acrn_msr_collector_data *node, + msr_op = &(node->msr_ops_list[cpu].entries[idx]); + + msr_op->msr_id = info->msr_descriptor.address; +- if (info->collection_command == SWHV_IO_CMD_READ) { ++ if (info->collection_command == SWHV_IO_CMD_READ) + msr_op->msr_op_type = MSR_OP_READ; +- } else if (info->collection_command == SWHV_IO_CMD_WRITE) { ++ else if (info->collection_command == SWHV_IO_CMD_WRITE) + msr_op->msr_op_type = MSR_OP_WRITE; +- } ++ + + /* + * Use the param field to set sample id. 
+- * This'll be used in the hypervisor to +- * set the id in the samples ++ * This'll be used in the hypervisor to set the id in the samples + */ + msr_op->param = (uint16_t)node->sample_id; + + num_entries++; + +- if (num_entries < MAX_MSR_LIST_NUM) { +- node->msr_ops_list[cpu].entries[num_entries].msr_id = +- -1; +- } ++ if (num_entries < MAX_MSR_LIST_NUM) ++ node->msr_ops_list[cpu].entries[num_entries].msr_id = -1; ++ + node->msr_ops_list[cpu].num_entries = num_entries; + } + return PW_SUCCESS; +@@ -151,22 +201,19 @@ int swhv_init_per_cpu_buffers(void) + + sbuf_per_cpu = vmalloc(pcpu_num * sizeof(struct shared_buf *)); + +- foreach_cpu(cpu, pcpu_num) +- { ++ foreach_cpu(cpu, pcpu_num) { + /* allocate shared_buf */ + sbuf_per_cpu[cpu] = sbuf_allocate(ACRN_BUF_ELEMENT_NUM, +- ACRN_BUF_ELEMENT_SIZE); ++ ACRN_BUF_ELEMENT_SIZE); + if (!sbuf_per_cpu[cpu]) { +- pw_pr_error("Failed to allocate buffer for cpu %d\n", +- cpu); ++ pw_pr_error("Failed to allocate buffer for cpu %d\n", cpu); + ret = -ENOMEM; + goto out_free; + } + } + + /* TODO understand the use of this API */ +- foreach_cpu(cpu, pcpu_num) +- { ++ foreach_cpu(cpu, pcpu_num) { + ret = sbuf_share_setup(cpu, ACRN_SOCWATCH, sbuf_per_cpu[cpu]); + if (ret < 0) { + pw_pr_error("Failed to setup buffer for cpu %d\n", cpu); +@@ -176,15 +223,15 @@ int swhv_init_per_cpu_buffers(void) + + return PW_SUCCESS; + out_sbuf: +- for (i = --cpu; i >= 0; i--) { ++ for (i = --cpu; i >= 0; i--) + sbuf_share_setup(i, ACRN_SOCWATCH, NULL); +- } ++ + cpu = pcpu_num; + + out_free: +- for (i = --cpu; i >= 0; i--) { ++ for (i = --cpu; i >= 0; i--) + sbuf_free(sbuf_per_cpu[i]); +- } ++ + + vfree(sbuf_per_cpu); + return ret; +@@ -196,11 +243,9 @@ void swhv_destroy_per_cpu_buffers(void) + + pw_pr_debug("%s, pcpu_num: %d\n", __func__, pcpu_num); + +- foreach_cpu(cpu, pcpu_num) +- { +- /* TODO anything else to de-register? +- * deregister devices +- */ ++ foreach_cpu(cpu, pcpu_num) { ++ /* TODO anything else to de-register? 
*/ ++ /* deregister devices */ + + /* set sbuf pointer to NULL in HV */ + sbuf_share_setup(cpu, ACRN_SOCWATCH, NULL); +@@ -213,13 +258,10 @@ void swhv_destroy_per_cpu_buffers(void) + + void swhv_free_msr_collector_node(struct swhv_acrn_msr_collector_data *node) + { +- if (!node) { +- return; ++ if (node) { ++ kfree(node->msr_ops_list); ++ kfree(node); + } +- +- kfree(node->msr_ops_list); +- kfree(node); +- return; + } + + void swhv_init_msr_collector_list(void) +@@ -237,8 +279,9 @@ void swhv_destroy_msr_collector_list(void) + SW_LIST_HEAD_VAR(swhv_acrn_msr_collector_data) * head = list_head; + while (!SW_LIST_EMPTY(head)) { + struct swhv_acrn_msr_collector_data *curr = +- SW_LIST_GET_HEAD_ENTRY( +- head, swhv_acrn_msr_collector_data, list); ++ SW_LIST_GET_HEAD_ENTRY(head, ++ swhv_acrn_msr_collector_data, list); ++ + SW_LIST_UNLINK(curr, list); + swhv_free_msr_collector_node(curr); + } +@@ -254,15 +297,13 @@ void swhv_handle_hypervisor_collector(uint32_t control_cmd) + + acrn_profiling_control->collector_id = COLLECTOR_SOCWATCH; + +- if (control_cmd == 1) { /* start collection + send switch bitmask */ ++ if (control_cmd == 1) { ++ /* start collection + send switch bitmask */ + pw_pr_debug("STARTING ACRN PROFILING SERVICE\n"); +- global_collection_switch |= +- control_cmd; /* first bit controls start/stop +- * of collection +- */ +- } else if (control_cmd == 0) { /* stop collection +- * + reset switch bitmask +- */ ++ /* first bit controls start/stop of collection */ ++ global_collection_switch |= control_cmd; ++ } else if (control_cmd == 0) { ++ /* stop collection + reset switch bitmask */ + pw_pr_debug("STOPPING ACRN PROFILING SERVICE\n"); + global_collection_switch = control_cmd; + } +@@ -270,7 +311,7 @@ void swhv_handle_hypervisor_collector(uint32_t control_cmd) + + /* send collection command + switch bitmask */ + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, +- virt_to_phys(acrn_profiling_control)); ++ virt_to_phys(acrn_profiling_control)); + 
kfree(acrn_profiling_control); + } + +@@ -287,22 +328,20 @@ int swhv_handle_msr_collector_list(void) + return retVal; + } + +- if (!head) { ++ if (!head) + return -PW_ERROR; +- } +- SW_LIST_FOR_EACH_ENTRY(curr, head, list) +- { ++ ++ SW_LIST_FOR_EACH_ENTRY(curr, head, list) { + pw_pr_debug("HANDLING MSR NODE\n"); + +- /*hypervisor call to do immediate MSR read */ ++ /* hypervisor call to do immediate MSR read */ + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, + virt_to_phys(curr->msr_ops_list)); + } + return retVal; + } + +-long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, +- int local_len) ++long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, int local_len) + { + struct swhv_driver_interface_info *local_info = NULL; + struct swhv_driver_io_descriptor *local_io_desc = NULL; +@@ -341,33 +380,33 @@ long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, + num_infos = local_msg->num_infos; + pw_pr_debug("LOCAL NUM INFOS = %u\n", num_infos); + for (; num_infos > 0 && !done; --num_infos) { +- local_info = +- (struct swhv_driver_interface_info *)&__data[dst_idx]; ++ local_info = (struct swhv_driver_interface_info *)&__data[dst_idx]; + desc_idx = dst_idx + SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE(); + dst_idx += (SWHV_DRIVER_INTERFACE_INFO_HEADER_SIZE() + +- local_info->num_io_descriptors * +- sizeof(struct swhv_driver_io_descriptor)); ++ local_info->num_io_descriptors * ++ sizeof(struct swhv_driver_io_descriptor)); + pw_pr_debug("# msrs = %u\n", +- (unsigned int)local_info->num_io_descriptors); ++ (unsigned)local_info->num_io_descriptors); + + num_io_desc = local_info->num_io_descriptors; + pw_pr_debug("LOCAL NUM IO DESC = %u\n", num_io_desc); + + driver_info_added = false; + for (; num_io_desc > 0; --num_io_desc) { +- local_io_desc = (struct swhv_driver_io_descriptor +- *)&__data[desc_idx]; ++ local_io_desc = (struct swhv_driver_io_descriptor *) ++ &__data[desc_idx]; + desc_idx += sizeof(struct 
swhv_driver_io_descriptor); + if (local_io_desc->collection_type == +- SWHV_COLLECTOR_TYPE_MSR) { ++ SWHV_COLLECTOR_TYPE_MSR) { ++ + if (!driver_info_added) { + msr_collector_node = + swhv_add_driver_msr_info( + &swhv_msr_collector, + local_info); +- if (msr_collector_node == NULL) { ++ if (msr_collector_node == NULL) + return -PW_ERROR; +- } ++ + driver_info_added = true; + } + +@@ -376,15 +415,12 @@ long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, + local_io_desc->msr_descriptor.address, + local_io_desc->msr_descriptor.type, + local_io_desc->collection_command); +- swhv_add_driver_msr_io_desc(msr_collector_node, +- local_io_desc); ++ swhv_add_driver_msr_io_desc(msr_collector_node, local_io_desc); + } else if (local_io_desc->collection_type == +- SWHV_COLLECTOR_TYPE_SWITCH) { +- local_config_bitmap = +- local_io_desc->switch_descriptor +- .switch_bitmask; +- pw_pr_debug("local bitmask = %u\n", +- local_config_bitmap); ++ SWHV_COLLECTOR_TYPE_SWITCH) { ++ ++ local_config_bitmap = local_io_desc->switch_descriptor.switch_bitmask; ++ pw_pr_debug("local bitmask = %u\n", local_config_bitmap); + + global_collection_switch = local_config_bitmap; + +@@ -393,11 +429,11 @@ long swhv_configure(struct swhv_driver_interface_msg __user *remote_msg, + */ + done = 1; + break; +- } else { ++ } else + pw_pr_error( + "WARNING: unknown collector configuration requested, collector id: %u!\n", + local_io_desc->collection_type); +- } ++ + } + driver_info_added = false; + } +@@ -411,7 +447,7 @@ long swhv_stop(void) + + pw_pr_debug("socwatch: stop called\n"); + +- /*If MSR ops are present, perform them to get begin snapshot data. */ ++ /* If MSR ops are present, perform them to get begin snapshot data. */ + swhv_handle_msr_collector_list(); + + /* stop collection + reset switch bitmask */ +@@ -446,15 +482,12 @@ long swhv_start(void) + swhv_handle_msr_collector_list(); + + #if 0 +- /* Expand this eventually to retrieve VM-related info +- * from the hypervisor. 
Leaving it here for now. +- */ +- vm_info_list = kmalloc(sizeof(struct profiling_vm_info_list), +- GFP_KERNEL); ++ /* Expand this eventually to retrive VM-realted info from the hypervisor */ ++ /* Leaving it here for now. */ ++ vm_info_list = kmalloc(sizeof(struct profiling_vm_info_list), GFP_KERNEL); + memset(vm_info_list, 0, sizeof(struct profiling_vm_info_list)); + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, +- virt_to_phys(vm_info_list)); ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, virt_to_phys(vm_info_list)); + + pw_pr_debug("Number of VMs: %d\n", vm_info_list->num_vms); + for (i = 0; i < vm_info_list->num_vms; ++i) { +@@ -493,7 +526,7 @@ long swhv_get_hypervisor_type(u32 __user *remote_args) + uint32_t hypervisor_type = swhv_hypervisor_acrn; + + return copy_to_user(remote_args, &hypervisor_type, +- sizeof(hypervisor_type)); ++ sizeof(hypervisor_type)); + } + + long swhv_msr_read(u32 __user *remote_in_args, u64 __user *remote_args) +@@ -503,44 +536,39 @@ long swhv_msr_read(u32 __user *remote_in_args, u64 __user *remote_args) + int ret = PW_SUCCESS; + + if (get_user(msr_addr, remote_in_args)) { +- pw_pr_error( +- "ERROR: couldn't copy remote args for read MSR IOCTL!\n"); +- return -1; ++ pw_pr_error("ERROR: couldn't copy remote args for read MSR IOCTL!\n"); ++ return -1; + } + + if (!msr_read_ops_list) { +- msr_read_ops_list = kmalloc( +- pcpu_num * sizeof(struct profiling_msr_ops_list), +- GFP_KERNEL); +- if (!msr_read_ops_list) { +- pw_pr_error( +- "couldn't allocate memory for doing an MSR read!\n"); +- return -1; +- } +- memset(msr_read_ops_list, 0, +- pcpu_num * sizeof(struct profiling_msr_ops_list)); ++ msr_read_ops_list = kmalloc_array(pcpu_num, ++ sizeof(struct profiling_msr_ops_list), GFP_KERNEL); ++ if (!msr_read_ops_list) { ++ pw_pr_error("couldn't allocate memory for doing an MSR read!\n"); ++ return -1; ++ } ++ memset(msr_read_ops_list, 0, pcpu_num * sizeof(struct profiling_msr_ops_list)); + } + + /* + * The hypercall 
is set in such a way that the MSR read will occur on + * all CPUs and as a result we have to set up structures for each CPU. + */ +- foreach_cpu(cpu, pcpu_num) +- { ++ foreach_cpu(cpu, pcpu_num) { + msr_read_ops_list[cpu].collector_id = COLLECTOR_SOCWATCH; + msr_read_ops_list[cpu].msr_op_state = MSR_OP_REQUESTED; + msr_read_ops_list[cpu].num_entries = 1; + msr_read_ops_list[cpu].entries[0].msr_id = msr_addr; + msr_read_ops_list[cpu].entries[0].msr_op_type = MSR_OP_READ; +- msr_read_ops_list[cpu].entries[1].msr_id = +- -1; /* the next entry is expected to be set to -1 */ +- msr_read_ops_list[cpu].entries[1].param = +- 0; /* set to 0 to not generate sample in hypervisor */ ++ /* the next entry is expected to be set to -1 */ ++ msr_read_ops_list[cpu].entries[1].msr_id = -1; ++ /* set to 0 to not generate sample in hypervisor */ ++ msr_read_ops_list[cpu].entries[1].param = 0; + } + + /* hypervisor call to do immediate MSR read */ + acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, +- virt_to_phys(msr_read_ops_list)); ++ virt_to_phys(msr_read_ops_list)); + + /* copy value to remote args, pick from any CPU */ + value = msr_read_ops_list[0].entries[0].value; +@@ -559,9 +587,9 @@ long swhv_collection_poll(void) + /* + * Handle 'POLL' timer expirations. 
+ */ +- if (SW_LIST_EMPTY(&swhv_msr_collector)) { ++ if (SW_LIST_EMPTY(&swhv_msr_collector)) + pw_pr_debug("DEBUG: EMPTY MSR COLLECTOR POLL LIST\n"); +- } ++ + + if (swhv_handle_msr_collector_list()) { + pw_pr_error("ERROR: unable to copy MSR value to userspace!\n"); +@@ -570,8 +598,7 @@ long swhv_collection_poll(void) + return ret; + } + +-ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, +- size_t bytes_to_read) ++ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, size_t bytes_to_read) + { + unsigned long bytes_not_copied; + ssize_t bytes_read; +@@ -579,29 +606,26 @@ ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, + void *data_read = NULL; + + if (bytes_to_read == 0) { +- pw_pr_debug( +- "%s - 0 bytes requested to transfer! Returning...\n", +- __func__); +- ++ pw_pr_debug("%s - 0 bytes requested to transfer! Returning...\n", __func__); + return bytes_to_read; + } + + data_read = vmalloc(bytes_to_read); + if (!data_read) { +- pw_pr_error( +- "couldn't allocate memory when trying to transfer data to userspace!\n"); ++ pw_pr_error("couldn't allocate memory when trying to transfer data to userspace!\n"); + return 0; + } + + pw_pr_debug("%s - bytes to transfer %zu\n", __func__, bytes_to_read); + + if (sbuf_to_copy) { +- bytes_read = sbuf_get_variable(sbuf_to_copy, &data_read, +- bytes_to_read); ++ bytes_read = sbuf_get_variable(sbuf_to_copy, &data_read, bytes_to_read); ++ ++ if (bytes_read != bytes_to_read) ++ pw_pr_warn( ++ "%s - bytes read (%zu bytes) are not equal to expected bytes (%zu bytes) to be read!", ++ __func__, bytes_read, bytes_to_read); + +- if (bytes_read != bytes_to_read) { +- pw_pr_warn("%s - bytes read (%zu bytes) are not equal to expected bytes (%zu bytes) to be read!", __func__, bytes_read, bytes_to_read); +- } + + if (bytes_read < 0) { + pw_pr_error("Error reading this buffer\n"); +@@ -616,8 +640,8 @@ ssize_t swhv_transfer_data(void *user_buffer, struct 
shared_buf *sbuf_to_copy, + goto ret_free; + } + +- bytes_not_copied = copy_to_user(user_buffer, data_read, +- bytes_read); ++ bytes_not_copied = ++ copy_to_user(user_buffer, data_read, bytes_read); + /* TODO check if this is meaningful enough to have */ + /* *offset += bytes_read - bytes_not_copied; */ + +@@ -631,10 +655,10 @@ ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, + } + ret = bytes_read; + goto ret_free; +- } else { ++ } else + pw_pr_debug( + "Buffer empty! nothing more to read from this buffer\n"); +- } ++ + } + + ret_free: +@@ -644,8 +668,8 @@ ssize_t swhv_transfer_data(void *user_buffer, struct shared_buf *sbuf_to_copy, + + bool buffer_not_ready(int *cpu) + { +- /* cycle through and confirm buffers on all CPUs +- * are less than ACRN_BUF_TRANSFER_SIZE ++ /* cycle through and confirm buffers on all CPUs are less than ++ * ACRN_BUF_TRANSFER_SIZE + * as well as flush mode has not been requested + */ + int i = 0; +@@ -654,15 +678,14 @@ bool buffer_not_ready(int *cpu) + pw_pr_debug( + "checking if a buffer is ready to be copied to the device file\n"); + /* +- * It's possible that the buffer from cpu0 may always have +- * data to transfer and can potentially prevent buffers from +- * other cpus from ever being serviced. ++ * It's possible that the buffer from cpu0 may always have data to ++ * transfer and can potentially prevent buffers from other cpus from ++ * ever being serviced. + * TODO Consider adding an optimization to check for last cpu read. 
+ */ + for (i = 0; i < pcpu_num; ++i) { +- if (ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[i]) >= +- ACRN_BUF_TRANSFER_SIZE || +- (flush_mode && ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[i]))) { ++ if (ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[i]) >= ACRN_BUF_TRANSFER_SIZE || ++ (flush_mode && ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[i]))) { + not_enough_data = false; + *cpu = i; + pw_pr_debug( +@@ -675,20 +698,21 @@ bool buffer_not_ready(int *cpu) + } + + ssize_t device_read_i(struct file *file, char __user *user_buffer, +- size_t length, loff_t *offset) ++ size_t length, loff_t *offset) + { + ssize_t bytes_read = 0; + int cpu = 0; + + pw_pr_debug("%s - usermode attempting to read device file\n", __func__); ++ + if (buffer_not_ready(&cpu)) { + pw_pr_debug("%s - no buffer ready to be read\n", __func__); + return bytes_read; + } + +- if (flush_mode) { ++ if (flush_mode) + pw_pr_debug("flush mode on, ready to flush a buffer\n"); +- } ++ + length = ACRN_BUF_FILLED_SIZE(sbuf_per_cpu[cpu]); + pw_pr_debug("on cpu %d, buffer size is %zu bytes\n", cpu, length); + +@@ -716,9 +740,9 @@ int swhv_load_driver_i(void) + pw_pr_debug("%s, pcpu_num: %d\n", __func__, pcpu_num); + + ret = swhv_init_per_cpu_buffers(); +- if (ret < 0) { ++ if (ret < 0) + return ret; +- } ++ + + swhv_init_msr_collector_list(); + +diff --git a/drivers/platform/x86/socwatchhv/swhv_driver.c b/drivers/platform/x86/socwatchhv/swhv_driver.c +index 7a4e6c57ab45..50c87bdcbec8 100644 +--- a/drivers/platform/x86/socwatchhv/swhv_driver.c ++++ b/drivers/platform/x86/socwatchhv/swhv_driver.c +@@ -1,58 +1,57 @@ +-/* +- +- This file is provided under a dual BSD/GPLv2 license. When using or +- redistributing this file, you may do so under either license. +- +- GPL LICENSE SUMMARY +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- This program is free software; you can redistribute it and/or modify +- it under the terms of version 2 of the GNU General Public License as +- published by the Free Software Foundation. 
+- +- This program is distributed in the hope that it will be useful, but +- WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- General Public License for more details. +- +- Contact Information: +- SoC Watch Developer Team +- Intel Corporation, +- 1300 S Mopac Expwy, +- Austin, TX 78746 +- +- BSD LICENSE +- +- Copyright(c) 2014 - 2018 Intel Corporation. +- +- Redistribution and use in source and binary forms, with or without +- modification, are permitted provided that the following conditions +- are met: +- +- * Redistributions of source code must retain the above copyright +- notice, this list of conditions and the following disclaimer. +- * Redistributions in binary form must reproduce the above copyright +- notice, this list of conditions and the following disclaimer in +- the documentation and/or other materials provided with the +- distribution. +- * Neither the name of Intel Corporation nor the names of its +- contributors may be used to endorse or promote products derived +- from this software without specific prior written permission. +- +- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+- +-*/ ++/* SPDX-License-Identifier: GPL-2.0 AND BSD-3-Clause ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: ++ * SoC Watch Developer Team ++ * Intel Corporation, ++ * 1300 S Mopac Expwy, ++ * Austin, TX 78746 ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2014 - 2019 Intel Corporation. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + #define MOD_AUTHOR "SoCWatch Team" + #define MOD_DESC "SoCWatch kernel module to communicate with hypervisors" + +@@ -85,7 +84,6 @@ + * Compile-time constants + * ******************************************* + */ +- + /* ******************************************* + * Local data structures. + * ******************************************* +@@ -97,10 +95,10 @@ + * from 32b user programs in 64b kernels. + */ + struct spdrv_ioctl_arg32 { +- pw_s32_t in_len; +- pw_s32_t out_len; +- compat_caddr_t in_arg; +- compat_caddr_t out_arg; ++ pw_s32_t in_len; ++ pw_s32_t out_len; ++ compat_caddr_t in_arg; ++ compat_caddr_t out_arg; + }; + #endif /* COMPAT && x64 */ + +@@ -115,7 +113,7 @@ static struct class *sp_class; + */ + + /* Per-CPU variable containing the currently running vcpu. */ +-/*static DEFINE_PER_CPU(int, curr_vcpu) = 0; */ ++/* static DEFINE_PER_CPU(int, curr_vcpu) = 0; */ + + /* ******************************************* + * Function definitions. 
+@@ -153,24 +151,22 @@ static long swhv_handle_cmd(u32 __user *remote_cmd) + long swhv_get_version(u64 __user *remote_args) + { + u64 local_version = (u64)SWHVDRV_VERSION_MAJOR << 32 | +- (u64)SWHVDRV_VERSION_MINOR << 16 | +- (u64)SWHVDRV_VERSION_OTHER; ++ (u64)SWHVDRV_VERSION_MINOR << 16 | ++ (u64)SWHVDRV_VERSION_OTHER; + + return put_user(local_version, remote_args); + }; + + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) +-#define MATCH_IOCTL(num, pred) ((num) == (pred) || (num) == (pred##32)) ++ #define MATCH_IOCTL(num, pred) ((num) == (pred) || (num) == (pred##32)) + #else +-#define MATCH_IOCTL(num, pred) ((num) == (pred)) ++ #define MATCH_IOCTL(num, pred) ((num) == (pred)) + #endif + +-static long handle_ioctl(unsigned int ioctl_num, +- struct spdrv_ioctl_arg __user *remote_args) ++static long handle_ioctl(unsigned int ioctl_num, struct spdrv_ioctl_arg __user *remote_args) + { + long status = 0; + struct spdrv_ioctl_arg local_args; +- + int local_in_len, local_out_len; + + if (copy_from_user(&local_args, remote_args, sizeof(local_args))) { +@@ -189,10 +185,8 @@ static long handle_ioctl(unsigned int ioctl_num, + + case SWHVDRV_OPERATION_CONFIGURE: + pw_pr_debug("Trying to configure driver!\n"); +- status = swhv_configure( +- (struct swhv_driver_interface_msg __user *) +- local_args.in_arg, +- local_in_len); ++ status = swhv_configure((struct swhv_driver_interface_msg __user *) ++ local_args.in_arg, local_in_len); + break; + + case SWHVDRV_OPERATION_VERSION: +@@ -202,7 +196,8 @@ static long handle_ioctl(unsigned int ioctl_num, + + case SWHVDRV_OPERATION_CLOCK: + pw_pr_debug("Trying to get hypervisor type!\n"); +- status = swhv_get_clock((u32 __user *)local_args.in_arg, ++ status = swhv_get_clock((u32 __user *) ++ local_args.in_arg, + (u64 __user *)local_args.out_arg); + break; + +@@ -218,14 +213,14 @@ static long handle_ioctl(unsigned int ioctl_num, + + case SWHVDRV_OPERATION_HYPERVISOR_TYPE: + pw_pr_debug("Trying to get hypervisor type!\n"); +- status = 
swhv_get_hypervisor_type( +- (u32 __user *)local_args.out_arg); ++ status = swhv_get_hypervisor_type((u32 __user *) ++ local_args.out_arg); + break; + + case SWHVDRV_OPERATION_MSR_READ: + pw_pr_debug("Trying to do MSR read!\n"); + status = swhv_msr_read((u32 __user *)local_args.in_arg, +- (u64 __user *)local_args.out_arg); ++ (u64 __user *)local_args.out_arg); + break; + case SWHVDRV_OPERATION_POLL: + pw_pr_debug("Polling tick!\n"); +@@ -236,42 +231,43 @@ static long handle_ioctl(unsigned int ioctl_num, + } + + static long device_unlocked_ioctl(struct file *filep, unsigned int ioctl_num, +- unsigned long ioctl_param) ++ unsigned long ioctl_param) + { + return handle_ioctl(_IOC_NR(ioctl_num), +- (struct spdrv_ioctl_arg __user *)ioctl_param); ++ (struct spdrv_ioctl_arg __user *)ioctl_param); + }; + ++ + #if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64) +-static long device_compat_ioctl(struct file *file, unsigned int ioctl_num, +- unsigned long ioctl_param) ++static long device_compat_ioctl(struct file *file, ++ unsigned int ioctl_num, unsigned long ioctl_param) + { + struct spdrv_ioctl_arg32 __user *remote_args32 = +- compat_ptr(ioctl_param); ++ compat_ptr(ioctl_param); + struct spdrv_ioctl_arg __user *remote_args = +- compat_alloc_user_space(sizeof(*remote_args)); ++ compat_alloc_user_space(sizeof(*remote_args)); + int tmp; + u32 data; + +- if (!remote_args) { ++ if (!remote_args) + return -1; +- } ++ + if (get_user(tmp, &remote_args32->in_len) || +- put_user(tmp, &remote_args->in_len)) { ++ put_user(tmp, &remote_args->in_len)) + return -1; +- } ++ + if (get_user(tmp, &remote_args32->out_len) || +- put_user(tmp, &remote_args->out_len)) { ++ put_user(tmp, &remote_args->out_len)) + return -1; +- } ++ + if (get_user(data, &remote_args32->in_arg) || +- put_user(compat_ptr(data), &remote_args->in_arg)) { ++ put_user(compat_ptr(data), &remote_args->in_arg)) + return -1; +- } ++ + if (get_user(data, &remote_args32->out_arg) || +- put_user(compat_ptr(data), 
&remote_args->out_arg)) { ++ put_user(compat_ptr(data), &remote_args->out_arg)) + return -1; +- } ++ + return handle_ioctl(_IOC_NR(ioctl_num), remote_args); + }; + #endif /* COMPAT && x64 */ +@@ -281,11 +277,11 @@ static int device_open(struct inode *inode, struct file *file) + return device_open_i(inode, file); + } + +-static ssize_t +-device_read(struct file *file, /* see include/linux/fs.h */ +- char __user *buffer, /* buffer to be filled with data */ +- size_t length, /* length of the buffer */ +- loff_t *offset) ++static ssize_t device_read(struct file *file, /* see include/linux/fs.h */ ++ char __user *buffer, /* buffer to be filled with data */ ++ size_t length, /* length of the buffer */ ++ ++ loff_t *offset) + { + return device_read_i(file, buffer, length, offset); + } +@@ -338,7 +334,7 @@ int __init swhv_load_driver(void) + } + sp_cdev->owner = THIS_MODULE; + sp_cdev->ops = &s_fops; +- if (cdev_add(sp_cdev, sp_dev, 1) < 0) { ++ if (cdev_add(sp_cdev, sp_dev, 1) < 0) { + error = -1; + pw_pr_error("Error registering device driver\n"); + goto cleanup_return_error; +-- +2.17.1 + diff --git a/patches/0012-Revert-mfd-intel-lpss-Make-driver-probe-asynchronous.lpss b/patches/0012-Revert-mfd-intel-lpss-Make-driver-probe-asynchronous.lpss new file mode 100644 index 0000000000..399caaea66 --- /dev/null +++ b/patches/0012-Revert-mfd-intel-lpss-Make-driver-probe-asynchronous.lpss @@ -0,0 +1,39 @@ +From fe05e42e66452b9f0b90f1bdb86399253737d565 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Wed, 21 Aug 2019 11:35:49 +0300 +Subject: [PATCH 12/40] Revert "mfd: intel-lpss: Make driver probe + asynchronous" + +This reverts commit a1ac3e09a100cfd365252f59037011e18a56eefc. 
+--- + drivers/mfd/intel-lpss-acpi.c | 1 - + drivers/mfd/intel-lpss-pci.c | 1 - + 2 files changed, 2 deletions(-) + +diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c +index dea828ba395b..c8fe334b5fe8 100644 +--- a/drivers/mfd/intel-lpss-acpi.c ++++ b/drivers/mfd/intel-lpss-acpi.c +@@ -138,7 +138,6 @@ static struct platform_driver intel_lpss_acpi_driver = { + .name = "intel-lpss", + .acpi_match_table = intel_lpss_acpi_ids, + .pm = &intel_lpss_acpi_pm_ops, +- .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + }; + +diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c +index 2f3cf3be06b2..9355db29d2f9 100644 +--- a/drivers/mfd/intel-lpss-pci.c ++++ b/drivers/mfd/intel-lpss-pci.c +@@ -322,7 +322,6 @@ static struct pci_driver intel_lpss_pci_driver = { + .remove = intel_lpss_pci_remove, + .driver = { + .pm = &intel_lpss_pci_pm_ops, +- .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + }; + +-- +2.17.1 + diff --git a/patches/0012-drm-panfrost-Add-errata-descriptions-from-kbase.drm b/patches/0012-drm-panfrost-Add-errata-descriptions-from-kbase.drm new file mode 100644 index 0000000000..304d5ce544 --- /dev/null +++ b/patches/0012-drm-panfrost-Add-errata-descriptions-from-kbase.drm @@ -0,0 +1,152 @@ +From 4103a4c912eb330aa01201fb01bd2c39915b27a1 Mon Sep 17 00:00:00 2001 +From: Alyssa Rosenzweig +Date: Fri, 23 Aug 2019 08:51:49 -0700 +Subject: [PATCH 012/690] drm/panfrost: Add errata descriptions from kbase + +While newer kbase include only the numbers of errata, older kbase +releases included one-line descriptions for each errata, which is useful +for those working on the driver. Import these descriptions. Most are +from kbase verbatim; a few I edited for clarity. + +v2: Wrote a description for the workaround of an issue whose cause is +still unknown (Stephen). 
Errata which pertain to newer models +unsupported by the mainline driver, for which Arm has not yet released +errata information, have been removed from the issue list as the kernel +need not concern itself with these. + +v3: Readded errata not yet handled, adding descriptions based on the +workarounds in the latest kbase release. + +Signed-off-by: Alyssa Rosenzweig +Signed-off-by: Rob Herring +Link: https://patchwork.freedesktop.org/patch/msgid/20190823155149.7272-1-alyssa.rosenzweig@collabora.com +--- + drivers/gpu/drm/panfrost/panfrost_issues.h | 81 ++++++++++++++++++++++ + 1 file changed, 81 insertions(+) + +diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h +index cec6dcdadb5c..8e59d765bf19 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_issues.h ++++ b/drivers/gpu/drm/panfrost/panfrost_issues.h +@@ -13,37 +13,118 @@ + * to care about. + */ + enum panfrost_hw_issue { ++ /* Need way to guarantee that all previously-translated memory accesses ++ * are commited */ + HW_ISSUE_6367, ++ ++ /* On job complete with non-done the cache is not flushed */ + HW_ISSUE_6787, ++ ++ /* Write of PRFCNT_CONFIG_MODE_MANUAL to PRFCNT_CONFIG causes a ++ * instrumentation dump if PRFCNT_TILER_EN is enabled */ + HW_ISSUE_8186, ++ ++ /* TIB: Reports faults from a vtile which has not yet been allocated */ + HW_ISSUE_8245, ++ ++ /* uTLB deadlock could occur when writing to an invalid page at the ++ * same time as access to a valid page in the same uTLB cache line ( == ++ * 4 PTEs == 16K block of mapping) */ + HW_ISSUE_8316, ++ ++ /* HT: TERMINATE for RUN command ignored if previous LOAD_DESCRIPTOR is ++ * still executing */ + HW_ISSUE_8394, ++ ++ /* CSE: Sends a TERMINATED response for a task that should not be ++ * terminated */ + HW_ISSUE_8401, ++ ++ /* Repeatedly Soft-stopping a job chain consisting of (Vertex Shader, ++ * Cache Flush, Tiler) jobs causes DATA_INVALID_FAULT on tiler job. 
*/ + HW_ISSUE_8408, ++ ++ /* Disable the Pause Buffer in the LS pipe. */ + HW_ISSUE_8443, ++ ++ /* Change in RMUs in use causes problems related with the core's SDC */ + HW_ISSUE_8987, ++ ++ /* Compute endpoint has a 4-deep queue of tasks, meaning a soft stop ++ * won't complete until all 4 tasks have completed */ + HW_ISSUE_9435, ++ ++ /* HT: Tiler returns TERMINATED for non-terminated command */ + HW_ISSUE_9510, ++ ++ /* Occasionally the GPU will issue multiple page faults for the same ++ * address before the MMU page table has been read by the GPU */ + HW_ISSUE_9630, ++ ++ /* RA DCD load request to SDC returns invalid load ignore causing ++ * colour buffer mismatch */ + HW_ISSUE_10327, ++ ++ /* MMU TLB invalidation hazards */ + HW_ISSUE_10649, ++ ++ /* Missing cache flush in multi core-group configuration */ + HW_ISSUE_10676, ++ ++ /* Chicken bit on T72X for a hardware workaround in compiler */ + HW_ISSUE_10797, ++ ++ /* Soft-stopping fragment jobs might fail with TILE_RANGE_FAULT */ + HW_ISSUE_10817, ++ ++ /* Intermittent missing interrupt on job completion */ + HW_ISSUE_10883, ++ ++ /* Soft-stopping fragment jobs might fail with TILE_RANGE_ERROR ++ * (similar to issue 10817) and can use #10817 workaround */ + HW_ISSUE_10959, ++ ++ /* Soft-stopped fragment shader job can restart with out-of-bound ++ * restart index */ + HW_ISSUE_10969, ++ ++ /* Race condition can cause tile list corruption */ + HW_ISSUE_11020, ++ ++ /* Write buffer can cause tile list corruption */ + HW_ISSUE_11024, ++ ++ /* Pause buffer can cause a fragment job hang */ + HW_ISSUE_11035, ++ ++ /* Dynamic Core Scaling not supported due to errata */ + HW_ISSUE_11056, ++ ++ /* Clear encoder state for a hard stopped fragment job which is AFBC ++ * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and ++ * r0p1_50rel0 */ + HW_ISSUE_T76X_3542, ++ ++ /* Keep tiler module clock on to prevent GPU stall */ + HW_ISSUE_T76X_3953, ++ ++ /* Must ensure L2 is not transitioning when we reset. 
Workaround with a ++ * busy wait until L2 completes transition; ensure there is a maximum ++ * loop count as she may never complete her transition. (On chips ++ * without this errata, it's totally okay if L2 transitions.) */ + HW_ISSUE_TMIX_8463, ++ ++ /* Don't set SC_LS_ATTR_CHECK_DISABLE/SC_LS_ALLOW_ATTR_TYPES */ + GPUCORE_1619, ++ ++ /* When a hard-stop follows close after a soft-stop, the completion ++ * code for the terminated job may be incorrectly set to STOPPED */ + HW_ISSUE_TMIX_8438, ++ ++ /* "Protected mode" is buggy on Mali-G31 some Bifrost chips, so the ++ * kernel must fiddle with L2 caches to prevent data leakage */ + HW_ISSUE_TGOX_R1_1234, ++ + HW_ISSUE_END + }; + +-- +2.17.1 + diff --git a/patches/0012-ishtp-ACPI-ID-change-for-Intel-ishtp-eclite-drive.core-ehl b/patches/0012-ishtp-ACPI-ID-change-for-Intel-ishtp-eclite-drive.core-ehl new file mode 100644 index 0000000000..f87100fce3 --- /dev/null +++ b/patches/0012-ishtp-ACPI-ID-change-for-Intel-ishtp-eclite-drive.core-ehl @@ -0,0 +1,29 @@ +From a1b8998e34ad7f4e41bb0e2b9d165ee8212f2efc Mon Sep 17 00:00:00 2001 +From: "K Naduvalath, Sumesh" +Date: Sat, 28 Sep 2019 12:34:32 +0530 +Subject: [PATCH 12/12] ishtp: ACPI ID change for Intel ishtp eclite driver + +This fix will replace old ACPI ID with new EHL ACPI ID. 
+ +Change-Id: If727a9efb8575ff3dadff9321f8c813a24d34a7c +Signed-off-by: K Naduvalath, Sumesh +--- + drivers/platform/x86/intel_ishtp_eclite.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/platform/x86/intel_ishtp_eclite.c b/drivers/platform/x86/intel_ishtp_eclite.c +index f2fbf768731b..ce2a9d6ca659 100644 +--- a/drivers/platform/x86/intel_ishtp_eclite.c ++++ b/drivers/platform/x86/intel_ishtp_eclite.c +@@ -233,7 +233,7 @@ static int acpi_opregion_init(struct ishtp_opregion_dev *opr_dev) + struct acpi_device *adev; + + /* find ECLite device and install opregion handlers */ +- adev = acpi_dev_get_first_match_dev("INT3538", NULL, -1); ++ adev = acpi_dev_get_first_match_dev("INTC1035", NULL, -1); + if (!adev) { + dev_err(cl_data_to_dev(opr_dev), "eclite ACPI device not found\n"); + return -ENODEV; +-- +2.17.1 + diff --git a/patches/0012-mmc-block-register-RPMB-partition-with-the-RPMB-s.security b/patches/0012-mmc-block-register-RPMB-partition-with-the-RPMB-s.security new file mode 100644 index 0000000000..981458b659 --- /dev/null +++ b/patches/0012-mmc-block-register-RPMB-partition-with-the-RPMB-s.security @@ -0,0 +1,316 @@ +From fce8108b706a8cd1106bec00b03b423307b3b932 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Wed, 28 Jan 2015 17:01:34 +0200 +Subject: [PATCH 12/65] mmc: block: register RPMB partition with the RPMB + subsystem + +Register eMMC RPMB partition with the RPMB subsystem and provide +implementation for the RPMB access operations abstracting +actual multi step process. + +V2: resend +V3: commit message fix +V4: Kconfig: use select RPMB to ensure valid configuration + Switch back to main area after RPMB access +V5: Revamp code using new sequence command + Support for 8K packets in e.MMC v5.1 +V6: Resend. +V7: Resend. +V8: Rebase after block.c was moved under core/ + Rebase for 4.14 +V9: Rebase for 4.16 and 4.17 + Build RPMB connection above ioctl layer + Supply RPMB capabilities. 
+ +Change-Id: I6de67f475ef738e30dc3b8c78185a1bee24595b2 +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +Tested-by: Avri Altman +--- + drivers/mmc/core/Kconfig | 1 + + drivers/mmc/core/block.c | 222 ++++++++++++++++++++++++++++++++++++++- + 2 files changed, 220 insertions(+), 3 deletions(-) + +diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig +index c12fe13e4b14..4a4cd850cae0 100644 +--- a/drivers/mmc/core/Kconfig ++++ b/drivers/mmc/core/Kconfig +@@ -37,6 +37,7 @@ config PWRSEQ_SIMPLE + config MMC_BLOCK + tristate "MMC block device driver" + depends on BLOCK ++ select RPMB + default y + help + Say Y here to enable the MMC block device driver support. +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c +index 2c71a434c915..ce52edd884f0 100644 +--- a/drivers/mmc/core/block.c ++++ b/drivers/mmc/core/block.c +@@ -44,6 +44,7 @@ + #include + #include + #include ++#include + + #include + +@@ -408,8 +409,8 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, + return 0; + } + +-static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, +- u32 retries_max) ++static int ioctl_mmc_blk_rpmb_status_poll(struct mmc_card *card, u32 *status, ++ u32 retries_max) + { + int err; + u32 retry_count = 0; +@@ -616,7 +617,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, + * Ensure RPMB command has completed by polling CMD13 + * "Send Status". + */ +- err = ioctl_rpmb_card_status_poll(card, &status, 5); ++ err = ioctl_mmc_blk_rpmb_status_poll(card, &status, 5); + if (err) + dev_err(mmc_dev(card->host), + "%s: Card Status=0x%08X, error %d\n", +@@ -1120,6 +1121,217 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) + blk_mq_end_request(req, ret ? 
BLK_STS_IOERR : BLK_STS_OK); + } + ++static int mmc_blk_rpmb_process(struct mmc_blk_data *md, ++ struct mmc_blk_ioc_data *idata[], ++ u64 num_of_cmds) ++{ ++ struct mmc_card *card; ++ struct mmc_queue *mq; ++ int err = 0; ++ struct request *req; ++ int op_mode; ++ ++ card = md->queue.card; ++ if (IS_ERR(card)) { ++ err = PTR_ERR(card); ++ goto cmd_err; ++ } ++ ++ /* ++ * Dispatch the ioctl()s into the block request queue. ++ */ ++ mq = &md->queue; ++ op_mode = idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, ++ req = blk_get_request(mq->queue, op_mode, 0); ++ if (IS_ERR(req)) { ++ err = PTR_ERR(req); ++ goto cmd_err; ++ } ++ ++ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL_RPMB; ++ req_to_mmc_queue_req(req)->drv_op_data = idata; ++ req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; ++ ++ blk_execute_rq(mq->queue, NULL, req, 0); ++ ++ err = req_to_mmc_queue_req(req)->drv_op_result; ++ ++ blk_put_request(req); ++ ++cmd_err: ++ return err; ++} ++ ++static ++struct mmc_blk_ioc_data *mmc_blk_rpmb_cmd_to_ioc_data(struct rpmb_cmd *cmd) ++{ ++ struct mmc_blk_ioc_data *idata; ++ int err; ++ ++ idata = kzalloc(sizeof(*idata), GFP_KERNEL); ++ if (!idata) { ++ err = -ENOMEM; ++ goto out; ++ } ++ ++ if (cmd->flags & RPMB_F_WRITE) { ++ idata->ic.opcode = MMC_WRITE_MULTIPLE_BLOCK; ++ idata->ic.write_flag = 1; ++ if (cmd->flags & RPMB_F_REL_WRITE) ++ idata->ic.write_flag |= 1 << 31; ++ } else { ++ idata->ic.opcode = MMC_READ_MULTIPLE_BLOCK; ++ } ++ ++ /* nframes == 0 in case there is only meta data in the frame */ ++ idata->ic.blocks = cmd->nframes ?: 1; ++ idata->ic.blksz = 512; ++ ++ idata->buf_bytes = (u64)idata->ic.blksz * idata->ic.blocks; ++ if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { ++ err = -EOVERFLOW; ++ goto out; ++ } ++ ++ idata->buf = (unsigned char *)cmd->frames; ++ ++ return idata; ++out: ++ kfree(idata); ++ return ERR_PTR(err); ++} ++ ++static int mmc_blk_rpmb_cmd_seq(struct device *dev, u8 target, ++ struct rpmb_cmd *cmds, ++ u32 num_of_cmds) ++{ 
++ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); ++ struct mmc_blk_ioc_data **idata; ++ int err = 0; ++ u32 i; ++ ++ if (!rpmb) ++ return -ENODEV; ++ ++ idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL); ++ if (!idata) ++ return -ENOMEM; ++ ++ for (i = 0; i < num_of_cmds; i++) { ++ idata[i] = mmc_blk_rpmb_cmd_to_ioc_data(&cmds[i]); ++ if (IS_ERR(idata[i])) { ++ err = PTR_ERR(idata[i]); ++ num_of_cmds = i; ++ goto cmd_err; ++ } ++ idata[i]->rpmb = rpmb; ++ } ++ ++ get_device(&rpmb->dev); ++ mmc_blk_get(rpmb->md->disk); ++ ++ err = mmc_blk_rpmb_process(rpmb->md, idata, num_of_cmds); ++ ++cmd_err: ++ for (i = 0; i < num_of_cmds; i++) ++ kfree(idata[i]); ++ ++ kfree(idata); ++ ++ put_device(&rpmb->dev); ++ mmc_blk_put(rpmb->md); ++ ++ return err; ++} ++ ++static int mmc_blk_rpmb_get_capacity(struct device *dev, u8 target) ++{ ++ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); ++ struct mmc_card *card; ++ ++ card = rpmb->md->queue.card; ++ return card->ext_csd.raw_rpmb_size_mult; ++} ++ ++static struct rpmb_ops mmc_rpmb_dev_ops = { ++ .cmd_seq = mmc_blk_rpmb_cmd_seq, ++ .get_capacity = mmc_blk_rpmb_get_capacity, ++ .type = RPMB_TYPE_EMMC, ++ .auth_method = RPMB_HMAC_ALGO_SHA_256, ++}; ++ ++static void mmc_blk_rpmb_unset_dev_id(struct rpmb_ops *ops) ++{ ++ kfree(ops->dev_id); ++ ops->dev_id = NULL; ++} ++ ++static int mmc_blk_rpmb_set_dev_id(struct rpmb_ops *ops, struct mmc_card *card) ++{ ++ char *id; ++ ++ id = kmalloc(sizeof(card->raw_cid), GFP_KERNEL); ++ if (!id) ++ return -ENOMEM; ++ ++ memcpy(id, card->raw_cid, sizeof(card->raw_cid)); ++ ops->dev_id = id; ++ ops->dev_id_len = sizeof(card->raw_cid); ++ ++ return 0; ++} ++ ++static void mmc_blk_rpmb_set_cap(struct rpmb_ops *ops, ++ struct mmc_card *card) ++{ ++ u16 rel_wr_cnt; ++ ++ /* RPMB blocks are written in half sectors hence '* 2' */ ++ rel_wr_cnt = card->ext_csd.rel_sectors * 2; ++ /* eMMC 5.1 may support RPMB 8K (32) frames */ ++ if (card->ext_csd.rev >= 8) { ++ if (card->ext_csd.rel_param & 
EXT_CSD_WR_REL_PARAM_EN) ++ rel_wr_cnt = 32; ++ else ++ rel_wr_cnt = 2; ++ } ++ ops->wr_cnt_max = rel_wr_cnt; ++ ops->rd_cnt_max = card->host->max_blk_count; ++ ops->block_size = 1; /* 256B */ ++} ++ ++static void mmc_blk_rpmb_add(struct mmc_card *card) ++{ ++ struct mmc_blk_data *md = dev_get_drvdata(&card->dev); ++ struct rpmb_dev *rdev; ++ struct mmc_rpmb_data *rpmb; ++ u8 i = 0; ++ ++ mmc_blk_rpmb_set_dev_id(&mmc_rpmb_dev_ops, card); ++ mmc_blk_rpmb_set_cap(&mmc_rpmb_dev_ops, card); ++ ++ /* Add RPMB partitions */ ++ list_for_each_entry(rpmb, &md->rpmbs, node) { ++ rdev = rpmb_dev_register(&rpmb->dev, i++, &mmc_rpmb_dev_ops); ++ if (IS_ERR(rdev)) { ++ pr_warn("%s: cannot register to rpmb %ld\n", ++ dev_name(&rpmb->dev), PTR_ERR(rdev)); ++ } ++ } ++} ++ ++static void mmc_blk_rpmb_remove(struct mmc_card *card) ++{ ++ struct mmc_blk_data *md = dev_get_drvdata(&card->dev); ++ struct mmc_rpmb_data *rpmb; ++ u8 i = 0; ++ ++ list_for_each_entry(rpmb, &md->rpmbs, node) ++ rpmb_dev_unregister_by_device(&rpmb->dev, i++); ++ ++ mmc_blk_rpmb_unset_dev_id(&mmc_rpmb_dev_ops); ++} ++ + static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) + { + struct mmc_blk_data *md = mq->blkdata; +@@ -2943,6 +3155,9 @@ static int mmc_blk_probe(struct mmc_card *card) + goto out; + } + ++ /* Add rpmb layer */ ++ mmc_blk_rpmb_add(card); ++ + /* Add two debugfs entries */ + mmc_blk_add_debugfs(card, md); + +@@ -2971,6 +3186,7 @@ static void mmc_blk_remove(struct mmc_card *card) + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + + mmc_blk_remove_debugfs(card, md); ++ mmc_blk_rpmb_remove(card); + mmc_blk_remove_parts(card, md); + pm_runtime_get_sync(&card->dev); + if (md->part_curr != md->part_type) { +-- +2.17.1 + diff --git a/patches/0012-net-stmmac-add-number-of-trailing-clk-for-mdi.connectivity b/patches/0012-net-stmmac-add-number-of-trailing-clk-for-mdi.connectivity new file mode 100644 index 0000000000..33b976ae02 --- /dev/null +++ 
b/patches/0012-net-stmmac-add-number-of-trailing-clk-for-mdi.connectivity @@ -0,0 +1,64 @@ +From d5cff2d0009cbc225b715e4f880f1ff742c977c1 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Thu, 31 Jan 2019 21:57:25 +0800 +Subject: [PATCH 012/108] net: stmmac: add number of trailing clk for mdio_read + +To controls the number of trailing clock cycles generated +after the end of transmission of MDIO frame. The valid values +can be from 0 to 7. + +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2 ++ + drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 1 + + include/linux/stmmac.h | 1 + + 3 files changed, 4 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +index 40c42637ad75..a1189bfcf848 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +@@ -28,6 +28,7 @@ + + /* GMAC4 defines */ + #define MII_GMAC4_GOC_SHIFT 2 ++#define MII_GMAC4_NTC_SHIFT 12 + #define MII_GMAC4_REG_ADDR_SHIFT 16 + #define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) + #define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) +@@ -162,6 +163,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) + int data = 0; + u32 v; + ++ value |= (priv->plat->clk_trail_n << MII_GMAC4_NTC_SHIFT); + value |= (phyaddr << priv->hw->mii.addr_shift) + & priv->hw->mii.addr_mask; + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 3899a2e4154b..92d090a17afd 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -115,6 +115,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + int i; + + plat->clk_csr = 5; ++ plat->clk_trail_n = 2; + plat->has_gmac = 0; + plat->has_gmac4 = 
1; + plat->force_sf_dma_mode = 0; +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index dc60d03c4b60..848dbe22d648 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -139,6 +139,7 @@ struct plat_stmmacenet_data { + struct stmmac_dma_cfg *dma_cfg; + int clk_csr; + int has_gmac; ++ int clk_trail_n; + int enh_desc; + int tx_coe; + int rx_coe; +-- +2.17.1 + diff --git a/patches/0012-sos-Update-the-common-head-file.acrn b/patches/0012-sos-Update-the-common-head-file.acrn new file mode 100644 index 0000000000..b52caf2fa1 --- /dev/null +++ b/patches/0012-sos-Update-the-common-head-file.acrn @@ -0,0 +1,103 @@ +From a34624eea35e193992b3987c260002c2857ede5a Mon Sep 17 00:00:00 2001 +From: Mingqiang Chi +Date: Fri, 31 Aug 2018 10:58:56 +0800 +Subject: [PATCH 012/150] sos: Update the common head file + +--remove unused data structures +--move data structure(vm_gpa2hpa) to acrn_hv_defs.h +--combine 2 data structures + vm_exit(dm) and vhm_requeset(hv sos) to vhm_request + and put it in acrn_common.h + +Change-Id: Ice1f93bf7083b08001b2dfdea257aa7d58e9e751 +Tracked-On:218445 +Signed-off-by: Mingqiang Chi +Reviewed-on: +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + include/linux/vhm/acrn_common.h | 29 ++++++----------------------- + include/linux/vhm/acrn_hv_defs.h | 5 +++++ + 2 files changed, 11 insertions(+), 23 deletions(-) + +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index fc64f4cc2cac..42e2c53e3a3a 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -62,12 +62,6 @@ + * Commmon structures for ACRN/VHM/DM + */ + +-enum irq_mode { +- IRQ_PULSE, +- IRQ_ASSERT, +- IRQ_DEASSERT, +-} __attribute__((aligned(4))); +- + /* ISA type + * inject interrut to both PIC and IOAPIC + */ +@@ -120,14 +114,6 @@ struct msr_request { + long value; + } __attribute__((aligned(8))); + +-struct cpuid_request { +- long eax_in; +- long ecx_in; +- long eax_out; +- long ebx_out; +- long 
ecx_out; +- long edx_out; +-} __attribute__((aligned(8))); + + struct mmio_request { + enum request_direction direction; +@@ -157,13 +143,15 @@ struct pci_request { + /* vhm_request are 256Bytes aligned */ + struct vhm_request { + /* offset: 0bytes - 63bytes */ +- enum request_type type; +- int reserved0[15]; +- ++ union { ++ int exitcode; ++ enum request_type type; ++ unsigned long rip; ++ int reserved0[16]; ++ }; + /* offset: 64bytes-127bytes */ + union { + struct msr_request msr_request; +- struct cpuid_request cpuid_request; + struct io_request pio_request; + struct pci_request pci_request; + struct mmio_request mmio_request; +@@ -231,11 +219,6 @@ struct acrn_nmi_entry { + unsigned long vcpuid; /* IN: -1 means vcpu0 */ + } __attribute__((aligned(8))); + +-struct vm_gpa2hpa { +- unsigned long gpa; /* IN: gpa to translation */ +- unsigned long hpa; /* OUT: -1 means invalid gpa */ +-} __attribute__((aligned(8))); +- + struct acrn_ptdev_irq { + enum irq_type type; + unsigned short virt_bdf; /* IN: Device virtual BDF# */ +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 329c38b961e5..1d21bf21c91c 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -127,4 +127,9 @@ struct vm_set_memmap { + int prot; + } __attribute__((aligned(8))); + ++struct vm_gpa2hpa { ++ unsigned long gpa; /* IN: gpa to translation */ ++ unsigned long hpa; /* OUT: -1 means invalid gpa */ ++} __attribute__((aligned(8))); ++ + #endif /* ACRN_HV_DEFS_H */ +-- +2.17.1 + diff --git a/patches/0012-trusty-Select-api-version.trusty b/patches/0012-trusty-Select-api-version.trusty new file mode 100644 index 0000000000..9d944b43d1 --- /dev/null +++ b/patches/0012-trusty-Select-api-version.trusty @@ -0,0 +1,128 @@ +From 0ef4b228323da5e66ec1b757bfe3747417329acf Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Thu, 11 Jun 2015 19:34:28 -0700 +Subject: [PATCH 12/63] trusty: Select api version + +Select api 
version in probe, and store it in trusty_state. +This enables new return codes from trusty, and will later be used +to enable a nop stdcall that does not take smc_lock. + +Change-Id: I8011325265da818725ef65f094bf820402878eb5 +--- + drivers/trusty/trusty.c | 35 +++++++++++++++++++++++++++++++++++ + include/linux/trusty/smcall.h | 19 +++++++++++++++++++ + include/linux/trusty/trusty.h | 1 + + 3 files changed, 55 insertions(+) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 16c595bf5e29..fcdbba518797 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -29,6 +29,7 @@ struct trusty_state { + struct mutex smc_lock; + struct atomic_notifier_head notifier; + char *version_str; ++ u32 api_version; + }; + + #ifdef CONFIG_ARM64 +@@ -265,6 +266,35 @@ static void trusty_init_version(struct trusty_state *s, struct device *dev) + dev_err(dev, "failed to get version: %d\n", ret); + } + ++u32 trusty_get_api_version(struct device *dev) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return s->api_version; ++} ++EXPORT_SYMBOL(trusty_get_api_version); ++ ++static int trusty_init_api_version(struct trusty_state *s, struct device *dev) ++{ ++ u32 api_version; ++ api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION, ++ TRUSTY_API_VERSION_CURRENT, 0, 0); ++ if (api_version == SM_ERR_UNDEFINED_SMC) ++ api_version = 0; ++ ++ if (api_version > TRUSTY_API_VERSION_CURRENT) { ++ dev_err(dev, "unsupported api version %u > %u\n", ++ api_version, TRUSTY_API_VERSION_CURRENT); ++ return -EINVAL; ++ } ++ ++ dev_info(dev, "selected api version: %u (requested %u)\n", ++ api_version, TRUSTY_API_VERSION_CURRENT); ++ s->api_version = api_version; ++ ++ return 0; ++} ++ + static int trusty_probe(struct platform_device *pdev) + { + int ret; +@@ -287,6 +317,10 @@ static int trusty_probe(struct platform_device *pdev) + + trusty_init_version(s, &pdev->dev); + ++ ret = trusty_init_api_version(s, &pdev->dev); ++ if (ret < 0) 
++ goto err_api_version; ++ + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add children: %d\n", ret); +@@ -296,6 +330,7 @@ static int trusty_probe(struct platform_device *pdev) + return 0; + + err_add_children: ++err_api_version: + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); + kfree(s->version_str); +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index a2be2e3579f3..cdb4eccd7bc3 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -78,6 +78,25 @@ + #define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9) + #define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10) + ++/** ++ * SMC_FC_API_VERSION - Find and select supported API version. ++ * ++ * @r1: Version supported by client. ++ * ++ * Returns version supported by trusty. ++ * ++ * If multiple versions are supported, the client should start by calling ++ * SMC_FC_API_VERSION with the largest version it supports. Trusty will then ++ * return a version it supports. If the client does not support the version ++ * returned by trusty and the version returned is less than the version ++ * requested, repeat the call with the largest supported version less than the ++ * last returned version. ++ * ++ * This call must be made before any calls that are affected by the api version. 
++ */ ++#define TRUSTY_API_VERSION_CURRENT (0) ++#define SMC_FC_API_VERSION SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 11) ++ + /* TRUSTED_OS entity calls */ + #define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20) + #define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21) +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index d084d9d68a7b..24fe2101a528 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -56,6 +56,7 @@ int trusty_call_notifier_register(struct device *dev, + int trusty_call_notifier_unregister(struct device *dev, + struct notifier_block *n); + const char *trusty_version_str_get(struct device *dev); ++u32 trusty_get_api_version(struct device *dev); + + struct ns_mem_page_info { + uint64_t attr; +-- +2.17.1 + diff --git a/patches/0012-usb-typec-ucsi-Simplified-registration-and-I-O-A.usb-typec b/patches/0012-usb-typec-ucsi-Simplified-registration-and-I-O-A.usb-typec new file mode 100644 index 0000000000..44282158d6 --- /dev/null +++ b/patches/0012-usb-typec-ucsi-Simplified-registration-and-I-O-A.usb-typec @@ -0,0 +1,551 @@ +From b7f63f8d19ccf0e6cf33c021ab7f19c3bbaac0e3 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Thu, 26 Sep 2019 12:38:25 +0300 +Subject: [PATCH 12/18] usb: typec: ucsi: Simplified registration and I/O API + +Adding more simplified API for interface registration and +read and write operations. + +The registration is split into separate creation and +registration phases. That allows the drivers to properly +initialize the interface before registering it if necessary. + +The read and write operations are supplied in a completely +separate struct ucsi_operations that is passed to the +ucsi_register() function during registration. 
The new read +and write operations will work more traditionally so that +the read callback function reads a requested amount of data +from an offset, and the write callback functions write the +given data to the offset. The drivers will have to support +both non-blocking writing and blocking writing. In blocking +writing the driver itself is responsible of waiting for the +completion event. + +The new API makes it possible for the drivers to perform +tasks also independently of the core ucsi.c, and that should +allow for example quirks to be handled completely in the +drivers without the need to touch ucsi.c. + +The old API is kept until all drivers have been converted to +the new API. + +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/ucsi/ucsi.c | 326 +++++++++++++++++++++++++++++++--- + drivers/usb/typec/ucsi/ucsi.h | 57 ++++++ + 2 files changed, 354 insertions(+), 29 deletions(-) + +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index edd722fb88b8..2ba890327b9d 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -98,6 +98,98 @@ static int ucsi_ack(struct ucsi *ucsi, u8 ack) + return ret; + } + ++static int ucsi_acknowledge_command(struct ucsi *ucsi) ++{ ++ u64 ctrl; ++ ++ ctrl = UCSI_ACK_CC_CI; ++ ctrl |= UCSI_ACK_COMMAND_COMPLETE; ++ ++ return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl)); ++} ++ ++static int ucsi_acknowledge_connector_change(struct ucsi *ucsi) ++{ ++ u64 ctrl; ++ ++ ctrl = UCSI_ACK_CC_CI; ++ ctrl |= UCSI_ACK_CONNECTOR_CHANGE; ++ ++ return ucsi->ops->async_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl)); ++} ++ ++static int ucsi_exec_command(struct ucsi *ucsi, u64 command); ++ ++static int ucsi_read_error(struct ucsi *ucsi) ++{ ++ u16 error; ++ int ret; ++ ++ /* Acknowlege the command that failed */ ++ ret = ucsi_acknowledge_command(ucsi); ++ if (ret) ++ return ret; ++ ++ ret = ucsi_exec_command(ucsi, UCSI_GET_ERROR_STATUS); ++ if (ret < 0) ++ return ret; ++ 
++ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error)); ++ if (ret) ++ return ret; ++ ++ switch (error) { ++ case UCSI_ERROR_INCOMPATIBLE_PARTNER: ++ return -EOPNOTSUPP; ++ case UCSI_ERROR_CC_COMMUNICATION_ERR: ++ return -ECOMM; ++ case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL: ++ return -EPROTO; ++ case UCSI_ERROR_DEAD_BATTERY: ++ dev_warn(ucsi->dev, "Dead battery condition!\n"); ++ return -EPERM; ++ /* The following mean a bug in this driver */ ++ case UCSI_ERROR_INVALID_CON_NUM: ++ case UCSI_ERROR_UNREGONIZED_CMD: ++ case UCSI_ERROR_INVALID_CMD_ARGUMENT: ++ dev_err(ucsi->dev, "possible UCSI driver bug (0x%x)\n", error); ++ return -EINVAL; ++ default: ++ dev_err(ucsi->dev, "%s: error without status\n", __func__); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) ++{ ++ u32 cci; ++ int ret; ++ ++ ret = ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd)); ++ if (ret) ++ return ret; ++ ++ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci)); ++ if (ret) ++ return ret; ++ ++ if (cci & UCSI_CCI_BUSY) ++ return -EBUSY; ++ ++ if (!(cci & UCSI_CCI_COMMAND_COMPLETE)) ++ return -EIO; ++ ++ if (cci & UCSI_CCI_NOT_SUPPORTED) ++ return -EOPNOTSUPP; ++ ++ if (cci & UCSI_CCI_ERROR) ++ return ucsi_read_error(ucsi); ++ ++ return UCSI_CCI_LENGTH(cci); ++} ++ + static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl, + void *data, size_t size) + { +@@ -106,6 +198,26 @@ static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl, + u16 error; + int ret; + ++ if (ucsi->ops) { ++ ret = ucsi_exec_command(ucsi, ctrl->raw_cmd); ++ if (ret < 0) ++ return ret; ++ ++ data_length = ret; ++ ++ if (data) { ++ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size); ++ if (ret) ++ return ret; ++ } ++ ++ ret = ucsi_acknowledge_command(ucsi); ++ if (ret) ++ return ret; ++ ++ return data_length; ++ } ++ + ret = ucsi_command(ucsi, ctrl); + if (ret) + goto err; +@@ -518,7 +630,7 @@ static 
void ucsi_partner_change(struct ucsi_connector *con) + ucsi_altmode_update_active(con); + } + +-static void ucsi_connector_change(struct work_struct *work) ++static void ucsi_handle_connector_change(struct work_struct *work) + { + struct ucsi_connector *con = container_of(work, struct ucsi_connector, + work); +@@ -580,7 +692,10 @@ static void ucsi_connector_change(struct work_struct *work) + if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE) + ucsi_partner_change(con); + +- ret = ucsi_ack(ucsi, UCSI_ACK_EVENT); ++ if (ucsi->ops) ++ ret = ucsi_acknowledge_connector_change(ucsi); ++ else ++ ret = ucsi_ack(ucsi, UCSI_ACK_EVENT); + if (ret) + dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret); + +@@ -591,6 +706,20 @@ static void ucsi_connector_change(struct work_struct *work) + mutex_unlock(&con->lock); + } + ++/** ++ * ucsi_connector_change - Process Connector Change Event ++ * @ucsi: UCSI Interface ++ * @num: Connector number ++ */ ++void ucsi_connector_change(struct ucsi *ucsi, u8 num) ++{ ++ struct ucsi_connector *con = &ucsi->connector[num - 1]; ++ ++ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags)) ++ schedule_work(&con->work); ++} ++EXPORT_SYMBOL_GPL(ucsi_connector_change); ++ + /** + * ucsi_notify - PPM notification handler + * @ucsi: Source UCSI Interface for the notifications +@@ -647,6 +776,39 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) + unsigned long tmo; + int ret; + ++ if (ucsi->ops) { ++ u64 command = UCSI_PPM_RESET; ++ u32 cci; ++ ++ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command, ++ sizeof(command)); ++ if (ret < 0) ++ return ret; ++ ++ tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS); ++ ++ do { ++ if (time_is_before_jiffies(tmo)) ++ return -ETIMEDOUT; ++ ++ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci)); ++ if (ret) ++ return ret; ++ ++ if (cci & ~UCSI_CCI_RESET_COMPLETE) { ++ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, ++ &command, ++ sizeof(command)); ++ if (ret < 0) ++ return ret; ++ } ++ ++ msleep(20); 
++ } while (!(cci & UCSI_CCI_RESET_COMPLETE)); ++ ++ return 0; ++ } ++ + ctrl.raw_cmd = 0; + ctrl.cmd.cmd = UCSI_PPM_RESET; + trace_ucsi_command(&ctrl); +@@ -807,7 +969,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + struct ucsi_control ctrl; + int ret; + +- INIT_WORK(&con->work, ucsi_connector_change); ++ INIT_WORK(&con->work, ucsi_handle_connector_change); + init_completion(&con->complete); + mutex_init(&con->lock); + con->num = index + 1; +@@ -898,9 +1060,14 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + return 0; + } + +-static void ucsi_init(struct work_struct *work) ++/** ++ * ucsi_init - Initialize UCSI interface ++ * @ucsi: UCSI to be initialized ++ * ++ * Registers all ports @ucsi has and enables all notification events. ++ */ ++int ucsi_init(struct ucsi *ucsi) + { +- struct ucsi *ucsi = container_of(work, struct ucsi, work); + struct ucsi_connector *con; + struct ucsi_control ctrl; + int ret; +@@ -956,7 +1123,7 @@ static void ucsi_init(struct work_struct *work) + + mutex_unlock(&ucsi->ppm_lock); + +- return; ++ return 0; + + err_unregister: + for (con = ucsi->connector; con->port; con++) { +@@ -970,49 +1137,106 @@ static void ucsi_init(struct work_struct *work) + ucsi_reset_ppm(ucsi); + err: + mutex_unlock(&ucsi->ppm_lock); +- dev_err(ucsi->dev, "PPM init failed (%d)\n", ret); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(ucsi_init); ++ ++static void ucsi_init_work(struct work_struct *work) ++{ ++ struct ucsi *ucsi = container_of(work, struct ucsi, work); ++ int ret; ++ ++ ret = ucsi_init(ucsi); ++ if (ret) ++ dev_err(ucsi->dev, "PPM init failed (%d)\n", ret); + } + + /** +- * ucsi_register_ppm - Register UCSI PPM Interface +- * @dev: Device interface to the PPM +- * @ppm: The PPM interface +- * +- * Allocates UCSI instance, associates it with @ppm and returns it to the +- * caller, and schedules initialization of the interface. 
++ * ucsi_get_drvdata - Return private driver data pointer ++ * @ucsi: UCSI interface + */ +-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm) ++void *ucsi_get_drvdata(struct ucsi *ucsi) ++{ ++ return ucsi->driver_data; ++} ++EXPORT_SYMBOL_GPL(ucsi_get_drvdata); ++ ++/** ++ * ucsi_get_drvdata - Assign private driver data pointer ++ * @ucsi: UCSI interface ++ * @data: Private data pointer ++ */ ++void ucsi_set_drvdata(struct ucsi *ucsi, void *data) ++{ ++ ucsi->driver_data = data; ++} ++EXPORT_SYMBOL_GPL(ucsi_set_drvdata); ++ ++/** ++ * ucsi_create - Allocate UCSI instance ++ * @dev: Device interface to the PPM (Platform Policy Manager) ++ * @ops: I/O routines ++ */ ++struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops) + { + struct ucsi *ucsi; + ++ if (!ops || !ops->read || !ops->sync_write || !ops->async_write) ++ return ERR_PTR(-EINVAL); ++ + ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL); + if (!ucsi) + return ERR_PTR(-ENOMEM); + +- INIT_WORK(&ucsi->work, ucsi_init); +- init_completion(&ucsi->complete); ++ INIT_WORK(&ucsi->work, ucsi_init_work); + mutex_init(&ucsi->ppm_lock); +- + ucsi->dev = dev; +- ucsi->ppm = ppm; ++ ucsi->ops = ops; ++ ++ return ucsi; ++} ++EXPORT_SYMBOL_GPL(ucsi_create); ++ ++/** ++ * ucsi_destroy - Free UCSI instance ++ * @ucsi: UCSI instance to be freed ++ */ ++void ucsi_destroy(struct ucsi *ucsi) ++{ ++ kfree(ucsi); ++} ++EXPORT_SYMBOL_GPL(ucsi_destroy); ++ ++/** ++ * ucsi_register - Register UCSI interface ++ * @ucsi: UCSI instance ++ */ ++int ucsi_register(struct ucsi *ucsi) ++{ ++ u16 version; ++ int ret; ++ ++ ret = ucsi->ops->read(ucsi, UCSI_VERSION, &version, sizeof(version)); ++ if (ret) ++ return ret; ++ ++ if (!version) ++ return -ENODEV; + +- /* +- * Communication with the PPM takes a lot of time. It is not reasonable +- * to initialize the driver here. Using a work for now. 
+- */ + queue_work(system_long_wq, &ucsi->work); + +- return ucsi; ++ return 0; + } +-EXPORT_SYMBOL_GPL(ucsi_register_ppm); ++EXPORT_SYMBOL_GPL(ucsi_register); + + /** +- * ucsi_unregister_ppm - Unregister UCSI PPM Interface +- * @ucsi: struct ucsi associated with the PPM ++ * ucsi_unregister - Unregister UCSI interface ++ * @ucsi: UCSI interface to be unregistered + * +- * Unregister UCSI PPM that was created with ucsi_register(). ++ * Unregister UCSI interface that was created with ucsi_register(). + */ +-void ucsi_unregister_ppm(struct ucsi *ucsi) ++void ucsi_unregister(struct ucsi *ucsi) + { + struct ucsi_control ctrl; + int i; +@@ -1035,7 +1259,51 @@ void ucsi_unregister_ppm(struct ucsi *ucsi) + ucsi_reset_ppm(ucsi); + + kfree(ucsi->connector); +- kfree(ucsi); ++} ++EXPORT_SYMBOL_GPL(ucsi_unregister); ++ ++/** ++ * ucsi_register_ppm - Register UCSI PPM Interface ++ * @dev: Device interface to the PPM ++ * @ppm: The PPM interface ++ * ++ * Allocates UCSI instance, associates it with @ppm and returns it to the ++ * caller, and schedules initialization of the interface. ++ */ ++struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm) ++{ ++ struct ucsi *ucsi; ++ ++ ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL); ++ if (!ucsi) ++ return ERR_PTR(-ENOMEM); ++ ++ INIT_WORK(&ucsi->work, ucsi_init_work); ++ init_completion(&ucsi->complete); ++ mutex_init(&ucsi->ppm_lock); ++ ++ ucsi->dev = dev; ++ ucsi->ppm = ppm; ++ ++ /* ++ * Communication with the PPM takes a lot of time. It is not reasonable ++ * to initialize the driver here. Using a work for now. ++ */ ++ queue_work(system_long_wq, &ucsi->work); ++ ++ return ucsi; ++} ++EXPORT_SYMBOL_GPL(ucsi_register_ppm); ++ ++/** ++ * ucsi_unregister_ppm - Unregister UCSI PPM Interface ++ * @ucsi: struct ucsi associated with the PPM ++ * ++ * Unregister UCSI PPM that was created with ucsi_register(). 
++ */ ++void ucsi_unregister_ppm(struct ucsi *ucsi) ++{ ++ ucsi_unregister(ucsi); + } + EXPORT_SYMBOL_GPL(ucsi_unregister_ppm); + +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h +index de87d0b8319d..3e9a4ba912e9 100644 +--- a/drivers/usb/typec/ucsi/ucsi.h ++++ b/drivers/usb/typec/ucsi/ucsi.h +@@ -10,6 +10,56 @@ + + /* -------------------------------------------------------------------------- */ + ++struct ucsi; ++ ++/* UCSI offsets (Bytes) */ ++#define UCSI_VERSION 0 ++#define UCSI_CCI 4 ++#define UCSI_CONTROL 8 ++#define UCSI_MESSAGE_IN 16 ++#define UCSI_MESSAGE_OUT 32 ++ ++/* Command Status and Connector Change Indication (CCI) bits */ ++#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 0)) >> 1) ++#define UCSI_CCI_LENGTH(_c_) (((_c_) & GENMASK(15, 8)) >> 8) ++#define UCSI_CCI_NOT_SUPPORTED BIT(25) ++#define UCSI_CCI_CANCEL_COMPLETE BIT(26) ++#define UCSI_CCI_RESET_COMPLETE BIT(27) ++#define UCSI_CCI_BUSY BIT(28) ++#define UCSI_CCI_ACK_COMPLETE BIT(29) ++#define UCSI_CCI_ERROR BIT(30) ++#define UCSI_CCI_COMMAND_COMPLETE BIT(31) ++ ++/** ++ * struct ucsi_operations - UCSI I/O operations ++ * @read: Read operation ++ * @sync_write: Blocking write operation ++ * @async_write: Non-blocking write operation ++ * ++ * Read and write routines for UCSI interface. @sync_write must wait for the ++ * Command Completion Event from the PPM before returning, and @async_write must ++ * return immediately after sending the data to the PPM. 
++ */ ++struct ucsi_operations { ++ int (*read)(struct ucsi *ucsi, unsigned int offset, ++ void *val, size_t val_len); ++ int (*sync_write)(struct ucsi *ucsi, unsigned int offset, ++ const void *val, size_t val_len); ++ int (*async_write)(struct ucsi *ucsi, unsigned int offset, ++ const void *val, size_t val_len); ++}; ++ ++struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops); ++void ucsi_destroy(struct ucsi *ucsi); ++int ucsi_register(struct ucsi *ucsi); ++void ucsi_unregister(struct ucsi *ucsi); ++void *ucsi_get_drvdata(struct ucsi *ucsi); ++void ucsi_set_drvdata(struct ucsi *ucsi, void *data); ++ ++void ucsi_connector_change(struct ucsi *ucsi, u8 num); ++ ++/* -------------------------------------------------------------------------- */ ++ + /* Command Status and Connector Change Indication (CCI) data structure */ + struct ucsi_cci { + u8:1; /* reserved */ +@@ -207,6 +257,10 @@ struct ucsi_control { + #define UCSI_ACK_EVENT 1 + #define UCSI_ACK_CMD 2 + ++/* Bits for ACK CC or CI */ ++#define UCSI_ACK_CONNECTOR_CHANGE BIT(16) ++#define UCSI_ACK_COMMAND_COMPLETE BIT(17) ++ + /* Bits for SET_NOTIFICATION_ENABLE command */ + #define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(0) + #define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(1) +@@ -385,6 +439,9 @@ enum ucsi_status { + struct ucsi { + struct device *dev; + struct ucsi_ppm *ppm; ++ struct driver_data *driver_data; ++ ++ const struct ucsi_operations *ops; + + enum ucsi_status status; + struct completion complete; +-- +2.17.1 + diff --git a/patches/0013-ASoC-Intel-Skylake-Inline-ipc-free-operations.audio b/patches/0013-ASoC-Intel-Skylake-Inline-ipc-free-operations.audio new file mode 100644 index 0000000000..e2f9ef2bad --- /dev/null +++ b/patches/0013-ASoC-Intel-Skylake-Inline-ipc-free-operations.audio @@ -0,0 +1,165 @@ +From badf2e6399d292a2b84b530cbb3b3e9209480825 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Sat, 23 Mar 2019 16:46:37 +0100 +Subject: [PATCH 013/193] ASoC: Intel: Skylake: 
Inline ipc free operations + +skl_ipc_free and its equivalents are simple wrappers. Let's inline them, +making room for cleanup mechanism unification. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 1 - + sound/soc/intel/skylake/cnl-sst-dsp.c | 12 +++++------- + sound/soc/intel/skylake/cnl-sst-dsp.h | 1 - + sound/soc/intel/skylake/cnl-sst.c | 1 - + sound/soc/intel/skylake/skl-sst-dsp.c | 5 ++++- + sound/soc/intel/skylake/skl-sst-ipc.c | 13 ------------- + sound/soc/intel/skylake/skl-sst-ipc.h | 1 - + sound/soc/intel/skylake/skl-sst.c | 1 - + 8 files changed, 9 insertions(+), 26 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index c0e9a05b53cb..f548523055fa 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -599,7 +599,6 @@ void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) + if (skl->dsp->fw) + release_firmware(skl->dsp->fw); + list_del_init(&skl->module_list); +- skl_ipc_free(&skl->ipc); + skl->dsp->ops->free(skl->dsp); + } + EXPORT_SYMBOL_GPL(bxt_sst_dsp_cleanup); +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.c b/sound/soc/intel/skylake/cnl-sst-dsp.c +index 3ef1b194add1..189c1c7086e3 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.c ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.c +@@ -14,9 +14,9 @@ + */ + #include + #include "../common/sst-dsp.h" +-#include "../common/sst-ipc.h" + #include "../common/sst-dsp-priv.h" + #include "cnl-sst-dsp.h" ++#include "skl.h" + + /* various timeout values */ + #define CNL_DSP_PU_TO 50 +@@ -209,10 +209,13 @@ irqreturn_t cnl_dsp_sst_interrupt(int irq, void *dev_id) + + void cnl_dsp_free(struct sst_dsp *dsp) + { ++ struct skl_dev *skl = dsp->thread_context; ++ ++ cnl_ipc_op_int_disable(dsp); ++ sst_ipc_fini(&skl->ipc); + cnl_ipc_int_disable(dsp); + + free_irq(dsp->irq, dsp); +- cnl_ipc_op_int_disable(dsp); + cnl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK); + } + 
EXPORT_SYMBOL_GPL(cnl_dsp_free); +@@ -259,8 +262,3 @@ bool cnl_ipc_int_status(struct sst_dsp *ctx) + CNL_ADSPIS_IPC; + } + +-void cnl_ipc_free(struct sst_generic_ipc *ipc) +-{ +- cnl_ipc_op_int_disable(ipc->dsp); +- sst_ipc_fini(ipc); +-} +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.h b/sound/soc/intel/skylake/cnl-sst-dsp.h +index 50f4a53a607c..b1417639bc1c 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.h ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.h +@@ -92,7 +92,6 @@ void cnl_ipc_int_disable(struct sst_dsp *ctx); + void cnl_ipc_op_int_enable(struct sst_dsp *ctx); + void cnl_ipc_op_int_disable(struct sst_dsp *ctx); + bool cnl_ipc_int_status(struct sst_dsp *ctx); +-void cnl_ipc_free(struct sst_generic_ipc *ipc); + + int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + const char *fw_name, struct skl_dsp_loader_ops dsp_ops, +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 35dae6ed6668..13821fac408d 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -469,7 +469,6 @@ void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) + release_firmware(skl->dsp->fw); + + list_del_init(&skl->module_list); +- cnl_ipc_free(&skl->ipc); + + skl->dsp->ops->free(skl->dsp); + } +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c +index 15acbe80711e..e0807db225f4 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.c ++++ b/sound/soc/intel/skylake/skl-sst-dsp.c +@@ -463,10 +463,13 @@ int skl_dsp_acquire_irq(struct sst_dsp *sst) + + void skl_dsp_free(struct sst_dsp *dsp) + { ++ struct skl_dev *skl = dsp->thread_context; ++ ++ skl_ipc_op_int_disable(dsp); ++ sst_ipc_fini(&skl->ipc); + skl_ipc_int_disable(dsp); + + free_irq(dsp->irq, dsp); +- skl_ipc_op_int_disable(dsp); + skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK); + } + EXPORT_SYMBOL_GPL(skl_dsp_free); +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c 
b/sound/soc/intel/skylake/skl-sst-ipc.c +index 4875a518dd54..2700f882103d 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.c ++++ b/sound/soc/intel/skylake/skl-sst-ipc.c +@@ -620,19 +620,6 @@ int skl_ipc_init(struct device *dev, struct skl_dev *skl) + return 0; + } + +-void skl_ipc_free(struct sst_generic_ipc *ipc) +-{ +- /* Disable IPC DONE interrupt */ +- sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL, +- SKL_ADSP_REG_HIPCCTL_DONE, 0); +- +- /* Disable IPC BUSY interrupt */ +- sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL, +- SKL_ADSP_REG_HIPCCTL_BUSY, 0); +- +- sst_ipc_fini(ipc); +-} +- + int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc, + u16 ppl_mem_size, u8 ppl_type, u8 instance_id, u8 lp_mode) + { +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h +index c54272609f0a..50fedc213837 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.h ++++ b/sound/soc/intel/skylake/skl-sst-ipc.h +@@ -392,7 +392,6 @@ void skl_ipc_op_int_disable(struct sst_dsp *ctx); + void skl_ipc_int_disable(struct sst_dsp *dsp); + + bool skl_ipc_int_status(struct sst_dsp *dsp); +-void skl_ipc_free(struct sst_generic_ipc *ipc); + int skl_ipc_init(struct device *dev, struct skl_dev *skl); + void skl_clear_module_cnt(struct sst_dsp *ctx); + +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 3553fcf9f930..d3bbdcf22941 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -619,7 +619,6 @@ void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) + release_firmware(skl->dsp->fw); + skl_clear_module_table(skl->dsp); + list_del_init(&skl->module_list); +- skl_ipc_free(&skl->ipc); + skl->dsp->ops->free(skl->dsp); + if (skl->boot_complete) { + skl->dsp->cl_dev.ops.cl_cleanup_controller(skl->dsp); +-- +2.17.1 + diff --git a/patches/0013-Make-tsc-reliable-flag-apply-to-tsc-early-cloc.felipeb-5.4 
b/patches/0013-Make-tsc-reliable-flag-apply-to-tsc-early-cloc.felipeb-5.4 new file mode 100644 index 0000000000..20a418f9b7 --- /dev/null +++ b/patches/0013-Make-tsc-reliable-flag-apply-to-tsc-early-cloc.felipeb-5.4 @@ -0,0 +1,39 @@ +From e641b3b9973810a00c105a467caec04f82664e7c Mon Sep 17 00:00:00 2001 +From: Christopher Hall +Date: Thu, 10 Oct 2019 17:08:00 -0700 +Subject: [PATCH 13/14] Make 'tsc reliable' flag apply to tsc-early clocksource + +Signed-off-by: Felipe Balbi +--- + arch/x86/kernel/tsc.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c +index 5159cdcc9f04..72253d59dd4f 100644 +--- a/arch/x86/kernel/tsc.c ++++ b/arch/x86/kernel/tsc.c +@@ -1411,9 +1411,6 @@ static int __init init_tsc_clocksource(void) + if (tsc_unstable) + goto unreg; + +- if (tsc_clocksource_reliable || no_tsc_watchdog) +- clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; +- + if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) + clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; + +@@ -1522,6 +1519,11 @@ void __init tsc_init(void) + return; + } + ++ if (tsc_clocksource_reliable || no_tsc_watchdog) { ++ clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; ++ clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY; ++ } ++ + if (!tsc_khz) { + /* We failed to determine frequencies earlier, try again */ + if (!determine_cpu_tsc_frequencies(false)) { +-- +2.17.1 + diff --git a/patches/0013-Revert-FIXUP-mfd-intel-lpss-Probe-UART-devices-synchr.lpss b/patches/0013-Revert-FIXUP-mfd-intel-lpss-Probe-UART-devices-synchr.lpss new file mode 100644 index 0000000000..78c0481024 --- /dev/null +++ b/patches/0013-Revert-FIXUP-mfd-intel-lpss-Probe-UART-devices-synchr.lpss @@ -0,0 +1,60 @@ +From 2125fcff50da06d939852896fa3df02bdc071222 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Wed, 21 Aug 2019 11:35:49 +0300 +Subject: [PATCH 13/40] Revert "FIXUP: mfd: intel-lpss: Probe UART devices + synchronously" + +This reverts 
commit eda87dfede0c3dc549347a1d96b9e7470010473d. +--- + drivers/mfd/intel-lpss.c | 20 +++----------------- + 1 file changed, 3 insertions(+), 17 deletions(-) + +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c +index 557ee9d96579..e8eb3ce98d30 100644 +--- a/drivers/mfd/intel-lpss.c ++++ b/drivers/mfd/intel-lpss.c +@@ -372,8 +372,9 @@ static void intel_lpss_unregister_clock(struct intel_lpss *lpss) + intel_lpss_unregister_clock_tree(lpss->clk); + } + +-static int intel_lpss_add_devices(struct intel_lpss *lpss) ++static void intel_lpss_async_add_devices(void *_lpss, async_cookie_t cookie) + { ++ struct intel_lpss *lpss = _lpss; + int ret; + + if (intel_lpss_has_idma(lpss)) { +@@ -392,12 +393,6 @@ static int intel_lpss_add_devices(struct intel_lpss *lpss) + intel_lpss_ltr_hide(lpss); + intel_lpss_unregister_clock(lpss); + } +- return ret; +-} +- +-static void intel_lpss_async_add_devices(void *lpss, async_cookie_t cookie) +-{ +- intel_lpss_add_devices(lpss); + } + + int intel_lpss_probe(struct device *dev, +@@ -446,16 +441,7 @@ int intel_lpss_probe(struct device *dev, + if (ret) + dev_warn(dev, "Failed to create debugfs entries\n"); + +- /* +- * Probe UART devices synchronously to avoid serial interface +- * enumeration unpredictability. 
+- */ +- if (lpss->type == LPSS_DEV_UART) { +- ret = intel_lpss_add_devices(lpss); +- if (ret) +- goto err_clk_register; +- } else +- async_schedule(intel_lpss_async_add_devices, lpss); ++ async_schedule(intel_lpss_async_add_devices, lpss); + + dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); + +-- +2.17.1 + diff --git a/patches/0013-drm-i915-Use-variable-for-debugfs-device-status.drm b/patches/0013-drm-i915-Use-variable-for-debugfs-device-status.drm new file mode 100644 index 0000000000..08e7c884b7 --- /dev/null +++ b/patches/0013-drm-i915-Use-variable-for-debugfs-device-status.drm @@ -0,0 +1,93 @@ +From 2d6208830fef1b4eb69505bb1abd167c6df3bc91 Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:02:57 -0700 +Subject: [PATCH 013/690] drm/i915: Use variable for debugfs device status + +Use a local variable to find SSEU runtime information +in various debugfs functions. + +v2: Remove extra line breaks per feedback from Chris + +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-2-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/i915_debugfs.c | 26 +++++++++++--------------- + 1 file changed, 11 insertions(+), 15 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index e103fcba6435..806db87affb2 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -3849,8 +3849,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, + sseu->slice_mask |= BIT(s); + + if (IS_GEN9_BC(dev_priv)) +- sseu->subslice_mask[s] = +- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; ++ sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; + + for (ss = 0; ss < info->sseu.max_subslices; ss++) { + unsigned int eu_cnt; +@@ -3877,25 +3876,22 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, + static void 
broadwell_sseu_device_status(struct drm_i915_private *dev_priv, + struct sseu_dev_info *sseu) + { ++ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); + u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); + int s; + + sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; + + if (sseu->slice_mask) { +- sseu->eu_per_subslice = +- RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice; +- for (s = 0; s < fls(sseu->slice_mask); s++) { +- sseu->subslice_mask[s] = +- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; +- } ++ sseu->eu_per_subslice = info->sseu.eu_per_subslice; ++ for (s = 0; s < fls(sseu->slice_mask); s++) ++ sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; + sseu->eu_total = sseu->eu_per_subslice * + intel_sseu_subslice_total(sseu); + + /* subtract fused off EU(s) from enabled slice(s) */ + for (s = 0; s < fls(sseu->slice_mask); s++) { +- u8 subslice_7eu = +- RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s]; ++ u8 subslice_7eu = info->sseu.subslice_7eu[s]; + + sseu->eu_total -= hweight8(subslice_7eu); + } +@@ -3942,6 +3938,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, + static int i915_sseu_status(struct seq_file *m, void *unused) + { + struct drm_i915_private *dev_priv = node_to_i915(m->private); ++ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); + struct sseu_dev_info sseu; + intel_wakeref_t wakeref; + +@@ -3949,14 +3946,13 @@ static int i915_sseu_status(struct seq_file *m, void *unused) + return -ENODEV; + + seq_puts(m, "SSEU Device Info\n"); +- i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu); ++ i915_print_sseu_info(m, true, &info->sseu); + + seq_puts(m, "SSEU Device Status\n"); + memset(&sseu, 0, sizeof(sseu)); +- sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices; +- sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices; +- sseu.max_eus_per_subslice = +- RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; ++ sseu.max_slices = info->sseu.max_slices; ++ sseu.max_subslices = 
info->sseu.max_subslices; ++ sseu.max_eus_per_subslice = info->sseu.max_eus_per_subslice; + + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { + if (IS_CHERRYVIEW(dev_priv)) +-- +2.17.1 + diff --git a/patches/0013-net-stmmac-Add-support-for-MDIO-interrupts.connectivity b/patches/0013-net-stmmac-Add-support-for-MDIO-interrupts.connectivity new file mode 100644 index 0000000000..49176e8e39 --- /dev/null +++ b/patches/0013-net-stmmac-Add-support-for-MDIO-interrupts.connectivity @@ -0,0 +1,258 @@ +From ef713b4a2980fcf379ecc316d61be459f49739b8 Mon Sep 17 00:00:00 2001 +From: "Chuah, Kim Tatt" +Date: Wed, 18 Jul 2018 08:55:08 +0800 +Subject: [PATCH 013/108] net: stmmac: Add support for MDIO interrupts + +DW EQoS v5.xx controllers added capability for interrupt generation +when MDIO interface is done (GMII Busy bit is cleared). +This patch adds support for this interrupt on supported HW to avoid +polling on GMII Busy bit. + +stmmac_mdio_read() & stmmac_mdio_write() will sleep until wake_up() is +called by the interrupt handler. 
+ +Reviewed-by: Voon Weifeng +Reviewed-by: Kweh, Hock Leong +Reviewed-by: Ong Boon Leong +Signed-off-by: Chuah, Kim Tatt +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 2 + + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1 + + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 6 +++ + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 3 ++ + drivers/net/ethernet/stmicro/stmmac/hwif.c | 12 +++++ + .../net/ethernet/stmicro/stmmac/stmmac_mdio.c | 48 +++++++++++++++---- + 6 files changed, 64 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index 912bbb6515b2..33c9f3aa10e4 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -450,6 +450,8 @@ struct mac_device_info { + unsigned int pcs; + unsigned int pmt; + unsigned int ps; ++ bool mdio_intr_en; ++ wait_queue_head_t mdio_busy_wait; + }; + + struct stmmac_rx_routing { +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +index bd3e75b47613..b960d14014f7 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -125,6 +125,7 @@ enum dwmac4_irq_status { + mmc_irq = 0x00000100, + lpi_irq = 0x00000020, + pmt_irq = 0x00000010, ++ mdio_irq = 0x00040000, + }; + + /* MAC PMT bitmap */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 422fd1c54dfe..e8879a78bcce 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -53,6 +53,9 @@ static void dwmac4_core_init(struct mac_device_info *hw, + if (hw->pcs) + value |= GMAC_PCS_IRQ_DEFAULT; + ++ if (hw->mdio_intr_en) ++ value |= GMAC_INT_MDIO_EN; ++ + writel(value, ioaddr + GMAC_INT_EN); + } + +@@ -624,6 +627,9 @@ static int dwmac4_irq_status(struct 
mac_device_info *hw, + x->irq_rx_path_exit_lpi_mode_n++; + } + ++ if (intr_status & mdio_irq) ++ wake_up(&hw->mdio_busy_wait); ++ + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac4_phystatus(ioaddr, x); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index 775db776b3cc..48550d617b01 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -72,6 +72,9 @@ + #define TCEIE BIT(0) + #define DMA_ECC_INT_STATUS 0x00001088 + ++/* MDIO interrupt enable in MAC_Interrupt_Enable register */ ++#define GMAC_INT_MDIO_EN BIT(18) ++ + int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); + int dwmac5_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c +index 3af2e5015245..11c7f92e99b4 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c +@@ -73,6 +73,7 @@ static const struct stmmac_hwif_entry { + bool gmac; + bool gmac4; + bool xgmac; ++ bool mdio_intr_en; + u32 min_id; + const struct stmmac_regs_off regs; + const void *desc; +@@ -90,6 +91,7 @@ static const struct stmmac_hwif_entry { + .gmac = false, + .gmac4 = false, + .xgmac = false, ++ .mdio_intr_en = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, +@@ -108,6 +110,7 @@ static const struct stmmac_hwif_entry { + .gmac = true, + .gmac4 = false, + .xgmac = false, ++ .mdio_intr_en = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, +@@ -126,6 +129,7 @@ static const struct stmmac_hwif_entry { + .gmac = false, + .gmac4 = true, + .xgmac = false, ++ .mdio_intr_en = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -144,6 +148,7 @@ static const struct stmmac_hwif_entry { + .gmac = false, + .gmac4 = true, + 
.xgmac = false, ++ .mdio_intr_en = false, + .min_id = DWMAC_CORE_4_00, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -162,6 +167,7 @@ static const struct stmmac_hwif_entry { + .gmac = false, + .gmac4 = true, + .xgmac = false, ++ .mdio_intr_en = false, + .min_id = DWMAC_CORE_4_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -180,6 +186,7 @@ static const struct stmmac_hwif_entry { + .gmac = false, + .gmac4 = true, + .xgmac = false, ++ .mdio_intr_en = true, + .min_id = DWMAC_CORE_5_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -198,6 +205,7 @@ static const struct stmmac_hwif_entry { + .gmac = false, + .gmac4 = false, + .xgmac = true, ++ .mdio_intr_en = false, + .min_id = DWXGMAC_CORE_2_10, + .regs = { + .ptp_off = PTP_XGMAC_OFFSET, +@@ -276,6 +284,10 @@ int stmmac_hwif_init(struct stmmac_priv *priv) + mac->mode = mac->mode ? : entry->mode; + mac->tc = mac->tc ? : entry->tc; + mac->mmc = mac->mmc ? : entry->mmc; ++ mac->mdio_intr_en = mac->mdio_intr_en ? : entry->mdio_intr_en; ++ ++ if (mac->mdio_intr_en) ++ init_waitqueue_head(&mac->mdio_busy_wait); + + priv->hw = mac; + priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +index a1189bfcf848..144321f46f27 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +@@ -143,6 +143,15 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, + !(tmp & MII_XGMAC_BUSY), 100, 10000); + } + ++static bool stmmac_mdio_intr_done(struct mii_bus *bus) ++{ ++ struct net_device *ndev = bus->priv; ++ struct stmmac_priv *priv = netdev_priv(ndev); ++ unsigned int mii_address = priv->hw->mii.addr; ++ ++ return !(readl(priv->ioaddr + mii_address) & MII_BUSY); ++} ++ + /** + * stmmac_mdio_read + * @bus: points to the mii_bus structure +@@ -183,16 +192,26 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) + } + } + 
+- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), +- 100, 10000)) ++ if (priv->hw->mdio_intr_en) { ++ if (!wait_event_timeout(priv->hw->mdio_busy_wait, ++ stmmac_mdio_intr_done(bus), HZ / 100)) ++ return -EBUSY; ++ } else if (readl_poll_timeout(priv->ioaddr + mii_address, v, ++ !(v & MII_BUSY), 100, 10000)) { + return -EBUSY; ++ } + + writel(data, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + +- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), +- 100, 10000)) ++ if (priv->hw->mdio_intr_en) { ++ if (!wait_event_timeout(priv->hw->mdio_busy_wait, ++ stmmac_mdio_intr_done(bus), HZ / 100)) ++ return -EBUSY; ++ } else if (readl_poll_timeout(priv->ioaddr + mii_address, v, ++ !(v & MII_BUSY), 100, 10000)) { + return -EBUSY; ++ } + + /* Read the data from the MII data register */ + data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK; +@@ -242,17 +261,30 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, + } + + /* Wait until any existing MII operation is complete */ +- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), +- 100, 10000)) ++ if (priv->hw->mdio_intr_en) { ++ if (!wait_event_timeout(priv->hw->mdio_busy_wait, ++ stmmac_mdio_intr_done(bus), HZ / 100)) ++ return -EBUSY; ++ } else if (readl_poll_timeout(priv->ioaddr + mii_address, v, ++ !(v & MII_BUSY), 100, 10000)) { + return -EBUSY; ++ } + + /* Set the MII address register to write */ + writel(data, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ +- return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), +- 100, 10000); ++ if (priv->hw->mdio_intr_en) { ++ if (!wait_event_timeout(priv->hw->mdio_busy_wait, ++ stmmac_mdio_intr_done(bus), HZ / 100)) ++ return -EBUSY; ++ } else if (readl_poll_timeout(priv->ioaddr + mii_address, v, ++ !(v & MII_BUSY), 100, 10000)) { ++ return -EBUSY; ++ } ++ ++ 
return 0; + } + + /** +-- +2.17.1 + diff --git a/patches/0013-platform-x86-SoCWatch-errors-and-warnings.sep-socwatch b/patches/0013-platform-x86-SoCWatch-errors-and-warnings.sep-socwatch new file mode 100644 index 0000000000..dc332336c5 --- /dev/null +++ b/patches/0013-platform-x86-SoCWatch-errors-and-warnings.sep-socwatch @@ -0,0 +1,87 @@ +From 47931671d17d33dd891b2e79d5b61dc31c5b826b Mon Sep 17 00:00:00 2001 +From: Faycal Benmlih +Date: Thu, 25 Apr 2019 15:59:47 -0500 +Subject: [PATCH 13/27] platform/x86: SoCWatch errors and warnings + +Reported-by: kbuild test robot +Signed-off-by: Faycal Benmlih +--- + drivers/platform/x86/socwatch/sw_telem.c | 19 ++++++++++--------- + .../x86/socwatch/sw_trace_notifier_provider.c | 5 +++++ + 2 files changed, 15 insertions(+), 9 deletions(-) + +diff --git a/drivers/platform/x86/socwatch/sw_telem.c b/drivers/platform/x86/socwatch/sw_telem.c +index 38bfd89d7a2b..1aed81c8c119 100644 +--- a/drivers/platform/x86/socwatch/sw_telem.c ++++ b/drivers/platform/x86/socwatch/sw_telem.c +@@ -57,7 +57,8 @@ + #include + #include /* Definition of __weak */ + #include /* LINUX_VERSION_CODE */ +-#include /* 'udelay' */ ++#include /* 'udelay' */ ++#include /* Definition of ioremap_nocache and iounmap */ + #include "sw_kernel_defines.h" /* pw_pr_debug */ + #include "sw_mem.h" /* sw_kmalloc/free */ + #include "sw_lock_defs.h" /* Various lock-related definitions */ +@@ -235,13 +236,13 @@ static volatile u64 *s_pmcIPCRBufAddr; + */ + static bool setup_punit_mbox(void) + { +- s_punitInterfaceAddr = ioremap_nocache( ++ s_punitInterfaceAddr = (u64 *)ioremap_nocache( + (unsigned long)s_mchBarAddrs[TELEM_MCHBAR_CFG] + + PUNIT_MAILBOX_INTERFACE_OFFSET, 0x4); +- s_punitDataAddr = ioremap_nocache( ++ s_punitDataAddr = (u64 *)ioremap_nocache( + (unsigned long)s_mchBarAddrs[TELEM_MCHBAR_CFG] + + PUNIT_MAILBOX_DATA_OFFSET, 0x4); +- s_telemEventInfo[TELEM_PUNIT].ssram_virt_addr = ioremap_nocache( ++ s_telemEventInfo[TELEM_PUNIT].ssram_virt_addr = (u64 
*)ioremap_nocache( + (unsigned long) + s_mchBarAddrs[TELEM_SSRAMBAR_CFG] + + PSS_TELEM_SSRAM_OFFSET, TELEM_SSRAM_SIZE); +@@ -276,19 +277,19 @@ static void destroy_punit_mbox(void) + */ + static bool setup_pmc_mbox(void) + { +- s_pmcIPCCmdAddr = ioremap_nocache( ++ s_pmcIPCCmdAddr = (u64 *)ioremap_nocache( + (unsigned long)s_mchBarAddrs[TELEM_IPC1BAR_CFG] + + PMC_IPC_CMD, 0x4); +- s_pmcIPCStsAddr = ioremap_nocache( ++ s_pmcIPCStsAddr = (u64 *)ioremap_nocache( + (unsigned long)s_mchBarAddrs[TELEM_IPC1BAR_CFG] + + PMC_IPC_STATUS, 0x4); +- s_pmcIPCWBufAddr = ioremap_nocache( ++ s_pmcIPCWBufAddr = (u64 *)ioremap_nocache( + (unsigned long)s_mchBarAddrs[TELEM_IPC1BAR_CFG] + + PMC_IPC_WRITE_BUFFER, 0x4); +- s_pmcIPCRBufAddr = ioremap_nocache( ++ s_pmcIPCRBufAddr = (u64 *)ioremap_nocache( + (unsigned long)s_mchBarAddrs[TELEM_IPC1BAR_CFG] + + PMC_IPC_READ_BUFFER, 0x4); +- s_telemEventInfo[TELEM_PMC].ssram_virt_addr = ioremap_nocache( ++ s_telemEventInfo[TELEM_PMC].ssram_virt_addr = (u64 *)ioremap_nocache( + (unsigned long)s_mchBarAddrs[TELEM_SSRAMBAR_CFG] + + IOSS_TELEM_SSRAM_OFFSET, TELEM_SSRAM_SIZE); + +diff --git a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +index 4fd32ff25565..2bba11072985 100644 +--- a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c ++++ b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +@@ -671,6 +671,11 @@ static struct sw_trace_notifier_data s_trace_collector_lists[] = { + static struct sw_trace_notifier_data s_notifier_collector_lists[] = { + /* EMPTY */ }; + ++static struct sw_trace_notifier_data s_hotplug_notifier_data = { ++ /* EMPTY */ ++}; ++ ++ + #endif /* CONFIG_TRACEPOINTS */ + + /* +-- +2.17.1 + diff --git a/patches/0013-scsi-ufs-revamp-string-descriptor-reading.security b/patches/0013-scsi-ufs-revamp-string-descriptor-reading.security new file mode 100644 index 0000000000..0b23a2233d --- /dev/null +++ 
b/patches/0013-scsi-ufs-revamp-string-descriptor-reading.security @@ -0,0 +1,137 @@ +From 47ac29551b3643788bcdccc0ec9f75b90ca9e8f1 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Wed, 4 Jan 2017 14:14:21 +0200 +Subject: [PATCH 13/65] scsi: ufs: revamp string descriptor reading + +Define new a type: uc_string_id for easier string +handling and less casting. Reduce number or string +copies in price of a dynamic allocation. +In addition drop usage of variable length array (VLA) +as it's not considered to be safe. + +V9: Fix memory corruption. + +Change-Id: Ieda6a4b68e60b8a2d8a2d93a371ff5396dec989b +Signed-off-by: Tomas Winkler +--- + drivers/scsi/ufs/ufs-sysfs.c | 2 +- + drivers/scsi/ufs/ufs.h | 2 +- + drivers/scsi/ufs/ufshcd.c | 20 ++++++++++---------- + drivers/scsi/ufs/ufshcd.h | 4 ++-- + 4 files changed, 14 insertions(+), 14 deletions(-) + +diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c +index 0926b1c0c3f7..98a18fa3b04f 100644 +--- a/drivers/scsi/ufs/ufs-sysfs.c ++++ b/drivers/scsi/ufs/ufs-sysfs.c +@@ -570,7 +570,7 @@ static ssize_t _name##_show(struct device *dev, \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + int ret; \ + int desc_len = QUERY_DESC_MAX_SIZE; \ +- u8 *desc_buf; \ ++ char *desc_buf; \ + \ + desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \ + if (!desc_buf) \ +diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h +index ed54eac4e6d7..ee93ba19c78f 100644 +--- a/drivers/scsi/ufs/ufs.h ++++ b/drivers/scsi/ufs/ufs.h +@@ -552,7 +552,7 @@ struct ufs_dev_desc { + u8 subclass; + u32 min_uma_sz; + u16 wmanufacturerid; +- u8 *model; ++ char *model; + }; + + /** +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 35767da207de..86a00f2349dd 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -3153,7 +3153,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u8 param_offset, +- u8 *param_read_buf, ++ void *param_read_buf, + 
u8 param_size) + { + int ret; +@@ -3253,7 +3253,7 @@ struct uc_string_id { + } __packed; + + /* replace non-printable or non-ASCII characters with spaces */ +-static inline char ufshcd_remove_non_printable(u8 ch) ++static inline char blank_non_printable(char ch) + { + return (ch >= 0x20 && ch <= 0x7e) ? ch : ' '; + } +@@ -3267,16 +3267,15 @@ static inline char ufshcd_remove_non_printable(u8 ch) + * @ascii: if true convert from unicode to ascii characters + * null terminated string. + * +- * Return: +- * * string size on success. +- * * -ENOMEM: on allocation failure +- * * -EINVAL: on a wrong parameter ++ * Return: string size on success. ++ * -ENOMEM: on allocation failure ++ * -EINVAL: on a wrong parameter + */ + int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, +- u8 **buf, bool ascii) ++ char **buf, bool ascii) + { + struct uc_string_id *uc_str; +- u8 *str; ++ char *str; + int ret; + + if (!buf) +@@ -3324,16 +3323,17 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + + /* replace non-printable or non-ASCII characters with spaces */ + for (i = 0; i < ret; i++) +- str[i] = ufshcd_remove_non_printable(str[i]); ++ str[i] = blank_non_printable(str[i]); + + str[ret++] = '\0'; + + } else { +- str = kmemdup(uc_str, uc_str->len, GFP_KERNEL); ++ str = kzalloc(uc_str->len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } ++ memcpy(str, uc_str, uc_str->len); + ret = uc_str->len; + } + out: +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h +index 3b9a3b56c6b9..4eee863b83f3 100644 +--- a/drivers/scsi/ufs/ufshcd.h ++++ b/drivers/scsi/ufs/ufshcd.h +@@ -916,7 +916,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u8 param_offset, +- u8 *param_read_buf, ++ void *param_read_buf, + u8 param_size); + int __ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, u32 *attr_val, +@@ -929,7 +929,7 @@ int 
ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + #define SD_ASCII_STD true + #define SD_RAW false + int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, +- u8 **buf, bool ascii); ++ char **buf, bool ascii); + + int ufshcd_hold(struct ufs_hba *hba, bool async); + void ufshcd_release(struct ufs_hba *hba); +-- +2.17.1 + diff --git a/patches/0013-sos-cleanup-ptdev-irq-structure.acrn b/patches/0013-sos-cleanup-ptdev-irq-structure.acrn new file mode 100644 index 0000000000..5d3e1b50be --- /dev/null +++ b/patches/0013-sos-cleanup-ptdev-irq-structure.acrn @@ -0,0 +1,147 @@ +From 3036903552ed139e738aac057cb77833caa8347c Mon Sep 17 00:00:00 2001 +From: Binbin Wu +Date: Fri, 31 Aug 2018 10:58:56 +0800 +Subject: [PATCH 013/150] sos: cleanup ptdev irq structure + +- Use individual data struct of ptdev irq for ioctl and hypercall + +Change-Id: Id7b02038d0c149a0d1206206f18d54c91c7350d3 +Tracked-On: 218445 +Signed-off-by: Binbin Wu +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/char/vhm/vhm_dev.c | 24 +++++++++++++----------- + include/linux/vhm/acrn_hv_defs.h | 20 ++++++++++++++++++++ + include/linux/vhm/vhm_ioctl_defs.h | 27 +++++++++++++++++++++++++++ + 3 files changed, 60 insertions(+), 11 deletions(-) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index ab5f687f809f..6236b524d4b9 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -139,9 +139,13 @@ static long vhm_dev_ioctl(struct file *filep, + { + long ret = 0; + struct vhm_vm *vm; ++ struct ic_ptdev_irq ic_pt_irq; ++ struct hc_ptdev_irq hc_pt_irq; + + trace_printk("[%s] ioctl_num=0x%x\n", __func__, ioctl_num); + ++ memset(&hc_pt_irq, 0, sizeof(hc_pt_irq)); ++ memset(&ic_pt_irq, 0, sizeof(ic_pt_irq)); + vm = (struct vhm_vm *)filep->private_data; + if (vm == NULL) { + pr_err("vhm: invalid VM !\n"); +@@ -381,15 +385,14 @@ static long vhm_dev_ioctl(struct file *filep, + } + + case 
IC_SET_PTDEV_INTR_INFO: { +- struct acrn_ptdev_irq pt_irq; +- int i; + +- if (copy_from_user(&pt_irq, +- (void *)ioctl_param, sizeof(pt_irq))) ++ if (copy_from_user(&ic_pt_irq, ++ (void *)ioctl_param, sizeof(ic_pt_irq))) + return -EFAULT; + ++ memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + ret = hcall_set_ptdev_intr_info(vm->vmid, +- virt_to_phys(&pt_irq)); ++ virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to set intr info for ptdev!\n"); + return -EFAULT; +@@ -398,15 +401,14 @@ static long vhm_dev_ioctl(struct file *filep, + break; + } + case IC_RESET_PTDEV_INTR_INFO: { +- struct acrn_ptdev_irq pt_irq; +- int i; ++ if (copy_from_user(&ic_pt_irq, ++ (void *)ioctl_param, sizeof(ic_pt_irq))) ++ return -EFAULT; + +- if (copy_from_user(&pt_irq, +- (void *)ioctl_param, sizeof(pt_irq))) +- return -EFAULT; ++ memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + + ret = hcall_reset_ptdev_intr_info(vm->vmid, +- virt_to_phys(&pt_irq)); ++ virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to reset intr info for ptdev!\n"); + return -EFAULT; +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 1d21bf21c91c..eeac0e9b4e76 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -132,4 +132,24 @@ struct vm_gpa2hpa { + unsigned long hpa; /* OUT: -1 means invalid gpa */ + } __attribute__((aligned(8))); + ++struct hc_ptdev_irq { ++#define IRQ_INTX 0 ++#define IRQ_MSI 1 ++#define IRQ_MSIX 2 ++ uint32_t type; ++ uint16_t virt_bdf; /* IN: Device virtual BDF# */ ++ uint16_t phys_bdf; /* IN: Device physical BDF# */ ++ union { ++ struct { ++ uint32_t virt_pin; /* IN: virtual IOAPIC pin */ ++ uint32_t phys_pin; /* IN: physical IOAPIC pin */ ++ uint32_t pic_pin; /* IN: pin from PIC? 
*/ ++ } intx; ++ struct { ++ /* IN: vector count of MSI/MSIX */ ++ uint32_t vector_cnt; ++ } msix; ++ }; ++} __attribute__((aligned(8))); ++ + #endif /* ACRN_HV_DEFS_H */ +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 5ec2d10fc350..df07e3c93467 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -129,4 +129,31 @@ struct vm_memmap { + }; + }; + ++struct ic_ptdev_irq { ++#define IRQ_INTX 0 ++#define IRQ_MSI 1 ++#define IRQ_MSIX 2 ++ uint32_t type; ++ uint16_t virt_bdf; /* IN: Device virtual BDF# */ ++ uint16_t phys_bdf; /* IN: Device physical BDF# */ ++ union { ++ struct { ++ uint32_t virt_pin; /* IN: virtual IOAPIC pin */ ++ uint32_t phys_pin; /* IN: physical IOAPIC pin */ ++ uint32_t pic_pin; /* IN: pin from PIC? */ ++ } intx; ++ struct { ++ /* IN: vector count of MSI/MSIX, ++ * Keep this filed on top of msix */ ++ uint32_t vector_cnt; ++ ++ /* IN: size of MSI-X table (round up to 4K) */ ++ uint32_t table_size; ++ ++ /* IN: physical address of MSI-X table */ ++ uint64_t table_paddr; ++ } msix; ++ }; ++}; ++ + #endif /* VHM_IOCTL_DEFS_H */ +-- +2.17.1 + diff --git a/patches/0013-trusty-Handle-fiqs-without-calling-notifier-and-ena.trusty b/patches/0013-trusty-Handle-fiqs-without-calling-notifier-and-ena.trusty new file mode 100644 index 0000000000..6e3156aaa2 --- /dev/null +++ b/patches/0013-trusty-Handle-fiqs-without-calling-notifier-and-ena.trusty @@ -0,0 +1,73 @@ +From fc1b13f67e0088b952117bf26e4b02553c1fb6a1 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Thu, 11 Jun 2015 19:51:54 -0700 +Subject: [PATCH 13/63] trusty: Handle fiqs without calling notifier and + enabling interrupts + +Change-Id: I9c147376bd1596f4ecd1e932b30140c87410c860 +--- + drivers/trusty/trusty.c | 2 ++ + include/linux/trusty/sm_err.h | 1 + + include/linux/trusty/smcall.h | 15 ++++++++++++++- + 3 files changed, 17 insertions(+), 1 deletion(-) + +diff --git 
a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index fcdbba518797..4b5d3552720b 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -108,6 +108,8 @@ static ulong trusty_std_call_inner(struct device *dev, ulong smcnr, + __func__, smcnr, a0, a1, a2); + while (true) { + ret = smc(smcnr, a0, a1, a2); ++ while ((s32)ret == SM_ERR_FIQ_INTERRUPTED) ++ ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0); + if ((int)ret != SM_ERR_BUSY || !retry) + break; + +diff --git a/include/linux/trusty/sm_err.h b/include/linux/trusty/sm_err.h +index 4ee67589ce63..7de09b46fddb 100644 +--- a/include/linux/trusty/sm_err.h ++++ b/include/linux/trusty/sm_err.h +@@ -35,5 +35,6 @@ + #define SM_ERR_NOT_ALLOWED -9 /* SMC call not allowed */ + #define SM_ERR_END_OF_INPUT -10 + #define SM_ERR_PANIC -11 /* Secure OS crashed */ ++#define SM_ERR_FIQ_INTERRUPTED -12 /* Got interrupted by FIQ. Call back with SMC_SC_RESTART_FIQ on same CPU */ + + #endif +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index cdb4eccd7bc3..7d8950a8890e 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -58,6 +58,18 @@ + #define SMC_SC_RESTART_LAST SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0) + #define SMC_SC_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1) + ++/** ++ * SMC_SC_RESTART_FIQ - Re-enter trusty after it was interrupted by an fiq ++ * ++ * No arguments, no return value. ++ * ++ * Re-enter trusty after returning to ns to process an fiq. Must be called iff ++ * trusty returns SM_ERR_FIQ_INTERRUPTED. ++ * ++ * Enable by selecting api version TRUSTY_API_VERSION_RESTART_FIQ (1) or later. ++ */ ++#define SMC_SC_RESTART_FIQ SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2) ++ + /* + * Return from secure os to non-secure os with return value in r1 + */ +@@ -94,7 +106,8 @@ + * + * This call must be made before any calls that are affected by the api version. 
+ */ +-#define TRUSTY_API_VERSION_CURRENT (0) ++#define TRUSTY_API_VERSION_RESTART_FIQ (1) ++#define TRUSTY_API_VERSION_CURRENT (1) + #define SMC_FC_API_VERSION SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 11) + + /* TRUSTED_OS entity calls */ +-- +2.17.1 + diff --git a/patches/0013-usb-typec-ucsi-acpi-Move-to-the-new-API.usb-typec b/patches/0013-usb-typec-ucsi-acpi-Move-to-the-new-API.usb-typec new file mode 100644 index 0000000000..0fd981fbf2 --- /dev/null +++ b/patches/0013-usb-typec-ucsi-acpi-Move-to-the-new-API.usb-typec @@ -0,0 +1,179 @@ +From e01ac560556f0de8ad909d083191d901411affc7 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Thu, 26 Sep 2019 12:38:25 +0300 +Subject: [PATCH 13/18] usb: typec: ucsi: acpi: Move to the new API + +Replacing the old "cmd" and "sync" callbacks with an +implementation of struct ucsi_operations. The ACPI +notification (interrupt) handler will from now on read the +CCI (Command Status and Connector Change Indication) +register, and call ucsi_connector_change() function and/or +complete pending command completions based on it. 
+ +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/ucsi/ucsi_acpi.c | 94 ++++++++++++++++++++++++------ + 1 file changed, 75 insertions(+), 19 deletions(-) + +diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c +index a18112a83fae..9a418ab68546 100644 +--- a/drivers/usb/typec/ucsi/ucsi_acpi.c ++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c +@@ -19,7 +19,9 @@ + struct ucsi_acpi { + struct device *dev; + struct ucsi *ucsi; +- struct ucsi_ppm ppm; ++ void __iomem *base; ++ struct completion complete; ++ unsigned long flags; + guid_t guid; + }; + +@@ -39,27 +41,76 @@ static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func) + return 0; + } + +-static int ucsi_acpi_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl) ++static int ucsi_acpi_read(struct ucsi *ucsi, unsigned int offset, ++ void *val, size_t val_len) + { +- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm); ++ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); ++ int ret; ++ ++ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ); ++ if (ret) ++ return ret; + +- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd; ++ memcpy(val, ua->base + offset, val_len); ++ ++ return 0; ++} ++ ++static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset, ++ const void *val, size_t val_len) ++{ ++ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); ++ ++ memcpy(ua->base + offset, val, val_len); + + return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE); + } + +-static int ucsi_acpi_sync(struct ucsi_ppm *ppm) ++static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset, ++ const void *val, size_t val_len) + { +- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm); ++ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); ++ int ret; ++ ++ set_bit(COMMAND_PENDING, &ua->flags); ++ ++ ret = ucsi_acpi_async_write(ucsi, offset, val, val_len); ++ if (ret) ++ goto out_clear_bit; ++ ++ if (!wait_for_completion_timeout(&ua->complete, msecs_to_jiffies(5000))) ++ ret = -ETIMEDOUT; + +- return 
ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ); ++out_clear_bit: ++ clear_bit(COMMAND_PENDING, &ua->flags); ++ ++ return ret; + } + ++static const struct ucsi_operations ucsi_acpi_ops = { ++ .read = ucsi_acpi_read, ++ .sync_write = ucsi_acpi_sync_write, ++ .async_write = ucsi_acpi_async_write ++}; ++ + static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) + { + struct ucsi_acpi *ua = data; ++ u32 cci; ++ int ret; ++ ++ ret = ucsi_acpi_read(ua->ucsi, UCSI_CCI, &cci, sizeof(cci)); ++ if (ret) { ++ dev_err(ua->dev, "failed to read CCI\n"); ++ return; ++ } ++ ++ if (UCSI_CCI_CONNECTOR(cci)) ++ ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci)); + +- ucsi_notify(ua->ucsi); ++ if (test_bit(COMMAND_PENDING, &ua->flags) && ++ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE)) ++ complete(&ua->complete); + } + + static int ucsi_acpi_probe(struct platform_device *pdev) +@@ -90,35 +141,39 @@ static int ucsi_acpi_probe(struct platform_device *pdev) + * it can not be requested here, and we can not use + * devm_ioremap_resource(). 
+ */ +- ua->ppm.data = devm_ioremap(&pdev->dev, res->start, resource_size(res)); +- if (!ua->ppm.data) ++ ua->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); ++ if (!ua->base) + return -ENOMEM; + +- if (!ua->ppm.data->version) +- return -ENODEV; +- + ret = guid_parse(UCSI_DSM_UUID, &ua->guid); + if (ret) + return ret; + +- ua->ppm.cmd = ucsi_acpi_cmd; +- ua->ppm.sync = ucsi_acpi_sync; ++ init_completion(&ua->complete); + ua->dev = &pdev->dev; + ++ ua->ucsi = ucsi_create(&pdev->dev, &ucsi_acpi_ops); ++ if (IS_ERR(ua->ucsi)) ++ return PTR_ERR(ua->ucsi); ++ ++ ucsi_set_drvdata(ua->ucsi, ua); ++ + status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), + ACPI_DEVICE_NOTIFY, + ucsi_acpi_notify, ua); + if (ACPI_FAILURE(status)) { + dev_err(&pdev->dev, "failed to install notify handler\n"); ++ ucsi_destroy(ua->ucsi); + return -ENODEV; + } + +- ua->ucsi = ucsi_register_ppm(&pdev->dev, &ua->ppm); +- if (IS_ERR(ua->ucsi)) { ++ ret = ucsi_register(ua->ucsi); ++ if (ret) { + acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), + ACPI_DEVICE_NOTIFY, + ucsi_acpi_notify); +- return PTR_ERR(ua->ucsi); ++ ucsi_destroy(ua->ucsi); ++ return ret; + } + + platform_set_drvdata(pdev, ua); +@@ -130,7 +185,8 @@ static int ucsi_acpi_remove(struct platform_device *pdev) + { + struct ucsi_acpi *ua = platform_get_drvdata(pdev); + +- ucsi_unregister_ppm(ua->ucsi); ++ ucsi_unregister(ua->ucsi); ++ ucsi_destroy(ua->ucsi); + + acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), ACPI_DEVICE_NOTIFY, + ucsi_acpi_notify); +-- +2.17.1 + diff --git a/patches/0014-ASoC-Intel-Skylake-Unify-driver-cleanup-mechanism.audio b/patches/0014-ASoC-Intel-Skylake-Unify-driver-cleanup-mechanism.audio new file mode 100644 index 0000000000..622478a9c9 --- /dev/null +++ b/patches/0014-ASoC-Intel-Skylake-Unify-driver-cleanup-mechanism.audio @@ -0,0 +1,208 @@ +From ec6a8ca66d21408aeb72cc719f6774b22f467100 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Sat, 23 Mar 2019 16:52:54 +0100 
+Subject: [PATCH 014/193] ASoC: Intel: Skylake: Unify driver cleanup mechanism + +Driver cleanup process is similar for all platforms and sst_ops::free +provides enough customization already. Unify them. Also remove redundant +disable interrupt calls from new cleanup method. This is yet another +checkpoint in quest for simplification or, perhaps a removal of +skl_dsp_ops. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 11 ----------- + sound/soc/intel/skylake/cnl-sst-dsp.h | 1 - + sound/soc/intel/skylake/cnl-sst.c | 11 ----------- + sound/soc/intel/skylake/skl-messages.c | 10 +--------- + sound/soc/intel/skylake/skl-sst-dsp.h | 2 -- + sound/soc/intel/skylake/skl-sst.c | 20 ++++++++++++-------- + sound/soc/intel/skylake/skl.h | 2 +- + 7 files changed, 14 insertions(+), 43 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index f548523055fa..4d5f34c1fb1b 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -592,16 +592,5 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + } + EXPORT_SYMBOL_GPL(bxt_sst_dsp_init); + +-void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) +-{ +- +- skl_release_library(skl->lib_info, skl->lib_count); +- if (skl->dsp->fw) +- release_firmware(skl->dsp->fw); +- list_del_init(&skl->module_list); +- skl->dsp->ops->free(skl->dsp); +-} +-EXPORT_SYMBOL_GPL(bxt_sst_dsp_cleanup); +- + MODULE_LICENSE("GPL v2"); + MODULE_DESCRIPTION("Intel Broxton IPC driver"); +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.h b/sound/soc/intel/skylake/cnl-sst-dsp.h +index b1417639bc1c..f3d320b05eb5 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.h ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.h +@@ -96,6 +96,5 @@ bool cnl_ipc_int_status(struct sst_dsp *ctx); + int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + const char *fw_name, struct skl_dsp_loader_ops dsp_ops, + struct skl_dev 
**dsp); +-void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + + #endif /*__CNL_SST_DSP_H__*/ +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 13821fac408d..0d3847d1a786 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -463,16 +463,5 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + } + EXPORT_SYMBOL_GPL(cnl_sst_dsp_init); + +-void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) +-{ +- if (skl->dsp->fw) +- release_firmware(skl->dsp->fw); +- +- list_del_init(&skl->module_list); +- +- skl->dsp->ops->free(skl->dsp); +-} +-EXPORT_SYMBOL_GPL(cnl_sst_dsp_cleanup); +- + MODULE_LICENSE("GPL v2"); + MODULE_DESCRIPTION("Intel Cannonlake IPC driver"); +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index 592da0803150..c24d6d14f889 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -172,49 +172,41 @@ static const struct skl_dsp_ops dsp_ops[] = { + .id = 0x9d70, + .loader_ops = skl_get_loader_ops, + .init = skl_sst_dsp_init, +- .cleanup = skl_sst_dsp_cleanup + }, + { + .id = 0x9d71, + .loader_ops = skl_get_loader_ops, + .init = skl_sst_dsp_init, +- .cleanup = skl_sst_dsp_cleanup + }, + { + .id = 0x5a98, + .loader_ops = bxt_get_loader_ops, + .init = bxt_sst_dsp_init, +- .cleanup = bxt_sst_dsp_cleanup + }, + { + .id = 0x3198, + .loader_ops = bxt_get_loader_ops, + .init = bxt_sst_dsp_init, +- .cleanup = bxt_sst_dsp_cleanup + }, + { + .id = 0x9dc8, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, +- .cleanup = cnl_sst_dsp_cleanup + }, + { + .id = 0xa348, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, +- .cleanup = cnl_sst_dsp_cleanup + }, + { + .id = 0x02c8, + .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, +- .cleanup = cnl_sst_dsp_cleanup + }, + { + .id = 0x06c8, + .loader_ops = 
bxt_get_loader_ops, + .init = cnl_sst_dsp_init, +- .cleanup = cnl_sst_dsp_cleanup + }, + }; + +@@ -282,7 +274,7 @@ int skl_free_dsp(struct skl_dev *skl) + /* disable ppcap interrupt */ + snd_hdac_ext_bus_ppcap_int_enable(bus, false); + +- skl->dsp_ops->cleanup(bus->dev, skl); ++ skl_sst_dsp_cleanup(skl); + + kfree(skl->hw_cfg.i2s_caps.ctrl_base_addr); + kfree(skl->cores.state); +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index ecf6d526f2fc..1739d4e66275 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -239,8 +239,6 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + const char *fw_name, struct skl_dsp_loader_ops dsp_ops, + struct skl_dev **dsp); +-void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); +-void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl); + int bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, + int lib_count); + +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index d3bbdcf22941..4d6242f9bf52 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -612,17 +612,21 @@ int skl_sst_init_fw(struct skl_dev *skl) + } + EXPORT_SYMBOL_GPL(skl_sst_init_fw); + +-void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl) ++void skl_sst_dsp_cleanup(struct skl_dev *skl) + { ++ struct sst_dsp *dsp = skl->dsp; ++ ++ skl_release_library(skl->lib_info, skl->lib_count); ++ if (dsp->fw) ++ release_firmware(dsp->fw); ++ skl_clear_module_table(dsp); + +- if (skl->dsp->fw) +- release_firmware(skl->dsp->fw); +- skl_clear_module_table(skl->dsp); + list_del_init(&skl->module_list); +- skl->dsp->ops->free(skl->dsp); +- if (skl->boot_complete) { +- skl->dsp->cl_dev.ops.cl_cleanup_controller(skl->dsp); +- skl_cldma_int_disable(skl->dsp); ++ 
dsp->ops->free(dsp); ++ ++ if (skl->boot_complete && dsp->cl_dev.bufsize) { ++ dsp->cl_dev.ops.cl_cleanup_controller(dsp); ++ skl_cldma_int_disable(dsp); + } + } + EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup); +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index 6ddf690cd068..142bcdf89cbd 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -159,7 +159,6 @@ struct skl_dsp_ops { + int irq, const char *fw_name, + struct skl_dsp_loader_ops loader_ops, + struct skl_dev **skl_sst); +- void (*cleanup)(struct device *dev, struct skl_dev *skl); + }; + + int skl_platform_unregister(struct device *dev); +@@ -173,6 +172,7 @@ int skl_nhlt_update_topology_bin(struct skl_dev *skl); + int skl_init_dsp(struct skl_dev *skl); + int skl_free_dsp(struct skl_dev *skl); + int skl_sst_init_fw(struct skl_dev *skl); ++void skl_sst_dsp_cleanup(struct skl_dev *skl); + int skl_suspend_late_dsp(struct skl_dev *skl); + int skl_suspend_dsp(struct skl_dev *skl); + int skl_resume_dsp(struct skl_dev *skl); +-- +2.17.1 + diff --git a/patches/0014-Add-TSC-ART-translation.felipeb-5.4 b/patches/0014-Add-TSC-ART-translation.felipeb-5.4 new file mode 100644 index 0000000000..758d28a930 --- /dev/null +++ b/patches/0014-Add-TSC-ART-translation.felipeb-5.4 @@ -0,0 +1,189 @@ +From 63fccd788da04c59b17168276c49698e8e9c9f64 Mon Sep 17 00:00:00 2001 +From: Christopher Hall +Date: Mon, 14 Oct 2019 08:26:08 -0700 +Subject: [PATCH 14/14] Add TSC/ART translation + +Signed-off-by: Felipe Balbi +--- + arch/x86/include/asm/tsc.h | 5 ++- + arch/x86/kernel/tsc.c | 61 ++++++++++++++++++++----------- + drivers/ptp/ptp-intel-pmc-tgpio.c | 29 ++++++++++----- + 3 files changed, 61 insertions(+), 34 deletions(-) + +diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h +index b7a9f4385a82..7ee2e58bfbfe 100644 +--- a/arch/x86/include/asm/tsc.h ++++ b/arch/x86/include/asm/tsc.h +@@ -32,8 +32,9 @@ static inline cycles_t get_cycles(void) + + extern struct 
system_counterval_t convert_art_to_tsc(u64 art); + extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns); +-extern void get_tsc_ns(struct system_counterval_t *tsc_counterval, u64 *tsc_ns); +-extern u64 get_art_ns_now(void); ++extern struct timespec64 get_tsc_ns_now(struct system_counterval_t ++ *system_counter); ++extern u64 convert_tsc_ns_to_art(struct timespec64 *tsc_ns); + + extern void tsc_early_init(void); + extern void tsc_init(void); +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c +index 72253d59dd4f..9caac0d0ae63 100644 +--- a/arch/x86/kernel/tsc.c ++++ b/arch/x86/kernel/tsc.c +@@ -1232,14 +1232,15 @@ struct system_counterval_t convert_art_to_tsc(u64 art) + } + EXPORT_SYMBOL(convert_art_to_tsc); + +-void get_tsc_ns(struct system_counterval_t *tsc_counterval, u64 *tsc_ns) +-{ ++struct timespec64 get_tsc_ns_now(struct system_counterval_t *system_counter) { + u64 tmp, res, rem; + u64 cycles; + +- tsc_counterval->cycles = clocksource_tsc.read(NULL); +- cycles = tsc_counterval->cycles; +- tsc_counterval->cs = art_related_clocksource; ++ cycles = clocksource_tsc.read(NULL); ++ if (system_counter != NULL) { ++ system_counter->cycles = cycles; ++ system_counter->cs = art_related_clocksource; ++ } + + rem = do_div(cycles, tsc_khz); + +@@ -1249,30 +1250,46 @@ void get_tsc_ns(struct system_counterval_t *tsc_counterval, u64 *tsc_ns) + do_div(tmp, tsc_khz); + res += tmp; + +- *tsc_ns = res; ++ rem = do_div(res, NSEC_PER_SEC); ++ ++ return (struct timespec64) {.tv_sec = res, .tv_nsec = rem}; + } +-EXPORT_SYMBOL(get_tsc_ns); ++EXPORT_SYMBOL(get_tsc_ns_now); + +-u64 get_art_ns_now(void) +-{ +- struct system_counterval_t tsc_cycles; +- u64 tsc_ns; +- unsigned int eax; +- unsigned int ebx; +- unsigned int ecx; +- unsigned int edx; ++static u64 convert_tsc_ns_to_tsc(struct timespec64 *tsc_ns) { ++ u64 tmp, res, rem; ++ u64 cycles; + +- get_tsc_ns(&tsc_cycles, &tsc_ns); ++ cycles = ((u64)tsc_ns->tv_sec * NSEC_PER_SEC) + tsc_ns->tv_nsec; + +- /* CPUID 
15H TSC/Crystal ratio, plus optionally Crystal Hz */ +- cpuid(ART_CPUID_LEAF, &eax, &ebx, &ecx, &edx); ++ rem = do_div(cycles, USEC_PER_SEC); ++ ++ res = cycles * tsc_khz; ++ tmp = rem * tsc_khz; ++ ++ do_div(tmp, USEC_PER_SEC); ++ ++ return res + tmp; ++} ++ ++ ++u64 convert_tsc_ns_to_art(struct timespec64 *tsc_ns) { ++ u64 tmp, res, rem; ++ u64 cycles; ++ ++ cycles = convert_tsc_ns_to_tsc( tsc_ns ); ++ cycles -= art_to_tsc_offset; ++ ++ rem = do_div(cycles, art_to_tsc_numerator); ++ ++ res = cycles * art_to_tsc_denominator; ++ tmp = rem * art_to_tsc_denominator; + +- printk(KERN_INFO "====> tsc_ns %llu %llu\n", tsc_ns, +- DIV_ROUND_UP_ULL(ecx * ebx, eax)); ++ do_div(tmp, art_to_tsc_numerator); + +- return tsc_ns; ++ return res + tmp; + } +-EXPORT_SYMBOL(get_art_ns_now); ++EXPORT_SYMBOL(convert_tsc_ns_to_art); + + /** + * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC. +diff --git a/drivers/ptp/ptp-intel-pmc-tgpio.c b/drivers/ptp/ptp-intel-pmc-tgpio.c +index 571c6604299e..02634f81c807 100644 +--- a/drivers/ptp/ptp-intel-pmc-tgpio.c ++++ b/drivers/ptp/ptp-intel-pmc-tgpio.c +@@ -137,11 +137,9 @@ static int intel_pmc_tgpio_gettime64(struct ptp_clock_info *info, + struct timespec64 *ts) + { + struct intel_pmc_tgpio *tgpio = to_intel_pmc_tgpio(info); +- u64 now; + + mutex_lock(&tgpio->lock); +- now = get_art_ns_now(); +- *ts = ns_to_timespec64(now); ++ *ts = get_tsc_ns_now(NULL); + mutex_unlock(&tgpio->lock); + + return 0; +@@ -226,25 +224,33 @@ static int intel_pmc_tgpio_config_input(struct intel_pmc_tgpio *tgpio, + return 0; + } + ++#define ptp_clock_time_to_ts64(x) ((struct timespec64){.tv_sec = (x).sec, \ ++ .tv_nsec = (x).nsec}) ++ + static int intel_pmc_tgpio_config_output(struct intel_pmc_tgpio *tgpio, + struct ptp_perout_request *perout, int on) + { + u32 ctrl; ++ u64 art; + + ctrl = intel_pmc_tgpio_readl(tgpio->base, TGPIOCTL); + if (on) { +- struct ptp_clock_time *period = &perout->period; +- struct ptp_clock_time *start = &perout->start; ++ 
struct timespec64 period = ptp_clock_time_to_ts64 ++ (perout->period); ++ struct timespec64 start = ptp_clock_time_to_ts64 ++ (perout->start); + ++ art = convert_tsc_ns_to_art(&start); + intel_pmc_tgpio_writel(tgpio->base, TGPIOCOMPV63_32, +- start->sec); ++ art >> 32); + intel_pmc_tgpio_writel(tgpio->base, TGPIOCOMPV31_0, +- start->nsec); ++ art & 0xFFFFFFFF); + ++ art = convert_tsc_ns_to_art(&period); + intel_pmc_tgpio_writeq(tgpio->base, TGPIOPIV63_32, +- period->sec); ++ art >> 32); + intel_pmc_tgpio_writeq(tgpio->base, TGPIOPIV31_0, +- period->nsec); ++ art & 0xFFFFFFFF); + + ctrl &= ~TGPIOCTL_DIR; + if (perout->flags & PTP_PEROUT_ONE_SHOT) +@@ -290,7 +296,10 @@ static int intel_pmc_tgpio_enable(struct ptp_clock_info *info, + static int intel_pmc_tgpio_get_time_fn(ktime_t *device_time, + struct system_counterval_t *system_counter, void *_tgpio) + { +- get_tsc_ns(system_counter, device_time); ++ struct timespec64 now_ns; ++ ++ now_ns = get_tsc_ns_now(system_counter); ++ *device_time = timespec64_to_ktime(now_ns); + return 0; + } + +-- +2.17.1 + diff --git a/patches/0014-Revert-mfd-intel-lpss-add-children-devices-asynchrono.lpss b/patches/0014-Revert-mfd-intel-lpss-add-children-devices-asynchrono.lpss new file mode 100644 index 0000000000..58c38ed7e0 --- /dev/null +++ b/patches/0014-Revert-mfd-intel-lpss-add-children-devices-asynchrono.lpss @@ -0,0 +1,86 @@ +From 21a820009f1f59875f8267389394b304fe68a813 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Wed, 21 Aug 2019 11:35:49 +0300 +Subject: [PATCH 14/40] Revert "mfd: intel-lpss: add children devices + asynchronously" + +This reverts commit 753a99e052646b81ff0a6668b4ccf35bbf13367d. 
+--- + drivers/mfd/intel-lpss.c | 42 ++++++++++++++++------------------------ + 1 file changed, 17 insertions(+), 25 deletions(-) + +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c +index e8eb3ce98d30..bfe4ff337581 100644 +--- a/drivers/mfd/intel-lpss.c ++++ b/drivers/mfd/intel-lpss.c +@@ -10,7 +10,6 @@ + * Jarkko Nikula + */ + +-#include + #include + #include + #include +@@ -372,29 +371,6 @@ static void intel_lpss_unregister_clock(struct intel_lpss *lpss) + intel_lpss_unregister_clock_tree(lpss->clk); + } + +-static void intel_lpss_async_add_devices(void *_lpss, async_cookie_t cookie) +-{ +- struct intel_lpss *lpss = _lpss; +- int ret; +- +- if (intel_lpss_has_idma(lpss)) { +- ret = mfd_add_devices(lpss->dev, lpss->devid, +- &intel_lpss_idma64_cell, 1, lpss->info->mem, +- lpss->info->irq, NULL); +- if (ret) +- dev_warn(lpss->dev, "Failed to add %s, fallback to PIO\n", +- LPSS_IDMA64_DRIVER_NAME); +- } +- +- ret = mfd_add_devices(lpss->dev, lpss->devid, lpss->cell, +- 1, lpss->info->mem, lpss->info->irq, NULL); +- if (ret) { +- intel_lpss_debugfs_remove(lpss); +- intel_lpss_ltr_hide(lpss); +- intel_lpss_unregister_clock(lpss); +- } +-} +- + int intel_lpss_probe(struct device *dev, + const struct intel_lpss_platform_info *info) + { +@@ -441,12 +417,28 @@ int intel_lpss_probe(struct device *dev, + if (ret) + dev_warn(dev, "Failed to create debugfs entries\n"); + +- async_schedule(intel_lpss_async_add_devices, lpss); ++ if (intel_lpss_has_idma(lpss)) { ++ ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell, ++ 1, info->mem, info->irq, NULL); ++ if (ret) ++ dev_warn(dev, "Failed to add %s, fallback to PIO\n", ++ LPSS_IDMA64_DRIVER_NAME); ++ } ++ ++ ret = mfd_add_devices(dev, lpss->devid, lpss->cell, ++ 1, info->mem, info->irq, NULL); ++ if (ret) ++ goto err_remove_ltr; + + dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); + + return 0; + ++err_remove_ltr: ++ intel_lpss_debugfs_remove(lpss); ++ intel_lpss_ltr_hide(lpss); ++ 
intel_lpss_unregister_clock(lpss); ++ + err_clk_register: + ida_simple_remove(&intel_lpss_devid_ida, lpss->devid); + +-- +2.17.1 + diff --git a/patches/0014-VBS-K-Virtio-Backend-Service-in-Kernel-a-kernel-level.acrn b/patches/0014-VBS-K-Virtio-Backend-Service-in-Kernel-a-kernel-level.acrn new file mode 100644 index 0000000000..f3a2c046c6 --- /dev/null +++ b/patches/0014-VBS-K-Virtio-Backend-Service-in-Kernel-a-kernel-level.acrn @@ -0,0 +1,421 @@ +From 277bf5b19f8723207922d8c904d8319ecb1596a3 Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Fri, 31 Aug 2018 10:58:56 +0800 +Subject: [PATCH 014/150] VBS-K (Virtio Backend Service in Kernel): a + kernel-level virtio framework for ACRN hypervisor. + +This patch added the basic VBS-K framework including the following: + - Definitions of the data structures shared between VBS-K and its + counterpart in userspace, which is VBS-U; + - VBS-K device common data structures; + - Core runtime control logic of the VBS-K framework; + + +Change-Id: I8d9e86de701c1aef965a2490f398a2360cb5bd92 +Tracked-On:218445 +Signed-off-by: Hao Li +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/Kconfig | 2 + + drivers/Makefile | 1 + + drivers/vbs/Kconfig | 20 +++++ + drivers/vbs/Makefile | 3 + + drivers/vbs/vbs.c | 125 ++++++++++++++++++++++++++++++ + include/linux/vbs/vbs.h | 98 +++++++++++++++++++++++ + include/linux/vbs/vbs_common_if.h | 84 ++++++++++++++++++++ + 7 files changed, 333 insertions(+) + create mode 100644 drivers/vbs/Kconfig + create mode 100644 drivers/vbs/Makefile + create mode 100644 drivers/vbs/vbs.c + create mode 100644 include/linux/vbs/vbs.h + create mode 100644 include/linux/vbs/vbs_common_if.h + +diff --git a/drivers/Kconfig b/drivers/Kconfig +index 8befa53f43be..06c326db1799 100644 +--- a/drivers/Kconfig ++++ b/drivers/Kconfig +@@ -228,4 +228,6 @@ source "drivers/interconnect/Kconfig" + + source "drivers/counter/Kconfig" + ++source "drivers/vbs/Kconfig" ++ + endmenu 
+diff --git a/drivers/Makefile b/drivers/Makefile +index 2020ad495feb..dee6b447c1b5 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -144,6 +144,7 @@ obj-$(CONFIG_OF) += of/ + obj-$(CONFIG_SSB) += ssb/ + obj-$(CONFIG_BCMA) += bcma/ + obj-$(CONFIG_VHOST_RING) += vhost/ ++obj-$(CONFIG_VBS) += vbs/ + obj-$(CONFIG_VHOST) += vhost/ + obj-$(CONFIG_VLYNQ) += vlynq/ + obj-$(CONFIG_GREYBUS) += greybus/ +diff --git a/drivers/vbs/Kconfig b/drivers/vbs/Kconfig +new file mode 100644 +index 000000000000..c48f306177c3 +--- /dev/null ++++ b/drivers/vbs/Kconfig +@@ -0,0 +1,20 @@ ++# ++# This Kconfig describes VBS for ACRN hypervisor ++# ++config VBS ++ tristate "Enable VBS framework for ACRN hypervisor" ++ depends on ACRN_GUEST ++ depends on ACRN_VHM ++ default n ++ ---help--- ++ This option is selected by any driver which needs to use ++ the Virtio Backend Service (VBS) framework on ACRN ++ hypervisor. ++ ++config VBS_DEBUG ++ bool "ACRN VBS debugging" ++ depends on VBS != n ++ default n ++ ---help--- ++ This is an option for use by developers; most people should ++ say N here. This enables ACRN VBS debugging. +diff --git a/drivers/vbs/Makefile b/drivers/vbs/Makefile +new file mode 100644 +index 000000000000..b52b65b6bd13 +--- /dev/null ++++ b/drivers/vbs/Makefile +@@ -0,0 +1,3 @@ ++ccflags-$(CONFIG_VBS_DEBUG) := -DDEBUG ++ ++obj-$(CONFIG_VBS) += vbs.o +diff --git a/drivers/vbs/vbs.c b/drivers/vbs/vbs.c +new file mode 100644 +index 000000000000..591d43dbe536 +--- /dev/null ++++ b/drivers/vbs/vbs.c +@@ -0,0 +1,125 @@ ++/* ++ * ACRN Project ++ * Virtio Backend Service (VBS) for ACRN hypervisor ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: Hao Li ++ * ++ * BSD LICENSE ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * ++ * Hao Li ++ * Created Virtio Backend Service (VBS) framework: ++ * - VBS-K is a kernel-level virtio framework that can be used for ++ * virtio backend driver development for ACRN hypervisor. ++ * - VBS-K should be working with VBS-U (Virtio Backend Service in ++ * User) together, in order to connect with virtio frontend driver. ++ * - VBS-K mainly handles data plane part of a virtio backend driver, ++ * such as virtqueue parsing and processing, while VBS-U mainly ++ * hanldes control plane part. ++ */ ++ ++#include ++#include ++#include ++ ++static long virtio_dev_info_set(struct virtio_dev_info *dev, ++ struct vbs_dev_info __user *i) ++{ ++ struct vbs_dev_info info; ++ ++ if (copy_from_user(&info, i, sizeof(struct vbs_dev_info))) ++ return -EFAULT; ++ ++ /* setup struct virtio_dev_info based on info in vbs_dev_info */ ++ strncpy(dev->name, info.name, VBS_NAME_LEN); ++ dev->_ctx.vmid = info.vmid; ++ dev->negotiated_features = info.negotiated_features; ++ dev->io_range_start = info.pio_range_start; ++ dev->io_range_len = info.pio_range_len; ++ dev->io_range_type = PIO_RANGE; ++ ++ return 0; ++} ++ ++long virtio_dev_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, ++ void __user *argp) ++{ ++ long ret; ++ ++ /* ++ * Currently we don't conduct ownership checking, ++ * but assuming caller would have device mutex. 
++ */ ++ ++ switch (ioctl) { ++ case VBS_SET_DEV: ++ ret = virtio_dev_info_set(dev, argp); ++ break; ++ default: ++ ret = -ENOIOCTLCMD; ++ break; ++ } ++ return ret; ++} ++EXPORT_SYMBOL_GPL(virtio_dev_ioctl); ++ ++static int __init vbs_init(void) ++{ ++ return 0; ++} ++ ++static void __exit vbs_exit(void) ++{ ++} ++ ++module_init(vbs_init); ++module_exit(vbs_exit); ++ ++MODULE_VERSION("0.1"); ++MODULE_AUTHOR("Intel Corporation"); ++MODULE_LICENSE("GPL and additional rights"); ++MODULE_DESCRIPTION("Virtio Backend Service framework for ACRN hypervisor"); +diff --git a/include/linux/vbs/vbs.h b/include/linux/vbs/vbs.h +new file mode 100644 +index 000000000000..7b876782fe41 +--- /dev/null ++++ b/include/linux/vbs/vbs.h +@@ -0,0 +1,98 @@ ++/* ++ * ACRN Project ++ * Virtio Backend Service (VBS) for ACRN hypervisor ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: Hao Li ++ * ++ * BSD LICENSE ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * ++ * Hao Li ++ * Define data structures and runtime control APIs for VBS framework. ++ * - VBS-K is a kernel-level virtio framework that can be used for ++ * virtio backend driver development for ACRN hypervisor. ++ * - VBS-K should be working with VBS-U (Virtio Backend Service in ++ * User) together, in order to connect with virtio frontend driver. ++ */ ++ ++#ifndef _VBS_H_ ++#define _VBS_H_ ++ ++#include ++ ++/* ++ * VBS-K device needs to handle frontend driver's kick in kernel. ++ * For virtio 0.9.5, the kick register is a PIO register, ++ * for virtio 1.0+, the kick register could be a MMIO register. 
++ */ ++enum IORangeType { ++ PIO_RANGE = 0x0, /* default */ ++ MMIO_RANGE = 0x1, ++}; ++ ++/* device context */ ++struct ctx { ++ /* VHM required info */ ++ int vmid; ++}; ++ ++/* struct used to maintain virtio device info from userspace VBS */ ++struct virtio_dev_info { ++ /* dev info from VBS */ ++ char name[VBS_NAME_LEN]; /* VBS device name */ ++ struct ctx _ctx; /* device context */ ++ uint32_t negotiated_features; /* features after guest loads driver */ ++ uint64_t io_range_start; /* IO range start of VBS device */ ++ uint64_t io_range_len; /* IO range len of VBS device */ ++ enum IORangeType io_range_type; /* IO range type, PIO or MMIO */ ++}; ++ ++/* VBS Runtime Control APIs */ ++long virtio_dev_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, ++ void __user *argp); ++ ++#endif +diff --git a/include/linux/vbs/vbs_common_if.h b/include/linux/vbs/vbs_common_if.h +new file mode 100644 +index 000000000000..13c1f4580cae +--- /dev/null ++++ b/include/linux/vbs/vbs_common_if.h +@@ -0,0 +1,84 @@ ++/* ++ * ACRN Project ++ * Virtio Backend Service (VBS) for ACRN hypervisor ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: Hao Li ++ * ++ * BSD LICENSE ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * ++ * Hao Li ++ * - Define data structures shared between VBS userspace and VBS kernel ++ * space. 
++ */ ++ ++#ifndef _VBS_COMMON_IF_H_ ++#define _VBS_COMMON_IF_H_ ++ ++#ifdef __KERNEL__ ++#include ++#else ++#include ++#endif ++ ++#define VBS_NAME_LEN 32 ++ ++struct vbs_dev_info { ++ char name[VBS_NAME_LEN];/* VBS name */ ++ int vmid; /* id of VM this device belongs to */ ++ uint32_t negotiated_features; ++ /* features after VIRTIO_CONFIG_S_DRIVER_OK */ ++ uint64_t pio_range_start; ++ /* start of PIO range initialized by guest OS */ ++ uint64_t pio_range_len; /* len of PIO range initialized by guest OS */ ++}; ++ ++#define VBS_IOCTL 0xAF ++ ++#define VBS_SET_DEV _IOW(VBS_IOCTL, 0x00, struct vbs_dev_info) ++ ++#endif +-- +2.17.1 + diff --git a/patches/0014-drm-i915-Add-function-to-set-SSEU-info-per-platform.drm b/patches/0014-drm-i915-Add-function-to-set-SSEU-info-per-platform.drm new file mode 100644 index 0000000000..21f9195280 --- /dev/null +++ b/patches/0014-drm-i915-Add-function-to-set-SSEU-info-per-platform.drm @@ -0,0 +1,166 @@ +From 0cccd2b2a86e1d83ec555f70f275a8c68672c8be Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:02:58 -0700 +Subject: [PATCH 014/690] drm/i915: Add function to set SSEU info per platform + +Add a new function to allow each platform to set maximum +slice, subslice, and EU information to reduce code duplication. 
+ +Signed-off-by: Stuart Summers +Reviewed-by: Mika Kuoppala +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-3-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/gt/intel_sseu.c | 8 +++++ + drivers/gpu/drm/i915/gt/intel_sseu.h | 3 ++ + drivers/gpu/drm/i915/i915_debugfs.c | 6 ++-- + drivers/gpu/drm/i915/intel_device_info.c | 39 +++++++++--------------- + 4 files changed, 28 insertions(+), 28 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c +index 6bf2d87da109..6727079eb9b6 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.c ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.c +@@ -8,6 +8,14 @@ + #include "intel_lrc_reg.h" + #include "intel_sseu.h" + ++void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, ++ u8 max_subslices, u8 max_eus_per_subslice) ++{ ++ sseu->max_slices = max_slices; ++ sseu->max_subslices = max_subslices; ++ sseu->max_eus_per_subslice = max_eus_per_subslice; ++} ++ + unsigned int + intel_sseu_subslice_total(const struct sseu_dev_info *sseu) + { +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h +index b50d0401a4e2..64e47dad07be 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.h ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h +@@ -63,6 +63,9 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu) + return value; + } + ++void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, ++ u8 max_subslices, u8 max_eus_per_subslice); ++ + unsigned int + intel_sseu_subslice_total(const struct sseu_dev_info *sseu); + +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index 806db87affb2..28713c9c98ea 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -3950,9 +3950,9 @@ static int i915_sseu_status(struct seq_file *m, void *unused) + + seq_puts(m, "SSEU Device Status\n"); + memset(&sseu, 0, sizeof(sseu)); +- 
sseu.max_slices = info->sseu.max_slices; +- sseu.max_subslices = info->sseu.max_subslices; +- sseu.max_eus_per_subslice = info->sseu.max_eus_per_subslice; ++ intel_sseu_set_info(&sseu, info->sseu.max_slices, ++ info->sseu.max_subslices, ++ info->sseu.max_eus_per_subslice); + + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { + if (IS_CHERRYVIEW(dev_priv)) +diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c +index d0ed44d33484..77d7bbaa49f3 100644 +--- a/drivers/gpu/drm/i915/intel_device_info.c ++++ b/drivers/gpu/drm/i915/intel_device_info.c +@@ -191,15 +191,10 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) + u8 eu_en; + int s; + +- if (IS_ELKHARTLAKE(dev_priv)) { +- sseu->max_slices = 1; +- sseu->max_subslices = 4; +- sseu->max_eus_per_subslice = 8; +- } else { +- sseu->max_slices = 1; +- sseu->max_subslices = 8; +- sseu->max_eus_per_subslice = 8; +- } ++ if (IS_ELKHARTLAKE(dev_priv)) ++ intel_sseu_set_info(sseu, 1, 4, 8); ++ else ++ intel_sseu_set_info(sseu, 1, 8, 8); + + s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; + ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); +@@ -236,11 +231,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) + const int eu_mask = 0xff; + u32 subslice_mask, eu_en; + ++ intel_sseu_set_info(sseu, 6, 4, 8); ++ + sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> + GEN10_F2_S_ENA_SHIFT; +- sseu->max_slices = 6; +- sseu->max_subslices = 4; +- sseu->max_eus_per_subslice = 8; + + subslice_mask = (1 << 4) - 1; + subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> +@@ -314,9 +308,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) + fuse = I915_READ(CHV_FUSE_GT); + + sseu->slice_mask = BIT(0); +- sseu->max_slices = 1; +- sseu->max_subslices = 2; +- sseu->max_eus_per_subslice = 8; ++ intel_sseu_set_info(sseu, 1, 2, 8); + + if (!(fuse & CHV_FGT_DISABLE_SS0)) { + u8 disabled_mask = +@@ -372,9 +364,8 @@ static 
void gen9_sseu_info_init(struct drm_i915_private *dev_priv) + sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; + + /* BXT has a single slice and at most 3 subslices. */ +- sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3; +- sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4; +- sseu->max_eus_per_subslice = 8; ++ intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3, ++ IS_GEN9_LP(dev_priv) ? 3 : 4, 8); + + /* + * The subslice disable field is global, i.e. it applies +@@ -473,9 +464,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) + + fuse2 = I915_READ(GEN8_FUSE2); + sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; +- sseu->max_slices = 3; +- sseu->max_subslices = 3; +- sseu->max_eus_per_subslice = 8; ++ intel_sseu_set_info(sseu, 3, 3, 8); + + /* + * The subslice disable field is global, i.e. it applies +@@ -577,9 +566,6 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) + break; + } + +- sseu->max_slices = hweight8(sseu->slice_mask); +- sseu->max_subslices = hweight8(sseu->subslice_mask[0]); +- + fuse1 = I915_READ(HSW_PAVP_FUSE1); + switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { + default: +@@ -596,7 +582,10 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) + sseu->eu_per_subslice = 6; + break; + } +- sseu->max_eus_per_subslice = sseu->eu_per_subslice; ++ ++ intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), ++ hweight8(sseu->subslice_mask[0]), ++ sseu->eu_per_subslice); + + for (s = 0; s < sseu->max_slices; s++) { + for (ss = 0; ss < sseu->max_subslices; ss++) { +-- +2.17.1 + diff --git a/patches/0014-net-phy-make-mdiobus_create_device-function-c.connectivity b/patches/0014-net-phy-make-mdiobus_create_device-function-c.connectivity new file mode 100644 index 0000000000..855a297788 --- /dev/null +++ b/patches/0014-net-phy-make-mdiobus_create_device-function-c.connectivity @@ -0,0 +1,81 @@ +From 
ad9bae8fbd5a356537fc0c3c7df105e20fe2b321 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Wed, 10 Jul 2019 16:36:57 +0800 +Subject: [PATCH 014/108] net: phy: make mdiobus_create_device() function + callable from Eth driver + +PHY converter and external PHY drivers depend on MDIO functions of Eth +driver and such MDIO read/write completion may fire IRQ. The ISR for MDIO +completion IRQ is done in the open() function of driver. + +For PHY converter mdio driver that registers ISR event that uses MDIO +read/write function during its probe() function, the MDIO ISR should have +been performed a head of time before mdio driver probe() is called. It is +for reason as such, the mdio device creation and registration will need +to be callable from Eth driver open() function. + +Why existing way to register mdio_device for PHY converter that is done +via mdiobus_register_board_info() is not feasible is the mdio device +creation and registration happens inside Eth driver probe() function, +specifically in mdiobus_setup_mdiodevfrom_board_info() that is called +by mdiobus_register(). + +Therefore, to fulfill the need mentioned above, we make mdiobus_create_ +device() to be callable from Eth driver open(). + +Signed-off-by: Ong Boon Leong +--- + drivers/net/phy/mdio_bus.c | 5 +++-- + include/linux/phy.h | 7 +++++++ + 2 files changed, 10 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index eda7703fed4b..7d27c9c6f657 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -327,8 +327,8 @@ static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio, + * + * Returns 0 on success or < 0 on error. 
+ */ +-static int mdiobus_create_device(struct mii_bus *bus, +- struct mdio_board_info *bi) ++int mdiobus_create_device(struct mii_bus *bus, ++ struct mdio_board_info *bi) + { + struct mdio_device *mdiodev; + int ret = 0; +@@ -348,6 +348,7 @@ static int mdiobus_create_device(struct mii_bus *bus, + + return ret; + } ++EXPORT_SYMBOL(mdiobus_create_device); + + /** + * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus +diff --git a/include/linux/phy.h b/include/linux/phy.h +index 9a0e981df502..4b8107cce037 100644 +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -1252,12 +1252,19 @@ struct mdio_board_info { + #if IS_ENABLED(CONFIG_MDIO_DEVICE) + int mdiobus_register_board_info(const struct mdio_board_info *info, + unsigned int n); ++int mdiobus_create_device(struct mii_bus *bus, struct mdio_board_info *bi); + #else + static inline int mdiobus_register_board_info(const struct mdio_board_info *i, + unsigned int n) + { + return 0; + } ++ ++static inline int mdiobus_create_device(struct mii_bus *bus, ++ struct mdio_board_info *bi) ++{ ++ return 0; ++} + #endif + + +-- +2.17.1 + diff --git a/patches/0014-platform-x86-SoCWatch-coverity-issues.sep-socwatch b/patches/0014-platform-x86-SoCWatch-coverity-issues.sep-socwatch new file mode 100644 index 0000000000..0ee5eaeaf5 --- /dev/null +++ b/patches/0014-platform-x86-SoCWatch-coverity-issues.sep-socwatch @@ -0,0 +1,152 @@ +From e0135bef6e0ef5074f5849d74642408b111e5996 Mon Sep 17 00:00:00 2001 +From: Faycal Benmlih +Date: Fri, 26 Apr 2019 13:45:06 -0500 +Subject: [PATCH 14/27] platform/x86: SoCWatch coverity issues + +- INCK-96: unsigned idx variable compared with < 0 +- INCK-95: incorrect pointer null check usage +- INCK-93, INCK-94: bad weak symbol comparison + +Signed-off-by: Faycal Benmlih +--- + .../platform/x86/socwatch/inc/sw_structs.h | 2 +- + drivers/platform/x86/socwatch/sw_collector.c | 31 ++++++++------- + .../platform/x86/socwatch/sw_ops_provider.c | 2 +- + 
drivers/platform/x86/socwatch/sw_telem.c | 38 +++++++++---------- + .../platform/x86/socwatchhv/inc/sw_structs.h | 2 +- + 5 files changed, 37 insertions(+), 38 deletions(-) + +diff --git a/drivers/platform/x86/socwatch/inc/sw_structs.h b/drivers/platform/x86/socwatch/inc/sw_structs.h +index 738edd35de24..3c1bcc46be2f 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_structs.h ++++ b/drivers/platform/x86/socwatch/inc/sw_structs.h +@@ -250,7 +250,7 @@ struct sw_driver_trace_args_io_descriptor { + struct sw_driver_telem_io_descriptor { + union { + pw_u16_t id; +- pw_u8_t idx; ++ pw_s8_t idx; + }; + pw_u8_t unit; + pw_u8_t scale_op; +diff --git a/drivers/platform/x86/socwatch/sw_collector.c b/drivers/platform/x86/socwatch/sw_collector.c +index ebc65666caf6..652d5af6113b 100644 +--- a/drivers/platform/x86/socwatch/sw_collector.c ++++ b/drivers/platform/x86/socwatch/sw_collector.c +@@ -528,23 +528,22 @@ struct sw_collector_data *sw_alloc_collector_node(void) + + void sw_free_collector_node(struct sw_collector_data *node) + { +- if (node) +- return; +- +- if (node->info) { +- sw_reset_driver_interface_info_i(node->info); +- sw_free_driver_interface_info_i(node->info); +- node->info = NULL; +- } +- if (node->ops) { +- sw_free_ops_i(node->ops); +- node->ops = NULL; +- } +- if (node->msg) { +- sw_free_collector_msg_i(node->msg); +- node->msg = NULL; ++ if (node) { ++ if (node->info) { ++ sw_reset_driver_interface_info_i(node->info); ++ sw_free_driver_interface_info_i(node->info); ++ node->info = NULL; ++ } ++ if (node->ops) { ++ sw_free_ops_i(node->ops); ++ node->ops = NULL; ++ } ++ if (node->msg) { ++ sw_free_collector_msg_i(node->msg); ++ node->msg = NULL; ++ } ++ sw_kfree(node); + } +- sw_kfree(node); + } + + int sw_handle_collector_node(struct sw_collector_data *node) +diff --git a/drivers/platform/x86/socwatch/sw_ops_provider.c b/drivers/platform/x86/socwatch/sw_ops_provider.c +index 6277d7d550ca..285bec0130fe 100644 +--- 
a/drivers/platform/x86/socwatch/sw_ops_provider.c ++++ b/drivers/platform/x86/socwatch/sw_ops_provider.c +@@ -810,7 +810,7 @@ bool sw_socperf_available_i(void) + * weak definition exists. This test will suffice to detect if + * the socperf driver is loaded. + */ +- if (SOCPERF_Read_Data3 != 0) { ++ if (SOCPERF_Read_Data3) { + pw_pr_debug("INFO: SoCPerf support in ON!\n"); + retVal = true; + } else +diff --git a/drivers/platform/x86/socwatch/sw_telem.c b/drivers/platform/x86/socwatch/sw_telem.c +index 1aed81c8c119..eb162b1b28e3 100644 +--- a/drivers/platform/x86/socwatch/sw_telem.c ++++ b/drivers/platform/x86/socwatch/sw_telem.c +@@ -703,25 +703,25 @@ static int builtin_telemetry_available(void) + * weak definition exists. This test will suffice to detect if + * the telemetry driver is loaded. + */ +- if (telemetry_get_eventconfig == 0) +- return 0; +- +- /* OK, the telemetry driver is loaded. But it's possible it +- * hasn't been configured properly. To check that, retrieve +- * the number of events currently configured. This should never +- * be zero since the telemetry driver reserves some SSRAM slots +- * for its own use +- */ +- memset(&punit_evtconfig, 0, sizeof(punit_evtconfig)); +- memset(&pmc_evtconfig, 0, sizeof(pmc_evtconfig)); +- +- punit_evtconfig.evtmap = (u32 *) &punit_event_map; +- pmc_evtconfig.evtmap = (u32 *) &pmc_event_map; +- +- retval = telemetry_get_eventconfig(&punit_evtconfig, &pmc_evtconfig, +- MAX_TELEM_EVENTS, MAX_TELEM_EVENTS); +- return (retval == 0 && punit_evtconfig.num_evts > 0 && +- pmc_evtconfig.num_evts > 0); ++ if (telemetry_get_eventconfig) { ++ /* OK, the telemetry driver is loaded. But it's possible it ++ * hasn't been configured properly. To check that, retrieve ++ * the number of events currently configured. 
This should never ++ * be zero since the telemetry driver reserves some SSRAM slots ++ * for its own use ++ */ ++ memset(&punit_evtconfig, 0, sizeof(punit_evtconfig)); ++ memset(&pmc_evtconfig, 0, sizeof(pmc_evtconfig)); ++ ++ punit_evtconfig.evtmap = (u32 *) &punit_event_map; ++ pmc_evtconfig.evtmap = (u32 *) &pmc_event_map; ++ ++ retval = telemetry_get_eventconfig(&punit_evtconfig, &pmc_evtconfig, ++ MAX_TELEM_EVENTS, MAX_TELEM_EVENTS); ++ return (retval == 0 && punit_evtconfig.num_evts > 0 && ++ pmc_evtconfig.num_evts > 0); ++ } ++ return 0; + } + + /** +diff --git a/drivers/platform/x86/socwatchhv/inc/sw_structs.h b/drivers/platform/x86/socwatchhv/inc/sw_structs.h +index 738edd35de24..3c1bcc46be2f 100644 +--- a/drivers/platform/x86/socwatchhv/inc/sw_structs.h ++++ b/drivers/platform/x86/socwatchhv/inc/sw_structs.h +@@ -250,7 +250,7 @@ struct sw_driver_trace_args_io_descriptor { + struct sw_driver_telem_io_descriptor { + union { + pw_u16_t id; +- pw_u8_t idx; ++ pw_s8_t idx; + }; + pw_u8_t unit; + pw_u8_t scale_op; +-- +2.17.1 + diff --git a/patches/0014-scsi-ufs-connect-to-RPMB-subsystem.security b/patches/0014-scsi-ufs-connect-to-RPMB-subsystem.security new file mode 100644 index 0000000000..ce28f64377 --- /dev/null +++ b/patches/0014-scsi-ufs-connect-to-RPMB-subsystem.security @@ -0,0 +1,346 @@ +From 2aa99fea0890ee5a7fc1951a378e7c5250ec0499 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Thu, 5 Nov 2015 10:22:44 +0200 +Subject: [PATCH 14/65] scsi: ufs: connect to RPMB subsystem + +Register UFS RPMB LUN with the RPMB subsystem and provide +implementation for the RPMB access operations. RPMB partition is +accessed via a sequence of security protocol in and security protocol +out commands with UFS specific parameters. This multi step process is +abstracted into 4 basic RPMB commands. + +V2: resend +V3: resend +V4: Kconfig: use select RPMB to ensure valid configuration +V5: Revamp code using new sequence command. +V6: Resend +V7: Resend +V8: 1. 
Replace scsi_execute_req_flags() with scsi_execute_req() + 2. line over 80 characters fixes + 3. scsi_device_get return value has to be checked +V9: 1. adjust to new unregister api + 2: nframes is 0 based now +V19: + 1. Add block size. + +Change-Id: Ia45c6776d534fb311b6016aaa88f441d403cb0ca +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +Tested-by: Avri Altman +--- + drivers/scsi/ufs/Kconfig | 1 + + drivers/scsi/ufs/ufshcd.c | 224 ++++++++++++++++++++++++++++++++++++++ + drivers/scsi/ufs/ufshcd.h | 2 + + 3 files changed, 227 insertions(+) + +diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig +index 0b845ab7c3bf..b98066f2f34b 100644 +--- a/drivers/scsi/ufs/Kconfig ++++ b/drivers/scsi/ufs/Kconfig +@@ -38,6 +38,7 @@ config SCSI_UFSHCD + select PM_DEVFREQ + select DEVFREQ_GOV_SIMPLE_ONDEMAND + select NLS ++ select RPMB + ---help--- + This selects the support for UFS devices in Linux, say Y and make + sure that you know the name of your UFS host adapter (the card +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 86a00f2349dd..49bc2afb929d 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -37,11 +37,14 @@ + * license terms, and distributes only under these terms. 
+ */ + ++#include + #include + #include + #include + #include + #include ++#include ++ + #include "ufshcd.h" + #include "ufs_quirks.h" + #include "unipro.h" +@@ -6528,6 +6531,218 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) + kfree(desc_buf); + } + ++#define SEC_PROTOCOL_UFS 0xEC ++#define SEC_SPECIFIC_UFS_RPMB 0x001 ++ ++#define SEC_PROTOCOL_CMD_SIZE 12 ++#define SEC_PROTOCOL_RETRIES 3 ++#define SEC_PROTOCOL_RETRIES_ON_RESET 10 ++#define SEC_PROTOCOL_TIMEOUT msecs_to_jiffies(1000) ++ ++static int ++ufshcd_rpmb_security_out(struct scsi_device *sdev, u8 region, ++ void *frames, u32 trans_len) ++{ ++ struct scsi_sense_hdr sshdr; ++ int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; ++ int ret; ++ u8 cmd[SEC_PROTOCOL_CMD_SIZE]; ++ ++ memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); ++ cmd[0] = SECURITY_PROTOCOL_OUT; ++ cmd[1] = SEC_PROTOCOL_UFS; ++ cmd[2] = region; ++ cmd[3] = SEC_SPECIFIC_UFS_RPMB; ++ cmd[4] = 0; /* inc_512 bit 7 set to 0 */ ++ put_unaligned_be32(trans_len, cmd + 6); /* transfer length */ ++ ++retry: ++ ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, ++ frames, trans_len, &sshdr, ++ SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, ++ NULL); ++ ++ if (ret && scsi_sense_valid(&sshdr) && ++ sshdr.sense_key == UNIT_ATTENTION && ++ sshdr.asc == 0x29 && sshdr.ascq == 0x00) ++ /* ++ * Device reset might occur several times, ++ * give it one more chance ++ */ ++ if (--reset_retries > 0) ++ goto retry; ++ ++ if (ret) ++ dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", ++ __func__, ret); ++ ++ if (driver_byte(ret) & DRIVER_SENSE) ++ scsi_print_sense_hdr(sdev, "rpmb: security out", &sshdr); ++ ++ return ret; ++} ++ ++static int ++ufshcd_rpmb_security_in(struct scsi_device *sdev, u8 region, ++ void *frames, u32 alloc_len) ++{ ++ struct scsi_sense_hdr sshdr; ++ int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; ++ int ret; ++ u8 cmd[SEC_PROTOCOL_CMD_SIZE]; ++ ++ memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); ++ cmd[0] = SECURITY_PROTOCOL_IN; ++ cmd[1] = 
SEC_PROTOCOL_UFS; ++ cmd[2] = region; ++ cmd[3] = SEC_SPECIFIC_UFS_RPMB; ++ cmd[4] = 0; /* inc_512 bit 7 set to 0 */ ++ put_unaligned_be32(alloc_len, cmd + 6); /* allocation length */ ++ ++retry: ++ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, ++ frames, alloc_len, &sshdr, ++ SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, ++ NULL); ++ ++ if (ret && scsi_sense_valid(&sshdr) && ++ sshdr.sense_key == UNIT_ATTENTION && ++ sshdr.asc == 0x29 && sshdr.ascq == 0x00) ++ /* ++ * Device reset might occur several times, ++ * give it one more chance ++ */ ++ if (--reset_retries > 0) ++ goto retry; ++ ++ if (ret) ++ dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", ++ __func__, ret); ++ ++ if (driver_byte(ret) & DRIVER_SENSE) ++ scsi_print_sense_hdr(sdev, "rpmb: security in", &sshdr); ++ ++ return ret; ++} ++ ++static int ufshcd_rpmb_cmd_seq(struct device *dev, u8 target, ++ struct rpmb_cmd *cmds, u32 ncmds) ++{ ++ unsigned long flags; ++ struct ufs_hba *hba = dev_get_drvdata(dev); ++ struct scsi_device *sdev; ++ struct rpmb_cmd *cmd; ++ u32 len; ++ u32 i; ++ int ret; ++ ++ spin_lock_irqsave(hba->host->host_lock, flags); ++ sdev = hba->sdev_ufs_rpmb; ++ if (sdev) { ++ ret = scsi_device_get(sdev); ++ if (!ret && !scsi_device_online(sdev)) { ++ ret = -ENODEV; ++ scsi_device_put(sdev); ++ } ++ } else { ++ ret = -ENODEV; ++ } ++ spin_unlock_irqrestore(hba->host->host_lock, flags); ++ if (ret) ++ return ret; ++ ++ for (ret = 0, i = 0; i < ncmds && !ret; i++) { ++ cmd = &cmds[i]; ++ len = rpmb_ioc_frames_len_jdec(cmd->nframes); ++ if (cmd->flags & RPMB_F_WRITE) ++ ret = ufshcd_rpmb_security_out(sdev, target, ++ cmd->frames, len); ++ else ++ ret = ufshcd_rpmb_security_in(sdev, target, ++ cmd->frames, len); ++ } ++ scsi_device_put(sdev); ++ return ret; ++} ++ ++static int ufshcd_rpmb_get_capacity(struct device *dev, u8 target) ++{ ++ struct ufs_hba *hba = dev_get_drvdata(dev); ++ __be64 block_count; ++ int ret; ++ ++ ret = ufshcd_read_unit_desc_param(hba, ++ 
UFS_UPIU_RPMB_WLUN, ++ UNIT_DESC_PARAM_LOGICAL_BLK_COUNT, ++ (u8 *)&block_count, ++ sizeof(block_count)); ++ if (ret) ++ return ret; ++ ++ return be64_to_cpu(block_count) * SZ_512 / SZ_128K; ++} ++ ++static struct rpmb_ops ufshcd_rpmb_dev_ops = { ++ .cmd_seq = ufshcd_rpmb_cmd_seq, ++ .get_capacity = ufshcd_rpmb_get_capacity, ++ .type = RPMB_TYPE_UFS, ++ .auth_method = RPMB_HMAC_ALGO_SHA_256, ++ .block_size = 1, ++ ++}; ++ ++static inline void ufshcd_rpmb_add(struct ufs_hba *hba) ++{ ++ struct rpmb_dev *rdev; ++ u8 rpmb_rw_size = 1; ++ int ret; ++ ++ ret = scsi_device_get(hba->sdev_ufs_rpmb); ++ if (ret) ++ goto out_put_dev; ++ ++ if (hba->ufs_version >= UFSHCI_VERSION_21) { ++ ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, ++ GEOMETRY_DESC_PARAM_RPMB_RW_SIZE, ++ &rpmb_rw_size, ++ sizeof(rpmb_rw_size)); ++ if (ret) ++ goto out_put_dev; ++ } ++ ++ ufshcd_rpmb_dev_ops.rd_cnt_max = rpmb_rw_size; ++ ufshcd_rpmb_dev_ops.wr_cnt_max = rpmb_rw_size; ++ ++ rdev = rpmb_dev_register(hba->dev, 0, &ufshcd_rpmb_dev_ops); ++ if (IS_ERR(rdev)) { ++ dev_warn(hba->dev, "%s: cannot register to rpmb %ld\n", ++ dev_name(hba->dev), PTR_ERR(rdev)); ++ goto out_put_dev; ++ } ++ ++ return; ++ ++out_put_dev: ++ scsi_device_put(hba->sdev_ufs_rpmb); ++ hba->sdev_ufs_rpmb = NULL; ++} ++ ++static inline void ufshcd_rpmb_remove(struct ufs_hba *hba) ++{ ++ unsigned long flags; ++ ++ if (!hba->sdev_ufs_rpmb) ++ return; ++ ++ spin_lock_irqsave(hba->host->host_lock, flags); ++ ++ rpmb_dev_unregister_by_device(hba->dev, 0); ++ scsi_device_put(hba->sdev_ufs_rpmb); ++ hba->sdev_ufs_rpmb = NULL; ++ ++ spin_unlock_irqrestore(hba->host->host_lock, flags); ++} ++ + /** + * ufshcd_scsi_add_wlus - Adds required W-LUs + * @hba: per-adapter instance +@@ -6575,6 +6790,8 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) + ret = PTR_ERR(sdev_rpmb); + goto remove_sdev_ufs_device; + } ++ hba->sdev_ufs_rpmb = sdev_rpmb; ++ + scsi_device_put(sdev_rpmb); + + sdev_boot = __scsi_add_device(hba->host, 
0, 0, +@@ -7188,6 +7405,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) + if (ufshcd_scsi_add_wlus(hba)) + goto out; + ++ ufshcd_rpmb_add(hba); ++ + /* Initialize devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_supported(hba)) { + memcpy(&hba->clk_scaling.saved_pwr_info.info, +@@ -8406,6 +8625,8 @@ int ufshcd_shutdown(struct ufs_hba *hba) + goto out; + } + ++ ufshcd_rpmb_remove(hba); ++ + ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); + out: + if (ret) +@@ -8423,7 +8644,10 @@ EXPORT_SYMBOL(ufshcd_shutdown); + void ufshcd_remove(struct ufs_hba *hba) + { + ufs_bsg_remove(hba); ++ ufshcd_rpmb_remove(hba); ++ + ufs_sysfs_remove_nodes(hba->dev); ++ + scsi_remove_host(hba->host); + /* disable interrupts */ + ufshcd_disable_intr(hba, hba->intr_mask); +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h +index 4eee863b83f3..cdcea5301501 100644 +--- a/drivers/scsi/ufs/ufshcd.h ++++ b/drivers/scsi/ufs/ufshcd.h +@@ -482,6 +482,7 @@ struct ufs_stats { + * @utmrdl_dma_addr: UTMRDL DMA address + * @host: Scsi_Host instance of the driver + * @dev: device handle ++ * @sdev_ufs_rpmb: reference to RPMB device W-LU + * @lrb: local reference block + * @lrb_in_use: lrb in use + * @outstanding_tasks: Bits representing outstanding task requests +@@ -546,6 +547,7 @@ struct ufs_hba { + * "UFS device" W-LU. 
+ */ + struct scsi_device *sdev_ufs_device; ++ struct scsi_device *sdev_ufs_rpmb; + + enum ufs_dev_pwr_mode curr_dev_pwr_mode; + enum uic_link_state uic_link_state; +-- +2.17.1 + diff --git a/patches/0014-trusty-Add-smp-support.trusty b/patches/0014-trusty-Add-smp-support.trusty new file mode 100644 index 0000000000..36631a5755 --- /dev/null +++ b/patches/0014-trusty-Add-smp-support.trusty @@ -0,0 +1,223 @@ +From 4887e52b8db16aa19a3474ece2e0d447d5ce5daa Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= +Date: Fri, 23 Jan 2015 17:55:48 -0800 +Subject: [PATCH 14/63] trusty: Add smp support +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Add an unlocked nop call to allow multiple cpus to enter trusty. +Other standard calls are still serialized to avoid return codes +getting mixed up. +A new return code is used to indicate that the standard call is +running on another cpu. + +Change-Id: I0eecb88fb28989e3f4942659d109eee8863f3227 +Signed-off-by: Arve Hjønnevåg +--- + drivers/trusty/trusty-irq.c | 31 ++++++++++++++++++++++++++++--- + drivers/trusty/trusty.c | 27 ++++++++++++++++++++++++--- + include/linux/trusty/sm_err.h | 3 +++ + include/linux/trusty/smcall.h | 17 +++++++++++++++-- + 4 files changed, 70 insertions(+), 8 deletions(-) + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index ae9535af77dd..1f14f7f48bed 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -154,7 +154,7 @@ static int trusty_irq_call_notify(struct notifier_block *nb, + } + + +-static void trusty_irq_work_func(struct work_struct *work) ++static void trusty_irq_work_func_locked_nop(struct work_struct *work) + { + int ret; + struct trusty_irq_state *is = +@@ -162,8 +162,27 @@ static void trusty_irq_work_func(struct work_struct *work) + + dev_dbg(is->dev, "%s\n", __func__); + +- ret = trusty_std_call32(is->trusty_dev, SMC_SC_NOP, 0, 0, 0); ++ ret = 
trusty_std_call32(is->trusty_dev, SMC_SC_LOCKED_NOP, 0, 0, 0); + if (ret != 0) ++ dev_err(is->dev, "%s: SMC_SC_LOCKED_NOP failed %d", ++ __func__, ret); ++ ++ dev_dbg(is->dev, "%s: done\n", __func__); ++} ++ ++static void trusty_irq_work_func(struct work_struct *work) ++{ ++ int ret; ++ struct trusty_irq_state *is = ++ container_of(work, struct trusty_irq_work, work)->is; ++ ++ dev_dbg(is->dev, "%s\n", __func__); ++ ++ do { ++ ret = trusty_std_call32(is->trusty_dev, SMC_SC_NOP, 0, 0, 0); ++ } while (ret == SM_ERR_NOP_INTERRUPTED); ++ ++ if (ret != SM_ERR_NOP_DONE) + dev_err(is->dev, "%s: SMC_SC_NOP failed %d", __func__, ret); + + dev_dbg(is->dev, "%s: done\n", __func__); +@@ -397,6 +416,7 @@ static int trusty_irq_probe(struct platform_device *pdev) + unsigned int cpu; + unsigned long irq_flags; + struct trusty_irq_state *is; ++ work_func_t work_func; + + dev_dbg(&pdev->dev, "%s\n", __func__); + +@@ -431,12 +451,17 @@ static int trusty_irq_probe(struct platform_device *pdev) + goto err_trusty_call_notifier_register; + } + ++ if (trusty_get_api_version(is->trusty_dev) < TRUSTY_API_VERSION_SMP) ++ work_func = trusty_irq_work_func_locked_nop; ++ else ++ work_func = trusty_irq_work_func; ++ + for_each_possible_cpu(cpu) { + struct trusty_irq_work *trusty_irq_work; + + trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); + trusty_irq_work->is = is; +- INIT_WORK(&trusty_irq_work->work, trusty_irq_work_func); ++ INIT_WORK(&trusty_irq_work->work, work_func); + } + + for (irq = 0; irq >= 0;) +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 4b5d3552720b..2a7aeb4725c5 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -28,6 +28,7 @@ + struct trusty_state { + struct mutex smc_lock; + struct atomic_notifier_head notifier; ++ struct completion cpu_idle_completion; + char *version_str; + u32 api_version; + }; +@@ -161,6 +162,17 @@ static ulong trusty_std_call_helper(struct device *dev, ulong smcnr, + return ret; + } + ++static void 
trusty_std_call_cpu_idle(struct trusty_state *s) ++{ ++ int ret; ++ ++ ret = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10); ++ if (!ret) { ++ pr_warn("%s: timed out waiting for cpu idle to clear, retry anyway\n", ++ __func__); ++ } ++} ++ + s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + { + int ret; +@@ -169,15 +181,20 @@ s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + BUG_ON(SMC_IS_FASTCALL(smcnr)); + BUG_ON(SMC_IS_SMC64(smcnr)); + +- mutex_lock(&s->smc_lock); ++ if (smcnr != SMC_SC_NOP) { ++ mutex_lock(&s->smc_lock); ++ reinit_completion(&s->cpu_idle_completion); ++ } + + dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n", + __func__, smcnr, a0, a1, a2); + + ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2); +- while (ret == SM_ERR_INTERRUPTED) { ++ while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) { + dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n", + __func__, smcnr, a0, a1, a2); ++ if (ret == SM_ERR_CPU_IDLE) ++ trusty_std_call_cpu_idle(s); + ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0); + } + dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n", +@@ -185,7 +202,10 @@ s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + + WARN_ONCE(ret == SM_ERR_PANIC, "trusty crashed"); + +- mutex_unlock(&s->smc_lock); ++ if (smcnr == SMC_SC_NOP) ++ complete(&s->cpu_idle_completion); ++ else ++ mutex_unlock(&s->smc_lock); + + return ret; + } +@@ -315,6 +335,7 @@ static int trusty_probe(struct platform_device *pdev) + } + mutex_init(&s->smc_lock); + ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); ++ init_completion(&s->cpu_idle_completion); + platform_set_drvdata(pdev, s); + + trusty_init_version(s, &pdev->dev); +diff --git a/include/linux/trusty/sm_err.h b/include/linux/trusty/sm_err.h +index 7de09b46fddb..32ee08e499c3 100644 +--- a/include/linux/trusty/sm_err.h ++++ b/include/linux/trusty/sm_err.h +@@ -36,5 +36,8 @@ + #define 
SM_ERR_END_OF_INPUT -10 + #define SM_ERR_PANIC -11 /* Secure OS crashed */ + #define SM_ERR_FIQ_INTERRUPTED -12 /* Got interrupted by FIQ. Call back with SMC_SC_RESTART_FIQ on same CPU */ ++#define SM_ERR_CPU_IDLE -13 /* SMC call waiting for another CPU */ ++#define SM_ERR_NOP_INTERRUPTED -14 /* Got interrupted. Call back with new SMC_SC_NOP */ ++#define SM_ERR_NOP_DONE -15 /* Cpu idle after SMC_SC_NOP (not an error) */ + + #endif +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index 7d8950a8890e..2e43803d9333 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -56,7 +56,7 @@ + + /* FC = Fast call, SC = Standard call */ + #define SMC_SC_RESTART_LAST SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0) +-#define SMC_SC_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1) ++#define SMC_SC_LOCKED_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1) + + /** + * SMC_SC_RESTART_FIQ - Re-enter trusty after it was interrupted by an fiq +@@ -70,6 +70,18 @@ + */ + #define SMC_SC_RESTART_FIQ SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2) + ++/** ++ * SMC_SC_NOP - Enter trusty to run pending work. ++ * ++ * No arguments. ++ * ++ * Returns SM_ERR_NOP_INTERRUPTED or SM_ERR_NOP_DONE. ++ * If SM_ERR_NOP_INTERRUPTED is returned, the call must be repeated. ++ * ++ * Enable by selecting api version TRUSTY_API_VERSION_SMP (2) or later. ++ */ ++#define SMC_SC_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3) ++ + /* + * Return from secure os to non-secure os with return value in r1 + */ +@@ -107,7 +119,8 @@ + * This call must be made before any calls that are affected by the api version. 
+ */ + #define TRUSTY_API_VERSION_RESTART_FIQ (1) +-#define TRUSTY_API_VERSION_CURRENT (1) ++#define TRUSTY_API_VERSION_SMP (2) ++#define TRUSTY_API_VERSION_CURRENT (2) + #define SMC_FC_API_VERSION SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 11) + + /* TRUSTED_OS entity calls */ +-- +2.17.1 + diff --git a/patches/0014-usb-typec-ucsi-ccg-Move-to-the-new-API.usb-typec b/patches/0014-usb-typec-ucsi-ccg-Move-to-the-new-API.usb-typec new file mode 100644 index 0000000000..f5efe3d81c --- /dev/null +++ b/patches/0014-usb-typec-ucsi-ccg-Move-to-the-new-API.usb-typec @@ -0,0 +1,310 @@ +From 8513198ad82becabeb936ef986d719046d047dc4 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Thu, 26 Sep 2019 12:38:25 +0300 +Subject: [PATCH 14/18] usb: typec: ucsi: ccg: Move to the new API + +Replacing the old "cmd" and "sync" callbacks with an +implementation of struct ucsi_operations. The interrupt +handler will from now on read the CCI (Command Status and +Connector Change Indication) register, and call +ucsi_connector_change() function and/or complete pending +command completions based on it. 
+ +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/ucsi/ucsi_ccg.c | 170 +++++++++++++++--------------- + 1 file changed, 85 insertions(+), 85 deletions(-) + +diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c +index d772fce51905..43442580a13c 100644 +--- a/drivers/usb/typec/ucsi/ucsi_ccg.c ++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c +@@ -176,8 +176,8 @@ struct ccg_resp { + struct ucsi_ccg { + struct device *dev; + struct ucsi *ucsi; +- struct ucsi_ppm ppm; + struct i2c_client *client; ++ + struct ccg_dev_info info; + /* version info for boot, primary and secondary */ + struct version_info version[FW2 + 1]; +@@ -196,6 +196,8 @@ struct ucsi_ccg { + /* fw build with vendor information */ + u16 fw_build; + struct work_struct pm_work; ++ ++ struct completion complete; + }; + + static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len) +@@ -243,7 +245,7 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len) + return 0; + } + +-static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len) ++static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len) + { + struct i2c_client *client = uc->client; + unsigned char *buf; +@@ -317,88 +319,89 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc) + return -ETIMEDOUT; + } + +-static int ucsi_ccg_send_data(struct ucsi_ccg *uc) ++static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset, ++ void *val, size_t val_len) + { +- u8 *ppm = (u8 *)uc->ppm.data; +- int status; +- u16 rab; ++ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset); + +- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out)); +- status = ccg_write(uc, rab, ppm + +- offsetof(struct ucsi_data, message_out), +- sizeof(uc->ppm.data->message_out)); +- if (status < 0) +- return status; +- +- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl)); +- return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl), +- sizeof(uc->ppm.data->ctrl)); ++ return 
ccg_read(ucsi_get_drvdata(ucsi), reg, val, val_len); + } + +-static int ucsi_ccg_recv_data(struct ucsi_ccg *uc) ++static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset, ++ const void *val, size_t val_len) + { +- u8 *ppm = (u8 *)uc->ppm.data; +- int status; +- u16 rab; ++ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset); + +- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci)); +- status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci), +- sizeof(uc->ppm.data->cci)); +- if (status < 0) +- return status; +- +- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in)); +- return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in), +- sizeof(uc->ppm.data->message_in)); ++ return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len); + } + +-static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc) ++static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset, ++ const void *val, size_t val_len) + { +- int status; +- unsigned char data; ++ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi); ++ int ret; + +- status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data)); +- if (status < 0) +- return status; ++ mutex_lock(&uc->lock); ++ pm_runtime_get_sync(uc->dev); ++ set_bit(DEV_CMD_PENDING, &uc->flags); + +- return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data)); +-} ++ ret = ucsi_ccg_async_write(ucsi, offset, val, val_len); ++ if (ret) ++ goto err_clear_bit; + +-static int ucsi_ccg_sync(struct ucsi_ppm *ppm) +-{ +- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm); +- int status; ++ if (!wait_for_completion_timeout(&uc->complete, msecs_to_jiffies(5000))) ++ ret = -ETIMEDOUT; + +- status = ucsi_ccg_recv_data(uc); +- if (status < 0) +- return status; ++err_clear_bit: ++ clear_bit(DEV_CMD_PENDING, &uc->flags); ++ pm_runtime_put_sync(uc->dev); ++ mutex_unlock(&uc->lock); + +- /* ack interrupt to allow next command to run */ +- return ucsi_ccg_ack_interrupt(uc); ++ return ret; + } + +-static int 
ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl) +-{ +- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm); +- +- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd; +- return ucsi_ccg_send_data(uc); +-} ++static const struct ucsi_operations ucsi_ccg_ops = { ++ .read = ucsi_ccg_read, ++ .sync_write = ucsi_ccg_sync_write, ++ .async_write = ucsi_ccg_async_write ++}; + + static irqreturn_t ccg_irq_handler(int irq, void *data) + { ++ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI); + struct ucsi_ccg *uc = data; ++ u8 intr_reg; ++ u32 cci; ++ int ret; ++ ++ ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg)); ++ if (ret) ++ return ret; ++ ++ ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci)); ++ if (ret) { ++ dev_err(uc->dev, "failed to read CCI\n"); ++ goto err_clear_irq; ++ } ++ ++ if (UCSI_CCI_CONNECTOR(cci)) ++ ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci)); + +- ucsi_notify(uc->ucsi); ++ if (test_bit(DEV_CMD_PENDING, &uc->flags) && ++ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE)) ++ complete(&uc->complete); ++ ++err_clear_irq: ++ ret = ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg)); ++ if (ret) ++ dev_err(uc->dev, "failed to clear interrupt\n"); + + return IRQ_HANDLED; + } + + static void ccg_pm_workaround_work(struct work_struct *pm_work) + { +- struct ucsi_ccg *uc = container_of(pm_work, struct ucsi_ccg, pm_work); +- +- ucsi_notify(uc->ucsi); ++ ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work)); + } + + static int get_fw_info(struct ucsi_ccg *uc) +@@ -1027,10 +1030,10 @@ static int ccg_restart(struct ucsi_ccg *uc) + return status; + } + +- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm); +- if (IS_ERR(uc->ucsi)) { +- dev_err(uc->dev, "ucsi_register_ppm failed\n"); +- return PTR_ERR(uc->ucsi); ++ status = ucsi_register(uc->ucsi); ++ if (status) { ++ dev_err(uc->dev, "failed to register the interface\n"); ++ return status; + } + + return 0; +@@ -1047,7 +1050,7 @@ static void 
ccg_update_firmware(struct work_struct *work) + return; + + if (flash_mode != FLASH_NOT_NEEDED) { +- ucsi_unregister_ppm(uc->ucsi); ++ ucsi_unregister(uc->ucsi); + free_irq(uc->irq, uc); + + ccg_fw_update(uc, flash_mode); +@@ -1091,21 +1094,15 @@ static int ucsi_ccg_probe(struct i2c_client *client, + struct device *dev = &client->dev; + struct ucsi_ccg *uc; + int status; +- u16 rab; + + uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL); + if (!uc) + return -ENOMEM; + +- uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL); +- if (!uc->ppm.data) +- return -ENOMEM; +- +- uc->ppm.cmd = ucsi_ccg_cmd; +- uc->ppm.sync = ucsi_ccg_sync; + uc->dev = dev; + uc->client = client; + mutex_init(&uc->lock); ++ init_completion(&uc->complete); + INIT_WORK(&uc->work, ccg_update_firmware); + INIT_WORK(&uc->pm_work, ccg_pm_workaround_work); + +@@ -1133,30 +1130,25 @@ static int ucsi_ccg_probe(struct i2c_client *client, + if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK) + uc->port_num++; + ++ uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops); ++ if (IS_ERR(uc->ucsi)) ++ return PTR_ERR(uc->ucsi); ++ ++ ucsi_set_drvdata(uc->ucsi, uc); ++ + status = request_threaded_irq(client->irq, NULL, ccg_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + dev_name(dev), uc); + if (status < 0) { + dev_err(uc->dev, "request_threaded_irq failed - %d\n", status); +- return status; ++ goto out_ucsi_destroy; + } + + uc->irq = client->irq; + +- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm); +- if (IS_ERR(uc->ucsi)) { +- dev_err(uc->dev, "ucsi_register_ppm failed\n"); +- return PTR_ERR(uc->ucsi); +- } +- +- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version)); +- status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) + +- offsetof(struct ucsi_data, version), +- sizeof(uc->ppm.data->version)); +- if (status < 0) { +- ucsi_unregister_ppm(uc->ucsi); +- return status; +- } ++ status = ucsi_register(uc->ucsi); ++ if (status) ++ goto out_free_irq; + + i2c_set_clientdata(client, uc); + +@@ -1167,6 
+1159,13 @@ static int ucsi_ccg_probe(struct i2c_client *client, + pm_runtime_idle(uc->dev); + + return 0; ++ ++out_free_irq: ++ free_irq(uc->irq, uc); ++out_ucsi_destroy: ++ ucsi_destroy(uc->ucsi); ++ ++ return status; + } + + static int ucsi_ccg_remove(struct i2c_client *client) +@@ -1175,8 +1174,9 @@ static int ucsi_ccg_remove(struct i2c_client *client) + + cancel_work_sync(&uc->pm_work); + cancel_work_sync(&uc->work); +- ucsi_unregister_ppm(uc->ucsi); + pm_runtime_disable(uc->dev); ++ ucsi_unregister(uc->ucsi); ++ ucsi_destroy(uc->ucsi); + free_irq(uc->irq, uc); + + return 0; +-- +2.17.1 + diff --git a/patches/0015-ASoC-Intel-Relocate-irq-thread-header-to-sst_ops.audio b/patches/0015-ASoC-Intel-Relocate-irq-thread-header-to-sst_ops.audio new file mode 100644 index 0000000000..bf08f93b9f --- /dev/null +++ b/patches/0015-ASoC-Intel-Relocate-irq-thread-header-to-sst_ops.audio @@ -0,0 +1,283 @@ +From 6187a23ce6e8152355c0270c26cfd513342c2507 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 10:51:42 +0200 +Subject: [PATCH 015/193] ASoC: Intel: Relocate irq thread header to sst_ops + +thread_fn is the only DSP core op which is located outside of sst_ops. +Add it to the rest. Change enables further cleanups by following +patches, mainly removal of sst_dsp_device struct. 
+ +Change-Id: Icb403bbca916541d0766f8198ef2027894c2fff6 +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/baytrail/sst-baytrail-dsp.c | 1 + + sound/soc/intel/baytrail/sst-baytrail-ipc.c | 3 +-- + sound/soc/intel/baytrail/sst-baytrail-ipc.h | 2 ++ + sound/soc/intel/common/sst-dsp-priv.h | 1 + + sound/soc/intel/common/sst-dsp.h | 1 - + sound/soc/intel/common/sst-firmware.c | 2 +- + sound/soc/intel/haswell/sst-haswell-dsp.c | 1 + + sound/soc/intel/haswell/sst-haswell-ipc.c | 3 +-- + sound/soc/intel/haswell/sst-haswell-ipc.h | 2 ++ + sound/soc/intel/skylake/bxt-sst.c | 2 +- + sound/soc/intel/skylake/cnl-sst.c | 28 ++++++++++----------- + sound/soc/intel/skylake/skl-sst-dsp.c | 3 +-- + sound/soc/intel/skylake/skl-sst.c | 2 +- + 13 files changed, 27 insertions(+), 24 deletions(-) + +diff --git a/sound/soc/intel/baytrail/sst-baytrail-dsp.c b/sound/soc/intel/baytrail/sst-baytrail-dsp.c +index 4116ba66a4c2..4869e18116eb 100644 +--- a/sound/soc/intel/baytrail/sst-baytrail-dsp.c ++++ b/sound/soc/intel/baytrail/sst-baytrail-dsp.c +@@ -352,6 +352,7 @@ struct sst_ops sst_byt_ops = { + .ram_read = sst_memcpy_fromio_32, + .ram_write = sst_memcpy_toio_32, + .irq_handler = sst_byt_irq, ++ .thread_fn = sst_byt_irq_thread, + .init = sst_byt_init, + .free = sst_byt_free, + .parse_fw = sst_byt_parse_fw_image, +diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c +index 74274bd38f7a..23d65ad38e19 100644 +--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c ++++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c +@@ -293,7 +293,7 @@ static int sst_byt_process_notification(struct sst_byt *byt, + return 1; + } + +-static irqreturn_t sst_byt_irq_thread(int irq, void *context) ++irqreturn_t sst_byt_irq_thread(int irq, void *context) + { + struct sst_dsp *sst = (struct sst_dsp *) context; + struct sst_byt *byt = sst_dsp_get_thread_context(sst); +@@ -557,7 +557,6 @@ struct sst_dsp *sst_byt_get_dsp(struct sst_byt *byt) + } + + static struct 
sst_dsp_device byt_dev = { +- .thread = sst_byt_irq_thread, + .ops = &sst_byt_ops, + }; + +diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.h b/sound/soc/intel/baytrail/sst-baytrail-ipc.h +index 755098509327..9aba6b83ee5f 100644 +--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.h ++++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.h +@@ -7,6 +7,7 @@ + #ifndef __SST_BYT_IPC_H + #define __SST_BYT_IPC_H + ++#include + #include + + struct sst_byt; +@@ -61,5 +62,6 @@ struct sst_dsp *sst_byt_get_dsp(struct sst_byt *byt); + int sst_byt_dsp_suspend_late(struct device *dev, struct sst_pdata *pdata); + int sst_byt_dsp_boot(struct device *dev, struct sst_pdata *pdata); + int sst_byt_dsp_wait_for_ready(struct device *dev, struct sst_pdata *pdata); ++irqreturn_t sst_byt_irq_thread(int irq, void *context); + + #endif +diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h +index 3d8765ce3e0d..7ec477108948 100644 +--- a/sound/soc/intel/common/sst-dsp-priv.h ++++ b/sound/soc/intel/common/sst-dsp-priv.h +@@ -49,6 +49,7 @@ struct sst_ops { + + /* IRQ handlers */ + irqreturn_t (*irq_handler)(int irq, void *context); ++ irqreturn_t (*thread_fn)(int irq, void *context); + + /* SST init and free */ + int (*init)(struct sst_dsp *sst, struct sst_pdata *pdata); +diff --git a/sound/soc/intel/common/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h +index 604a80c5859b..05fa1ca72f46 100644 +--- a/sound/soc/intel/common/sst-dsp.h ++++ b/sound/soc/intel/common/sst-dsp.h +@@ -175,7 +175,6 @@ struct sst_dsp; + struct sst_dsp_device { + /* Mandatory fields */ + struct sst_ops *ops; +- irqreturn_t (*thread)(int irq, void *context); + void *thread_context; + }; + +diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c +index d27947aeb079..c18236ca77f4 100644 +--- a/sound/soc/intel/common/sst-firmware.c ++++ b/sound/soc/intel/common/sst-firmware.c +@@ -1237,7 +1237,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev, + + /* Register 
the ISR */ + err = request_threaded_irq(sst->irq, sst->ops->irq_handler, +- sst_dev->thread, IRQF_SHARED, "AudioDSP", sst); ++ sst->ops->thread_fn, IRQF_SHARED, "AudioDSP", sst); + if (err) + goto irq_err; + +diff --git a/sound/soc/intel/haswell/sst-haswell-dsp.c b/sound/soc/intel/haswell/sst-haswell-dsp.c +index 88c3f63bded9..c099dec7d61f 100644 +--- a/sound/soc/intel/haswell/sst-haswell-dsp.c ++++ b/sound/soc/intel/haswell/sst-haswell-dsp.c +@@ -699,6 +699,7 @@ struct sst_ops haswell_ops = { + .ram_read = sst_memcpy_fromio_32, + .ram_write = sst_memcpy_toio_32, + .irq_handler = hsw_irq, ++ .thread_fn = hsw_irq_thread, + .init = hsw_init, + .free = hsw_free, + .parse_fw = hsw_parse_fw_image, +diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c +index 0ff89ea96ccf..12a799828240 100644 +--- a/sound/soc/intel/haswell/sst-haswell-ipc.c ++++ b/sound/soc/intel/haswell/sst-haswell-ipc.c +@@ -757,7 +757,7 @@ static int hsw_process_notification(struct sst_hsw *hsw) + return handled; + } + +-static irqreturn_t hsw_irq_thread(int irq, void *context) ++irqreturn_t hsw_irq_thread(int irq, void *context) + { + struct sst_dsp *sst = (struct sst_dsp *) context; + struct sst_hsw *hsw = sst_dsp_get_thread_context(sst); +@@ -2046,7 +2046,6 @@ int sst_hsw_module_set_param(struct sst_hsw *hsw, + } + + static struct sst_dsp_device hsw_dev = { +- .thread = hsw_irq_thread, + .ops = &haswell_ops, + }; + +diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.h b/sound/soc/intel/haswell/sst-haswell-ipc.h +index fdc70c77e688..d4a936a75f7d 100644 +--- a/sound/soc/intel/haswell/sst-haswell-ipc.h ++++ b/sound/soc/intel/haswell/sst-haswell-ipc.h +@@ -8,6 +8,7 @@ + #ifndef __SST_HASWELL_IPC_H + #define __SST_HASWELL_IPC_H + ++#include + #include + #include + #include +@@ -396,6 +397,7 @@ struct sst_hsw_ipc_fw_version; + struct sst_hsw *sst_hsw_new(struct device *dev, const u8 *fw, size_t fw_length, + u32 fw_offset); + void sst_hsw_free(struct sst_hsw 
*hsw); ++irqreturn_t hsw_irq_thread(int irq, void *context); + int sst_hsw_fw_get_version(struct sst_hsw *hsw, + struct sst_hsw_ipc_fw_version *version); + u32 create_channel_map(enum sst_hsw_channel_config config); +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 4d5f34c1fb1b..129837fc789b 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -535,6 +535,7 @@ static const struct skl_dsp_fw_ops bxt_fw_ops = { + + static struct sst_ops skl_ops = { + .irq_handler = skl_dsp_sst_interrupt, ++ .thread_fn = skl_dsp_irq_thread_handler, + .write = sst_shim32_write, + .read = sst_shim32_read, + .ram_read = sst_memcpy_fromio_32, +@@ -543,7 +544,6 @@ static struct sst_ops skl_ops = { + }; + + static struct sst_dsp_device skl_dev = { +- .thread = skl_dsp_irq_thread_handler, + .ops = &skl_ops, + }; + +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 0d3847d1a786..b306b7b521b2 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -285,15 +285,6 @@ static const struct skl_dsp_fw_ops cnl_fw_ops = { + .load_library = bxt_load_library, + }; + +-static struct sst_ops cnl_ops = { +- .irq_handler = cnl_dsp_sst_interrupt, +- .write = sst_shim32_write, +- .read = sst_shim32_read, +- .ram_read = sst_memcpy_fromio_32, +- .ram_write = sst_memcpy_toio_32, +- .free = cnl_dsp_free, +-}; +- + #define CNL_IPC_GLB_NOTIFY_RSP_SHIFT 29 + #define CNL_IPC_GLB_NOTIFY_RSP_MASK 0x1 + #define CNL_IPC_GLB_NOTIFY_RSP_TYPE(x) (((x) >> CNL_IPC_GLB_NOTIFY_RSP_SHIFT) \ +@@ -369,11 +360,6 @@ static irqreturn_t cnl_dsp_irq_thread_handler(int irq, void *context) + return IRQ_HANDLED; + } + +-static struct sst_dsp_device cnl_dev = { +- .thread = cnl_dsp_irq_thread_handler, +- .ops = &cnl_ops, +-}; +- + static void cnl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg) + { + struct skl_ipc_header *header = (struct skl_ipc_header 
*)(&msg->tx.header); +@@ -422,6 +408,20 @@ static int cnl_ipc_init(struct device *dev, struct skl_dev *cnl) + return 0; + } + ++static struct sst_ops cnl_ops = { ++ .irq_handler = cnl_dsp_sst_interrupt, ++ .thread_fn = cnl_dsp_irq_thread_handler, ++ .write = sst_shim32_write, ++ .read = sst_shim32_read, ++ .ram_read = sst_memcpy_fromio_32, ++ .ram_write = sst_memcpy_toio_32, ++ .free = cnl_dsp_free, ++}; ++ ++static struct sst_dsp_device cnl_dev = { ++ .ops = &cnl_ops, ++}; ++ + int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + const char *fw_name, struct skl_dsp_loader_ops dsp_ops, + struct skl_dev **dsp) +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c +index e0807db225f4..8d98089e3177 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.c ++++ b/sound/soc/intel/skylake/skl-sst-dsp.c +@@ -448,12 +448,11 @@ struct sst_dsp *skl_dsp_ctx_init(struct device *dev, + + int skl_dsp_acquire_irq(struct sst_dsp *sst) + { +- struct sst_dsp_device *sst_dev = sst->sst_dev; + int ret; + + /* Register the ISR */ + ret = request_threaded_irq(sst->irq, sst->ops->irq_handler, +- sst_dev->thread, IRQF_SHARED, "AudioDSP", sst); ++ sst->ops->thread_fn, IRQF_SHARED, "AudioDSP", sst); + if (ret) + dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n", + sst->irq); +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 4d6242f9bf52..dd8aac3f0230 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -504,6 +504,7 @@ static const struct skl_dsp_fw_ops skl_fw_ops = { + + static struct sst_ops skl_ops = { + .irq_handler = skl_dsp_sst_interrupt, ++ .thread_fn = skl_dsp_irq_thread_handler, + .write = sst_shim32_write, + .read = sst_shim32_read, + .ram_read = sst_memcpy_fromio_32, +@@ -512,7 +513,6 @@ static struct sst_ops skl_ops = { + }; + + static struct sst_dsp_device skl_dev = { +- .thread = skl_dsp_irq_thread_handler, + .ops = 
&skl_ops, + }; + +-- +2.17.1 + diff --git a/patches/0015-VBS-K-virtqueue-initialization-API.acrn b/patches/0015-VBS-K-virtqueue-initialization-API.acrn new file mode 100644 index 0000000000..d9abe2c2ae --- /dev/null +++ b/patches/0015-VBS-K-virtqueue-initialization-API.acrn @@ -0,0 +1,504 @@ +From f7fa816028d45ef944d6c55daafc25b1e9e79b6b Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Fri, 31 Aug 2018 10:58:56 +0800 +Subject: [PATCH 015/150] VBS-K: virtqueue initialization API. + +This patch added the following to the VBS-K framework: + - virtqueue data structures shared between VBS-K and its + counterpart in userspace, which is VBS-U; + - virtqueue initialization API; + +Change-Id: Ib928ea94cb4f33cf30abd17921089afc14518365 +Tracked-On:218445 +Signed-off-by: Hao Li +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/vbs/Makefile | 1 + + drivers/vbs/vbs.c | 75 ++++++++++++++++++ + drivers/vbs/vq.c | 125 ++++++++++++++++++++++++++++++ + include/linux/vbs/vbs.h | 57 ++++++++++++++ + include/linux/vbs/vbs_common_if.h | 18 +++++ + include/linux/vbs/vq.h | 99 +++++++++++++++++++++++ + 6 files changed, 375 insertions(+) + create mode 100644 drivers/vbs/vq.c + create mode 100644 include/linux/vbs/vq.h + +diff --git a/drivers/vbs/Makefile b/drivers/vbs/Makefile +index b52b65b6bd13..cbd5076e2313 100644 +--- a/drivers/vbs/Makefile ++++ b/drivers/vbs/Makefile +@@ -1,3 +1,4 @@ + ccflags-$(CONFIG_VBS_DEBUG) := -DDEBUG + + obj-$(CONFIG_VBS) += vbs.o ++obj-$(CONFIG_VBS) += vq.o +diff --git a/drivers/vbs/vbs.c b/drivers/vbs/vbs.c +index 591d43dbe536..1e7a9645a353 100644 +--- a/drivers/vbs/vbs.c ++++ b/drivers/vbs/vbs.c +@@ -65,6 +65,66 @@ + #include + #include + #include ++#include ++ ++static long virtio_vqs_info_set(struct virtio_dev_info *dev, ++ struct vbs_vqs_info __user *i) ++{ ++ struct vbs_vqs_info info; ++ struct virtio_vq_info *vq; ++ int j; ++ ++ vq = dev->vqs; ++ ++ if (copy_from_user(&info, i, sizeof(struct 
vbs_vqs_info))) ++ return -EFAULT; ++ ++ /* setup struct virtio_vq_info based on info in struct vbs_vq_info */ ++ if (dev->nvq && dev->nvq != info.nvq) { ++ pr_err("Oops! dev's nvq != vqs's nvq. Not the same device?\n"); ++ return -EFAULT; ++ } ++ ++ for (j = 0; j < info.nvq; j++) { ++ vq->qsize = info.vqs[j].qsize; ++ vq->pfn = info.vqs[j].pfn; ++ vq->msix_idx = info.vqs[j].msix_idx; ++ vq->msix_addr = info.vqs[j].msix_addr; ++ vq->msix_data = info.vqs[j].msix_data; ++ ++ pr_debug("msix id %x, addr %llx, data %x\n", vq->msix_idx, ++ vq->msix_addr, vq->msix_data); ++ ++ virtio_vq_init(vq, vq->pfn); ++ ++ vq++; ++ } ++ ++ return 0; ++} ++ ++/* invoked by VBS-K device's ioctl routine */ ++long virtio_vqs_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, ++ void __user *argp) ++{ ++ long ret; ++ ++ /* ++ * Currently we don't conduct ownership checking, ++ * but assuming caller would have device mutex. ++ */ ++ ++ switch (ioctl) { ++ case VBS_SET_VQ: ++ ret = virtio_vqs_info_set(dev, argp); ++ break; ++ default: ++ ret = -ENOIOCTLCMD; ++ break; ++ } ++ return ret; ++} ++EXPORT_SYMBOL_GPL(virtio_vqs_ioctl); + + static long virtio_dev_info_set(struct virtio_dev_info *dev, + struct vbs_dev_info __user *i) +@@ -77,6 +137,7 @@ static long virtio_dev_info_set(struct virtio_dev_info *dev, + /* setup struct virtio_dev_info based on info in vbs_dev_info */ + strncpy(dev->name, info.name, VBS_NAME_LEN); + dev->_ctx.vmid = info.vmid; ++ dev->nvq = info.nvq; + dev->negotiated_features = info.negotiated_features; + dev->io_range_start = info.pio_range_start; + dev->io_range_len = info.pio_range_len; +@@ -85,6 +146,7 @@ static long virtio_dev_info_set(struct virtio_dev_info *dev, + return 0; + } + ++/* invoked by VBS-K device's ioctl routine */ + long virtio_dev_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp) + { +@@ -107,6 +169,19 @@ long virtio_dev_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + } + EXPORT_SYMBOL_GPL(virtio_dev_ioctl); + 
++/* called in VBS-K device's .open() */ ++long virtio_dev_init(struct virtio_dev_info *dev, ++ struct virtio_vq_info *vqs, int nvq) ++{ ++ int i; ++ ++ for (i = 0; i < nvq; i++) ++ virtio_vq_reset(&vqs[i]); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(virtio_dev_init); ++ + static int __init vbs_init(void) + { + return 0; +diff --git a/drivers/vbs/vq.c b/drivers/vbs/vq.c +new file mode 100644 +index 000000000000..95a6757a1c85 +--- /dev/null ++++ b/drivers/vbs/vq.c +@@ -0,0 +1,125 @@ ++/* ++ * ACRN Project ++ * Virtio Backend Service (VBS) for ACRN hypervisor ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: Hao Li ++ * ++ * BSD LICENSE ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. 
++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * Chris Torek ++ * Hao Li ++ * Created Virtqueue APIs for ACRN VBS framework: ++ * - VBS-K is a kernel-level virtio framework that can be used for ++ * virtio backend driver development for ACRN hypervisor. ++ * - Virtqueue APIs abstract away the details of the internal data ++ * structures of virtqueue, so that callers could easily access ++ * the data from guest through virtqueues. ++ */ ++ ++#include ++#include ++#include ++#include ++ ++/* helper function for remote memory map */ ++void * paddr_guest2host(struct ctx *ctx, uintptr_t gaddr, size_t len) ++{ ++ return map_guest_phys(ctx->vmid, gaddr, len); ++} ++ ++/* ++ * Initialize the currently-selected virtqueue. ++ * The guest just gave us a page frame number, from which we can ++ * calculate the addresses of the queue. 
++ */ ++void virtio_vq_init(struct virtio_vq_info *vq, uint32_t pfn) ++{ ++ uint64_t phys; ++ size_t size; ++ char *base; ++ struct ctx *ctx; ++ ++ ctx = &vq->dev->_ctx; ++ ++ phys = (uint64_t)pfn << VRING_PAGE_BITS; ++ size = virtio_vq_ring_size(vq->qsize); ++ base = paddr_guest2host(ctx, phys, size); ++ ++ /* First page(s) are descriptors... */ ++ vq->desc = (struct virtio_desc *)base; ++ base += vq->qsize * sizeof(struct virtio_desc); ++ ++ /* ... immediately followed by "avail" ring (entirely uint16_t's) */ ++ vq->avail = (struct vring_avail *)base; ++ base += (2 + vq->qsize + 1) * sizeof(uint16_t); ++ ++ /* Then it's rounded up to the next page... */ ++ base = (char *)roundup2((uintptr_t)base, VRING_ALIGN); ++ ++ /* ... and the last page(s) are the used ring. */ ++ vq->used = (struct vring_used *)base; ++ ++ /* Mark queue as allocated, and start at 0 when we use it. */ ++ vq->flags = VQ_ALLOC; ++ vq->last_avail = 0; ++ vq->save_used = 0; ++} ++ ++/* reset one virtqueue, make it invalid */ ++void virtio_vq_reset(struct virtio_vq_info *vq) ++{ ++ if (!vq) { ++ pr_info("%s: vq is NULL!\n", __func__); ++ return; ++ } ++ ++ vq->pfn = 0; ++ vq->msix_idx = VIRTIO_MSI_NO_VECTOR; ++ vq->flags = 0; ++ vq->last_avail = 0; ++ vq->save_used = 0; ++} +diff --git a/include/linux/vbs/vbs.h b/include/linux/vbs/vbs.h +index 7b876782fe41..715c49156a1a 100644 +--- a/include/linux/vbs/vbs.h ++++ b/include/linux/vbs/vbs.h +@@ -80,19 +80,76 @@ struct ctx { + int vmid; + }; + ++struct virtio_desc { /* AKA vring_desc */ ++ uint64_t addr; /* guest physical address */ ++ uint32_t len; /* length of scatter/gather seg */ ++ uint16_t flags; /* desc flags */ ++ uint16_t next; /* next desc if F_NEXT */ ++} __attribute__((packed)); ++ ++struct virtio_used { /* AKA vring_used_elem */ ++ uint32_t idx; /* head of used descriptor chain */ ++ uint32_t len; /* length written-to */ ++} __attribute__((packed)); ++ ++struct vring_avail { ++ uint16_t flags; /* vring_avail flags */ ++ uint16_t idx; /* 
counts to 65535, then cycles */ ++ uint16_t ring[]; /* size N, reported in QNUM value */ ++} __attribute__((packed)); ++ ++struct vring_used { ++ uint16_t flags; /* vring_used flags */ ++ uint16_t idx; /* counts to 65535, then cycles */ ++ struct virtio_used ring[]; /* size N */ ++} __attribute__((packed)); ++ ++/* struct used to maintain virtqueue info from userspace VBS */ ++struct virtio_vq_info { ++ /* virtqueue info from VBS-U */ ++ uint16_t qsize; /* size of this queue (a power of 2) */ ++ uint32_t pfn; /* PFN of virt queue (not shifted!) */ ++ uint16_t msix_idx; /* MSI-X index/VIRTIO_MSI_NO_VECTOR */ ++ uint64_t msix_addr; /* MSI-X address specified by index */ ++ uint32_t msix_data; /* MSI-X data specified by index */ ++ ++ /* members created in kernel space VBS */ ++ int (*vq_notify)(int); /* vq-wide notification */ ++ struct virtio_dev_info *dev; /* backpointer to virtio_dev_info */ ++ uint16_t num; /* we're the num'th virtqueue */ ++ uint16_t flags; /* virtqueue flags */ ++ uint16_t last_avail; /* a recent value of vq_avail->va_idx */ ++ uint16_t save_used; /* saved vq_used->vu_idx */ ++ ++ volatile struct virtio_desc *desc; /* descriptor array */ ++ volatile struct vring_avail *avail; /* the "avail" ring */ ++ volatile struct vring_used *used; /* the "used" ring */ ++}; ++ + /* struct used to maintain virtio device info from userspace VBS */ + struct virtio_dev_info { + /* dev info from VBS */ + char name[VBS_NAME_LEN]; /* VBS device name */ + struct ctx _ctx; /* device context */ ++ int nvq; /* number of virtqueues */ + uint32_t negotiated_features; /* features after guest loads driver */ + uint64_t io_range_start; /* IO range start of VBS device */ + uint64_t io_range_len; /* IO range len of VBS device */ + enum IORangeType io_range_type; /* IO range type, PIO or MMIO */ ++ ++ /* members created in kernel space VBS */ ++ void (*dev_notify)(void *, struct virtio_vq_info *); ++ /* device-wide notification */ ++ struct virtio_vq_info *vqs; /* 
virtqueue(s) */ ++ int curq; /* current virtqueue index */ + }; + + /* VBS Runtime Control APIs */ ++long virtio_dev_init(struct virtio_dev_info *dev, struct virtio_vq_info *vqs, ++ int nvq); + long virtio_dev_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp); ++long virtio_vqs_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, ++ void __user *argp); + + #endif +diff --git a/include/linux/vbs/vbs_common_if.h b/include/linux/vbs/vbs_common_if.h +index 13c1f4580cae..1736174a2651 100644 +--- a/include/linux/vbs/vbs_common_if.h ++++ b/include/linux/vbs/vbs_common_if.h +@@ -65,11 +65,28 @@ + #include + #endif + ++#define VBS_MAX_VQ_CNT 10 + #define VBS_NAME_LEN 32 ++#define VIRTIO_MSI_NO_VECTOR 0xFFFF ++ ++struct vbs_vq_info { ++ uint16_t qsize; /* size of this virtqueue (a power of 2) */ ++ uint32_t pfn; /* PFN of virtqueue (not shifted!) */ ++ uint16_t msix_idx; /* MSI-X index, or VIRTIO_MSI_NO_VECTOR */ ++ uint64_t msix_addr; /* MSI-X address specified by index */ ++ uint32_t msix_data; /* MSI-X data specified by index */ ++}; ++ ++struct vbs_vqs_info { ++ uint32_t nvq; /* number of virtqueues */ ++ struct vbs_vq_info vqs[VBS_MAX_VQ_CNT]; ++ /* array of struct vbs_vq_info */ ++}; + + struct vbs_dev_info { + char name[VBS_NAME_LEN];/* VBS name */ + int vmid; /* id of VM this device belongs to */ ++ int nvq; /* number of virtqueues */ + uint32_t negotiated_features; + /* features after VIRTIO_CONFIG_S_DRIVER_OK */ + uint64_t pio_range_start; +@@ -80,5 +97,6 @@ struct vbs_dev_info { + #define VBS_IOCTL 0xAF + + #define VBS_SET_DEV _IOW(VBS_IOCTL, 0x00, struct vbs_dev_info) ++#define VBS_SET_VQ _IOW(VBS_IOCTL, 0x01, struct vbs_vqs_info) + + #endif +diff --git a/include/linux/vbs/vq.h b/include/linux/vbs/vq.h +new file mode 100644 +index 000000000000..55ff810fa094 +--- /dev/null ++++ b/include/linux/vbs/vq.h +@@ -0,0 +1,99 @@ ++/* ++ * ACRN Project ++ * Virtio Backend Service (VBS) for ACRN hypervisor ++ * ++ * This file is provided under a 
dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: Hao Li ++ * ++ * BSD LICENSE ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * Chris Torek ++ * Hao Li ++ * Define virtqueue data structures and APIs for VBS framework. ++ * - VBS-K is a kernel-level virtio framework that can be used for ++ * virtio backend driver development for ACRN hypervisor. ++ * - VBS-K should be working with VBS-U (Virtio Backend Service in ++ * User) together, in order to connect with virtio frontend driver. ++ */ ++ ++#ifndef _VQ_H_ ++#define _VQ_H_ ++ ++#include ++#include ++ ++/* virtqueue alignment */ ++#define VRING_ALIGN 4096 ++#define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) ++ ++/* PFN register shift amount */ ++#define VRING_PAGE_BITS 12 ++ ++/* virtqueue flags */ ++#define VQ_ALLOC 0x01 ++#define VQ_BROKED 0x02 ++ ++/* get virtqueue size according to virtio specification */ ++static inline size_t virtio_vq_ring_size(unsigned int qsz) ++{ ++ size_t size; ++ ++ /* constant 3 below = va_flags, va_idx, va_used_event */ ++ size = sizeof(struct virtio_desc) * qsz + sizeof(uint16_t) * (3 + qsz); ++ size = roundup2(size, VRING_ALIGN); ++ ++ /* constant 3 below = vu_flags, vu_idx, vu_avail_event */ ++ size += sizeof(uint16_t) * 3 + sizeof(struct virtio_used) * qsz; ++ size = roundup2(size, VRING_ALIGN); ++ ++ return size; ++} ++ ++/* virtqueue initialization APIs */ ++void virtio_vq_init(struct virtio_vq_info *vq, uint32_t pfn); ++void virtio_vq_reset(struct virtio_vq_info *vq); ++ ++#endif +-- +2.17.1 + diff --git 
a/patches/0015-drm-i915-Add-subslice-stride-runtime-parameter.drm b/patches/0015-drm-i915-Add-subslice-stride-runtime-parameter.drm new file mode 100644 index 0000000000..fb2a7d75d5 --- /dev/null +++ b/patches/0015-drm-i915-Add-subslice-stride-runtime-parameter.drm @@ -0,0 +1,92 @@ +From 7bf3c886bc781f2943cdc2015c117eb30552ddb5 Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:02:59 -0700 +Subject: [PATCH 015/690] drm/i915: Add subslice stride runtime parameter + +Add a new parameter, ss_stride, to the runtime info +structure. This is used to mirror the userspace concept +of subslice stride, which is a range of subslices per slice. + +This patch simply adds the definition and updates usage +in the QUERY_TOPOLOGY_INFO handler. + +v2: Add GEM_BUG_ON to make sure ss_stride is valid + +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-4-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/gt/intel_sseu.c | 3 +++ + drivers/gpu/drm/i915/gt/intel_sseu.h | 3 +++ + drivers/gpu/drm/i915/i915_query.c | 5 ++--- + 3 files changed, 8 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c +index 6727079eb9b6..edf39ae132c3 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.c ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.c +@@ -14,6 +14,9 @@ void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + sseu->max_slices = max_slices; + sseu->max_subslices = max_subslices; + sseu->max_eus_per_subslice = max_eus_per_subslice; ++ ++ sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices); ++ GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE); + } + + unsigned int +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h +index 64e47dad07be..8b8b562ff773 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.h ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h +@@ 
-15,6 +15,7 @@ struct drm_i915_private; + #define GEN_MAX_SLICES (6) /* CNL upper bound */ + #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ + #define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) ++#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) + + struct sseu_dev_info { + u8 slice_mask; +@@ -33,6 +34,8 @@ struct sseu_dev_info { + u8 max_subslices; + u8 max_eus_per_subslice; + ++ u8 ss_stride; ++ + /* We don't have more than 8 eus per subslice at the moment and as we + * store eus enabled using bits, no need to multiply by eus per + * subslice. +diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c +index ad9240a0817a..d8e25dcf5f0b 100644 +--- a/drivers/gpu/drm/i915/i915_query.c ++++ b/drivers/gpu/drm/i915/i915_query.c +@@ -37,7 +37,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv, + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + struct drm_i915_query_topology_info topo; + u32 slice_length, subslice_length, eu_length, total_length; +- u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices); + u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); + int ret; + +@@ -50,7 +49,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, + BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask)); + + slice_length = sizeof(sseu->slice_mask); +- subslice_length = sseu->max_slices * subslice_stride; ++ subslice_length = sseu->max_slices * sseu->ss_stride; + eu_length = sseu->max_slices * sseu->max_subslices * eu_stride; + total_length = sizeof(topo) + slice_length + subslice_length + + eu_length; +@@ -69,7 +68,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, + topo.max_eus_per_subslice = sseu->max_eus_per_subslice; + + topo.subslice_offset = slice_length; +- topo.subslice_stride = subslice_stride; ++ topo.subslice_stride = sseu->ss_stride; + topo.eu_offset = slice_length + subslice_length; + topo.eu_stride = eu_stride; + 
+-- +2.17.1 + diff --git a/patches/0015-mfd-intel-lpss-Add-Intel-Comet-Lake-PCH-H-PCI-IDs.lpss b/patches/0015-mfd-intel-lpss-Add-Intel-Comet-Lake-PCH-H-PCI-IDs.lpss new file mode 100644 index 0000000000..bf1e7439f4 --- /dev/null +++ b/patches/0015-mfd-intel-lpss-Add-Intel-Comet-Lake-PCH-H-PCI-IDs.lpss @@ -0,0 +1,47 @@ +From 26f9d9d0ed90ed40a82b3ba27c7ed4f73529e848 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 5 Mar 2018 13:54:17 +0200 +Subject: [PATCH 15/40] mfd: intel-lpss: Add Intel Comet Lake PCH-H PCI IDs + +Intel Comet Lake PCH-H has the same LPSS than Intel Cannon Lake. +Add the new IDs to the list of supported devices. + +Signed-off-by: Andy Shevchenko +--- + drivers/mfd/intel-lpss-pci.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c +index 9355db29d2f9..6a7bfa2ab06d 100644 +--- a/drivers/mfd/intel-lpss-pci.c ++++ b/drivers/mfd/intel-lpss-pci.c +@@ -128,7 +128,7 @@ static const struct intel_lpss_platform_info cnl_i2c_info = { + }; + + static const struct pci_device_id intel_lpss_pci_ids[] = { +- /* CML */ ++ /* CML-LP */ + { PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x02a9), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&spt_info }, +@@ -141,6 +141,17 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x02ea), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x02eb), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&spt_info }, ++ /* CML-H */ ++ { PCI_VDEVICE(INTEL, 0x06a8), (kernel_ulong_t)&spt_uart_info }, ++ { PCI_VDEVICE(INTEL, 0x06a9), (kernel_ulong_t)&spt_uart_info }, ++ { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&spt_info }, ++ { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&spt_info }, ++ { PCI_VDEVICE(INTEL, 0x06c7), (kernel_ulong_t)&spt_uart_info }, ++ { PCI_VDEVICE(INTEL, 0x06e8), 
(kernel_ulong_t)&cnl_i2c_info }, ++ { PCI_VDEVICE(INTEL, 0x06e9), (kernel_ulong_t)&cnl_i2c_info }, ++ { PCI_VDEVICE(INTEL, 0x06ea), (kernel_ulong_t)&cnl_i2c_info }, ++ { PCI_VDEVICE(INTEL, 0x06eb), (kernel_ulong_t)&cnl_i2c_info }, ++ { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&spt_info }, + /* BXT A-Step */ + { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info }, +-- +2.17.1 + diff --git a/patches/0015-net-phy-introduce-mdiobus_get_mdio_device.connectivity b/patches/0015-net-phy-introduce-mdiobus_get_mdio_device.connectivity new file mode 100644 index 0000000000..0e1fd82ad4 --- /dev/null +++ b/patches/0015-net-phy-introduce-mdiobus_get_mdio_device.connectivity @@ -0,0 +1,45 @@ +From cce7587ccb87880563c5f974ad91b6e39947b31b Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Thu, 1 Aug 2019 13:08:27 +0800 +Subject: [PATCH 015/108] net: phy: introduce mdiobus_get_mdio_device + +Add the function to get mdio_device based on the mdio addr. 
+ +Signed-off-by: Ong Boon Leong +--- + drivers/net/phy/mdio_bus.c | 6 ++++++ + include/linux/mdio.h | 1 + + 2 files changed, 7 insertions(+) + +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index 7d27c9c6f657..f313aa1905fd 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -126,6 +126,12 @@ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) + } + EXPORT_SYMBOL(mdiobus_get_phy); + ++struct mdio_device *mdiobus_get_mdio_device(struct mii_bus *bus, int addr) ++{ ++ return bus->mdio_map[addr]; ++} ++EXPORT_SYMBOL(mdiobus_get_mdio_device); ++ + bool mdiobus_is_registered_device(struct mii_bus *bus, int addr) + { + return bus->mdio_map[addr]; +diff --git a/include/linux/mdio.h b/include/linux/mdio.h +index a7604248777b..f4b14b68df74 100644 +--- a/include/linux/mdio.h ++++ b/include/linux/mdio.h +@@ -326,6 +326,7 @@ int mdiobus_register_device(struct mdio_device *mdiodev); + int mdiobus_unregister_device(struct mdio_device *mdiodev); + bool mdiobus_is_registered_device(struct mii_bus *bus, int addr); + struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr); ++struct mdio_device *mdiobus_get_mdio_device(struct mii_bus *bus, int addr); + + /** + * mdio_module_driver() - Helper macro for registering mdio drivers +-- +2.17.1 + diff --git a/patches/0015-platform-x86-SEP-coverity-issues.sep-socwatch b/patches/0015-platform-x86-SEP-coverity-issues.sep-socwatch new file mode 100644 index 0000000000..76fa5ee30d --- /dev/null +++ b/patches/0015-platform-x86-SEP-coverity-issues.sep-socwatch @@ -0,0 +1,83 @@ +From dd6e363c721f4124b7dbde430362060530aba7cf Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Tue, 30 Apr 2019 14:25:28 -0700 +Subject: [PATCH 15/27] platform/x86: SEP coverity issues + + - INCK-97: Incompatible cast found in pci.c (PCI_Read_U64_Valid) + - INCK-98: Incompatible cast found in pci.c (PCI_Read_U64) + +Signed-off-by: Manisha Chinthapally +--- + 
drivers/platform/x86/sepdk/sep/pci.c | 18 +++++++++--------- + 1 file changed, 9 insertions(+), 9 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/sep/pci.c b/drivers/platform/x86/sepdk/sep/pci.c +index 12a93804975c..b830372cc9f6 100755 +--- a/drivers/platform/x86/sepdk/sep/pci.c ++++ b/drivers/platform/x86/sepdk/sep/pci.c +@@ -173,7 +173,7 @@ U32 PCI_Read_U32_Valid(U32 bus, U32 device, U32 function, U32 offset, + */ + U64 PCI_Read_U64(U32 bus, U32 device, U32 function, U32 offset) + { +- U64 res = 0; ++ U32 res = 0; + U32 devfn = (device << 3) | (function & 0x7); + + SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](8B)...", bus, +@@ -181,9 +181,9 @@ U64 PCI_Read_U64(U32 bus, U32 device, U32 function, U32 offset) + + if (bus < MAX_BUSNO && pci_buses[bus]) { + pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4, +- (U32 *)&res); ++ &res); + pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset + 4, 4, +- ((U32 *)&res) + 1); ++ &res + 1); + } else { + SEP_DRV_LOG_ERROR( + "Could not read BDF(%x:%x:%x)[0x%x]: bus not found!", +@@ -192,7 +192,7 @@ U64 PCI_Read_U64(U32 bus, U32 device, U32 function, U32 offset) + + SEP_DRV_LOG_REGISTER_OUT("Has read BDF(%x:%x:%x)[0x%x](8B): 0x%llx.", + bus, device, function, offset, res); +- return res; ++ return (U64)res; + } + + /* ------------------------------------------------------------------------- */ +@@ -213,7 +213,7 @@ U64 PCI_Read_U64(U32 bus, U32 device, U32 function, U32 offset) + U64 PCI_Read_U64_Valid(U32 bus, U32 device, U32 function, U32 offset, + U64 invalid_value) + { +- U64 res = 0; ++ U32 res = 0; + U32 devfn = (device << 3) | (function & 0x7); + + SEP_DRV_LOG_REGISTER_IN("Will read BDF(%x:%x:%x)[0x%x](8B)...", bus, +@@ -221,11 +221,11 @@ U64 PCI_Read_U64_Valid(U32 bus, U32 device, U32 function, U32 offset, + + if (bus < MAX_BUSNO && pci_buses[bus]) { + pci_buses[bus]->ops->read(pci_buses[bus], devfn, offset, 4, +- (U32 *)&res); ++ &res); + pci_buses[bus]->ops->read(pci_buses[bus], devfn, 
offset + 4, 4, +- ((U32 *)&res) + 1); ++ &res + 1); + +- if (res == invalid_value) { ++ if ((U64)res == invalid_value) { + res = 0; + SEP_DRV_LOG_REGISTER_OUT( + "Has read BDF(%x:%x:%x)[0x%x](8B): 0x%llx(invalid val)", +@@ -241,7 +241,7 @@ U64 PCI_Read_U64_Valid(U32 bus, U32 device, U32 function, U32 offset, + bus, device, function, offset); + } + +- return res; ++ return (U64)res; + } + + /* ------------------------------------------------------------------------- */ +-- +2.17.1 + diff --git a/patches/0015-scsi-ufs-store-device-serial-number.security b/patches/0015-scsi-ufs-store-device-serial-number.security new file mode 100644 index 0000000000..cb2e033585 --- /dev/null +++ b/patches/0015-scsi-ufs-store-device-serial-number.security @@ -0,0 +1,156 @@ +From 2a77205075c603f89ae87fc18d279429935afb93 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Thu, 14 May 2015 00:00:59 +0300 +Subject: [PATCH 15/65] scsi: ufs: store device serial number. + +Retrieve device serial number and store it for RPMB subsystem use. 
+ +V9: rebase +v10: Fix Kdoc + +Change-Id: Ieee7f85696f6614cd2f3c81403124159ea85b77e +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +--- + drivers/scsi/ufs/ufs.h | 4 ++++ + drivers/scsi/ufs/ufshcd.c | 35 +++++++++++++++++++++++++++++------ + 2 files changed, 33 insertions(+), 6 deletions(-) + +diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h +index ee93ba19c78f..790dcbd788f6 100644 +--- a/drivers/scsi/ufs/ufs.h ++++ b/drivers/scsi/ufs/ufs.h +@@ -547,12 +547,16 @@ struct ufs_dev_info { + * @min_uma_sz: minimum UM area size + * @wmanufacturerid: card details + * @model: card model ++ * @serial_no: serial number ++ * @serial_no_len: serial number string length + */ + struct ufs_dev_desc { + u8 subclass; + u32 min_uma_sz; + u16 wmanufacturerid; + char *model; ++ char *serial_no; ++ size_t serial_no_len; + }; + + /** +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 49bc2afb929d..d3a242104a16 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -43,6 +43,7 @@ + #include + #include + #include ++#include + #include + + #include "ufshcd.h" +@@ -6691,12 +6692,19 @@ static struct rpmb_ops ufshcd_rpmb_dev_ops = { + + }; + +-static inline void ufshcd_rpmb_add(struct ufs_hba *hba) ++static inline void ufshcd_rpmb_add(struct ufs_hba *hba, ++ struct ufs_dev_desc *dev_desc) + { + struct rpmb_dev *rdev; + u8 rpmb_rw_size = 1; + int ret; + ++ ufshcd_rpmb_dev_ops.dev_id = kmemdup(dev_desc->serial_no, ++ dev_desc->serial_no_len, ++ GFP_KERNEL); ++ if (ufshcd_rpmb_dev_ops.dev_id) ++ ufshcd_rpmb_dev_ops.dev_id_len = dev_desc->serial_no_len; ++ + ret = scsi_device_get(hba->sdev_ufs_rpmb); + if (ret) + goto out_put_dev; +@@ -6740,6 +6748,9 @@ static inline void ufshcd_rpmb_remove(struct ufs_hba *hba) + scsi_device_put(hba->sdev_ufs_rpmb); + hba->sdev_ufs_rpmb = NULL; + ++ kfree(ufshcd_rpmb_dev_ops.dev_id); ++ ufshcd_rpmb_dev_ops.dev_id = NULL; ++ + spin_unlock_irqrestore(hba->host->host_lock, flags); + } + +@@ 
-6813,7 +6824,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba, + { + int err; + size_t buff_len; +- u8 model_index; ++ u8 index; + u8 *desc_buf; + __be32 val; + +@@ -6847,8 +6858,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba, + dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; + +- model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; +- err = ufshcd_read_string_desc(hba, model_index, ++ index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; ++ err = ufshcd_read_string_desc(hba, index, + &dev_desc->model, SD_ASCII_STD); + if (err < 0) { + dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", +@@ -6856,6 +6867,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba, + goto out; + } + ++ index = desc_buf[DEVICE_DESC_PARAM_SN]; ++ err = ufshcd_read_string_desc(hba, index, &dev_desc->serial_no, SD_RAW); ++ if (err < 0) { ++ dev_err(hba->dev, "%s: Failed reading Serial No. err = %d\n", ++ __func__, err); ++ goto out; ++ } ++ + /* + * ufshcd_read_string_desc returns size of the string + * reset the error value +@@ -6871,6 +6890,9 @@ static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc) + { + kfree(dev_desc->model); + dev_desc->model = NULL; ++ ++ kfree(dev_desc->serial_no); ++ dev_desc->serial_no = NULL; + } + + static void ufs_fixup_device_setup(struct ufs_hba *hba, +@@ -7348,7 +7370,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) + } + + ufs_fixup_device_setup(hba, &card); +- ufs_put_device_desc(&card); + + ufshcd_tune_unipro_params(hba); + +@@ -7405,7 +7426,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) + if (ufshcd_scsi_add_wlus(hba)) + goto out; + +- ufshcd_rpmb_add(hba); ++ ufshcd_rpmb_add(hba, &card); + + /* Initialize devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_supported(hba)) { +@@ -7431,6 +7452,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) + hba->is_init_prefetch = true; + + out: ++ ++ ufs_put_device_desc(&card); + /* + * 
If we failed to initialize the device or the device is not + * present, turn off the power/clocks etc. +-- +2.17.1 + diff --git a/patches/0015-trusty-irq-Add-support-for-secure-interrupt-mapping.trusty b/patches/0015-trusty-irq-Add-support-for-secure-interrupt-mapping.trusty new file mode 100644 index 0000000000..4f0db4e672 --- /dev/null +++ b/patches/0015-trusty-irq-Add-support-for-secure-interrupt-mapping.trusty @@ -0,0 +1,233 @@ +From 65ddb502135e5f710db505783122a4ca5338a37b Mon Sep 17 00:00:00 2001 +From: Michael Ryleev +Date: Fri, 26 Jun 2015 13:47:02 -0700 +Subject: [PATCH 15/63] trusty-irq: Add support for secure interrupt mapping + +Trusty TEE is using flat IRQ space to identify its +interrupts which does not match to IRQ domain model +introduced on the Linux side. This CL adds support +for optional "interrupt-templates" and "interrupt-ranges" +properties that can be used to define correspondence +between secure and non-secure IRQ IDs. + +Change-Id: Idb298760f2f21f0b8507eafa72600cca7ab8ac64 +Signed-off-by: Michael Ryleev +--- + .../devicetree/bindings/trusty/trusty-irq.txt | 59 ++++++++++ + drivers/trusty/trusty-irq.c | 106 +++++++++++++++++- + 2 files changed, 161 insertions(+), 4 deletions(-) + +diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt +index 85fe1f1c7458..5aefeb8e536f 100644 +--- a/Documentation/devicetree/bindings/trusty/trusty-irq.txt ++++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt +@@ -5,4 +5,63 @@ Trusty requires non-secure irqs to be forwarded to the secure OS. + Required properties: + - compatible: "android,trusty-irq-v1" + ++Optional properties: ++ ++- interrupt-templates: is an optional property that works together ++ with "interrupt-ranges" to specify secure side to kernel IRQs mapping. 
++ ++ It is a list of entries, each one of which defines a group of interrupts ++ having common properties, and has the following format: ++ < phandle irq_id_pos [templ_data]> ++ phandle - phandle of interrupt controller this template is for ++ irq_id_pos - the position of irq id in interrupt specifier array ++ for interrupt controller referenced by phandle. ++ templ_data - is an array of u32 values (could be empty) in the same ++ format as interrupt specifier for interrupt controller ++ referenced by phandle but with omitted irq id field. ++ ++- interrupt-ranges: list of entries that specifies secure side to kernel ++ IRQs mapping. ++ ++ Each entry in the "interrupt-ranges" list has the following format: ++ ++ beg - first entry in this range ++ end - last entry in this range ++ templ_idx - index of entry in "interrupt-templates" property ++ that must be used as a template for all interrupts ++ in this range ++ ++Example: ++{ ++ gic: interrupt-controller@50041000 { ++ compatible = "arm,gic-400"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ ... ++ }; ++ ... ++ IPI: interrupt-controller { ++ compatible = "android,CustomIPI"; ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ }; ++ ... ++ trusty { ++ compatible = "android,trusty-smc-v1"; ++ ranges; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ irq { ++ compatible = "android,trusty-irq-v1"; ++ interrupt-templates = <&IPI 0>, ++ <&gic 1 GIC_PPI 0>, ++ <&gic 1 GIC_SPI 0>; ++ interrupt-ranges = < 0 15 0>, ++ <16 31 1>, ++ <32 223 2>; ++ }; ++ } ++} ++ + Must be a child of the node that provides the trusty std/fast call interface. 
+diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index 1f14f7f48bed..8d6e8afb2a2f 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -15,8 +15,10 @@ + #include + #include + #include ++#include + #include + #include ++#include + #include + #include + #include +@@ -266,13 +268,101 @@ static int trusty_irq_cpu_notify(struct notifier_block *nb, + return NOTIFY_OK; + } + +-static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int irq) ++static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq) + { + int ret; ++ int index; ++ u32 irq_pos; ++ u32 templ_idx; ++ u32 range_base; ++ u32 range_end; ++ struct of_phandle_args oirq; ++ ++ /* check if "interrupt-ranges" property is present */ ++ if (!of_find_property(is->dev->of_node, "interrupt-ranges", NULL)) { ++ /* fallback to old behavior to be backward compatible with ++ * systems that do not need IRQ domains. ++ */ ++ return irq; ++ } ++ ++ /* find irq range */ ++ for (index = 0;; index += 3) { ++ ret = of_property_read_u32_index(is->dev->of_node, ++ "interrupt-ranges", ++ index, &range_base); ++ if (ret) ++ return ret; ++ ++ ret = of_property_read_u32_index(is->dev->of_node, ++ "interrupt-ranges", ++ index + 1, &range_end); ++ if (ret) ++ return ret; ++ ++ if (irq >= range_base && irq <= range_end) ++ break; ++ } ++ ++ /* read the rest of range entry: template index and irq_pos */ ++ ret = of_property_read_u32_index(is->dev->of_node, ++ "interrupt-ranges", ++ index + 2, &templ_idx); ++ if (ret) ++ return ret; ++ ++ /* read irq template */ ++ ret = of_parse_phandle_with_args(is->dev->of_node, ++ "interrupt-templates", ++ "#interrupt-cells", ++ templ_idx, &oirq); ++ if (ret) ++ return ret; ++ ++ WARN_ON(!oirq.np); ++ WARN_ON(!oirq.args_count); ++ ++ /* ++ * An IRQ template is a non empty array of u32 values describing group ++ * of interrupts having common properties. 
The u32 entry with index ++ * zero contains the position of irq_id in interrupt specifier array ++ * followed by data representing interrupt specifier array with irq id ++ * field omitted, so to convert irq template to interrupt specifier ++ * array we have to move down one slot the first irq_pos entries and ++ * replace the resulting gap with real irq id. ++ */ ++ irq_pos = oirq.args[0]; ++ ++ if (irq_pos >= oirq.args_count) { ++ dev_err(is->dev, "irq pos is out of range: %d\n", irq_pos); ++ return -EINVAL; ++ } ++ ++ for (index = 1; index <= irq_pos; index++) ++ oirq.args[index - 1] = oirq.args[index]; ++ ++ oirq.args[irq_pos] = irq - range_base; ++ ++ ret = irq_create_of_mapping(&oirq); ++ ++ return (!ret) ? -EINVAL : ret; ++} ++ ++static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq) ++{ ++ int ret; ++ int irq; + unsigned long irq_flags; + struct trusty_irq *trusty_irq; + +- dev_dbg(is->dev, "%s: irq %d\n", __func__, irq); ++ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq); ++ ++ irq = trusty_irq_create_irq_mapping(is, tirq); ++ if (irq < 0) { ++ dev_err(is->dev, ++ "trusty_irq_create_irq_mapping failed (%d)\n", irq); ++ return irq; ++ } + + trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL); + if (!trusty_irq) +@@ -302,13 +392,21 @@ static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int irq) + return ret; + } + +-static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int irq) ++static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq) + { + int ret; ++ int irq; + unsigned int cpu; + struct trusty_irq __percpu *trusty_irq_handler_data; + +- dev_dbg(is->dev, "%s: irq %d\n", __func__, irq); ++ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq); ++ ++ irq = trusty_irq_create_irq_mapping(is, tirq); ++ if (irq <= 0) { ++ dev_err(is->dev, ++ "trusty_irq_create_irq_mapping failed (%d)\n", irq); ++ return irq; ++ } + + trusty_irq_handler_data = alloc_percpu(struct trusty_irq); + if 
(!trusty_irq_handler_data) +-- +2.17.1 + diff --git a/patches/0015-usb-typec-ucsi-Remove-the-old-API.usb-typec b/patches/0015-usb-typec-ucsi-Remove-the-old-API.usb-typec new file mode 100644 index 0000000000..5170e0fabe --- /dev/null +++ b/patches/0015-usb-typec-ucsi-Remove-the-old-API.usb-typec @@ -0,0 +1,562 @@ +From 4cfd6223d6798a290ea96fa909c29138e52aa7a8 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Thu, 26 Sep 2019 12:38:25 +0300 +Subject: [PATCH 15/18] usb: typec: ucsi: Remove the old API + +The drivers now only use the new API, so removing the old one. + +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/ucsi/displayport.c | 8 +- + drivers/usb/typec/ucsi/trace.h | 17 -- + drivers/usb/typec/ucsi/ucsi.c | 346 +++------------------------ + drivers/usb/typec/ucsi/ucsi.h | 41 ---- + 4 files changed, 41 insertions(+), 371 deletions(-) + +diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c +index d99700cb4dca..9f86d4f99363 100644 +--- a/drivers/usb/typec/ucsi/displayport.c ++++ b/drivers/usb/typec/ucsi/displayport.c +@@ -48,8 +48,10 @@ struct ucsi_dp { + static int ucsi_displayport_enter(struct typec_altmode *alt) + { + struct ucsi_dp *dp = typec_altmode_get_drvdata(alt); ++ struct ucsi *ucsi = dp->con->ucsi; + struct ucsi_control ctrl; + u8 cur = 0; ++ u16 ver; + int ret; + + mutex_lock(&dp->con->lock); +@@ -66,7 +68,11 @@ static int ucsi_displayport_enter(struct typec_altmode *alt) + UCSI_CMD_GET_CURRENT_CAM(ctrl, dp->con->num); + ret = ucsi_send_command(dp->con->ucsi, &ctrl, &cur, sizeof(cur)); + if (ret < 0) { +- if (dp->con->ucsi->ppm->data->version > 0x0100) { ++ ret = ucsi->ops->read(ucsi, UCSI_VERSION, &ver, sizeof(ver)); ++ if (ret) ++ return ret; ++ ++ if (ver > 0x0100) { + mutex_unlock(&dp->con->lock); + return ret; + } +diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h +index 783ec9c72055..6e3d510b236e 100644 +--- a/drivers/usb/typec/ucsi/trace.h ++++ 
b/drivers/usb/typec/ucsi/trace.h +@@ -75,23 +75,6 @@ DEFINE_EVENT(ucsi_log_command, ucsi_reset_ppm, + TP_ARGS(ctrl, ret) + ); + +-DECLARE_EVENT_CLASS(ucsi_log_cci, +- TP_PROTO(u32 cci), +- TP_ARGS(cci), +- TP_STRUCT__entry( +- __field(u32, cci) +- ), +- TP_fast_assign( +- __entry->cci = cci; +- ), +- TP_printk("CCI=%08x %s", __entry->cci, ucsi_cci_str(__entry->cci)) +-); +- +-DEFINE_EVENT(ucsi_log_cci, ucsi_notify, +- TP_PROTO(u32 cci), +- TP_ARGS(cci) +-); +- + DECLARE_EVENT_CLASS(ucsi_log_connector_status, + TP_PROTO(int port, struct ucsi_connector_status *status), + TP_ARGS(port, status), +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index 2ba890327b9d..ea149a115834 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -36,68 +36,6 @@ + */ + #define UCSI_SWAP_TIMEOUT_MS 5000 + +-static inline int ucsi_sync(struct ucsi *ucsi) +-{ +- if (ucsi->ppm && ucsi->ppm->sync) +- return ucsi->ppm->sync(ucsi->ppm); +- return 0; +-} +- +-static int ucsi_command(struct ucsi *ucsi, struct ucsi_control *ctrl) +-{ +- int ret; +- +- trace_ucsi_command(ctrl); +- +- set_bit(COMMAND_PENDING, &ucsi->flags); +- +- ret = ucsi->ppm->cmd(ucsi->ppm, ctrl); +- if (ret) +- goto err_clear_flag; +- +- if (!wait_for_completion_timeout(&ucsi->complete, +- msecs_to_jiffies(UCSI_TIMEOUT_MS))) { +- dev_warn(ucsi->dev, "PPM NOT RESPONDING\n"); +- ret = -ETIMEDOUT; +- } +- +-err_clear_flag: +- clear_bit(COMMAND_PENDING, &ucsi->flags); +- +- return ret; +-} +- +-static int ucsi_ack(struct ucsi *ucsi, u8 ack) +-{ +- struct ucsi_control ctrl; +- int ret; +- +- trace_ucsi_ack(ack); +- +- set_bit(ACK_PENDING, &ucsi->flags); +- +- UCSI_CMD_ACK(ctrl, ack); +- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl); +- if (ret) +- goto out_clear_bit; +- +- /* Waiting for ACK with ACK CMD, but not with EVENT for now */ +- if (ack == UCSI_ACK_EVENT) +- goto out_clear_bit; +- +- if (!wait_for_completion_timeout(&ucsi->complete, +- msecs_to_jiffies(UCSI_TIMEOUT_MS))) 
+- ret = -ETIMEDOUT; +- +-out_clear_bit: +- clear_bit(ACK_PENDING, &ucsi->flags); +- +- if (ret) +- dev_err(ucsi->dev, "%s: failed\n", __func__); +- +- return ret; +-} +- + static int ucsi_acknowledge_command(struct ucsi *ucsi) + { + u64 ctrl; +@@ -193,115 +131,26 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) + static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl, + void *data, size_t size) + { +- struct ucsi_control _ctrl; +- u8 data_length; +- u16 error; ++ u8 length; + int ret; + +- if (ucsi->ops) { +- ret = ucsi_exec_command(ucsi, ctrl->raw_cmd); +- if (ret < 0) +- return ret; +- +- data_length = ret; ++ ret = ucsi_exec_command(ucsi, ctrl->raw_cmd); ++ if (ret < 0) ++ return ret; + +- if (data) { +- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size); +- if (ret) +- return ret; +- } ++ length = ret; + +- ret = ucsi_acknowledge_command(ucsi); ++ if (data) { ++ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size); + if (ret) + return ret; +- +- return data_length; + } + +- ret = ucsi_command(ucsi, ctrl); ++ ret = ucsi_acknowledge_command(ucsi); + if (ret) +- goto err; +- +- switch (ucsi->status) { +- case UCSI_IDLE: +- ret = ucsi_sync(ucsi); +- if (ret) +- dev_warn(ucsi->dev, "%s: sync failed\n", __func__); +- +- if (data) +- memcpy(data, ucsi->ppm->data->message_in, size); +- +- data_length = ucsi->ppm->data->cci.data_length; +- +- ret = ucsi_ack(ucsi, UCSI_ACK_CMD); +- if (!ret) +- ret = data_length; +- break; +- case UCSI_BUSY: +- /* The caller decides whether to cancel or not */ +- ret = -EBUSY; +- break; +- case UCSI_ERROR: +- ret = ucsi_ack(ucsi, UCSI_ACK_CMD); +- if (ret) +- break; +- +- _ctrl.raw_cmd = 0; +- _ctrl.cmd.cmd = UCSI_GET_ERROR_STATUS; +- ret = ucsi_command(ucsi, &_ctrl); +- if (ret) { +- dev_err(ucsi->dev, "reading error failed!\n"); +- break; +- } +- +- memcpy(&error, ucsi->ppm->data->message_in, sizeof(error)); +- +- /* Something has really gone wrong */ +- if (WARN_ON(ucsi->status == UCSI_ERROR)) { 
+- ret = -ENODEV; +- break; +- } +- +- ret = ucsi_ack(ucsi, UCSI_ACK_CMD); +- if (ret) +- break; +- +- switch (error) { +- case UCSI_ERROR_INCOMPATIBLE_PARTNER: +- ret = -EOPNOTSUPP; +- break; +- case UCSI_ERROR_CC_COMMUNICATION_ERR: +- ret = -ECOMM; +- break; +- case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL: +- ret = -EPROTO; +- break; +- case UCSI_ERROR_DEAD_BATTERY: +- dev_warn(ucsi->dev, "Dead battery condition!\n"); +- ret = -EPERM; +- break; +- /* The following mean a bug in this driver */ +- case UCSI_ERROR_INVALID_CON_NUM: +- case UCSI_ERROR_UNREGONIZED_CMD: +- case UCSI_ERROR_INVALID_CMD_ARGUMENT: +- dev_warn(ucsi->dev, +- "%s: possible UCSI driver bug - error 0x%x\n", +- __func__, error); +- ret = -EINVAL; +- break; +- default: +- dev_warn(ucsi->dev, +- "%s: error without status\n", __func__); +- ret = -EIO; +- break; +- } +- break; +- } +- +-err: +- trace_ucsi_run_command(ctrl, ret); ++ return ret; + +- return ret; ++ return length; + } + + int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl, +@@ -332,6 +181,7 @@ void ucsi_altmode_update_active(struct ucsi_connector *con) + { + const struct typec_altmode *altmode = NULL; + struct ucsi_control ctrl; ++ u16 version; + int ret; + u8 cur; + int i; +@@ -339,7 +189,9 @@ void ucsi_altmode_update_active(struct ucsi_connector *con) + UCSI_CMD_GET_CURRENT_CAM(ctrl, con->num); + ret = ucsi_run_command(con->ucsi, &ctrl, &cur, sizeof(cur)); + if (ret < 0) { +- if (con->ucsi->ppm->data->version > 0x0100) { ++ ret = con->ucsi->ops->read(con->ucsi, UCSI_VERSION, &version, ++ sizeof(version)); ++ if (ret || version > 0x0100) { + dev_err(con->ucsi->dev, + "GET_CURRENT_CAM command failed\n"); + return; +@@ -692,10 +544,7 @@ static void ucsi_handle_connector_change(struct work_struct *work) + if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE) + ucsi_partner_change(con); + +- if (ucsi->ops) +- ret = ucsi_acknowledge_connector_change(ucsi); +- else +- ret = ucsi_ack(ucsi, UCSI_ACK_EVENT); ++ ret = 
ucsi_acknowledge_connector_change(ucsi); + if (ret) + dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret); + +@@ -720,45 +569,6 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num) + } + EXPORT_SYMBOL_GPL(ucsi_connector_change); + +-/** +- * ucsi_notify - PPM notification handler +- * @ucsi: Source UCSI Interface for the notifications +- * +- * Handle notifications from PPM of @ucsi. +- */ +-void ucsi_notify(struct ucsi *ucsi) +-{ +- struct ucsi_cci *cci; +- +- /* There is no requirement to sync here, but no harm either. */ +- ucsi_sync(ucsi); +- +- cci = &ucsi->ppm->data->cci; +- +- if (cci->error) +- ucsi->status = UCSI_ERROR; +- else if (cci->busy) +- ucsi->status = UCSI_BUSY; +- else +- ucsi->status = UCSI_IDLE; +- +- if (cci->cmd_complete && test_bit(COMMAND_PENDING, &ucsi->flags)) { +- complete(&ucsi->complete); +- } else if (cci->ack_complete && test_bit(ACK_PENDING, &ucsi->flags)) { +- complete(&ucsi->complete); +- } else if (cci->connector_change) { +- struct ucsi_connector *con; +- +- con = &ucsi->connector[cci->connector_change - 1]; +- +- if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags)) +- schedule_work(&con->work); +- } +- +- trace_ucsi_notify(ucsi->ppm->data->raw_cci); +-} +-EXPORT_SYMBOL_GPL(ucsi_notify); +- + /* -------------------------------------------------------------------------- */ + + static int ucsi_reset_connector(struct ucsi_connector *con, bool hard) +@@ -772,82 +582,39 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard) + + static int ucsi_reset_ppm(struct ucsi *ucsi) + { +- struct ucsi_control ctrl; ++ u64 command = UCSI_PPM_RESET; + unsigned long tmo; ++ u32 cci; + int ret; + +- if (ucsi->ops) { +- u64 command = UCSI_PPM_RESET; +- u32 cci; +- +- ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command, +- sizeof(command)); +- if (ret < 0) +- return ret; +- +- tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS); +- +- do { +- if (time_is_before_jiffies(tmo)) +- return -ETIMEDOUT; +- +- ret = 
ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci)); +- if (ret) +- return ret; +- +- if (cci & ~UCSI_CCI_RESET_COMPLETE) { +- ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, +- &command, +- sizeof(command)); +- if (ret < 0) +- return ret; +- } +- +- msleep(20); +- } while (!(cci & UCSI_CCI_RESET_COMPLETE)); +- +- return 0; +- } +- +- ctrl.raw_cmd = 0; +- ctrl.cmd.cmd = UCSI_PPM_RESET; +- trace_ucsi_command(&ctrl); +- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl); +- if (ret) +- goto err; ++ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command, ++ sizeof(command)); ++ if (ret < 0) ++ return ret; + + tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS); + + do { +- /* Here sync is critical. */ +- ret = ucsi_sync(ucsi); +- if (ret) +- goto err; ++ if (time_is_before_jiffies(tmo)) ++ return -ETIMEDOUT; + +- if (ucsi->ppm->data->cci.reset_complete) +- break; ++ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci)); ++ if (ret) ++ return ret; + + /* If the PPM is still doing something else, reset it again. */ +- if (ucsi->ppm->data->raw_cci) { +- dev_warn_ratelimited(ucsi->dev, +- "Failed to reset PPM! Trying again..\n"); +- +- trace_ucsi_command(&ctrl); +- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl); +- if (ret) +- goto err; ++ if (cci & ~UCSI_CCI_RESET_COMPLETE) { ++ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, ++ &command, ++ sizeof(command)); ++ if (ret < 0) ++ return ret; + } + +- /* Letting the PPM settle down. 
*/ + msleep(20); ++ } while (!(cci & UCSI_CCI_RESET_COMPLETE)); + +- ret = -ETIMEDOUT; +- } while (time_is_after_jiffies(tmo)); +- +-err: +- trace_ucsi_reset_ppm(&ctrl, ret); +- +- return ret; ++ return 0; + } + + static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl) +@@ -1262,51 +1029,6 @@ void ucsi_unregister(struct ucsi *ucsi) + } + EXPORT_SYMBOL_GPL(ucsi_unregister); + +-/** +- * ucsi_register_ppm - Register UCSI PPM Interface +- * @dev: Device interface to the PPM +- * @ppm: The PPM interface +- * +- * Allocates UCSI instance, associates it with @ppm and returns it to the +- * caller, and schedules initialization of the interface. +- */ +-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm) +-{ +- struct ucsi *ucsi; +- +- ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL); +- if (!ucsi) +- return ERR_PTR(-ENOMEM); +- +- INIT_WORK(&ucsi->work, ucsi_init_work); +- init_completion(&ucsi->complete); +- mutex_init(&ucsi->ppm_lock); +- +- ucsi->dev = dev; +- ucsi->ppm = ppm; +- +- /* +- * Communication with the PPM takes a lot of time. It is not reasonable +- * to initialize the driver here. Using a work for now. +- */ +- queue_work(system_long_wq, &ucsi->work); +- +- return ucsi; +-} +-EXPORT_SYMBOL_GPL(ucsi_register_ppm); +- +-/** +- * ucsi_unregister_ppm - Unregister UCSI PPM Interface +- * @ucsi: struct ucsi associated with the PPM +- * +- * Unregister UCSI PPM that was created with ucsi_register(). 
+- */ +-void ucsi_unregister_ppm(struct ucsi *ucsi) +-{ +- ucsi_unregister(ucsi); +-} +-EXPORT_SYMBOL_GPL(ucsi_unregister_ppm); +- + MODULE_AUTHOR("Heikki Krogerus "); + MODULE_LICENSE("GPL v2"); + MODULE_DESCRIPTION("USB Type-C Connector System Software Interface driver"); +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h +index 3e9a4ba912e9..bb1df6cb241b 100644 +--- a/drivers/usb/typec/ucsi/ucsi.h ++++ b/drivers/usb/typec/ucsi/ucsi.h +@@ -398,53 +398,12 @@ struct ucsi_connector_status { + + /* -------------------------------------------------------------------------- */ + +-struct ucsi; +- +-struct ucsi_data { +- u16 version; +- u16 reserved; +- union { +- u32 raw_cci; +- struct ucsi_cci cci; +- }; +- struct ucsi_control ctrl; +- u32 message_in[4]; +- u32 message_out[4]; +-} __packed; +- +-/* +- * struct ucsi_ppm - Interface to UCSI Platform Policy Manager +- * @data: memory location to the UCSI data structures +- * @cmd: UCSI command execution routine +- * @sync: Refresh UCSI mailbox (the data structures) +- */ +-struct ucsi_ppm { +- struct ucsi_data *data; +- int (*cmd)(struct ucsi_ppm *, struct ucsi_control *); +- int (*sync)(struct ucsi_ppm *); +-}; +- +-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm); +-void ucsi_unregister_ppm(struct ucsi *ucsi); +-void ucsi_notify(struct ucsi *ucsi); +- +-/* -------------------------------------------------------------------------- */ +- +-enum ucsi_status { +- UCSI_IDLE = 0, +- UCSI_BUSY, +- UCSI_ERROR, +-}; +- + struct ucsi { + struct device *dev; +- struct ucsi_ppm *ppm; + struct driver_data *driver_data; + + const struct ucsi_operations *ops; + +- enum ucsi_status status; +- struct completion complete; + struct ucsi_capability cap; + struct ucsi_connector *connector; + +-- +2.17.1 + diff --git a/patches/0016-ASoC-Intel-Merge-sst_dsp_device-into-sst_pdata.audio b/patches/0016-ASoC-Intel-Merge-sst_dsp_device-into-sst_pdata.audio new file mode 100644 index 
0000000000..0cc8ef4553 --- /dev/null +++ b/patches/0016-ASoC-Intel-Merge-sst_dsp_device-into-sst_pdata.audio @@ -0,0 +1,316 @@ +From 26e9fe1244b58232b86a359821475cb16c14fe22 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 11:24:35 +0200 +Subject: [PATCH 016/193] ASoC: Intel: Merge sst_dsp_device into sst_pdata + +With all core ops united, sst_dsp_device is a stub struct. Merge it with +sst_pdata and thus making it dsp-platform info struct. As sst_pdata is +targeted to become a sole initialization struct for DSP drivers, update +its definition to contain fw_name and machine board list too. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/baytrail/sst-baytrail-ipc.c | 10 +++------- + sound/soc/intel/common/sst-dsp-priv.h | 1 - + sound/soc/intel/common/sst-dsp.h | 17 ++++------------- + sound/soc/intel/common/sst-firmware.c | 8 +++----- + sound/soc/intel/haswell/sst-haswell-ipc.c | 10 +++------- + sound/soc/intel/skylake/bxt-sst.c | 2 +- + sound/soc/intel/skylake/cnl-sst-dsp.h | 2 +- + sound/soc/intel/skylake/cnl-sst.c | 2 +- + sound/soc/intel/skylake/skl-sst-dsp.c | 8 ++++---- + sound/soc/intel/skylake/skl-sst-dsp.h | 6 +++--- + sound/soc/intel/skylake/skl-sst-utils.c | 6 +++--- + sound/soc/intel/skylake/skl-sst.c | 2 +- + 12 files changed, 27 insertions(+), 47 deletions(-) + +diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c +index 23d65ad38e19..efa78e33caf2 100644 +--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c ++++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c +@@ -556,10 +556,6 @@ struct sst_dsp *sst_byt_get_dsp(struct sst_byt *byt) + return byt->dsp; + } + +-static struct sst_dsp_device byt_dev = { +- .ops = &sst_byt_ops, +-}; +- + int sst_byt_dsp_suspend_late(struct device *dev, struct sst_pdata *pdata) + { + struct sst_byt *byt = pdata->dsp; +@@ -701,10 +697,11 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata) + + INIT_LIST_HEAD(&byt->stream_list); + 
init_waitqueue_head(&byt->boot_wait); +- byt_dev.thread_context = byt; ++ pdata->dsp = byt; ++ pdata->ops = &sst_byt_ops; + + /* init SST shim */ +- byt->dsp = sst_dsp_new(dev, &byt_dev, pdata); ++ byt->dsp = sst_dsp_new(dev, pdata); + if (byt->dsp == NULL) { + err = -ENODEV; + goto dsp_new_err; +@@ -741,7 +738,6 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata) + dev_info(byt->dev, "Build date: %s %s\n", + init.build_info.date, init.build_info.time); + +- pdata->dsp = byt; + byt->fw = byt_sst_fw; + + return 0; +diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h +index 7ec477108948..8a7009937d59 100644 +--- a/sound/soc/intel/common/sst-dsp-priv.h ++++ b/sound/soc/intel/common/sst-dsp-priv.h +@@ -259,7 +259,6 @@ struct sst_dsp { + /* Shared for all platforms */ + + /* runtime */ +- struct sst_dsp_device *sst_dev; + spinlock_t spinlock; /* IPC locking */ + struct mutex mutex; /* DSP FW lock */ + struct device *dev; +diff --git a/sound/soc/intel/common/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h +index 05fa1ca72f46..63c29bb45cf1 100644 +--- a/sound/soc/intel/common/sst-dsp.h ++++ b/sound/soc/intel/common/sst-dsp.h +@@ -167,17 +167,6 @@ + + struct sst_dsp; + +-/* +- * SST Device. +- * +- * This structure is populated by the SST core driver. +- */ +-struct sst_dsp_device { +- /* Mandatory fields */ +- struct sst_ops *ops; +- void *thread_context; +-}; +- + /* + * SST Platform Data. 
+ */ +@@ -203,13 +192,15 @@ struct sst_pdata { + + /* DSP */ + u32 id; ++ const char *fw_name; ++ struct sst_ops *ops; ++ struct snd_soc_acpi_mach *boards; + void *dsp; + }; + + #if IS_ENABLED(CONFIG_DW_DMAC_CORE) + /* Initialization */ +-struct sst_dsp *sst_dsp_new(struct device *dev, +- struct sst_dsp_device *sst_dev, struct sst_pdata *pdata); ++struct sst_dsp *sst_dsp_new(struct device *dev, struct sst_pdata *pdata); + void sst_dsp_free(struct sst_dsp *sst); + #endif + +diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c +index c18236ca77f4..6b6af11c32c3 100644 +--- a/sound/soc/intel/common/sst-firmware.c ++++ b/sound/soc/intel/common/sst-firmware.c +@@ -1200,8 +1200,7 @@ u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset, + } + EXPORT_SYMBOL_GPL(sst_dsp_get_offset); + +-struct sst_dsp *sst_dsp_new(struct device *dev, +- struct sst_dsp_device *sst_dev, struct sst_pdata *pdata) ++struct sst_dsp *sst_dsp_new(struct device *dev, struct sst_pdata *pdata) + { + struct sst_dsp *sst; + int err; +@@ -1216,11 +1215,10 @@ struct sst_dsp *sst_dsp_new(struct device *dev, + mutex_init(&sst->mutex); + sst->dev = dev; + sst->dma_dev = pdata->dma_dev; +- sst->thread_context = sst_dev->thread_context; +- sst->sst_dev = sst_dev; ++ sst->thread_context = pdata->dsp; + sst->id = pdata->id; + sst->irq = pdata->irq; +- sst->ops = sst_dev->ops; ++ sst->ops = pdata->ops; + sst->pdata = pdata; + INIT_LIST_HEAD(&sst->used_block_list); + INIT_LIST_HEAD(&sst->free_block_list); +diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c +index 12a799828240..207c1440a906 100644 +--- a/sound/soc/intel/haswell/sst-haswell-ipc.c ++++ b/sound/soc/intel/haswell/sst-haswell-ipc.c +@@ -2045,10 +2045,6 @@ int sst_hsw_module_set_param(struct sst_hsw *hsw, + return ret; + } + +-static struct sst_dsp_device hsw_dev = { +- .ops = &haswell_ops, +-}; +- + static void hsw_tx_msg(struct sst_generic_ipc *ipc, struct 
ipc_message *msg) + { + /* send the message */ +@@ -2126,10 +2122,11 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata) + + INIT_LIST_HEAD(&hsw->stream_list); + init_waitqueue_head(&hsw->boot_wait); +- hsw_dev.thread_context = hsw; ++ pdata->dsp = hsw; ++ pdata->ops = &haswell_ops; + + /* init SST shim */ +- hsw->dsp = sst_dsp_new(dev, &hsw_dev, pdata); ++ hsw->dsp = sst_dsp_new(dev, pdata); + if (hsw->dsp == NULL) { + ret = -ENODEV; + goto dsp_new_err; +@@ -2189,7 +2186,6 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata) + goto boot_err; + } + +- pdata->dsp = hsw; + return 0; + + boot_err: +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 129837fc789b..29b59ce50816 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -543,7 +543,7 @@ static struct sst_ops skl_ops = { + .free = skl_dsp_free, + }; + +-static struct sst_dsp_device skl_dev = { ++static struct sst_pdata skl_dev = { + .ops = &skl_ops, + }; + +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.h b/sound/soc/intel/skylake/cnl-sst-dsp.h +index f3d320b05eb5..30b586acc858 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.h ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.h +@@ -9,7 +9,7 @@ + #define __CNL_SST_DSP_H__ + + struct sst_dsp; +-struct sst_dsp_device; ++struct sst_pdata; + struct sst_generic_ipc; + + /* Intel HD Audio General DSP Registers */ +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index b306b7b521b2..5cdf4960617d 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -418,7 +418,7 @@ static struct sst_ops cnl_ops = { + .free = cnl_dsp_free, + }; + +-static struct sst_dsp_device cnl_dev = { ++static struct sst_pdata cnl_dev = { + .ops = &cnl_ops, + }; + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c +index 8d98089e3177..348e69226e46 100644 +--- 
a/sound/soc/intel/skylake/skl-sst-dsp.c ++++ b/sound/soc/intel/skylake/skl-sst-dsp.c +@@ -419,7 +419,7 @@ int skl_dsp_sleep(struct sst_dsp *ctx) + EXPORT_SYMBOL_GPL(skl_dsp_sleep); + + struct sst_dsp *skl_dsp_ctx_init(struct device *dev, +- struct sst_dsp_device *sst_dev, int irq) ++ struct sst_pdata *pdata, int irq) + { + int ret; + struct sst_dsp *sst; +@@ -431,10 +431,10 @@ struct sst_dsp *skl_dsp_ctx_init(struct device *dev, + spin_lock_init(&sst->spinlock); + mutex_init(&sst->mutex); + sst->dev = dev; +- sst->sst_dev = sst_dev; ++ sst->pdata = pdata; + sst->irq = irq; +- sst->ops = sst_dev->ops; +- sst->thread_context = sst_dev->thread_context; ++ sst->ops = pdata->ops; ++ sst->thread_context = pdata->dsp; + + /* Initialise SST Audio DSP */ + if (sst->ops->init) { +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index 1739d4e66275..7f6e853a6f5c 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -15,7 +15,7 @@ + #include "skl-sst-cldma.h" + + struct sst_dsp; +-struct sst_dsp_device; ++struct sst_pdata; + struct skl_lib_info; + struct skl_dev; + +@@ -210,7 +210,7 @@ int skl_cldma_wait_interruptible(struct sst_dsp *ctx); + + void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state); + struct sst_dsp *skl_dsp_ctx_init(struct device *dev, +- struct sst_dsp_device *sst_dev, int irq); ++ struct sst_pdata *pdata, int irq); + int skl_dsp_acquire_irq(struct sst_dsp *sst); + bool is_skl_dsp_running(struct sst_dsp *ctx); + +@@ -258,7 +258,7 @@ void skl_dsp_set_astate_cfg(struct skl_dev *skl, u32 cnt, void *data); + + int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, + struct skl_dsp_loader_ops dsp_ops, struct skl_dev **dsp, +- struct sst_dsp_device *skl_dev); ++ struct sst_pdata *pdata); + int skl_prepare_lib_load(struct skl_dev *skl, struct skl_lib_info *linfo, + struct firmware *stripped_fw, + unsigned int hdr_offset, int index); +diff --git 
a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index fa1c73077551..c7eeba920534 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -395,15 +395,15 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw) + + int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, + struct skl_dsp_loader_ops dsp_ops, struct skl_dev **dsp, +- struct sst_dsp_device *skl_dev) ++ struct sst_pdata *pdata) + { + struct skl_dev *skl = *dsp; + struct sst_dsp *sst; + + skl->dev = dev; +- skl_dev->thread_context = skl; ++ pdata->dsp = skl; + INIT_LIST_HEAD(&skl->module_list); +- skl->dsp = skl_dsp_ctx_init(dev, skl_dev, irq); ++ skl->dsp = skl_dsp_ctx_init(dev, pdata, irq); + if (!skl->dsp) { + dev_err(skl->dev, "%s: no device\n", __func__); + return -ENODEV; +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index dd8aac3f0230..a0b7842b08dc 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -512,7 +512,7 @@ static struct sst_ops skl_ops = { + .free = skl_dsp_free, + }; + +-static struct sst_dsp_device skl_dev = { ++static struct sst_pdata skl_dev = { + .ops = &skl_ops, + }; + +-- +2.17.1 + diff --git a/patches/0016-Modify-the-static-analysis-errors-for-google-s-trus.trusty b/patches/0016-Modify-the-static-analysis-errors-for-google-s-trus.trusty new file mode 100644 index 0000000000..4e9c564845 --- /dev/null +++ b/patches/0016-Modify-the-static-analysis-errors-for-google-s-trus.trusty @@ -0,0 +1,108 @@ +From 8bca1fbe4c924a4a01dae4c0c5cc5ec9df8b21a7 Mon Sep 17 00:00:00 2001 +From: weideng +Date: Fri, 13 May 2016 10:36:16 +0800 +Subject: [PATCH 16/63] Modify the static analysis errors for google's trusty + driver patches. + +Totally 15 patches for Google's trusty driver are ported into kernel/glv. +This patch will fix all of the static analysis errors for the 15 patches +from google. 
+ +Change-Id: I38c604cc010f1e93fda6a06d1f9410ab578656df +Signed-off-by: weideng +--- + drivers/trusty/trusty.c | 4 ++-- + include/linux/trusty/smcall.h | 32 ++++++++++++++++---------------- + 2 files changed, 18 insertions(+), 18 deletions(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 2a7aeb4725c5..6fcd5481ac88 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -39,8 +39,8 @@ struct trusty_state { + #define SMC_ARG2 "x2" + #define SMC_ARG3 "x3" + #define SMC_ARCH_EXTENSION "" +-#define SMC_REGISTERS_TRASHED "x4","x5","x6","x7","x8","x9","x10","x11", \ +- "x12","x13","x14","x15","x16","x17" ++#define SMC_REGISTERS_TRASHED "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", \ ++ "x12", "x13", "x14", "x15", "x16", "x17" + #else + #define SMC_ARG0 "r0" + #define SMC_ARG1 "r1" +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index 2e43803d9333..1160890a3d90 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -55,8 +55,8 @@ + #define SMC_ENTITY_SECURE_MONITOR 60 /* Trusted OS calls internal to secure monitor */ + + /* FC = Fast call, SC = Standard call */ +-#define SMC_SC_RESTART_LAST SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0) +-#define SMC_SC_LOCKED_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1) ++#define SMC_SC_RESTART_LAST SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0) ++#define SMC_SC_LOCKED_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1) + + /** + * SMC_SC_RESTART_FIQ - Re-enter trusty after it was interrupted by an fiq +@@ -68,7 +68,7 @@ + * + * Enable by selecting api version TRUSTY_API_VERSION_RESTART_FIQ (1) or later. + */ +-#define SMC_SC_RESTART_FIQ SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2) ++#define SMC_SC_RESTART_FIQ SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2) + + /** + * SMC_SC_NOP - Enter trusty to run pending work. +@@ -80,27 +80,27 @@ + * + * Enable by selecting api version TRUSTY_API_VERSION_SMP (2) or later. 
+ */ +-#define SMC_SC_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3) ++#define SMC_SC_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3) + + /* + * Return from secure os to non-secure os with return value in r1 + */ +-#define SMC_SC_NS_RETURN SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0) ++#define SMC_SC_NS_RETURN SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0) + +-#define SMC_FC_RESERVED SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0) +-#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1) +-#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2) +-#define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3) +-#define SMC_FC_FIQ_ENTER SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 4) ++#define SMC_FC_RESERVED SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0) ++#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1) ++#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2) ++#define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3) ++#define SMC_FC_FIQ_ENTER SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 4) + + #define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5) +-#define SMC_FC64_GET_FIQ_REGS SMC_FASTCALL64_NR (SMC_ENTITY_SECURE_MONITOR, 6) ++#define SMC_FC64_GET_FIQ_REGS SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 6) + +-#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7) +-#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8) ++#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 7) ++#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 8) + +-#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9) +-#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10) ++#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 9) ++#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10) + + /** + * SMC_FC_API_VERSION 
- Find and select supported API version. +@@ -121,7 +121,7 @@ + #define TRUSTY_API_VERSION_RESTART_FIQ (1) + #define TRUSTY_API_VERSION_SMP (2) + #define TRUSTY_API_VERSION_CURRENT (2) +-#define SMC_FC_API_VERSION SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 11) ++#define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11) + + /* TRUSTED_OS entity calls */ + #define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20) +-- +2.17.1 + diff --git a/patches/0016-VBS-K-virtqueue-runtime-API.acrn b/patches/0016-VBS-K-virtqueue-runtime-API.acrn new file mode 100644 index 0000000000..17b16c3c37 --- /dev/null +++ b/patches/0016-VBS-K-virtqueue-runtime-API.acrn @@ -0,0 +1,425 @@ +From 9a05a5d17d8d0717070ef32fa25db2f3cbdef74a Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Fri, 31 Aug 2018 10:58:56 +0800 +Subject: [PATCH 016/150] VBS-K: virtqueue runtime API. + +This patch added the virtqueue runtime API to the VBS-K framework: + - int virtio_vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx, + struct iovec *iov, int n_iov, uint16_t *flags); + - void virtio_vq_retchain(struct virtio_vq_info *vq); + - void virtio_vq_relchain(struct virtio_vq_info *vq, uint16_t idx, + uint32_t iolen); + - void virtio_vq_endchains(struct virtio_vq_info *vq, int used_all_avail); + +V2: exports some vbs symbols for other modules to use. 
+ +Change-Id: Ie7f81d96c895a16e210133c19aca99b185b8682d +Tracked-On:218445 +Signed-off-by: Hao Li +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/vbs/vq.c | 280 +++++++++++++++++++++++++++++++++++++++++ + include/linux/vbs/vq.h | 73 +++++++++++ + 2 files changed, 353 insertions(+) + +diff --git a/drivers/vbs/vq.c b/drivers/vbs/vq.c +index 95a6757a1c85..c344002d4005 100644 +--- a/drivers/vbs/vq.c ++++ b/drivers/vbs/vq.c +@@ -71,6 +71,284 @@ void * paddr_guest2host(struct ctx *ctx, uintptr_t gaddr, size_t len) + return map_guest_phys(ctx->vmid, gaddr, len); + } + ++/* ++ * helper function for vq_getchain(): ++ * record the i'th "real" descriptor. ++ */ ++static inline void _vq_record(int i, volatile struct virtio_desc *vd, ++ struct ctx *ctx, struct iovec *iov, ++ int n_iov, uint16_t *flags) ++{ ++ if (i >= n_iov) ++ return; ++ ++ iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len); ++ iov[i].iov_len = vd->len; ++ ++ if (flags != NULL) ++ flags[i] = vd->flags; ++} ++ ++/* ++ * Walk descriptor table and put requests into iovec. ++ * ++ * Examine the chain of descriptors starting at the "next one" to ++ * make sure that they describe a sensible request. If so, return ++ * the number of "real" descriptors that would be needed/used in ++ * acting on this request. This may be smaller than the number of ++ * available descriptors, e.g., if there are two available but ++ * they are two separate requests, this just returns 1. Or, it ++ * may be larger: if there are indirect descriptors involved, ++ * there may only be one descriptor available but it may be an ++ * indirect pointing to eight more. We return 8 in this case, ++ * i.e., we do not count the indirect descriptors, only the "real" ++ * ones. ++ * ++ * Basically, this vets the vd_flags and vd_next field of each ++ * descriptor and tells you how many are involved. 
Since some may ++ * be indirect, this also needs the vmctx (in the pci_vdev ++ * at vc->vc_pi) so that it can find indirect descriptors. ++ * ++ * As we process each descriptor, we copy and adjust it (guest to ++ * host address wise, also using the vmtctx) into the given iov[] ++ * array (of the given size). If the array overflows, we stop ++ * placing values into the array but keep processing descriptors, ++ * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1. ++ * So you, the caller, must not assume that iov[] is as big as the ++ * return value (you can process the same thing twice to allocate ++ * a larger iov array if needed, or supply a zero length to find ++ * out how much space is needed). ++ * ++ * If you want to verify the WRITE flag on each descriptor, pass a ++ * non-NULL "flags" pointer to an array of "uint16_t" of the same size ++ * as n_iov and we'll copy each vd_flags field after unwinding any ++ * indirects. ++ * ++ * If some descriptor(s) are invalid, this prints a diagnostic message ++ * and returns -1. If no descriptors are ready now it simply returns 0. ++ * ++ * You are assumed to have done a vq_ring_ready() if needed (note ++ * that vq_has_descs() does one). ++ */ ++int virtio_vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx, ++ struct iovec *iov, int n_iov, uint16_t *flags) ++{ ++ int i; ++ unsigned int ndesc, n_indir; ++ unsigned int idx, next; ++ struct ctx *ctx; ++ struct virtio_dev_info *dev; ++ const char *name; ++ ++ volatile struct virtio_desc *vdir, *vindir, *vp; ++ ++ dev = vq->dev; ++ name = dev->name; ++ ++ /* ++ * Note: it's the responsibility of the guest not to ++ * update vq->vq_avail->va_idx until all of the descriptors ++ * the guest has written are valid (including all their ++ * vd_next fields and vd_flags). ++ * ++ * Compute (last_avail - va_idx) in integers mod 2**16. This is ++ * the number of descriptors the device has made available ++ * since the last time we updated vq->vq_last_avail. 
++ * ++ * We just need to do the subtraction as an unsigned int, ++ * then trim off excess bits. ++ */ ++ idx = vq->last_avail; ++ ndesc = (uint16_t)((unsigned int)vq->avail->idx - idx); ++ ++ if (ndesc == 0) ++ return 0; ++ ++ if (ndesc > vq->qsize) { ++ /* XXX need better way to diagnose issues */ ++ pr_err("%s: ndesc (%u) out of range, driver confused?\r\n", ++ name, (unsigned int)ndesc); ++ return -1; ++ } ++ ++ /* ++ * Now count/parse "involved" descriptors starting from ++ * the head of the chain. ++ * ++ * To prevent loops, we could be more complicated and ++ * check whether we're re-visiting a previously visited ++ * index, but we just abort if the count gets excessive. ++ */ ++ ctx = &dev->_ctx; ++ *pidx = next = vq->avail->ring[idx & (vq->qsize - 1)]; ++ vq->last_avail++; ++ for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) { ++ if (next >= vq->qsize) { ++ pr_err("%s: descriptor index %u out of range, " ++ "driver confused?\r\n", name, next); ++ return -1; ++ } ++ vdir = &vq->desc[next]; ++ if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) { ++ _vq_record(i, vdir, ctx, iov, n_iov, flags); ++ i++; ++ } else if ((dev->negotiated_features & ++ VIRTIO_RING_F_INDIRECT_DESC) == 0) { ++ pr_err("%s: descriptor has forbidden INDIRECT flag, " ++ "driver confused?\r\n", name); ++ return -1; ++ } else { ++ n_indir = vdir->len / 16; ++ if ((vdir->len & 0xf) || n_indir == 0) { ++ pr_err("%s: invalid indir len 0x%x, " ++ "driver confused?\r\n", name, ++ (unsigned int)vdir->len); ++ return -1; ++ } ++ vindir = paddr_guest2host(ctx, vdir->addr, vdir->len); ++ /* ++ * Indirects start at the 0th, then follow ++ * their own embedded "next"s until those run ++ * out. Each one's indirect flag must be off ++ * (we don't really have to check, could just ++ * ignore errors...). 
++ */ ++ next = 0; ++ for (;;) { ++ vp = &vindir[next]; ++ if (vp->flags & VRING_DESC_F_INDIRECT) { ++ pr_err("%s: indirect desc has INDIR flag," ++ " driver confused?\r\n", name); ++ return -1; ++ } ++ _vq_record(i, vp, ctx, iov, n_iov, flags); ++ if (++i > VQ_MAX_DESCRIPTORS) ++ goto loopy; ++ if ((vp->flags & VRING_DESC_F_NEXT) == 0) ++ break; ++ next = vp->next; ++ if (next >= n_indir) { ++ pr_err("%s: invalid next %u > %u, " ++ "driver confused?\r\n", ++ name, (unsigned int)next, n_indir); ++ return -1; ++ } ++ } ++ } ++ if ((vdir->flags & VRING_DESC_F_NEXT) == 0) ++ return i; ++ } ++loopy: ++ pr_err("%s: descriptor loop? count > %d - driver confused?\r\n", ++ name, i); ++ return -1; ++} ++ ++EXPORT_SYMBOL(virtio_vq_getchain); ++ ++/* ++ * Return the currently-first request chain back to the available queue. ++ * ++ * (This chain is the one you handled when you called vq_getchain() ++ * and used its positive return value.) ++ */ ++void virtio_vq_retchain(struct virtio_vq_info *vq) ++{ ++ vq->last_avail--; ++} ++ ++EXPORT_SYMBOL(virtio_vq_retchain); ++ ++/* ++ * Return specified request chain to the guest, setting its I/O length ++ * to the provided value. ++ * ++ * (This chain is the one you handled when you called vq_getchain() ++ * and used its positive return value.) ++ */ ++void virtio_vq_relchain(struct virtio_vq_info *vq, uint16_t idx, ++ uint32_t iolen) ++{ ++ uint16_t uidx, mask; ++ volatile struct vring_used *vuh; ++ volatile struct virtio_used *vue; ++ ++ /* ++ * Notes: ++ * - mask is N-1 where N is a power of 2 so computes x % N ++ * - vuh points to the "used" data shared with guest ++ * - vue points to the "used" ring entry we want to update ++ * - head is the same value we compute in vq_iovecs(). ++ * ++ * (I apologize for the two fields named vu_idx; the ++ * virtio spec calls the one that vue points to, "id"...) 
++ */ ++ mask = vq->qsize - 1; ++ vuh = vq->used; ++ ++ uidx = vuh->idx; ++ vue = &vuh->ring[uidx++ & mask]; ++ vue->idx = idx; ++ vue->len = iolen; ++ vuh->idx = uidx; ++} ++ ++EXPORT_SYMBOL(virtio_vq_relchain); ++ ++/* ++ * Driver has finished processing "available" chains and calling ++ * vq_relchain on each one. If driver used all the available ++ * chains, used_all should be set. ++ * ++ * If the "used" index moved we may need to inform the guest, i.e., ++ * deliver an interrupt. Even if the used index did NOT move we ++ * may need to deliver an interrupt, if the avail ring is empty and ++ * we are supposed to interrupt on empty. ++ * ++ * Note that used_all_avail is provided by the caller because it's ++ * a snapshot of the ring state when he decided to finish interrupt ++ * processing -- it's possible that descriptors became available after ++ * that point. (It's also typically a constant 1/True as well.) ++ */ ++void virtio_vq_endchains(struct virtio_vq_info *vq, int used_all_avail) ++{ ++ struct virtio_dev_info *dev; ++ uint16_t event_idx, new_idx, old_idx; ++ int intr; ++ ++ /* ++ * Interrupt generation: if we're using EVENT_IDX, ++ * interrupt if we've crossed the event threshold. ++ * Otherwise interrupt is generated if we added "used" entries, ++ * but suppressed by VRING_AVAIL_F_NO_INTERRUPT. ++ * ++ * In any case, though, if NOTIFY_ON_EMPTY is set and the ++ * entire avail was processed, we need to interrupt always. ++ */ ++ dev = vq->dev; ++ old_idx = vq->save_used; ++ vq->save_used = new_idx = vq->used->idx; ++ if (used_all_avail && ++ (dev->negotiated_features & VIRTIO_F_NOTIFY_ON_EMPTY)) ++ intr = 1; ++ else if (dev->negotiated_features & VIRTIO_RING_F_EVENT_IDX) { ++ event_idx = VQ_USED_EVENT_IDX(vq); ++ /* ++ * This calculation is per docs and the kernel ++ * (see src/sys/dev/virtio/virtio_ring.h). 
++ */ ++ intr = (uint16_t)(new_idx - event_idx - 1) < ++ (uint16_t)(new_idx - old_idx); ++ } else { ++ intr = new_idx != old_idx && ++ !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT); ++ } ++ if (intr) ++ virtio_vq_interrupt(dev, vq); ++} ++ ++EXPORT_SYMBOL(virtio_vq_endchains); ++ + /* + * Initialize the currently-selected virtqueue. + * The guest just gave us a page frame number, from which we can +@@ -123,3 +401,5 @@ void virtio_vq_reset(struct virtio_vq_info *vq) + vq->last_avail = 0; + vq->save_used = 0; + } ++ ++EXPORT_SYMBOL(virtio_vq_reset); +diff --git a/include/linux/vbs/vq.h b/include/linux/vbs/vq.h +index 55ff810fa094..9ebde05e4663 100644 +--- a/include/linux/vbs/vq.h ++++ b/include/linux/vbs/vq.h +@@ -64,6 +64,7 @@ + + #include + #include ++#include + + /* virtqueue alignment */ + #define VRING_ALIGN 4096 +@@ -76,6 +77,30 @@ + #define VQ_ALLOC 0x01 + #define VQ_BROKED 0x02 + ++/* ++ * Feature flags. ++ * Note: bits 0 through 23 are reserved to each device type. ++ */ ++#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24) ++#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28) ++#define VIRTIO_RING_F_EVENT_IDX (1 << 29) ++ ++#define VQ_MAX_DESCRIPTORS 512 ++ ++/* virtio_desc flags */ ++#define VRING_DESC_F_NEXT (1 << 0) ++#define VRING_DESC_F_WRITE (1 << 1) ++#define VRING_DESC_F_INDIRECT (1 << 2) ++ ++/* vring_avail flags */ ++#define VRING_AVAIL_F_NO_INTERRUPT 1 ++ ++/* vring_used flags */ ++#define VRING_USED_F_NO_NOTIFY 1 ++ ++/* Functions for dealing with generalized "virtual devices" */ ++#define VQ_USED_EVENT_IDX(vq) ((vq)->avail->ring[(vq)->qsize]) ++ + /* get virtqueue size according to virtio specification */ + static inline size_t virtio_vq_ring_size(unsigned int qsz) + { +@@ -92,8 +117,56 @@ static inline size_t virtio_vq_ring_size(unsigned int qsz) + return size; + } + ++/* Is this ring ready for I/O? 
*/ ++static inline int virtio_vq_ring_ready(struct virtio_vq_info *vq) ++{ ++ return (vq->flags & VQ_ALLOC); ++} ++ ++/* ++ * Are there "available" descriptors? (This does not count ++ * how many, just returns True if there are some). ++ */ ++static inline int virtio_vq_has_descs(struct virtio_vq_info *vq) ++{ ++ return (virtio_vq_ring_ready(vq) && ++ vq->last_avail != vq->avail->idx); ++} ++ ++/* Deliver an interrupt to guest on the given virtual queue */ ++static inline void virtio_vq_interrupt(struct virtio_dev_info *dev, ++ struct virtio_vq_info *vq) ++{ ++ uint16_t msix_idx; ++ uint64_t msix_addr; ++ uint32_t msix_data; ++ ++ /* Currently we only support MSIx */ ++ msix_idx = vq->msix_idx; ++ ++ if (msix_idx == VIRTIO_MSI_NO_VECTOR) { ++ pr_err("msix idx is VIRTIO_MSI_NO_VECTOR!\n"); ++ return; ++ } ++ ++ msix_addr = vq->msix_addr; ++ msix_data = vq->msix_data; ++ ++ pr_debug("virtio_vq_interrupt: vmid is %d\n", dev->_ctx.vmid); ++ vhm_inject_msi(dev->_ctx.vmid, msix_addr, msix_data); ++} ++ ++ + /* virtqueue initialization APIs */ + void virtio_vq_init(struct virtio_vq_info *vq, uint32_t pfn); + void virtio_vq_reset(struct virtio_vq_info *vq); + ++/* virtqueue runtime APIs */ ++int virtio_vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx, ++ struct iovec *iov, int n_iov, uint16_t *flags); ++void virtio_vq_retchain(struct virtio_vq_info *vq); ++void virtio_vq_relchain(struct virtio_vq_info *vq, uint16_t idx, ++ uint32_t iolen); ++void virtio_vq_endchains(struct virtio_vq_info *vq, int used_all_avail); ++ + #endif +-- +2.17.1 + diff --git a/patches/0016-dmaengine-dw-platform-Mark-hclk-clock-optional.lpss b/patches/0016-dmaengine-dw-platform-Mark-hclk-clock-optional.lpss new file mode 100644 index 0000000000..c3bbfe95a9 --- /dev/null +++ b/patches/0016-dmaengine-dw-platform-Mark-hclk-clock-optional.lpss @@ -0,0 +1,33 @@ +From e0fd1ffb1b93286d74f2a5331cc61bc014d20d7f Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Tue, 24 Sep 2019 11:45:02 +0300 
+Subject: [PATCH 16/40] dmaengine: dw: platform: Mark 'hclk' clock optional + +On some platforms the clock can be fixed rate, always running one and +there is no need to do anything with it. + +In order to support those platforms, switch to use optional clock. + +Fixes: f8d9ddbc2851 ("Enable iDMA 32-bit on Intel Elkhart Lake") +Depends-on: 60b8f0ddf1a9 ("clk: Add (devm_)clk_get_optional() functions") +Signed-off-by: Andy Shevchenko +--- + drivers/dma/dw/platform.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c +index c90c798e5ec3..0585d749d935 100644 +--- a/drivers/dma/dw/platform.c ++++ b/drivers/dma/dw/platform.c +@@ -66,7 +66,7 @@ static int dw_probe(struct platform_device *pdev) + + data->chip = chip; + +- chip->clk = devm_clk_get(chip->dev, "hclk"); ++ chip->clk = devm_clk_get_optional(chip->dev, "hclk"); + if (IS_ERR(chip->clk)) + return PTR_ERR(chip->clk); + err = clk_prepare_enable(chip->clk); +-- +2.17.1 + diff --git a/patches/0016-drm-i915-Add-EU-stride-runtime-parameter.drm b/patches/0016-drm-i915-Add-EU-stride-runtime-parameter.drm new file mode 100644 index 0000000000..974f681ed6 --- /dev/null +++ b/patches/0016-drm-i915-Add-EU-stride-runtime-parameter.drm @@ -0,0 +1,127 @@ +From 0ea90a8090f1913711d4b45c306d584f1f17df60 Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:03:00 -0700 +Subject: [PATCH 016/690] drm/i915: Add EU stride runtime parameter + +Add a new SSEU runtime parameter, eu_stride, which is +used to mirror the userspace concept of a range of EUs +per subslice. + +This patch simply adds the parameter and updates usage +in the QUERY_TOPOLOGY_INFO handler. 
+ +v2: Add GEM_BUG_ON to make sure eu_stride is valid + +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-5-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/gt/intel_sseu.c | 2 ++ + drivers/gpu/drm/i915/gt/intel_sseu.h | 3 +++ + drivers/gpu/drm/i915/i915_query.c | 5 ++--- + drivers/gpu/drm/i915/intel_device_info.c | 9 ++++----- + 4 files changed, 11 insertions(+), 8 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c +index edf39ae132c3..d52686a1afdc 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.c ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.c +@@ -17,6 +17,8 @@ void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + + sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices); + GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE); ++ sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); ++ GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE); + } + + unsigned int +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h +index 8b8b562ff773..7f2355ce963d 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.h ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h +@@ -16,6 +16,8 @@ struct drm_i915_private; + #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ + #define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) + #define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) ++#define GEN_MAX_EUS (10) /* HSW upper bound */ ++#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS) + + struct sseu_dev_info { + u8 slice_mask; +@@ -35,6 +37,7 @@ struct sseu_dev_info { + u8 max_eus_per_subslice; + + u8 ss_stride; ++ u8 eu_stride; + + /* We don't have more than 8 eus per subslice at the moment and as we + * store eus enabled using bits, no need to multiply by eus per +diff --git a/drivers/gpu/drm/i915/i915_query.c 
b/drivers/gpu/drm/i915/i915_query.c +index d8e25dcf5f0b..abac5042da2b 100644 +--- a/drivers/gpu/drm/i915/i915_query.c ++++ b/drivers/gpu/drm/i915/i915_query.c +@@ -37,7 +37,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv, + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + struct drm_i915_query_topology_info topo; + u32 slice_length, subslice_length, eu_length, total_length; +- u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); + int ret; + + if (query_item->flags != 0) +@@ -50,7 +49,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, + + slice_length = sizeof(sseu->slice_mask); + subslice_length = sseu->max_slices * sseu->ss_stride; +- eu_length = sseu->max_slices * sseu->max_subslices * eu_stride; ++ eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride; + total_length = sizeof(topo) + slice_length + subslice_length + + eu_length; + +@@ -70,7 +69,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, + topo.subslice_offset = slice_length; + topo.subslice_stride = sseu->ss_stride; + topo.eu_offset = slice_length + subslice_length; +- topo.eu_stride = eu_stride; ++ topo.eu_stride = sseu->eu_stride; + + if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr), + &topo, sizeof(topo))) +diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c +index 77d7bbaa49f3..b1a79ed408eb 100644 +--- a/drivers/gpu/drm/i915/intel_device_info.c ++++ b/drivers/gpu/drm/i915/intel_device_info.c +@@ -118,10 +118,9 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info, + static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, + int subslice) + { +- int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); +- int slice_stride = sseu->max_subslices * subslice_stride; ++ int slice_stride = sseu->max_subslices * sseu->eu_stride; + +- return slice * slice_stride + subslice * subslice_stride; ++ return slice * 
slice_stride + subslice * sseu->eu_stride; + } + + static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, +@@ -130,7 +129,7 @@ static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, + int i, offset = sseu_eu_idx(sseu, slice, subslice); + u16 eu_mask = 0; + +- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { ++ for (i = 0; i < sseu->eu_stride; i++) { + eu_mask |= ((u16)sseu->eu_mask[offset + i]) << + (i * BITS_PER_BYTE); + } +@@ -143,7 +142,7 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice, + { + int i, offset = sseu_eu_idx(sseu, slice, subslice); + +- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { ++ for (i = 0; i < sseu->eu_stride; i++) { + sseu->eu_mask[offset + i] = + (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; + } +-- +2.17.1 + diff --git a/patches/0016-net-phy-add-private-data-to-mdio_device.connectivity b/patches/0016-net-phy-add-private-data-to-mdio_device.connectivity new file mode 100644 index 0000000000..0a32ec9a22 --- /dev/null +++ b/patches/0016-net-phy-add-private-data-to-mdio_device.connectivity @@ -0,0 +1,29 @@ +From 5cb24e8e581d9ae9021ccff579133e6b3f6dd737 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Thu, 1 Aug 2019 13:10:28 +0800 +Subject: [PATCH 016/108] net: phy: add private data to mdio_device + +PHY converter device is represented as mdio_device and requires private +data. So, we add pointer for private data to mdio_device struct. 
+ +Signed-off-by: Ong Boon Leong +--- + include/linux/mdio.h | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/include/linux/mdio.h b/include/linux/mdio.h +index f4b14b68df74..308282caf4a3 100644 +--- a/include/linux/mdio.h ++++ b/include/linux/mdio.h +@@ -40,6 +40,8 @@ struct mdio_device { + struct reset_control *reset_ctrl; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; ++ /* Private data */ ++ void *priv; + }; + #define to_mdio_device(d) container_of(d, struct mdio_device, dev) + +-- +2.17.1 + diff --git a/patches/0016-platform-x86-SEP-build-only-when-ACPI-PCI-is-.sep-socwatch b/patches/0016-platform-x86-SEP-build-only-when-ACPI-PCI-is-.sep-socwatch new file mode 100644 index 0000000000..ee1f1e5231 --- /dev/null +++ b/patches/0016-platform-x86-SEP-build-only-when-ACPI-PCI-is-.sep-socwatch @@ -0,0 +1,28 @@ +From 11c2fc9dada524b8c520a25cd3900f730481a6b7 Mon Sep 17 00:00:00 2001 +From: Manisha +Date: Mon, 6 May 2019 17:23:22 +0000 +Subject: [PATCH 16/27] platform/x86: SEP build only when ACPI && PCI is + configured + +SEP driver uses PCI and ACPI functions like pci_find_bus etc + +Signed-off-by: Manisha +--- + drivers/platform/x86/sepdk/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/platform/x86/sepdk/Kconfig b/drivers/platform/x86/sepdk/Kconfig +index b119ed6d7c1f..d7dc0f592a96 100755 +--- a/drivers/platform/x86/sepdk/Kconfig ++++ b/drivers/platform/x86/sepdk/Kconfig +@@ -12,6 +12,7 @@ config INTEL_SEP + config SEP + tristate "SEP kernel driver" + depends on INTEL_SEP ++ depends on ACPI && PCI + default m + + config SEP_ACRN +-- +2.17.1 + diff --git a/patches/0016-rpmb-add-nvme-rpmb-frame-type.security b/patches/0016-rpmb-add-nvme-rpmb-frame-type.security new file mode 100644 index 0000000000..a3f5ad52a0 --- /dev/null +++ b/patches/0016-rpmb-add-nvme-rpmb-frame-type.security @@ -0,0 +1,164 @@ +From 08f00c85d1dbf53cb0a17b2984030261501430ad Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Sat, 28 Apr 2018 
22:01:32 +0300 +Subject: [PATCH 16/65] rpmb: add nvme rpmb frame type + +The NVMe RPMB frame differs in layout and endianity +from the one defined by JDEC. + +Change-Id: Ifae77454e1bc8733eb1e5bcb2146dc198f94151d +Signed-off-by: Tomas Winkler +--- + drivers/char/rpmb/cdev.c | 26 ++++++++++++++++++------- + include/uapi/linux/rpmb.h | 41 +++++++++++++++++++++++++++++++++++++++ + 2 files changed, 60 insertions(+), 7 deletions(-) + +diff --git a/drivers/char/rpmb/cdev.c b/drivers/char/rpmb/cdev.c +index 49a6a26a5297..da945942a5e6 100644 +--- a/drivers/char/rpmb/cdev.c ++++ b/drivers/char/rpmb/cdev.c +@@ -67,18 +67,28 @@ static int rpmb_release(struct inode *inode, struct file *fp) + return 0; + } + ++static size_t rpmb_ioc_frames_len(struct rpmb_dev *rdev, size_t nframes) ++{ ++ if (rdev->ops->type == RPMB_TYPE_NVME) ++ return rpmb_ioc_frames_len_nvme(nframes); ++ else ++ return rpmb_ioc_frames_len_jdec(nframes); ++} ++ + /** + * rpmb_cmd_copy_from_user - copy rpmb command from the user space + * ++ * @rdev: rpmb device + * @cmd: internal cmd structure + * @ucmd: user space cmd structure + * + * Return: 0 on success, <0 on error + */ +-static int rpmb_cmd_copy_from_user(struct rpmb_cmd *cmd, ++static int rpmb_cmd_copy_from_user(struct rpmb_dev *rdev, ++ struct rpmb_cmd *cmd, + struct rpmb_ioc_cmd __user *ucmd) + { +- struct rpmb_frame *frames; ++ void *frames; + u64 frames_ptr; + + if (get_user(cmd->flags, &ucmd->flags)) +@@ -95,7 +105,7 @@ static int rpmb_cmd_copy_from_user(struct rpmb_cmd *cmd, + return -EFAULT; + + frames = memdup_user(u64_to_user_ptr(frames_ptr), +- rpmb_ioc_frames_len_jdec(cmd->nframes)); ++ rpmb_ioc_frames_len(rdev, cmd->nframes)); + if (IS_ERR(frames)) + return PTR_ERR(frames); + +@@ -106,12 +116,14 @@ static int rpmb_cmd_copy_from_user(struct rpmb_cmd *cmd, + /** + * rpmb_cmd_copy_to_user - copy rpmb command to the user space + * ++ * @rdev: rpmb device + * @ucmd: user space cmd structure + * @cmd: internal cmd structure + * + * Return: 0 on 
success, <0 on error + */ +-static int rpmb_cmd_copy_to_user(struct rpmb_ioc_cmd __user *ucmd, ++static int rpmb_cmd_copy_to_user(struct rpmb_dev *rdev, ++ struct rpmb_ioc_cmd __user *ucmd, + struct rpmb_cmd *cmd) + { + u64 frames_ptr; +@@ -121,7 +133,7 @@ static int rpmb_cmd_copy_to_user(struct rpmb_ioc_cmd __user *ucmd, + + /* some archs have issues with 64bit get_user */ + if (copy_to_user(u64_to_user_ptr(frames_ptr), cmd->frames, +- rpmb_ioc_frames_len_jdec(cmd->nframes))) ++ rpmb_ioc_frames_len(rdev, cmd->nframes))) + return -EFAULT; + + return 0; +@@ -167,7 +179,7 @@ static long rpmb_ioctl_seq_cmd(struct rpmb_dev *rdev, + + ucmds = (struct rpmb_ioc_cmd __user *)ptr->cmds; + for (i = 0; i < ncmds; i++) { +- ret = rpmb_cmd_copy_from_user(&cmds[i], &ucmds[i]); ++ ret = rpmb_cmd_copy_from_user(rdev, &cmds[i], &ucmds[i]); + if (ret) + goto out; + } +@@ -177,7 +189,7 @@ static long rpmb_ioctl_seq_cmd(struct rpmb_dev *rdev, + goto out; + + for (i = 0; i < ncmds; i++) { +- ret = rpmb_cmd_copy_to_user(&ucmds[i], &cmds[i]); ++ ret = rpmb_cmd_copy_to_user(rdev, &ucmds[i], &cmds[i]); + if (ret) + goto out; + } +diff --git a/include/uapi/linux/rpmb.h b/include/uapi/linux/rpmb.h +index 2bcfb2715dfa..e23bd6ef0ab3 100644 +--- a/include/uapi/linux/rpmb.h ++++ b/include/uapi/linux/rpmb.h +@@ -68,6 +68,44 @@ struct rpmb_frame_jdec { + (sizeof(struct rpmb_frame_jdec) - \ + offsetof(struct rpmb_frame_jdec, data)) + ++/** ++ * struct rpmb_frame_nvme - rpmb frame as defined by specs ++ * ++ * @key_mac : The authentication key or the message authentication ++ * code (MAC) depending on the request/response type. ++ * The MAC will be delivered in the last (or the only) ++ * block of data. ++ * @rpmb_target : RPMB target to access. ++ * @nonce : Random number generated by the host for the requests ++ * and copied to the response by the RPMB engine. ++ * @write_counter: Counter value for the total amount of the successful ++ * authenticated data write requests made by the host. 
++ * @addr : Address of the data to be programmed to or read ++ * from the RPMB. Address is the serial number of ++ * the accessed block (half sector 256B). ++ * @block_count : Number of sctors (sectors, 512B) requested to be ++ * read/programmed. (In spec this field is named sector_count). ++ * @result : Includes information about the status of the write counter ++ * (valid, expired) and result of the access made to the RPMB. ++ * @req_resp : Defines the type of request and response to/from the memory. ++ * @data : variable sized payload 512 * block_count ++ */ ++struct rpmb_frame_nvme { ++ __u8 key_mac[223]; ++ __u8 rpmb_target; ++ __u8 nonce[16]; ++ __le32 write_counter; ++ __le32 addr; ++ __le32 block_count; ++ __le16 result; ++ __le16 req_resp; ++ __u8 data[0]; ++} __attribute__((packed)); ++ ++#define rpmb_nvme_hmac_data_len \ ++ (sizeof(struct rpmb_frame_nvme) - \ ++ offsetof(struct rpmb_frame_nvme, rpmb_target)) ++ + #define RPMB_PROGRAM_KEY 0x0001 /* Program RPMB Authentication Key */ + #define RPMB_GET_WRITE_COUNTER 0x0002 /* Read RPMB write counter */ + #define RPMB_WRITE_DATA 0x0003 /* Write data to RPMB partition */ +@@ -142,6 +180,9 @@ struct rpmb_ioc_cmd { + #define rpmb_ioc_frames_len_jdec(_n) \ + (((_n) ?: 1) * sizeof(struct rpmb_frame_jdec)) + ++#define rpmb_ioc_frames_len_nvme(_n) \ ++ (sizeof(struct rpmb_frame_nvme) + (_n) * 512) ++ + /** + * struct rpmb_ioc_seq_cmd - rpmb command sequence + * +-- +2.17.1 + diff --git a/patches/0016-usb-typec-ucsi-Remove-struct-ucsi_control.usb-typec b/patches/0016-usb-typec-ucsi-Remove-struct-ucsi_control.usb-typec new file mode 100644 index 0000000000..acfa308e00 --- /dev/null +++ b/patches/0016-usb-typec-ucsi-Remove-struct-ucsi_control.usb-typec @@ -0,0 +1,754 @@ +From 7d2011a87815a4187cfe9b688212d61c76c4a96f Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Thu, 26 Sep 2019 12:38:25 +0300 +Subject: [PATCH 16/18] usb: typec: ucsi: Remove struct ucsi_control + +That data structure was used for 
constructing the commands +before executing them, but it was never really useful. Using +the structure just complicated the driver. The commands are +64-bit wide, so it is enough to simply fill a u64 variable. +No data structures needed. + +This simplifies the driver considerable and makes it much +easier to for example add support for big endian systems +later on. + +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/ucsi/displayport.c | 18 +-- + drivers/usb/typec/ucsi/trace.c | 11 -- + drivers/usb/typec/ucsi/trace.h | 50 +----- + drivers/usb/typec/ucsi/ucsi.c | 109 +++++++------ + drivers/usb/typec/ucsi/ucsi.h | 231 +++++---------------------- + 5 files changed, 117 insertions(+), 302 deletions(-) + +diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c +index 9f86d4f99363..0f1fa22b1d30 100644 +--- a/drivers/usb/typec/ucsi/displayport.c ++++ b/drivers/usb/typec/ucsi/displayport.c +@@ -49,7 +49,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt) + { + struct ucsi_dp *dp = typec_altmode_get_drvdata(alt); + struct ucsi *ucsi = dp->con->ucsi; +- struct ucsi_control ctrl; ++ u64 command; + u8 cur = 0; + u16 ver; + int ret; +@@ -65,8 +65,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt) + return -EOPNOTSUPP; + } + +- UCSI_CMD_GET_CURRENT_CAM(ctrl, dp->con->num); +- ret = ucsi_send_command(dp->con->ucsi, &ctrl, &cur, sizeof(cur)); ++ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(dp->con->num); ++ ret = ucsi_send_command(dp->con->ucsi, command, &cur, sizeof(cur)); + if (ret < 0) { + ret = ucsi->ops->read(ucsi, UCSI_VERSION, &ver, sizeof(ver)); + if (ret) +@@ -109,7 +109,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt) + static int ucsi_displayport_exit(struct typec_altmode *alt) + { + struct ucsi_dp *dp = typec_altmode_get_drvdata(alt); +- struct ucsi_control ctrl; ++ u64 command; + int ret = 0; + + mutex_lock(&dp->con->lock); +@@ -123,8 +123,8 @@ static int 
ucsi_displayport_exit(struct typec_altmode *alt) + goto out_unlock; + } + +- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0); +- ret = ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0); ++ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0); ++ ret = ucsi_send_command(dp->con->ucsi, command, NULL, 0); + if (ret < 0) + goto out_unlock; + +@@ -178,14 +178,14 @@ static int ucsi_displayport_status_update(struct ucsi_dp *dp) + static int ucsi_displayport_configure(struct ucsi_dp *dp) + { + u32 pins = DP_CONF_GET_PIN_ASSIGN(dp->data.conf); +- struct ucsi_control ctrl; ++ u64 command; + + if (!dp->override) + return 0; + +- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins); ++ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins); + +- return ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0); ++ return ucsi_send_command(dp->con->ucsi, command, NULL, 0); + } + + static int ucsi_displayport_vdm(struct typec_altmode *alt, +diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c +index 1dabafb74320..48ad1dc1b1b2 100644 +--- a/drivers/usb/typec/ucsi/trace.c ++++ b/drivers/usb/typec/ucsi/trace.c +@@ -33,17 +33,6 @@ const char *ucsi_cmd_str(u64 raw_cmd) + return ucsi_cmd_strs[(cmd >= ARRAY_SIZE(ucsi_cmd_strs)) ? 0 : cmd]; + } + +-static const char * const ucsi_ack_strs[] = { +- [0] = "", +- [UCSI_ACK_EVENT] = "event", +- [UCSI_ACK_CMD] = "command", +-}; +- +-const char *ucsi_ack_str(u8 ack) +-{ +- return ucsi_ack_strs[(ack >= ARRAY_SIZE(ucsi_ack_strs)) ? 
0 : ack]; +-} +- + const char *ucsi_cci_str(u32 cci) + { + if (cci & GENMASK(7, 0)) { +diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h +index 6e3d510b236e..2262229dae8e 100644 +--- a/drivers/usb/typec/ucsi/trace.h ++++ b/drivers/usb/typec/ucsi/trace.h +@@ -10,54 +10,18 @@ + #include + + const char *ucsi_cmd_str(u64 raw_cmd); +-const char *ucsi_ack_str(u8 ack); + const char *ucsi_cci_str(u32 cci); + const char *ucsi_recipient_str(u8 recipient); + +-DECLARE_EVENT_CLASS(ucsi_log_ack, +- TP_PROTO(u8 ack), +- TP_ARGS(ack), +- TP_STRUCT__entry( +- __field(u8, ack) +- ), +- TP_fast_assign( +- __entry->ack = ack; +- ), +- TP_printk("ACK %s", ucsi_ack_str(__entry->ack)) +-); +- +-DEFINE_EVENT(ucsi_log_ack, ucsi_ack, +- TP_PROTO(u8 ack), +- TP_ARGS(ack) +-); +- +-DECLARE_EVENT_CLASS(ucsi_log_control, +- TP_PROTO(struct ucsi_control *ctrl), +- TP_ARGS(ctrl), +- TP_STRUCT__entry( +- __field(u64, ctrl) +- ), +- TP_fast_assign( +- __entry->ctrl = ctrl->raw_cmd; +- ), +- TP_printk("control=%08llx (%s)", __entry->ctrl, +- ucsi_cmd_str(__entry->ctrl)) +-); +- +-DEFINE_EVENT(ucsi_log_control, ucsi_command, +- TP_PROTO(struct ucsi_control *ctrl), +- TP_ARGS(ctrl) +-); +- + DECLARE_EVENT_CLASS(ucsi_log_command, +- TP_PROTO(struct ucsi_control *ctrl, int ret), +- TP_ARGS(ctrl, ret), ++ TP_PROTO(u64 command, int ret), ++ TP_ARGS(command, ret), + TP_STRUCT__entry( + __field(u64, ctrl) + __field(int, ret) + ), + TP_fast_assign( +- __entry->ctrl = ctrl->raw_cmd; ++ __entry->ctrl = command; + __entry->ret = ret; + ), + TP_printk("%s -> %s (err=%d)", ucsi_cmd_str(__entry->ctrl), +@@ -66,13 +30,13 @@ DECLARE_EVENT_CLASS(ucsi_log_command, + ); + + DEFINE_EVENT(ucsi_log_command, ucsi_run_command, +- TP_PROTO(struct ucsi_control *ctrl, int ret), +- TP_ARGS(ctrl, ret) ++ TP_PROTO(u64 command, int ret), ++ TP_ARGS(command, ret) + ); + + DEFINE_EVENT(ucsi_log_command, ucsi_reset_ppm, +- TP_PROTO(struct ucsi_control *ctrl, int ret), +- TP_ARGS(ctrl, ret) ++ TP_PROTO(u64 
command, int ret), ++ TP_ARGS(command, ret) + ); + + DECLARE_EVENT_CLASS(ucsi_log_connector_status, +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index ea149a115834..19892511120b 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -128,13 +128,13 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) + return UCSI_CCI_LENGTH(cci); + } + +-static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl, ++static int ucsi_run_command(struct ucsi *ucsi, u64 command, + void *data, size_t size) + { + u8 length; + int ret; + +- ret = ucsi_exec_command(ucsi, ctrl->raw_cmd); ++ ret = ucsi_exec_command(ucsi, command); + if (ret < 0) + return ret; + +@@ -153,13 +153,13 @@ static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl, + return length; + } + +-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl, ++int ucsi_send_command(struct ucsi *ucsi, u64 command, + void *retval, size_t size) + { + int ret; + + mutex_lock(&ucsi->ppm_lock); +- ret = ucsi_run_command(ucsi, ctrl, retval, size); ++ ret = ucsi_run_command(ucsi, command, retval, size); + mutex_unlock(&ucsi->ppm_lock); + + return ret; +@@ -168,11 +168,12 @@ EXPORT_SYMBOL_GPL(ucsi_send_command); + + int ucsi_resume(struct ucsi *ucsi) + { +- struct ucsi_control ctrl; ++ u64 command; + + /* Restore UCSI notification enable mask after system resume */ +- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL); +- return ucsi_send_command(ucsi, &ctrl, NULL, 0); ++ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL; ++ ++ return ucsi_send_command(ucsi, command, NULL, 0); + } + EXPORT_SYMBOL_GPL(ucsi_resume); + /* -------------------------------------------------------------------------- */ +@@ -180,14 +181,14 @@ EXPORT_SYMBOL_GPL(ucsi_resume); + void ucsi_altmode_update_active(struct ucsi_connector *con) + { + const struct typec_altmode *altmode = NULL; +- struct ucsi_control ctrl; ++ u64 command; + u16 
version; + int ret; + u8 cur; + int i; + +- UCSI_CMD_GET_CURRENT_CAM(ctrl, con->num); +- ret = ucsi_run_command(con->ucsi, &ctrl, &cur, sizeof(cur)); ++ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num); ++ ret = ucsi_run_command(con->ucsi, command, &cur, sizeof(cur)); + if (ret < 0) { + ret = con->ucsi->ops->read(con->ucsi, UCSI_VERSION, &version, + sizeof(version)); +@@ -307,7 +308,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient) + int max_altmodes = UCSI_MAX_ALTMODES; + struct typec_altmode_desc desc; + struct ucsi_altmode alt[2]; +- struct ucsi_control ctrl; ++ u64 command; + int num = 1; + int ret; + int len; +@@ -325,8 +326,11 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient) + + for (i = 0; i < max_altmodes;) { + memset(alt, 0, sizeof(alt)); +- UCSI_CMD_GET_ALTERNATE_MODES(ctrl, recipient, con->num, i, 1); +- len = ucsi_run_command(con->ucsi, &ctrl, alt, sizeof(alt)); ++ command = UCSI_GET_ALTERNATE_MODES; ++ command |= UCSI_GET_ALTMODE_RECIPIENT(recipient); ++ command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num); ++ command |= UCSI_GET_ALTMODE_OFFSET(i); ++ len = ucsi_run_command(con->ucsi, command, alt, sizeof(alt)); + if (len <= 0) + return len; + +@@ -487,13 +491,14 @@ static void ucsi_handle_connector_change(struct work_struct *work) + struct ucsi_connector *con = container_of(work, struct ucsi_connector, + work); + struct ucsi *ucsi = con->ucsi; +- struct ucsi_control ctrl; ++ u64 command; + int ret; + + mutex_lock(&con->lock); + +- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num); +- ret = ucsi_send_command(ucsi, &ctrl, &con->status, sizeof(con->status)); ++ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num); ++ ret = ucsi_send_command(ucsi, command, &con->status, ++ sizeof(con->status)); + if (ret < 0) { + dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n", + __func__, ret); +@@ -537,8 +542,9 @@ static void ucsi_handle_connector_change(struct 
work_struct *work) + * Running GET_CAM_SUPPORTED command just to make sure the PPM + * does not get stuck in case it assumes we do so. + */ +- UCSI_CMD_GET_CAM_SUPPORTED(ctrl, con->num); +- ucsi_run_command(con->ucsi, &ctrl, NULL, 0); ++ command = UCSI_GET_CAM_SUPPORTED; ++ command |= UCSI_CONNECTOR_NUMBER(con->num); ++ ucsi_run_command(con->ucsi, command, NULL, 0); + } + + if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE) +@@ -573,11 +579,12 @@ EXPORT_SYMBOL_GPL(ucsi_connector_change); + + static int ucsi_reset_connector(struct ucsi_connector *con, bool hard) + { +- struct ucsi_control ctrl; ++ u64 command; + +- UCSI_CMD_CONNECTOR_RESET(ctrl, con, hard); ++ command = UCSI_CONNECTOR_RESET | UCSI_CONNECTOR_NUMBER(con->num); ++ command |= hard ? UCSI_CONNECTOR_RESET_HARD : 0; + +- return ucsi_send_command(con->ucsi, &ctrl, NULL, 0); ++ return ucsi_send_command(con->ucsi, command, NULL, 0); + } + + static int ucsi_reset_ppm(struct ucsi *ucsi) +@@ -617,21 +624,21 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) + return 0; + } + +-static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl) ++static int ucsi_role_cmd(struct ucsi_connector *con, u64 command) + { + int ret; + +- ret = ucsi_send_command(con->ucsi, ctrl, NULL, 0); ++ ret = ucsi_send_command(con->ucsi, command, NULL, 0); + if (ret == -ETIMEDOUT) { +- struct ucsi_control c; ++ u64 c; + + /* PPM most likely stopped responding. Resetting everything. 
*/ + mutex_lock(&con->ucsi->ppm_lock); + ucsi_reset_ppm(con->ucsi); + mutex_unlock(&con->ucsi->ppm_lock); + +- UCSI_CMD_SET_NTFY_ENABLE(c, UCSI_ENABLE_NTFY_ALL); +- ucsi_send_command(con->ucsi, &c, NULL, 0); ++ c = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL; ++ ucsi_send_command(con->ucsi, c, NULL, 0); + + ucsi_reset_connector(con, true); + } +@@ -642,7 +649,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl) + static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) + { + struct ucsi_connector *con = typec_get_drvdata(port); +- struct ucsi_control ctrl; ++ u64 command; + int ret = 0; + + mutex_lock(&con->lock); +@@ -658,8 +665,10 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) + role == TYPEC_HOST)) + goto out_unlock; + +- UCSI_CMD_SET_UOR(ctrl, con, role); +- ret = ucsi_role_cmd(con, &ctrl); ++ command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num); ++ command |= UCSI_SET_UOR_ROLE(role); ++ command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS; ++ ret = ucsi_role_cmd(con, command); + if (ret < 0) + goto out_unlock; + +@@ -676,7 +685,7 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) + static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) + { + struct ucsi_connector *con = typec_get_drvdata(port); +- struct ucsi_control ctrl; ++ u64 command; + int ret = 0; + + mutex_lock(&con->lock); +@@ -689,8 +698,10 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) + if (con->status.pwr_dir == role) + goto out_unlock; + +- UCSI_CMD_SET_PDR(ctrl, con, role); +- ret = ucsi_role_cmd(con, &ctrl); ++ command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num); ++ command |= UCSI_SET_PDR_ROLE(role); ++ command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS; ++ ret = ucsi_role_cmd(con, command); + if (ret < 0) + goto out_unlock; + +@@ -733,7 +744,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + struct ucsi_connector *con = 
&ucsi->connector[index]; + struct typec_capability *cap = &con->typec_cap; + enum typec_accessory *accessory = cap->accessory; +- struct ucsi_control ctrl; ++ u64 command; + int ret; + + INIT_WORK(&con->work, ucsi_handle_connector_change); +@@ -743,8 +754,9 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + con->ucsi = ucsi; + + /* Get connector capability */ +- UCSI_CMD_GET_CONNECTOR_CAPABILITY(ctrl, con->num); +- ret = ucsi_run_command(ucsi, &ctrl, &con->cap, sizeof(con->cap)); ++ command = UCSI_GET_CONNECTOR_CAPABILITY; ++ command |= UCSI_CONNECTOR_NUMBER(con->num); ++ ret = ucsi_run_command(ucsi, command, &con->cap, sizeof(con->cap)); + if (ret < 0) + return ret; + +@@ -787,8 +799,9 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + con->num); + + /* Get the status */ +- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num); +- ret = ucsi_run_command(ucsi, &ctrl, &con->status, sizeof(con->status)); ++ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num); ++ ret = ucsi_run_command(ucsi, command, &con->status, ++ sizeof(con->status)); + if (ret < 0) { + dev_err(ucsi->dev, "con%d: failed to get status\n", con->num); + return 0; +@@ -836,7 +849,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + int ucsi_init(struct ucsi *ucsi) + { + struct ucsi_connector *con; +- struct ucsi_control ctrl; ++ u64 command; + int ret; + int i; + +@@ -850,15 +863,15 @@ int ucsi_init(struct ucsi *ucsi) + } + + /* Enable basic notifications */ +- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE | +- UCSI_ENABLE_NTFY_ERROR); +- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0); ++ command = UCSI_SET_NOTIFICATION_ENABLE; ++ command |= UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR; ++ ret = ucsi_run_command(ucsi, command, NULL, 0); + if (ret < 0) + goto err_reset; + + /* Get PPM capabilities */ +- UCSI_CMD_GET_CAPABILITY(ctrl); +- ret = ucsi_run_command(ucsi, &ctrl, &ucsi->cap, sizeof(ucsi->cap)); ++ command = 
UCSI_GET_CAPABILITY; ++ ret = ucsi_run_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap)); + if (ret < 0) + goto err_reset; + +@@ -883,8 +896,8 @@ int ucsi_init(struct ucsi *ucsi) + } + + /* Enable all notifications */ +- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL); +- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0); ++ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL; ++ ret = ucsi_run_command(ucsi, command, NULL, 0); + if (ret < 0) + goto err_unregister; + +@@ -1005,15 +1018,15 @@ EXPORT_SYMBOL_GPL(ucsi_register); + */ + void ucsi_unregister(struct ucsi *ucsi) + { +- struct ucsi_control ctrl; ++ u64 command; + int i; + + /* Make sure that we are not in the middle of driver initialization */ + cancel_work_sync(&ucsi->work); + + /* Disable everything except command complete notification */ +- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE) +- ucsi_send_command(ucsi, &ctrl, NULL, 0); ++ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_CMD_COMPLETE; ++ ucsi_send_command(ucsi, command, NULL, 0); + + for (i = 0; i < ucsi->cap.num_connectors; i++) { + cancel_work_sync(&ucsi->connector[i].work); +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h +index bb1df6cb241b..755c8936bff4 100644 +--- a/drivers/usb/typec/ucsi/ucsi.h ++++ b/drivers/usb/typec/ucsi/ucsi.h +@@ -60,178 +60,6 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num); + + /* -------------------------------------------------------------------------- */ + +-/* Command Status and Connector Change Indication (CCI) data structure */ +-struct ucsi_cci { +- u8:1; /* reserved */ +- u8 connector_change:7; +- u8 data_length; +- u16:9; /* reserved */ +- u16 not_supported:1; +- u16 cancel_complete:1; +- u16 reset_complete:1; +- u16 busy:1; +- u16 ack_complete:1; +- u16 error:1; +- u16 cmd_complete:1; +-} __packed; +- +-/* Default fields in CONTROL data structure */ +-struct ucsi_command { +- u8 cmd; +- u8 length; +- u64 data:48; +-} __packed; +- 
+-/* ACK Command structure */ +-struct ucsi_ack_cmd { +- u8 cmd; +- u8 length; +- u8 cci_ack:1; +- u8 cmd_ack:1; +- u8:6; /* reserved */ +-} __packed; +- +-/* Connector Reset Command structure */ +-struct ucsi_con_rst { +- u8 cmd; +- u8 length; +- u8 con_num:7; +- u8 hard_reset:1; +-} __packed; +- +-/* Set USB Operation Mode Command structure */ +-struct ucsi_uor_cmd { +- u8 cmd; +- u8 length; +- u16 con_num:7; +- u16 role:3; +-#define UCSI_UOR_ROLE_DFP BIT(0) +-#define UCSI_UOR_ROLE_UFP BIT(1) +-#define UCSI_UOR_ROLE_DRP BIT(2) +- u16:6; /* reserved */ +-} __packed; +- +-/* Get Alternate Modes Command structure */ +-struct ucsi_altmode_cmd { +- u8 cmd; +- u8 length; +- u8 recipient; +-#define UCSI_RECIPIENT_CON 0 +-#define UCSI_RECIPIENT_SOP 1 +-#define UCSI_RECIPIENT_SOP_P 2 +-#define UCSI_RECIPIENT_SOP_PP 3 +- u8 con_num; +- u8 offset; +- u8 num_altmodes; +-} __packed; +- +-struct ucsi_control { +- union { +- u64 raw_cmd; +- struct ucsi_command cmd; +- struct ucsi_uor_cmd uor; +- struct ucsi_ack_cmd ack; +- struct ucsi_con_rst con_rst; +- struct ucsi_altmode_cmd alt; +- }; +-}; +- +-#define __UCSI_CMD(_ctrl_, _cmd_) \ +-{ \ +- (_ctrl_).raw_cmd = 0; \ +- (_ctrl_).cmd.cmd = _cmd_; \ +-} +- +-/* Helper for preparing ucsi_control for CONNECTOR_RESET command. */ +-#define UCSI_CMD_CONNECTOR_RESET(_ctrl_, _con_, _hard_) \ +-{ \ +- __UCSI_CMD(_ctrl_, UCSI_CONNECTOR_RESET) \ +- (_ctrl_).con_rst.con_num = (_con_)->num; \ +- (_ctrl_).con_rst.hard_reset = _hard_; \ +-} +- +-/* Helper for preparing ucsi_control for ACK_CC_CI command. */ +-#define UCSI_CMD_ACK(_ctrl_, _ack_) \ +-{ \ +- __UCSI_CMD(_ctrl_, UCSI_ACK_CC_CI) \ +- (_ctrl_).ack.cci_ack = ((_ack_) == UCSI_ACK_EVENT); \ +- (_ctrl_).ack.cmd_ack = ((_ack_) == UCSI_ACK_CMD); \ +-} +- +-/* Helper for preparing ucsi_control for SET_NOTIFY_ENABLE command. 
*/ +-#define UCSI_CMD_SET_NTFY_ENABLE(_ctrl_, _ntfys_) \ +-{ \ +- __UCSI_CMD(_ctrl_, UCSI_SET_NOTIFICATION_ENABLE) \ +- (_ctrl_).cmd.data = _ntfys_; \ +-} +- +-/* Helper for preparing ucsi_control for GET_CAPABILITY command. */ +-#define UCSI_CMD_GET_CAPABILITY(_ctrl_) \ +-{ \ +- __UCSI_CMD(_ctrl_, UCSI_GET_CAPABILITY) \ +-} +- +-/* Helper for preparing ucsi_control for GET_CONNECTOR_CAPABILITY command. */ +-#define UCSI_CMD_GET_CONNECTOR_CAPABILITY(_ctrl_, _con_) \ +-{ \ +- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_CAPABILITY) \ +- (_ctrl_).cmd.data = _con_; \ +-} +- +-/* Helper for preparing ucsi_control for GET_ALTERNATE_MODES command. */ +-#define UCSI_CMD_GET_ALTERNATE_MODES(_ctrl_, _r_, _con_num_, _o_, _num_)\ +-{ \ +- __UCSI_CMD((_ctrl_), UCSI_GET_ALTERNATE_MODES) \ +- _ctrl_.alt.recipient = (_r_); \ +- _ctrl_.alt.con_num = (_con_num_); \ +- _ctrl_.alt.offset = (_o_); \ +- _ctrl_.alt.num_altmodes = (_num_) - 1; \ +-} +- +-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */ +-#define UCSI_CMD_GET_CAM_SUPPORTED(_ctrl_, _con_) \ +-{ \ +- __UCSI_CMD((_ctrl_), UCSI_GET_CAM_SUPPORTED) \ +- _ctrl_.cmd.data = (_con_); \ +-} +- +-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */ +-#define UCSI_CMD_GET_CURRENT_CAM(_ctrl_, _con_) \ +-{ \ +- __UCSI_CMD((_ctrl_), UCSI_GET_CURRENT_CAM) \ +- _ctrl_.cmd.data = (_con_); \ +-} +- +-/* Helper for preparing ucsi_control for GET_CONNECTOR_STATUS command. */ +-#define UCSI_CMD_GET_CONNECTOR_STATUS(_ctrl_, _con_) \ +-{ \ +- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_STATUS) \ +- (_ctrl_).cmd.data = _con_; \ +-} +- +-#define __UCSI_ROLE(_ctrl_, _cmd_, _con_num_) \ +-{ \ +- __UCSI_CMD(_ctrl_, _cmd_) \ +- (_ctrl_).uor.con_num = _con_num_; \ +- (_ctrl_).uor.role = UCSI_UOR_ROLE_DRP; \ +-} +- +-/* Helper for preparing ucsi_control for SET_UOR command. 
*/ +-#define UCSI_CMD_SET_UOR(_ctrl_, _con_, _role_) \ +-{ \ +- __UCSI_ROLE(_ctrl_, UCSI_SET_UOR, (_con_)->num) \ +- (_ctrl_).uor.role |= (_role_) == TYPEC_HOST ? UCSI_UOR_ROLE_DFP : \ +- UCSI_UOR_ROLE_UFP; \ +-} +- +-/* Helper for preparing ucsi_control for SET_PDR command. */ +-#define UCSI_CMD_SET_PDR(_ctrl_, _con_, _role_) \ +-{ \ +- __UCSI_ROLE(_ctrl_, UCSI_SET_PDR, (_con_)->num) \ +- (_ctrl_).uor.role |= (_role_) == TYPEC_SOURCE ? UCSI_UOR_ROLE_DFP : \ +- UCSI_UOR_ROLE_UFP; \ +-} +- + /* Commands */ + #define UCSI_PPM_RESET 0x01 + #define UCSI_CANCEL 0x02 +@@ -253,28 +81,49 @@ struct ucsi_control { + #define UCSI_GET_CONNECTOR_STATUS 0x12 + #define UCSI_GET_ERROR_STATUS 0x13 + +-/* ACK_CC_CI commands */ +-#define UCSI_ACK_EVENT 1 +-#define UCSI_ACK_CMD 2 ++#define UCSI_CONNECTOR_NUMBER(_num_) ((_num_) << 16) ++ ++/* CONNECTOR_RESET command bits */ ++#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */ + +-/* Bits for ACK CC or CI */ ++/* ACK_CC_CI bits */ + #define UCSI_ACK_CONNECTOR_CHANGE BIT(16) + #define UCSI_ACK_COMMAND_COMPLETE BIT(17) + +-/* Bits for SET_NOTIFICATION_ENABLE command */ +-#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(0) +-#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(1) +-#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(2) +-#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(5) +-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(6) +-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(7) +-#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(8) +-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(9) +-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(11) +-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(12) +-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(14) +-#define UCSI_ENABLE_NTFY_ERROR BIT(15) +-#define UCSI_ENABLE_NTFY_ALL 0xdbe7 ++/* SET_NOTIFICATION_ENABLE command bits */ ++#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16) ++#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17) ++#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18) ++#define UCSI_ENABLE_NTFY_CAP_CHANGE 
BIT(19) ++#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20) ++#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21) ++#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22) ++#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23) ++#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24) ++#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25) ++#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26) ++#define UCSI_ENABLE_NTFY_ERROR BIT(27) ++#define UCSI_ENABLE_NTFY_ALL 0xdbe70000 ++ ++/* SET_UOR command bits */ ++#define UCSI_SET_UOR_ROLE(_r_) (((_r_) == TYPEC_HOST ? 1 : 2) << 23) ++#define UCSI_SET_UOR_ACCEPT_ROLE_SWAPS BIT(25) ++ ++/* SET_PDF command bits */ ++#define UCSI_SET_PDR_ROLE(_r_) (((_r_) == TYPEC_SOURCE ? 1 : 2) << 23) ++#define UCSI_SET_PDR_ACCEPT_ROLE_SWAPS BIT(25) ++ ++/* GET_ALTERNATE_MODES command bits */ ++#define UCSI_GET_ALTMODE_RECIPIENT(_r_) ((u64)(_r_) << 16) ++#define UCSI_RECIPIENT_CON 0 ++#define UCSI_RECIPIENT_SOP 1 ++#define UCSI_RECIPIENT_SOP_P 2 ++#define UCSI_RECIPIENT_SOP_PP 3 ++#define UCSI_GET_ALTMODE_CONNECTOR_NUMBER(_r_) ((u64)(_r_) << 24) ++#define UCSI_GET_ALTMODE_OFFSET(_r_) ((u64)(_r_) << 32) ++#define UCSI_GET_ALTMODE_NUM_ALTMODES(_r_) ((u64)(_r_) << 40) ++ ++/* -------------------------------------------------------------------------- */ + + /* Error information returned by PPM in response to GET_ERROR_STATUS command. 
*/ + #define UCSI_ERROR_UNREGONIZED_CMD BIT(0) +@@ -442,7 +291,7 @@ struct ucsi_connector { + struct ucsi_connector_capability cap; + }; + +-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl, ++int ucsi_send_command(struct ucsi *ucsi, u64 command, + void *retval, size_t size); + + void ucsi_altmode_update_active(struct ucsi_connector *con); +-- +2.17.1 + diff --git a/patches/0017-ASoC-Intel-Skylake-Reuse-sst_dsp_free.audio b/patches/0017-ASoC-Intel-Skylake-Reuse-sst_dsp_free.audio new file mode 100644 index 0000000000..20a32ca463 --- /dev/null +++ b/patches/0017-ASoC-Intel-Skylake-Reuse-sst_dsp_free.audio @@ -0,0 +1,56 @@ +From 9630458755da509efa15d6397185dbac3f25693b Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Thu, 15 Aug 2019 17:40:23 +0200 +Subject: [PATCH 017/193] ASoC: Intel: Skylake: Reuse sst_dsp_free + +Skylake is sst_dsp descendant. Rather than bypassing framework's flow, +embrace it. sst_dsp_free invokes sst specific handler internally so +nothing is missed. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/cnl-sst-dsp.c | 1 - + sound/soc/intel/skylake/skl-sst-dsp.c | 1 - + sound/soc/intel/skylake/skl-sst.c | 2 +- + 3 files changed, 1 insertion(+), 3 deletions(-) + +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.c b/sound/soc/intel/skylake/cnl-sst-dsp.c +index 189c1c7086e3..48b465939ef8 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.c ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.c +@@ -215,7 +215,6 @@ void cnl_dsp_free(struct sst_dsp *dsp) + sst_ipc_fini(&skl->ipc); + cnl_ipc_int_disable(dsp); + +- free_irq(dsp->irq, dsp); + cnl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK); + } + EXPORT_SYMBOL_GPL(cnl_dsp_free); +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c +index 348e69226e46..1c4ecbcd7db7 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.c ++++ b/sound/soc/intel/skylake/skl-sst-dsp.c +@@ -468,7 +468,6 @@ void skl_dsp_free(struct sst_dsp *dsp) + sst_ipc_fini(&skl->ipc); + skl_ipc_int_disable(dsp); + +- free_irq(dsp->irq, dsp); + skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK); + } + EXPORT_SYMBOL_GPL(skl_dsp_free); +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index a0b7842b08dc..598e76d2a3fc 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -622,7 +622,7 @@ void skl_sst_dsp_cleanup(struct skl_dev *skl) + skl_clear_module_table(dsp); + + list_del_init(&skl->module_list); +- dsp->ops->free(dsp); ++ sst_dsp_free(dsp); + + if (skl->boot_complete && dsp->cl_dev.bufsize) { + dsp->cl_dev.ops.cl_cleanup_controller(dsp); +-- +2.17.1 + diff --git a/patches/0017-Modify-Google-s-trusty-drivers-so-as-to-support-Int.trusty b/patches/0017-Modify-Google-s-trusty-drivers-so-as-to-support-Int.trusty new file mode 100644 index 0000000000..5b0e90c5d6 --- /dev/null +++ b/patches/0017-Modify-Google-s-trusty-drivers-so-as-to-support-Int.trusty @@ -0,0 +1,896 @@ +From 
4fa09d1a8a4e6fae6b0d6c8277ec8a003b857e67 Mon Sep 17 00:00:00 2001 +From: weideng +Date: Fri, 28 Oct 2016 13:46:37 +0800 +Subject: [PATCH 17/63] Modify Google's trusty drivers so as to support Intel + platform + +Previously, Google's trusty drivers can just work on ARM platform. With +this patch, the trusty drivers can then support Intel platform so as to +implement IPC functionality between android and lk. This patch is +implemented by Intel, and it has been verified by tipc-test32 +test cases which are also provided by Google. + +Change-Id: I7076ee23eb1eb8f1102feca4b299b34873f7f861 +Author: chunmei +Signed-off-by: kwang13 +Signed-off-by: chunmei +Signed-off-by: weideng +--- + drivers/trusty/Kconfig | 2 +- + drivers/trusty/trusty-ipc.c | 9 +- + drivers/trusty/trusty-irq.c | 102 ++++++++++++-- + drivers/trusty/trusty-log.c | 2 + + drivers/trusty/trusty-mem.c | 115 ++++++++-------- + drivers/trusty/trusty-virtio.c | 15 ++- + drivers/trusty/trusty.c | 236 +++++++++++++++++++++++++-------- + include/linux/trusty/trusty.h | 2 +- + 8 files changed, 348 insertions(+), 135 deletions(-) + +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index 052cd8e91ab0..0b6b88e3a718 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -29,7 +29,7 @@ config TRUSTY_FIQ_ARM64 + default y + + config TRUSTY_LOG +- tristate ++ tristate "Trusty Log support" + depends on TRUSTY + default y + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index 06e026344e67..7d66e9f74220 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -33,6 +33,8 @@ + + #define MAX_DEVICES 4 + ++#define VIRTIO_ID_TRUSTY_IPC 13 /* virtio trusty ipc */ ++ + #define REPLY_TIMEOUT 5000 + #define TXBUF_TIMEOUT 15000 + +@@ -172,7 +174,10 @@ static int _match_data(int id, void *p, void *data) + + static void *_alloc_shareable_mem(size_t sz, phys_addr_t *ppa, gfp_t gfp) + { +- return alloc_pages_exact(sz, gfp); ++ void *buf_va; ++ buf_va = 
alloc_pages_exact(sz, gfp); ++ *ppa = virt_to_phys(buf_va); ++ return buf_va; + } + + static void _free_shareable_mem(size_t sz, void *va, phys_addr_t pa) +@@ -1597,7 +1602,7 @@ static void tipc_virtio_remove(struct virtio_device *vdev) + _cleanup_vq(vds->txvq); + _free_msg_buf_list(&vds->free_buf_list); + +- vdev->config->del_vqs(vds->vdev); ++ vdev->config->del_vqs(vdev); + + kref_put(&vds->refcount, _free_vds); + } +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index 8d6e8afb2a2f..b325bff33774 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -12,6 +12,7 @@ + * + */ + ++#include + #include + #include + #include +@@ -22,6 +23,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include +@@ -56,6 +59,24 @@ struct trusty_irq_state { + struct notifier_block cpu_notifier; + }; + ++#define TRUSTY_VMCALL_PENDING_INTR 0x74727505 ++static inline void set_pending_intr_to_lk(uint8_t vector) ++{ ++ __asm__ __volatile__( ++ "vmcall" ++ ::"a"(TRUSTY_VMCALL_PENDING_INTR), "b"(vector) ++ ); ++} ++ ++#define TRUSTY_VMCALL_IRQ_DONE 0x74727506 ++static inline void irq_register_done(void) ++{ ++ __asm__ __volatile__( ++ "vmcall" ++ ::"a"(TRUSTY_VMCALL_IRQ_DONE) ++ ); ++} ++ + static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is, + struct trusty_irq_irqset *irqset, + bool percpu) +@@ -201,6 +222,8 @@ irqreturn_t trusty_irq_handler(int irq, void *data) + __func__, irq, trusty_irq->irq, smp_processor_id(), + trusty_irq->enable); + ++ set_pending_intr_to_lk(irq+0x30); ++ + if (trusty_irq->percpu) { + disable_percpu_irq(irq); + irqset = this_cpu_ptr(is->percpu_irqs); +@@ -348,6 +371,39 @@ static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq) + return (!ret) ? 
-EINVAL : ret; + } + ++static inline void trusty_irq_unmask(struct irq_data *data) ++{ ++ return; ++} ++ ++static inline void trusty_irq_mask(struct irq_data *data) ++{ ++ return; ++} ++ ++static void trusty_irq_enable(struct irq_data *data) ++{ ++ return; ++} ++ ++static void trusty_irq_disable(struct irq_data *data) ++{ ++ return; ++} ++ ++void trusty_irq_eoi(struct irq_data *data) ++{ ++ return; ++} ++static struct irq_chip trusty_irq_chip = { ++ .name = "TRUSY-IRQ", ++ .irq_mask = trusty_irq_mask, ++ .irq_unmask = trusty_irq_unmask, ++ .irq_enable = trusty_irq_enable, ++ .irq_disable = trusty_irq_disable, ++ .irq_eoi = trusty_irq_eoi, ++}; ++ + static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq) + { + int ret; +@@ -357,12 +413,7 @@ static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq) + + dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq); + +- irq = trusty_irq_create_irq_mapping(is, tirq); +- if (irq < 0) { +- dev_err(is->dev, +- "trusty_irq_create_irq_mapping failed (%d)\n", irq); +- return irq; +- } ++ irq = tirq; + + trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL); + if (!trusty_irq) +@@ -376,8 +427,17 @@ static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq) + hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive); + spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); + ++ ret = irq_alloc_desc_at(irq, 0); ++ if (ret >= 0) ++ irq_set_chip_and_handler_name(irq, &trusty_irq_chip, handle_edge_irq, "trusty-irq"); ++ else if (ret != -EEXIST) { ++ dev_err(is->dev, "can't allocate irq desc %d\n", ret); ++ goto err_request_irq; ++ } ++ + ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD, +- "trusty", trusty_irq); ++ "trusty-irq", trusty_irq); ++ + if (ret) { + dev_err(is->dev, "request_irq failed %d\n", ret); + goto err_request_irq; +@@ -416,6 +476,8 @@ static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq) + struct trusty_irq *trusty_irq; + struct 
trusty_irq_irqset *irqset; + ++ if (cpu >= 32) ++ return -EINVAL; + trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); + irqset = per_cpu_ptr(is->percpu_irqs, cpu); + +@@ -439,6 +501,8 @@ static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq) + for_each_possible_cpu(cpu) { + struct trusty_irq *trusty_irq; + ++ if (cpu >= 32) ++ return -EINVAL; + trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); + hlist_del(&trusty_irq->node); + } +@@ -462,11 +526,11 @@ static int trusty_irq_init_one(struct trusty_irq_state *is, + irq = trusty_smc_get_next_irq(is, irq, per_cpu); + if (irq < 0) + return irq; +- ++ dev_info(is->dev, "irq from lk = %d\n", irq); + if (per_cpu) +- ret = trusty_irq_init_per_cpu_irq(is, irq); ++ ret = trusty_irq_init_per_cpu_irq(is, irq-0x30); + else +- ret = trusty_irq_init_normal_irq(is, irq); ++ ret = trusty_irq_init_normal_irq(is, irq-0x30); + + if (ret) { + dev_warn(is->dev, +@@ -481,7 +545,6 @@ static void trusty_irq_free_irqs(struct trusty_irq_state *is) + { + struct trusty_irq *irq; + struct hlist_node *n; +- unsigned int cpu; + + hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) { + dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq); +@@ -489,6 +552,7 @@ static void trusty_irq_free_irqs(struct trusty_irq_state *is) + hlist_del(&irq->node); + kfree(irq); + } ++/* + hlist_for_each_entry_safe(irq, n, + &this_cpu_ptr(is->percpu_irqs)->inactive, + node) { +@@ -504,7 +568,7 @@ static void trusty_irq_free_irqs(struct trusty_irq_state *is) + hlist_del(&irq_tmp->node); + } + free_percpu(trusty_irq_handler_data); +- } ++ } */ + } + + static int trusty_irq_probe(struct platform_device *pdev) +@@ -557,16 +621,18 @@ static int trusty_irq_probe(struct platform_device *pdev) + for_each_possible_cpu(cpu) { + struct trusty_irq_work *trusty_irq_work; + ++ if (cpu >= 32) ++ return -EINVAL; + trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); + trusty_irq_work->is = is; + INIT_WORK(&trusty_irq_work->work, 
work_func); + } + +- for (irq = 0; irq >= 0;) +- irq = trusty_irq_init_one(is, irq, true); + for (irq = 0; irq >= 0;) + irq = trusty_irq_init_one(is, irq, false); + ++ irq_register_done(); ++ + is->cpu_notifier.notifier_call = trusty_irq_cpu_notify; + ret = register_hotcpu_notifier(&is->cpu_notifier); + if (ret) { +@@ -597,6 +663,8 @@ static int trusty_irq_probe(struct platform_device *pdev) + for_each_possible_cpu(cpu) { + struct trusty_irq_work *trusty_irq_work; + ++ if (cpu >= 32) ++ return -EINVAL; + trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); + flush_work(&trusty_irq_work->work); + } +@@ -632,6 +700,8 @@ static int trusty_irq_remove(struct platform_device *pdev) + for_each_possible_cpu(cpu) { + struct trusty_irq_work *trusty_irq_work; + ++ if (cpu >= 32) ++ return -EINVAL; + trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); + flush_work(&trusty_irq_work->work); + } +@@ -657,3 +727,7 @@ static struct platform_driver trusty_irq_driver = { + }; + + module_platform_driver(trusty_irq_driver); ++ ++ ++MODULE_LICENSE("GPL v2"); ++ +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +index e8dcced2ff1d..112287cd4739 100644 +--- a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -11,6 +11,7 @@ + * GNU General Public License for more details. + * + */ ++#include + #include + #include + #include +@@ -272,3 +273,4 @@ static struct platform_driver trusty_log_driver = { + }; + + module_platform_driver(trusty_log_driver); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c +index c55ace25beed..1317ec734315 100644 +--- a/drivers/trusty/trusty-mem.c ++++ b/drivers/trusty/trusty-mem.c +@@ -11,66 +11,68 @@ + * GNU General Public License for more details. 
+ * + */ +- ++#include + #include + #include + #include + #include ++#include ++ ++/* Normal memory */ ++#define NS_MAIR_NORMAL_CACHED_WB_RWA 0xFF /* inner and outer write back read/write allocate */ ++#define NS_MAIR_NORMAL_CACHED_WT_RA 0xAA /* inner and outer write through read allocate */ ++#define NS_MAIR_NORMAL_CACHED_WB_RA 0xEE /* inner and outer wriet back, read allocate */ ++#define NS_MAIR_NORMAL_UNCACHED 0x44 /* uncached */ + + static int get_mem_attr(struct page *page, pgprot_t pgprot) + { +-#if defined(CONFIG_ARM64) +- uint64_t mair; +- uint attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2; +- +- asm ("mrs %0, mair_el1\n" : "=&r" (mair)); +- return (mair >> (attr_index * 8)) & 0xff; +- +-#elif defined(CONFIG_ARM_LPAE) +- uint32_t mair; +- uint attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2); +- +- if (attr_index >= 4) { +- attr_index -= 4; +- asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair)); +- } else { +- asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair)); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) ++ /* The porting to CHT kernel (3.14.55) is in the #else clause. ++ ** For BXT kernel (4.1.0), the function get_page_memtype() is static. ++ ** ++ ** The orignal google code (for arm) getst the cache states and page ++ ** flags from input parameter "pgprot", which is not prefered in x86. ++ ** In x86, both cache states and page flags should be got from input ++ ** parameter "page". But, since current caller of trusty_call32_mem_buf() ++ ** always allocate memory in kernel heap, it is also ok to use hardcode ++ ** here. ++ ** ++ ** The memory allocated in kernel heap should be CACHED. The reason to ++ ** return UNCACHED here is to pass the check in LK sm_decode_ns_memory_attr() ++ ** with SMP, which only allow UNCACHED. 
++ */ ++ return NS_MAIR_NORMAL_UNCACHED; ++#else ++ unsigned long type; ++ int ret_mem_attr = 0; ++ ++ type = get_page_memtype(page); ++ /* ++ * -1 from get_page_memtype() implies RAM page is in its ++ * default state and not reserved, and hence of type WB ++ */ ++ if (type == -1) { ++ type = _PAGE_CACHE_MODE_WB; + } +- return (mair >> (attr_index * 8)) & 0xff; +- +-#elif defined(CONFIG_ARM) +- /* check memory type */ +- switch (pgprot_val(pgprot) & L_PTE_MT_MASK) { +- case L_PTE_MT_WRITEALLOC: +- /* Normal: write back write allocate */ +- return 0xFF; +- +- case L_PTE_MT_BUFFERABLE: +- /* Normal: non-cacheble */ +- return 0x44; +- +- case L_PTE_MT_WRITEBACK: +- /* Normal: writeback, read allocate */ +- return 0xEE; +- +- case L_PTE_MT_WRITETHROUGH: +- /* Normal: write through */ +- return 0xAA; +- +- case L_PTE_MT_UNCACHED: +- /* strongly ordered */ +- return 0x00; +- +- case L_PTE_MT_DEV_SHARED: +- case L_PTE_MT_DEV_NONSHARED: +- /* device */ +- return 0x04; ++ switch (type) { ++ case _PAGE_CACHE_MODE_UC_MINUS: ++ /* uncacheable */ ++ ret_mem_attr = NS_MAIR_NORMAL_UNCACHED; ++ break; ++ case _PAGE_CACHE_MODE_WB: ++ /* writeback */ ++ ret_mem_attr = NS_MAIR_NORMAL_CACHED_WB_RWA; ++ break; ++ case _PAGE_CACHE_MODE_WC: ++ /* write combined */ ++ ret_mem_attr = NS_MAIR_NORMAL_UNCACHED; ++ break; + + default: +- return -EINVAL; ++ printk(KERN_ERR "%s(): invalid type: 0x%x\n", __func__, type); ++ ret_mem_attr = -EINVAL; + } +-#else +- return 0; ++ return ret_mem_attr; + #endif + } + +@@ -90,18 +92,10 @@ int trusty_encode_page_info(struct ns_mem_page_info *inf, + mem_attr = get_mem_attr(page, pgprot); + if (mem_attr < 0) + return mem_attr; +- +- /* add other attributes */ +-#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE) +- pte |= pgprot_val(pgprot); +-#elif defined(CONFIG_ARM) +- if (pgprot_val(pgprot) & L_PTE_USER) ++ if (pgprot_val(pgprot) & _PAGE_USER) + pte |= (1 << 6); +- if (pgprot_val(pgprot) & L_PTE_RDONLY) ++ if (!(pgprot_val(pgprot) & _PAGE_RW)) + pte 
|= (1 << 7); +- if (pgprot_val(pgprot) & L_PTE_SHARED) +- pte |= (3 << 8); /* inner sharable */ +-#endif + + inf->attr = (pte & 0x0000FFFFFFFFFFFFull) | ((uint64_t)mem_attr << 48); + return 0; +@@ -131,4 +125,5 @@ int trusty_call32_mem_buf(struct device *dev, u32 smcnr, + (u32)(pg_inf.attr >> 32), size); + } + } +- ++EXPORT_SYMBOL(trusty_call32_mem_buf); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index fabbf29bffcc..f00c4ece03bf 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -206,22 +206,23 @@ static void trusty_virtio_reset(struct virtio_device *vdev) + dev_dbg(&vdev->dev, "reset vdev_id=%d\n", tvdev->notifyid); + trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_RESET, + tvdev->notifyid, 0, 0); ++ vdev->config->set_status(vdev, 0); + } + + static u64 trusty_virtio_get_features(struct virtio_device *vdev) + { + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); +- return tvdev->vdev_descr->dfeatures; ++ return ((u64)tvdev->vdev_descr->dfeatures) & 0x00000000FFFFFFFFULL; + } + + static int trusty_virtio_finalize_features(struct virtio_device *vdev) + { + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); +- ++ + /* Make sure we don't have any features > 32 bits! 
*/ + BUG_ON((u32)vdev->features != vdev->features); + +- tvdev->vdev_descr->gfeatures = vdev->features; ++ tvdev->vdev_descr->gfeatures = (u32)(vdev->features); + return 0; + } + +@@ -381,6 +382,12 @@ static const struct virtio_config_ops trusty_virtio_config_ops = { + .bus_name = trusty_virtio_bus_name, + }; + ++void virtio_vdev_release(struct device *dev) ++{ ++ dev_dbg(dev, "%s() is called\n", __func__); ++ return; ++} ++ + static int trusty_virtio_add_device(struct trusty_ctx *tctx, + struct fw_rsc_vdev *vdev_descr, + struct fw_rsc_vdev_vring *vr_descr, +@@ -400,6 +407,7 @@ static int trusty_virtio_add_device(struct trusty_ctx *tctx, + /* setup vdev */ + tvdev->tctx = tctx; + tvdev->vdev.dev.parent = tctx->dev; ++ tvdev->vdev.dev.release = virtio_vdev_release; + tvdev->vdev.id.device = vdev_descr->id; + tvdev->vdev.config = &trusty_virtio_config_ops; + tvdev->vdev_descr = vdev_descr; +@@ -677,6 +685,7 @@ static const struct of_device_id trusty_of_match[] = { + { + .compatible = "android,trusty-virtio-v1", + }, ++ {}, + }; + + MODULE_DEVICE_TABLE(of, trusty_of_match); +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 6fcd5481ac88..12a90224eb27 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -12,7 +12,6 @@ + * + */ + +-#include + #include + #include + #include +@@ -20,11 +19,14 @@ + #include + #include + #include ++#include + #include + #include + #include + #include + ++#define TRUSTY_VMCALL_SMC 0x74727500 ++ + struct trusty_state { + struct mutex smc_lock; + struct atomic_notifier_head notifier; +@@ -33,56 +35,57 @@ struct trusty_state { + u32 api_version; + }; + +-#ifdef CONFIG_ARM64 +-#define SMC_ARG0 "x0" +-#define SMC_ARG1 "x1" +-#define SMC_ARG2 "x2" +-#define SMC_ARG3 "x3" +-#define SMC_ARCH_EXTENSION "" +-#define SMC_REGISTERS_TRASHED "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", \ +- "x12", "x13", "x14", "x15", "x16", "x17" +-#else +-#define SMC_ARG0 "r0" +-#define SMC_ARG1 "r1" +-#define SMC_ARG2 "r2" 
+-#define SMC_ARG3 "r3" +-#define SMC_ARCH_EXTENSION ".arch_extension sec\n" +-#define SMC_REGISTERS_TRASHED "ip" +-#endif ++struct trusty_smc_interface { ++ struct device *dev; ++ ulong args[5]; ++}; + + static inline ulong smc(ulong r0, ulong r1, ulong r2, ulong r3) + { +- register ulong _r0 asm(SMC_ARG0) = r0; +- register ulong _r1 asm(SMC_ARG1) = r1; +- register ulong _r2 asm(SMC_ARG2) = r2; +- register ulong _r3 asm(SMC_ARG3) = r3; +- +- asm volatile( +- __asmeq("%0", SMC_ARG0) +- __asmeq("%1", SMC_ARG1) +- __asmeq("%2", SMC_ARG2) +- __asmeq("%3", SMC_ARG3) +- __asmeq("%4", SMC_ARG0) +- __asmeq("%5", SMC_ARG1) +- __asmeq("%6", SMC_ARG2) +- __asmeq("%7", SMC_ARG3) +- SMC_ARCH_EXTENSION +- "smc #0" /* switch to secure world */ +- : "=r" (_r0), "=r" (_r1), "=r" (_r2), "=r" (_r3) +- : "r" (_r0), "r" (_r1), "r" (_r2), "r" (_r3) +- : SMC_REGISTERS_TRASHED); +- return _r0; ++ __asm__ __volatile__( ++ "vmcall; \n" ++ :"=D"(r0) ++ :"a"(TRUSTY_VMCALL_SMC), "D"(r0), "S"(r1), "d"(r2), "b"(r3) ++ ); ++ return r0; + } + +-s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) ++static void trusty_fast_call32_remote(void *args) + { ++ struct trusty_smc_interface *p_args = args; ++ struct device *dev = p_args->dev; ++ ulong smcnr = p_args->args[0]; ++ ulong a0 = p_args->args[1]; ++ ulong a1 = p_args->args[2]; ++ ulong a2 = p_args->args[3]; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + BUG_ON(!s); + BUG_ON(!SMC_IS_FASTCALL(smcnr)); + BUG_ON(SMC_IS_SMC64(smcnr)); + +- return smc(smcnr, a0, a1, a2); ++ p_args->args[4] = smc(smcnr, a0, a1, a2); ++} ++ ++s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) ++{ ++ int cpu = 0; ++ int ret = 0; ++ struct trusty_smc_interface s; ++ s.dev = dev; ++ s.args[0] = smcnr; ++ s.args[1] = a0; ++ s.args[2] = a1; ++ s.args[3] = a2; ++ s.args[4] = 0; ++ ++ ret = smp_call_function_single(cpu, trusty_fast_call32_remote, (void *)&s, 1); ++ ++ if (ret) { ++ pr_err("%s: 
smp_call_function_single failed: %d\n", __func__, ret); ++ } ++ ++ return s.args[4]; + } + EXPORT_SYMBOL(trusty_fast_call32); + +@@ -122,21 +125,59 @@ static ulong trusty_std_call_inner(struct device *dev, ulong smcnr, + return ret; + } + ++static void trusty_std_call_inner_wrapper_remote(void *args) ++{ ++ struct trusty_smc_interface *p_args = args; ++ struct device *dev = p_args->dev; ++ ulong smcnr = p_args->args[0]; ++ ulong a0 = p_args->args[1]; ++ ulong a1 = p_args->args[2]; ++ ulong a2 = p_args->args[3]; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ulong ret; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE, ++ NULL); ++ ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2); ++ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED, ++ NULL); ++ local_irq_restore(flags); ++ ++ p_args->args[4] = ret; ++} ++ ++static ulong trusty_std_call_inner_wrapper(struct device *dev, ulong smcnr, ++ ulong a0, ulong a1, ulong a2) ++{ ++ int cpu = 0; ++ int ret = 0; ++ struct trusty_smc_interface s; ++ s.dev = dev; ++ s.args[0] = smcnr; ++ s.args[1] = a0; ++ s.args[2] = a1; ++ s.args[3] = a2; ++ s.args[4] = 0; ++ ++ ret = smp_call_function_single(cpu, trusty_std_call_inner_wrapper_remote, (void *)&s, 1); ++ ++ if (ret) { ++ pr_err("%s: smp_call_function_single failed: %d\n", __func__, ret); ++ } ++ ++ return s.args[4]; ++} ++ + static ulong trusty_std_call_helper(struct device *dev, ulong smcnr, + ulong a0, ulong a1, ulong a2) + { + ulong ret; + int sleep_time = 1; +- struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + while (true) { +- local_irq_disable(); +- atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE, +- NULL); +- ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2); +- atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED, +- NULL); +- local_irq_enable(); ++ ret = trusty_std_call_inner_wrapper(dev, smcnr, a0, a1, a2); 
+ + if ((int)ret != SM_ERR_BUSY) + break; +@@ -173,6 +214,9 @@ static void trusty_std_call_cpu_idle(struct trusty_state *s) + } + } + ++/* must set CONFIG_DEBUG_ATOMIC_SLEEP=n ++** otherwise mutex_lock() will fail and crash ++*/ + s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + { + int ret; +@@ -230,6 +274,7 @@ EXPORT_SYMBOL(trusty_call_notifier_unregister); + + static int trusty_remove_child(struct device *dev, void *data) + { ++ dev_dbg(dev, "%s() is called()\n", __func__); + platform_device_unregister(to_platform_device(dev)); + return 0; + } +@@ -265,6 +310,8 @@ static void trusty_init_version(struct trusty_state *s, struct device *dev) + version_str_len = ret; + + s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL); ++ if (!s->version_str) ++ goto err_get_size; + for (i = 0; i < version_str_len; i++) { + ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0); + if (ret < 0) +@@ -344,15 +391,8 @@ static int trusty_probe(struct platform_device *pdev) + if (ret < 0) + goto err_api_version; + +- ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); +- if (ret < 0) { +- dev_err(&pdev->dev, "Failed to add children: %d\n", ret); +- goto err_add_children; +- } +- + return 0; + +-err_add_children: + err_api_version: + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); +@@ -369,6 +409,8 @@ static int trusty_remove(struct platform_device *pdev) + { + struct trusty_state *s = platform_get_drvdata(pdev); + ++ dev_dbg(&(pdev->dev), "%s() is called\n", __func__); ++ + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); + mutex_destroy(&s->smc_lock); + if (s->version_str) { +@@ -394,15 +436,101 @@ static struct platform_driver trusty_driver = { + }, + }; + ++void trusty_dev_release(struct device *dev) ++{ ++ dev_dbg(dev, "%s() is called()\n", __func__); ++ return; ++} ++ ++static struct device_node trusty_irq_node = { ++ .name = "trusty-irq", ++ .sibling = NULL, ++}; ++ 
++static struct device_node trusty_virtio_node = { ++ .name = "trusty-virtio", ++ .sibling = &trusty_irq_node, ++}; ++ ++static struct device_node trusty_log_node = { ++ .name = "trusty-log", ++ .sibling = &trusty_virtio_node, ++}; ++ ++ ++static struct device_node trusty_node = { ++ .name = "trusty", ++ .child = &trusty_log_node, ++}; ++ ++static struct platform_device trusty_platform_dev = { ++ .name = "trusty", ++ .id = -1, ++ .num_resources = 0, ++ .dev = { ++ .release = trusty_dev_release, ++ .of_node = &trusty_node, ++ }, ++}; ++static struct platform_device trusty_platform_dev_log = { ++ .name = "trusty-log", ++ .id = -1, ++ .num_resources = 0, ++ .dev = { ++ .release = trusty_dev_release, ++ .parent = &trusty_platform_dev.dev, ++ .of_node = &trusty_log_node, ++ }, ++}; ++ ++static struct platform_device trusty_platform_dev_virtio = { ++ .name = "trusty-virtio", ++ .id = -1, ++ .num_resources = 0, ++ .dev = { ++ .release = trusty_dev_release, ++ .parent = &trusty_platform_dev.dev, ++ .of_node = &trusty_virtio_node, ++ }, ++}; ++ ++static struct platform_device trusty_platform_dev_irq = { ++ .name = "trusty-irq", ++ .id = -1, ++ .num_resources = 0, ++ .dev = { ++ .release = trusty_dev_release, ++ .parent = &trusty_platform_dev.dev, ++ .of_node = &trusty_irq_node, ++ }, ++}; ++ ++static struct platform_device *trusty_devices[] __initdata = { ++ &trusty_platform_dev, ++ &trusty_platform_dev_log, ++ &trusty_platform_dev_virtio, ++ &trusty_platform_dev_irq ++}; + static int __init trusty_driver_init(void) + { ++ int ret = 0; ++ ++ ret = platform_add_devices(trusty_devices, ARRAY_SIZE(trusty_devices)); ++ if (ret) { ++ printk(KERN_ERR "%s(): platform_add_devices() failed, ret %d\n", __func__, ret); ++ return ret; ++ } + return platform_driver_register(&trusty_driver); + } + + static void __exit trusty_driver_exit(void) + { + platform_driver_unregister(&trusty_driver); ++ platform_device_unregister(&trusty_platform_dev); + } + + subsys_initcall(trusty_driver_init); 
+ module_exit(trusty_driver_exit); ++ ++MODULE_LICENSE("GPL"); ++ +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index 24fe2101a528..74598389c308 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -20,7 +20,7 @@ + #include + + +-#ifdef CONFIG_TRUSTY ++#if IS_ENABLED(CONFIG_TRUSTY) + s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2); + s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2); + #ifdef CONFIG_64BIT +-- +2.17.1 + diff --git a/patches/0017-VBS-K-added-a-VBS-K-reference-driver.acrn b/patches/0017-VBS-K-added-a-VBS-K-reference-driver.acrn new file mode 100644 index 0000000000..af0cad8b8c --- /dev/null +++ b/patches/0017-VBS-K-added-a-VBS-K-reference-driver.acrn @@ -0,0 +1,664 @@ +From 3e9cca73db6d0a3e674c4096bb21cf2eeb63a04b Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Fri, 31 Aug 2018 10:58:56 +0800 +Subject: [PATCH 017/150] VBS-K: added a VBS-K reference driver. + +This patch implemented a VBS-K reference driver: virtio RNG. + +This reference driver shows how to use VBS-K APIs to implement virtio +backend drivers for ACRN hypervisor. + +Key points from the reference driver: + - Each VBS-K driver exports a char device to /dev/, e.g. /dev/vbs_rng; + - Each VBS-K driver should use Virtqueue APIs to interact with the + virtio frontend driver in guest; + - Each VBS-K driver could register itelf as VHM (Virtio and Hypervisor + service Module) client, and uses VHM API to handle frontend's register + access to backend; + - Each VBS-K driver could maintain the connections, from VBS-U, in a + list/table, so that it could serve multiple guests. + Sometimes even single guest could have multiple connections from + VBS-U, depending on the device type. + The reference driver shows how to maintain connections in a hashtable. 
+ +Change-Id: Id590930d2f64d391ceb18c6ef491ec48412a89d8 +Tracked-On:218445 +Signed-off-by: Hao Li +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/vbs/Kconfig | 11 + + drivers/vbs/Makefile | 2 + + drivers/vbs/vbs_rng.c | 589 ++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 602 insertions(+) + create mode 100644 drivers/vbs/vbs_rng.c + +diff --git a/drivers/vbs/Kconfig b/drivers/vbs/Kconfig +index c48f306177c3..da189ec0eea4 100644 +--- a/drivers/vbs/Kconfig ++++ b/drivers/vbs/Kconfig +@@ -18,3 +18,14 @@ config VBS_DEBUG + ---help--- + This is an option for use by developers; most people should + say N here. This enables ACRN VBS debugging. ++ ++config VBS_RNG ++ tristate "ACRN VBS reference driver: virtio RNG" ++ depends on VBS != n ++ default n ++ ---help--- ++ Say M or * here to enable a VBS-K reference driver for ACRN ++ hypervisor, virtio RNG driver, to work with virtio-rng ++ frontend driver in guest. ++ The reference driver shows an example on how to use VBS-K ++ APIs. +diff --git a/drivers/vbs/Makefile b/drivers/vbs/Makefile +index cbd5076e2313..85e1cc252197 100644 +--- a/drivers/vbs/Makefile ++++ b/drivers/vbs/Makefile +@@ -2,3 +2,5 @@ ccflags-$(CONFIG_VBS_DEBUG) := -DDEBUG + + obj-$(CONFIG_VBS) += vbs.o + obj-$(CONFIG_VBS) += vq.o ++ ++obj-$(CONFIG_VBS_RNG) += vbs_rng.o +diff --git a/drivers/vbs/vbs_rng.c b/drivers/vbs/vbs_rng.c +new file mode 100644 +index 000000000000..ef6f8776e71c +--- /dev/null ++++ b/drivers/vbs/vbs_rng.c +@@ -0,0 +1,589 @@ ++/* ++ * ACRN Project ++ * Virtio Backend Service (VBS) for ACRN hypervisor ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: Hao Li ++ * ++ * BSD LICENSE ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * ++ * Hao Li ++ * VBS-K Reference Driver: virtio-rng ++ * - Each VBS-K driver exports a char device to /dev/, e.g. /dev/vbs_rng; ++ * - Each VBS-K driver uses Virtqueue APIs to interact with the virtio ++ * frontend driver in guest; ++ * - Each VBS-K driver registers itelf as VHM (Virtio and Hypervisor ++ * service Module) client, which enables in-kernel handling of register ++ * access of virtio device; ++ * - Each VBS-K driver could maintain the connections, from VBS-U, in a ++ * list/table, so that it could serve multiple guests. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++enum { ++ VBS_K_RNG_VQ = 0, ++ VBS_K_RNG_VQ_MAX = 1, ++}; ++ ++#define VTRND_RINGSZ 64 ++ ++/* VBS-K features if any */ ++/* ++ *enum { ++ * VBS_K_RNG_FEATURES = VBS_K_FEATURES | ++ * (1ULL << VIRTIO_F_VERSION_1), ++ *}; ++ */ ++ ++struct vbs_rng { ++ struct virtio_dev_info dev; ++ struct virtio_vq_info vqs[VBS_K_RNG_VQ_MAX]; ++ int vhm_client_id; ++ /* Below could be device specific members */ ++ struct hwrng hwrng; ++}; ++ ++/* ++ * Each VBS-K module might serve multiple connections from multiple ++ * guests/device models/VBS-Us, so better to maintain the connections ++ * in a list, and here we use hashtalble as an example. 
++ */ ++struct vbs_rng_client { ++ struct vbs_rng *rng; ++ int vhm_client_id; ++ int max_vcpu; ++ struct vhm_request *req_buf; ++}; ++ ++/* instances malloced/freed by hashtable routines */ ++struct vbs_rng_hash_entry { ++ struct vbs_rng_client *info; ++ struct hlist_node node; ++}; ++ ++#define RNG_MAX_HASH_BITS 4 /* MAX is 2^4 */ ++#define HASH_NAME vbs_rng_hash ++ ++DECLARE_HASHTABLE(HASH_NAME, RNG_MAX_HASH_BITS); ++static int vbs_rng_hash_initialized = 0; ++static int vbs_rng_connection_cnt = 0; ++ ++/* function declarations */ ++static int handle_kick(int client_id, int req_cnt); ++static void vbs_rng_reset(struct vbs_rng *rng); ++static void vbs_rng_disable_vq(struct vbs_rng *rng, ++ struct virtio_vq_info *vq); ++static int vbs_rng_enable_vq(struct vbs_rng *rng, ++ struct virtio_vq_info *vq); ++static void vbs_rng_stop_vq(struct vbs_rng *rng, ++ struct virtio_vq_info *vq); ++static void vbs_rng_stop(struct vbs_rng *rng); ++static void vbs_rng_flush_vq(struct vbs_rng *rng, int index); ++static void vbs_rng_flush(struct vbs_rng *rng); ++ ++/* hash table related functions */ ++static void vbs_rng_hash_init(void) ++{ ++ if (vbs_rng_hash_initialized) ++ return; ++ ++ hash_init(HASH_NAME); ++ vbs_rng_hash_initialized = 1; ++} ++ ++static int vbs_rng_hash_add(struct vbs_rng_client *client) ++{ ++ struct vbs_rng_hash_entry *entry; ++ ++ if (!vbs_rng_hash_initialized) { ++ pr_err("RNG hash table not initialized!\n"); ++ return -1; ++ } ++ ++ entry = kmalloc(sizeof(*entry), GFP_KERNEL); ++ if (!entry) { ++ pr_err("Failed to alloc memory for rng hash entry!\n"); ++ return -1; ++ } ++ ++ entry->info = client; ++ ++ hash_add(HASH_NAME, &entry->node, entry->info->vhm_client_id); ++ return 0; ++} ++ ++static struct vbs_rng_client *vbs_rng_hash_find(int client_id) ++{ ++ struct vbs_rng_hash_entry *entry; ++ int bkt; ++ ++ if (!vbs_rng_hash_initialized) { ++ pr_err("RNG hash table not initialized!\n"); ++ return NULL; ++ } ++ ++ hash_for_each(HASH_NAME, bkt, entry, node) ++ 
if (entry->info->vhm_client_id == client_id) ++ return entry->info; ++ ++ pr_err("Not found item matching client_id!\n"); ++ return NULL; ++} ++ ++static int vbs_rng_hash_del(int client_id) ++{ ++ struct vbs_rng_hash_entry *entry; ++ int bkt; ++ ++ if (!vbs_rng_hash_initialized) { ++ pr_err("RNG hash table not initialized!\n"); ++ return -1; ++ } ++ ++ hash_for_each(HASH_NAME, bkt, entry, node) ++ if (entry->info->vhm_client_id == client_id) { ++ hash_del(&entry->node); ++ kfree(entry); ++ return 0; ++ } ++ ++ pr_err("%s failed, not found matching client_id!\n", ++ __func__); ++ return -1; ++} ++ ++static int vbs_rng_hash_del_all(void) ++{ ++ struct vbs_rng_hash_entry *entry; ++ int bkt; ++ ++ if (!vbs_rng_hash_initialized) { ++ pr_err("RNG hash table not initialized!\n"); ++ return -1; ++ } ++ ++ hash_for_each(HASH_NAME, bkt, entry, node) ++ if (1) { ++ hash_del(&entry->node); ++ kfree(entry); ++ } ++ ++ return 0; ++} ++ ++static int register_vhm_client(struct virtio_dev_info *dev) ++{ ++ unsigned int vmid; ++ struct vm_info info; ++ struct vbs_rng_client *client; ++ int ret; ++ ++ client = kcalloc(1, sizeof(*client), GFP_KERNEL); ++ if (!client) { ++ pr_err("failed to malloc vbs_rng_client!\n"); ++ return -EINVAL; ++ } ++ ++ client->rng = container_of(dev, struct vbs_rng, dev); ++ vmid = dev->_ctx.vmid; ++ pr_debug("vmid is %d\n", vmid); ++ ++ client->vhm_client_id = acrn_ioreq_create_client(vmid, handle_kick, ++ "vbs_rng kick init\n"); ++ if (client->vhm_client_id < 0) { ++ pr_err("failed to create client of acrn ioreq!\n"); ++ goto err; ++ } ++ ++ ret = acrn_ioreq_add_iorange(client->vhm_client_id, ++ dev->io_range_type ? 
REQ_MMIO : REQ_PORTIO, ++ dev->io_range_start, ++ dev->io_range_start + dev->io_range_len); ++ if (ret < 0) { ++ pr_err("failed to add iorange to acrn ioreq!\n"); ++ goto err; ++ } ++ ++ /* feed up max_cpu and req_buf */ ++ ret = vhm_get_vm_info(vmid, &info); ++ if (ret < 0) { ++ pr_err("failed in vhm_get_vm_info!\n"); ++ goto err; ++ } ++ client->max_vcpu = info.max_vcpu; ++ ++ client->req_buf = acrn_ioreq_get_reqbuf(client->vhm_client_id); ++ if (client->req_buf == NULL) { ++ pr_err("failed in acrn_ioreq_get_reqbuf!\n"); ++ goto err; ++ } ++ ++ /* just attach once as vhm will kick kthread */ ++ acrn_ioreq_attach_client(client->vhm_client_id, 0); ++ ++ client->rng->vhm_client_id = client->vhm_client_id; ++ vbs_rng_hash_add(client); ++ ++ return 0; ++err: ++ acrn_ioreq_destroy_client(client->vhm_client_id); ++ kfree(client); ++ ++ return -EINVAL; ++} ++ ++static void handle_vq_kick(struct vbs_rng *rng, int vq_idx) ++{ ++ struct iovec iov; ++ struct vbs_rng *sc; ++ struct virtio_vq_info *vq; ++ int len; ++ uint16_t idx; ++ ++ pr_debug("%s: vq_idx %d\n", __func__, vq_idx); ++ ++ sc = rng; ++ ++ if (!sc) { ++ pr_err("rng is NULL! Cannot proceed!\n"); ++ return; ++ } ++ ++ vq = &(sc->vqs[vq_idx]); ++ ++ pr_debug("before vq_has_desc!\n"); ++ ++ while (virtio_vq_has_descs(vq)) { ++ virtio_vq_getchain(vq, &idx, &iov, 1, NULL); ++ ++ /* device specific operations, for example: */ ++ /* len = read(sc->vrsc_fd, iov.iov_base, iov.iov_len); */ ++ pr_debug("iov base %p len %lx\n", iov.iov_base, iov.iov_len); ++ ++ /* let's generate some cool data... :-) */ ++ len = iov.iov_len; ++ ++ pr_debug("vtrnd: vtrnd_notify(): %d\r\n", len); ++ ++ /* ++ * Release this chain and handle more ++ */ ++ virtio_vq_relchain(vq, idx, len); ++ } ++ virtio_vq_endchains(vq, 1); /* Generate interrupt if appropriate. 
*/ ++} ++ ++static int handle_kick(int client_id, int req_cnt) ++{ ++ int val = -1; ++ struct vhm_request *req; ++ struct vbs_rng_client *client; ++ int i; ++ ++ if (unlikely(req_cnt <= 0)) ++ return -EINVAL; ++ ++ pr_debug("%s!\n", __func__); ++ ++ client = vbs_rng_hash_find(client_id); ++ if (!client) { ++ pr_err("Ooops! client %d not found!\n", client_id); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < client->max_vcpu; i++) { ++ req = &client->req_buf[i]; ++ if (req->valid && req->processed == REQ_STATE_PROCESSING && ++ req->client == client->vhm_client_id) { ++ if (req->reqs.pio_request.direction == REQUEST_READ) ++ /* currently we handle kick only, ++ * so read will return 0 ++ */ ++ req->reqs.pio_request.value = 0; ++ else ++ val = req->reqs.pio_request.value; ++ pr_debug("%s: ioreq type %d, direction %d, " ++ "addr 0x%lx, size 0x%lx, value 0x%x\n", ++ __func__, ++ req->type, ++ req->reqs.pio_request.direction, ++ req->reqs.pio_request.address, ++ req->reqs.pio_request.size, ++ req->reqs.pio_request.value); ++ req->processed = REQ_STATE_SUCCESS; ++ acrn_ioreq_complete_request(client->vhm_client_id, ++ 1 << i); ++ } ++ } ++ ++ if (val >= 0) ++ handle_vq_kick(client->rng, val); ++ return 0; ++} ++ ++static int vbs_rng_open(struct inode *inode, struct file *f) ++{ ++ struct vbs_rng *rng; ++ struct virtio_dev_info *dev; ++ struct virtio_vq_info *vqs; ++ int i; ++ ++ pr_debug("%s!\n", __func__); ++ ++ rng = kmalloc(sizeof(*rng), GFP_KERNEL); ++ if (!rng) { ++ pr_err("Failed to allocate memory for vbs_rng!\n"); ++ return -ENOMEM; ++ } ++ ++ dev = &rng->dev; ++ vqs = (struct virtio_vq_info *)&rng->vqs; ++ ++ for (i = 0; i < VBS_K_RNG_VQ_MAX; i++) { ++ vqs[i].dev = dev; ++ /* ++ * Currently relies on VHM to kick us, ++ * thus vq_notify not used ++ */ ++ vqs[i].vq_notify = NULL; ++ } ++ ++ /* link dev and vqs */ ++ dev->vqs = vqs; ++ ++ virtio_dev_init(dev, vqs, VBS_K_RNG_VQ_MAX); ++ ++ f->private_data = rng; ++ vbs_rng_hash_init(); ++ ++ return 0; ++} ++ ++static int 
vbs_rng_release(struct inode *inode, struct file *f) ++{ ++ struct vbs_rng *rng = f->private_data; ++ struct vbs_rng_client *client; ++ int i; ++ ++ pr_debug("%s!\n", __func__); ++ ++ client = vbs_rng_hash_find(rng->vhm_client_id); ++ if (!client) ++ pr_err("%s: UNLIKELY not found client!\n", ++ __func__); ++ ++ vbs_rng_stop(rng); ++ vbs_rng_flush(rng); ++ for (i = 0; i < VBS_K_RNG_VQ_MAX; i++) ++ virtio_vq_reset(&(rng->vqs[i])); ++ ++ /* device specific release */ ++ vbs_rng_reset(rng); ++ ++ pr_debug("vbs_rng_connection cnt is %d\n", vbs_rng_connection_cnt); ++ ++ if (client && vbs_rng_connection_cnt--) ++ vbs_rng_hash_del(client->vhm_client_id); ++ if (!vbs_rng_connection_cnt) { ++ pr_debug("vbs_rng remove all hash entries\n"); ++ vbs_rng_hash_del_all(); ++ } ++ ++ kfree(client); ++ kfree(rng); ++ ++ pr_debug("%s done\n", __func__); ++ return 0; ++} ++ ++static struct hwrng get_hwrng(struct vbs_rng *rng) ++{ ++ return rng->hwrng; ++} ++ ++/* Set feature bits in kernel side device */ ++static int vbs_rng_set_features(struct vbs_rng *rng, u64 features) ++{ ++ return 0; ++} ++ ++static long vbs_rng_ioctl(struct file *f, unsigned int ioctl, ++ unsigned long arg) ++{ ++ struct vbs_rng *rng = f->private_data; ++ void __user *argp = (void __user *)arg; ++ /*u64 __user *featurep = argp;*/ ++ /*u64 features;*/ ++ int r; ++ ++ switch (ioctl) { ++/* ++ * case VHOST_GET_FEATURES: ++ * features = VHOST_NET_FEATURES; ++ * if (copy_to_user(featurep, &features, sizeof features)) ++ * return -EFAULT; ++ * return 0; ++ * case VHOST_SET_FEATURES: ++ * if (copy_from_user(&features, featurep, sizeof features)) ++ * return -EFAULT; ++ * if (features & ~VHOST_NET_FEATURES) ++ * return -EOPNOTSUPP; ++ * return vhost_net_set_features(n, features); ++ */ ++ case VBS_SET_VQ: ++ /* we handle this here because we want to register VHM client ++ * after handling VBS_K_SET_VQ request ++ */ ++ pr_debug("VBS_K_SET_VQ ioctl:\n"); ++ r = virtio_vqs_ioctl(&rng->dev, ioctl, argp); ++ if (r == 
-ENOIOCTLCMD) { ++ pr_err("VBS_K_SET_VQ: virtio_vqs_ioctl failed!\n"); ++ return -EFAULT; ++ } ++ /* Register VHM client */ ++ if (register_vhm_client(&rng->dev) < 0) { ++ pr_err("failed to register VHM client!\n"); ++ return -EFAULT; ++ } ++ vbs_rng_connection_cnt++; ++ return r; ++ default: ++ /*mutex_lock(&n->dev.mutex);*/ ++ pr_debug("VBS_K generic ioctls!\n"); ++ r = virtio_dev_ioctl(&rng->dev, ioctl, argp); ++ if (r == -ENOIOCTLCMD) ++ r = virtio_vqs_ioctl(&rng->dev, ioctl, argp); ++ else ++ vbs_rng_flush(rng); ++ /*mutex_unlock(&n->dev.mutex);*/ ++ return r; ++ } ++} ++ ++/* device specific function to cleanup itself */ ++static void vbs_rng_reset(struct vbs_rng *rng) ++{ ++} ++ ++/* device specific function */ ++static void vbs_rng_disable_vq(struct vbs_rng *rng, ++ struct virtio_vq_info *vq) ++{ ++} ++ ++/* device specific function */ ++static int vbs_rng_enable_vq(struct vbs_rng *rng, ++ struct virtio_vq_info *vq) ++{ ++ return 0; ++} ++ ++/* device specific function */ ++static void vbs_rng_stop_vq(struct vbs_rng *rng, ++ struct virtio_vq_info *vq) ++{ ++} ++ ++/* device specific function */ ++static void vbs_rng_stop(struct vbs_rng *rng) ++{ ++} ++ ++/* device specific function */ ++static void vbs_rng_flush_vq(struct vbs_rng *rng, int index) ++{ ++} ++ ++/* device specific function */ ++static void vbs_rng_flush(struct vbs_rng *rng) ++{ ++} ++ ++static const struct file_operations vbs_rng_fops = { ++ .owner = THIS_MODULE, ++ .release = vbs_rng_release, ++ .unlocked_ioctl = vbs_rng_ioctl, ++ .open = vbs_rng_open, ++ .llseek = noop_llseek, ++}; ++ ++static struct miscdevice vbs_rng_misc = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "vbs_rng", ++ .fops = &vbs_rng_fops, ++}; ++ ++static int vbs_rng_init(void) ++{ ++ return misc_register(&vbs_rng_misc); ++} ++module_init(vbs_rng_init); ++ ++static void vbs_rng_exit(void) ++{ ++ misc_deregister(&vbs_rng_misc); ++} ++module_exit(vbs_rng_exit); ++ ++MODULE_VERSION("0.1"); ++MODULE_AUTHOR("Intel Corporation"); 
++MODULE_LICENSE("GPL and additional rights"); ++MODULE_DESCRIPTION("Virtio Backend Service reference driver on ACRN hypervisor"); +-- +2.17.1 + diff --git a/patches/0017-dmaengine-acpi-Move-index-to-struct-acpi_dma_spec.lpss b/patches/0017-dmaengine-acpi-Move-index-to-struct-acpi_dma_spec.lpss new file mode 100644 index 0000000000..2d13785ba5 --- /dev/null +++ b/patches/0017-dmaengine-acpi-Move-index-to-struct-acpi_dma_spec.lpss @@ -0,0 +1,71 @@ +From 8d65657ac7700f25d611ca2d54c61112d436932d Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 5 Aug 2019 15:50:09 +0300 +Subject: [PATCH 17/40] dmaengine: acpi: Move index to struct acpi_dma_spec + +In the future ->acpi_dma_xlate() callback function may use the index +of FixedDMA() descriptor to be utilized for channel direction setting. + +As a preparation step move index from local data structure to +struct acpi_dma_spec. + +Signed-off-by: Andy Shevchenko +--- + drivers/dma/acpi-dma.c | 5 ++--- + include/linux/acpi_dma.h | 2 ++ + 2 files changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c +index 8a05db3343d3..50ddc8871a5f 100644 +--- a/drivers/dma/acpi-dma.c ++++ b/drivers/dma/acpi-dma.c +@@ -319,7 +319,6 @@ static int acpi_dma_update_dma_spec(struct acpi_dma *adma, + + struct acpi_dma_parser_data { + struct acpi_dma_spec dma_spec; +- size_t index; + size_t n; + }; + +@@ -335,7 +334,7 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data) + if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) { + struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma; + +- if (pdata->n++ == pdata->index) { ++ if (pdata->n++ == pdata->dma_spec.index) { + pdata->dma_spec.chan_id = dma->channels; + pdata->dma_spec.slave_id = dma->request_lines; + } +@@ -373,9 +372,9 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, + return ERR_PTR(-ENODEV); + + memset(&pdata, 0, sizeof(pdata)); +- pdata.index = index; + + /* Initial values for 
the request line and channel */ ++ dma_spec->index = index; + dma_spec->chan_id = -1; + dma_spec->slave_id = -1; + +diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h +index 72cedb916a9c..2caebb8fb158 100644 +--- a/include/linux/acpi_dma.h ++++ b/include/linux/acpi_dma.h +@@ -18,12 +18,14 @@ + + /** + * struct acpi_dma_spec - slave device DMA resources ++ * @index: index of FixedDMA() resource + * @chan_id: channel unique id + * @slave_id: request line unique id + * @dev: struct device of the DMA controller to be used in the filter + * function + */ + struct acpi_dma_spec { ++ size_t index; + int chan_id; + int slave_id; + struct device *dev; +-- +2.17.1 + diff --git a/patches/0017-drm-i915-Use-local-variables-for-subslice_mask-for-dev.drm b/patches/0017-drm-i915-Use-local-variables-for-subslice_mask-for-dev.drm new file mode 100644 index 0000000000..9cfece4664 --- /dev/null +++ b/patches/0017-drm-i915-Use-local-variables-for-subslice_mask-for-dev.drm @@ -0,0 +1,146 @@ +From e452ecbfedceeddd25fc2e98e0caa3565b131958 Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:03:01 -0700 +Subject: [PATCH 017/690] drm/i915: Use local variables for subslice_mask for + device info + +When setting up subslice_mask, instead of operating on the slice +array directly, use a local variable to start bits per slice, then +use this to set the per slice array in one step. 
+ +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-6-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/intel_device_info.c | 49 +++++++++++++----------- + 1 file changed, 26 insertions(+), 23 deletions(-) + +diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c +index b1a79ed408eb..52515efe9f4e 100644 +--- a/drivers/gpu/drm/i915/intel_device_info.c ++++ b/drivers/gpu/drm/i915/intel_device_info.c +@@ -235,18 +235,6 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) + sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> + GEN10_F2_S_ENA_SHIFT; + +- subslice_mask = (1 << 4) - 1; +- subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> +- GEN10_F2_SS_DIS_SHIFT); +- +- /* +- * Slice0 can have up to 3 subslices, but there are only 2 in +- * slice1/2. +- */ +- sseu->subslice_mask[0] = subslice_mask; +- for (s = 1; s < sseu->max_slices; s++) +- sseu->subslice_mask[s] = subslice_mask & 0x3; +- + /* Slice0 */ + eu_en = ~I915_READ(GEN8_EU_DISABLE0); + for (ss = 0; ss < sseu->max_subslices; ss++) +@@ -270,14 +258,24 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) + eu_en = ~I915_READ(GEN10_EU_DISABLE3); + sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); + +- /* Do a second pass where we mark the subslices disabled if all their +- * eus are off. +- */ ++ subslice_mask = (1 << 4) - 1; ++ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> ++ GEN10_F2_SS_DIS_SHIFT); ++ + for (s = 0; s < sseu->max_slices; s++) { ++ u32 subslice_mask_with_eus = subslice_mask; ++ + for (ss = 0; ss < sseu->max_subslices; ss++) { + if (sseu_get_eus(sseu, s, ss) == 0) +- sseu->subslice_mask[s] &= ~BIT(ss); ++ subslice_mask_with_eus &= ~BIT(ss); + } ++ ++ /* ++ * Slice0 can have up to 3 subslices, but there are only 2 in ++ * slice1/2. ++ */ ++ sseu->subslice_mask[s] = s == 0 ? 
subslice_mask_with_eus : ++ subslice_mask_with_eus & 0x3; + } + + sseu->eu_total = compute_eu_total(sseu); +@@ -303,6 +301,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) + { + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + u32 fuse; ++ u8 subslice_mask = 0; + + fuse = I915_READ(CHV_FUSE_GT); + +@@ -316,7 +315,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) + (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >> + CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4); + +- sseu->subslice_mask[0] |= BIT(0); ++ subslice_mask |= BIT(0); + sseu_set_eus(sseu, 0, 0, ~disabled_mask); + } + +@@ -327,10 +326,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) + (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >> + CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4); + +- sseu->subslice_mask[0] |= BIT(1); ++ subslice_mask |= BIT(1); + sseu_set_eus(sseu, 0, 1, ~disabled_mask); + } + ++ sseu->subslice_mask[0] = subslice_mask; ++ + sseu->eu_total = compute_eu_total(sseu); + + /* +@@ -540,6 +541,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) + { + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + u32 fuse1; ++ u8 subslice_mask = 0; + int s, ss; + + /* +@@ -552,16 +554,15 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) + /* fall through */ + case 1: + sseu->slice_mask = BIT(0); +- sseu->subslice_mask[0] = BIT(0); ++ subslice_mask = BIT(0); + break; + case 2: + sseu->slice_mask = BIT(0); +- sseu->subslice_mask[0] = BIT(0) | BIT(1); ++ subslice_mask = BIT(0) | BIT(1); + break; + case 3: + sseu->slice_mask = BIT(0) | BIT(1); +- sseu->subslice_mask[0] = BIT(0) | BIT(1); +- sseu->subslice_mask[1] = BIT(0) | BIT(1); ++ subslice_mask = BIT(0) | BIT(1); + break; + } + +@@ -583,10 +584,12 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) + } + + intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), +- hweight8(sseu->subslice_mask[0]), ++ 
hweight8(subslice_mask), + sseu->eu_per_subslice); + + for (s = 0; s < sseu->max_slices; s++) { ++ sseu->subslice_mask[s] = subslice_mask; ++ + for (ss = 0; ss < sseu->max_subslices; ss++) { + sseu_set_eus(sseu, s, ss, + (1UL << sseu->eu_per_subslice) - 1); +-- +2.17.1 + diff --git a/patches/0017-net-phy-introducing-support-for-DWC-xPCS-logi.connectivity b/patches/0017-net-phy-introducing-support-for-DWC-xPCS-logi.connectivity new file mode 100644 index 0000000000..6a5d3a09bc --- /dev/null +++ b/patches/0017-net-phy-introducing-support-for-DWC-xPCS-logi.connectivity @@ -0,0 +1,520 @@ +From 4fccf571625faa273b93f297f3aa61dfc82d40ab Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Wed, 27 Jun 2018 03:22:24 +0800 +Subject: [PATCH 017/108] net: phy: introducing support for DWC xPCS logics for + EHL & TGL + +xPCS is DWC Ethernet Physical Coding Sublayer that can be integrated with +Ethernet MAC controller and acts as converter between GMII and SGMII. An +example is as shown below whereby DWC xPCS is integrated with DW EQoS MAC +to form GbE Controller. + + <-----------------GbE Controller---------->|<--External PHY chip--> + + +----------+ +----+ +---+ +--------------+ + | EQoS | <-GMII->| DW |<-->|PHY| <-- SGMII --> | External GbE | + | MAC | |xPCS| |IF | | PHY Chip | + +----------+ +----+ +---+ +--------------+ + ^ ^ ^ + | | | + +---------------------MDIO-------------------------+ + +xPCS is a Clause-45 MDIO Manageable Device (MMD) and supports basic +functionalities for initializing xPCS and configuring auto negotiation(AN), +loopback, link status, AN advertisement and Link Partner ability are +implemented. The implementation supports the C37 AN for 1000BASE-X and +SGMII (MAC side SGMII only). 
+ +Tested-by: Tan, Tee Min +Reviewed-by: Voon Weifeng +Reviewed-by: Kweh Hock Leong +Signed-off-by: Ong Boon Leong +--- + drivers/net/phy/Kconfig | 9 + + drivers/net/phy/Makefile | 1 + + drivers/net/phy/dwxpcs.c | 417 +++++++++++++++++++++++++++++++++++++++ + include/linux/dwxpcs.h | 16 ++ + 4 files changed, 443 insertions(+) + create mode 100644 drivers/net/phy/dwxpcs.c + create mode 100644 include/linux/dwxpcs.h + +diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig +index fe602648b99f..6d0ac118f5d2 100644 +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -364,6 +364,15 @@ config DP83867_PHY + ---help--- + Currently supports the DP83867 PHY. + ++config DWXPCS ++ tristate "Synopsys DesignWare PCS converter driver" ++ help ++ This driver supports DW PCS IP that provides the Serial Gigabit ++ Media Independent Interface(SGMII) between Ethernet physical media ++ devices and the Gigabit Ethernet controller. ++ ++ Currently tested with stmmac. ++ + config FIXED_PHY + tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs" + depends on PHYLIB +diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile +index a03437e091f3..5def985ae3ca 100644 +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -70,6 +70,7 @@ obj-$(CONFIG_DP83822_PHY) += dp83822.o + obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o + obj-$(CONFIG_DP83848_PHY) += dp83848.o + obj-$(CONFIG_DP83867_PHY) += dp83867.o ++obj-$(CONFIG_DWXPCS) += dwxpcs.o + obj-$(CONFIG_FIXED_PHY) += fixed_phy.o + obj-$(CONFIG_ICPLUS_PHY) += icplus.o + obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o +diff --git a/drivers/net/phy/dwxpcs.c b/drivers/net/phy/dwxpcs.c +new file mode 100644 +index 000000000000..f0003cec6871 +--- /dev/null ++++ b/drivers/net/phy/dwxpcs.c +@@ -0,0 +1,417 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (c) 2019, Intel Corporation. 
++ * DWC Ethernet Physical Coding Sublayer for GMII2SGMII Converter ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* XPCS MII MMD Device Addresses */ ++#define XPCS_MDIO_MII_MMD MDIO_MMD_VEND2 ++ ++/* MII MMD registers offsets */ ++#define MDIO_MII_MMD_DIGITAL_CTRL_1 0x8000 /* Digital Control 1 */ ++#define MDIO_MII_MMD_AN_CTRL 0x8001 /* AN Control */ ++#define MDIO_MII_MMD_AN_STAT 0x8002 /* AN Status */ ++ ++/* MII MMD SR AN Advertisement & Link Partner Ability are slightly ++ * different from MII_ADVERTISEMENT & MII_LPA in below fields: ++ */ ++#define MDIO_MII_MMD_HD BIT(6) /* Half duplex */ ++#define MDIO_MII_MMD_FD BIT(5) /* Full duplex */ ++#define MDIO_MII_MMD_PSE_SHIFT 7 /* Pause Ability shift */ ++#define MDIO_MII_MMD_PSE GENMASK(8, 7) /* Pause Ability */ ++#define MDIO_MII_MMD_PSE_NO 0x0 ++#define MDIO_MII_MMD_PSE_ASYM 0x1 ++#define MDIO_MII_MMD_PSE_SYM 0x2 ++#define MDIO_MII_MMD_PSE_BOTH 0x3 ++ ++/* Automatic Speed Mode Change for MAC side SGMII AN */ ++#define MDIO_MII_MMD_DIGI_CTRL_1_MAC_AUTO_SW BIT(9) ++ ++/* MII MMD AN Control defines */ ++#define MDIO_MII_MMD_AN_CTRL_TX_CONFIG_SHIFT 3 /* TX Config shift */ ++#define AN_CTRL_TX_CONF_PHY_SIDE_SGMII 0x1 /* PHY side SGMII mode */ ++#define AN_CTRL_TX_CONF_MAC_SIDE_SGMII 0x0 /* MAC side SGMII mode */ ++#define MDIO_MII_MMD_AN_CTRL_PCS_MD_SHIFT 1 /* PCS Mode shift */ ++#define MDIO_MII_MMD_AN_CTRL_PCS_MD GENMASK(2, 1) /* PCS Mode */ ++#define AN_CTRL_PCS_MD_C37_1000BASEX 0x0 /* C37 AN for 1000BASE-X */ ++#define AN_CTRL_PCS_MD_C37_SGMII 0x2 /* C37 AN for SGMII */ ++#define MDIO_MII_MMD_AN_CTRL_AN_INTR_EN BIT(0) /* AN Complete Intr Enable */ ++ ++/* MII MMD AN Status defines for SGMII AN Status */ ++#define AN_STAT_C37_AN_CMPLT BIT(0) /* AN Complete Intr */ ++#define AN_STAT_SGMII_AN_FD BIT(1) /* Full Duplex */ ++#define AN_STAT_SGMII_AN_SPEED_SHIFT 2 /* AN Speed shift */ ++#define AN_STAT_SGMII_AN_SPEED GENMASK(3, 2) /* AN Speed */ ++#define 
AN_STAT_SGMII_AN_10MBPS 0x0 /* 10 Mbps */ ++#define AN_STAT_SGMII_AN_100MBPS 0x1 /* 100 Mbps */ ++#define AN_STAT_SGMII_AN_1000MBPS 0x2 /* 1000 Mbps */ ++#define AN_STAT_SGMII_AN_LNKSTS BIT(4) /* Link Status */ ++ ++enum dwxpcs_state_t { ++ __DWXPCS_REMOVING, ++ __DWXPCS_TASK_SCHED, ++}; ++ ++struct pcs_stats { ++ int link; ++ int speed; ++ int duplex; ++}; ++ ++struct dwxpcs_priv { ++ struct phy_device *phy_dev; ++ struct phy_driver *phy_drv; ++ struct phy_device cached_phy_dev; ++ struct phy_driver conv_phy_drv; ++ struct mdio_device *mdiodev; ++ struct pcs_stats stats; ++ struct dwxpcs_platform_data *pdata; ++ char int_name[IFNAMSIZ]; ++ unsigned long state; ++ struct workqueue_struct *int_wq; ++ struct work_struct an_task; ++}; ++ ++/* DW xPCS mdiobus_read and mdiobus_write helper functions */ ++#define xpcs_read(dev, reg) \ ++ mdiobus_read(bus, xpcs_addr, \ ++ MII_ADDR_C45 | (reg) | \ ++ ((dev) << MII_DEVADDR_C45_SHIFT)) ++#define xpcs_write(dev, reg, val) \ ++ mdiobus_write(bus, xpcs_addr, \ ++ MII_ADDR_C45 | (reg) | \ ++ ((dev) << MII_DEVADDR_C45_SHIFT), val) ++ ++static void dwxpcs_init(struct dwxpcs_priv *priv) ++{ ++ struct mii_bus *bus = priv->mdiodev->bus; ++ int xpcs_addr = priv->mdiodev->addr; ++ int pcs_mode = priv->pdata->mode; ++ int phydata; ++ ++ if (pcs_mode == DWXPCS_MODE_SGMII_AN) { ++ /* For AN for SGMII mode, the settings are :- ++ * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN) ++ * 2) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII) ++ * DW xPCS used with DW EQoS MAC is always MAC ++ * side SGMII. ++ * 3) VR_MII_AN_CTRL Bit(0) [AN_INTR_EN] = 1b (AN Interrupt ++ * enabled) ++ * 4) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic ++ * speed/duplex mode change by HW after SGMII AN complete) ++ * Note: Since it is MAC side SGMII, there is no need to set ++ * SR_MII_AN_ADV. MAC side SGMII receives AN Tx Config ++ * from PHY about the link state change after C28 AN ++ * is completed between PHY and Link Partner. 
++ */ ++ phydata = xpcs_read(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_AN_CTRL); ++ phydata &= ~MDIO_MII_MMD_AN_CTRL_PCS_MD; ++ ++ phydata |= MDIO_MII_MMD_AN_CTRL_AN_INTR_EN | ++ (AN_CTRL_PCS_MD_C37_SGMII << ++ MDIO_MII_MMD_AN_CTRL_PCS_MD_SHIFT & ++ MDIO_MII_MMD_AN_CTRL_PCS_MD) | ++ (AN_CTRL_TX_CONF_MAC_SIDE_SGMII << ++ MDIO_MII_MMD_AN_CTRL_TX_CONFIG_SHIFT); ++ xpcs_write(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_AN_CTRL, phydata); ++ ++ phydata = xpcs_read(XPCS_MDIO_MII_MMD, ++ MDIO_MII_MMD_DIGITAL_CTRL_1); ++ phydata |= MDIO_MII_MMD_DIGI_CTRL_1_MAC_AUTO_SW; ++ xpcs_write(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_DIGITAL_CTRL_1, ++ phydata); ++ } else { ++ /* For AN for 1000BASE-X mode, the settings are :- ++ * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 00b (1000BASE-X C37) ++ * 2) VR_MII_AN_CTRL Bit(0) [AN_INTR_EN] = 1b (AN Interrupt ++ * enabled) ++ * 3) SR_MII_AN_ADV Bit(6)[FD] = 1b (Full Duplex) ++ * Note: Half Duplex is rarely used, so don't advertise. ++ * 4) SR_MII_AN_ADV Bit(8:7)[PSE] = 11b (Sym & Asym Pause) ++ */ ++ phydata = xpcs_read(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_AN_CTRL); ++ phydata &= ~MDIO_MII_MMD_AN_CTRL_PCS_MD; ++ phydata |= MDIO_MII_MMD_AN_CTRL_AN_INTR_EN | ++ (AN_CTRL_PCS_MD_C37_1000BASEX << ++ MDIO_MII_MMD_AN_CTRL_PCS_MD_SHIFT & ++ MDIO_MII_MMD_AN_CTRL_PCS_MD); ++ xpcs_write(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_AN_CTRL, phydata); ++ ++ phydata = xpcs_read(XPCS_MDIO_MII_MMD, MII_ADVERTISE); ++ phydata |= MDIO_MII_MMD_FD | ++ (MDIO_MII_MMD_PSE_BOTH << MDIO_MII_MMD_PSE_SHIFT); ++ xpcs_write(XPCS_MDIO_MII_MMD, MII_ADVERTISE, phydata); ++ } ++} ++ ++static int dwxpcs_read_status(struct phy_device *phydev) ++{ ++ struct dwxpcs_priv *priv = (struct dwxpcs_priv *)phydev->priv; ++ struct mii_bus *bus = priv->mdiodev->bus; ++ int xpcs_addr = priv->mdiodev->addr; ++ int pcs_mode = priv->pdata->mode; ++ int phydata; ++ int err; ++ ++ if (priv->phy_drv->read_status) ++ err = priv->phy_drv->read_status(phydev); ++ else ++ err = genphy_read_status(phydev); ++ ++ if (err < 0) ++ return err; ++ ++ 
/* For SGMII AN, we are done as the speed/duplex are automatically ++ * set because we have initialized 'MAC_AUTO_SW' for MAC side SGMII. ++ */ ++ if (pcs_mode == DWXPCS_MODE_1000BASEX_AN) { ++ /* For 1000BASE-X AN, we need to adjust duplex mode according ++ * to link partner. No need to update speed as it is always ++ * 1000Mbps. ++ */ ++ phydata = xpcs_read(XPCS_MDIO_MII_MMD, MII_BMCR); ++ phydata &= ~BMCR_FULLDPLX; ++ phydata |= phydev->duplex ? BMCR_FULLDPLX : 0; ++ xpcs_write(XPCS_MDIO_MII_MMD, MII_BMCR, phydata); ++ } ++ ++ return 0; ++} ++ ++static void dwxpcs_get_linkstatus(struct dwxpcs_priv *priv, int an_stat) ++{ ++ struct mii_bus *bus = priv->mdiodev->bus; ++ struct pcs_stats *stats = &priv->stats; ++ int xpcs_addr = priv->mdiodev->addr; ++ int pcs_mode = priv->pdata->mode; ++ ++ if (pcs_mode == DWXPCS_MODE_SGMII_AN) { ++ /* Check the SGMII AN link status */ ++ if (an_stat & AN_STAT_SGMII_AN_LNKSTS) { ++ int speed_value; ++ ++ stats->link = 1; ++ ++ speed_value = ((an_stat & AN_STAT_SGMII_AN_SPEED) >> ++ AN_STAT_SGMII_AN_SPEED_SHIFT); ++ if (speed_value == AN_STAT_SGMII_AN_1000MBPS) ++ stats->speed = SPEED_1000; ++ else if (speed_value == AN_STAT_SGMII_AN_100MBPS) ++ stats->speed = SPEED_100; ++ else ++ stats->speed = SPEED_10; ++ ++ if (an_stat & AN_STAT_SGMII_AN_FD) ++ stats->duplex = 1; ++ else ++ stats->duplex = 0; ++ } else { ++ stats->link = 0; ++ } ++ } else if (pcs_mode == DWXPCS_MODE_1000BASEX_AN) { ++ /* For 1000BASE-X AN, 1000BASE-X is always 1000Mbps. ++ * For duplex mode, we read from BMCR_FULLDPLX which is ++ * only valid if BMCR_ANENABLE is not enabeld. ++ */ ++ int phydata = xpcs_read(XPCS_MDIO_MII_MMD, MII_BMCR); ++ ++ stats->link = 1; ++ stats->speed = SPEED_1000; ++ if (!(phydata & BMCR_ANENABLE)) ++ stats->duplex = phydata & BMCR_FULLDPLX ? 
1 : 0; ++ } ++} ++ ++static void dwxpcs_irq_handle(struct dwxpcs_priv *priv) ++{ ++ struct mii_bus *bus = priv->mdiodev->bus; ++ struct device *dev = &priv->mdiodev->dev; ++ int xpcs_addr = priv->mdiodev->addr; ++ int an_stat; ++ ++ /* AN status */ ++ an_stat = xpcs_read(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_AN_STAT); ++ ++ if (an_stat & AN_STAT_C37_AN_CMPLT) { ++ struct pcs_stats *stats = &priv->stats; ++ ++ dwxpcs_get_linkstatus(priv, an_stat); ++ ++ /* Clear C37 AN complete status by writing zero */ ++ xpcs_write(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_AN_STAT, 0); ++ ++ dev_info(dev, "%s: Link = %d - %d/%s\n", ++ __func__, ++ stats->link, ++ stats->speed, ++ stats->duplex ? "Full" : "Half"); ++ } ++} ++ ++static void dwxpcs_an_task(struct work_struct *work) ++{ ++ struct dwxpcs_priv *priv = container_of(work, ++ struct dwxpcs_priv, ++ an_task); ++ dwxpcs_irq_handle(priv); ++ ++ clear_bit(__DWXPCS_TASK_SCHED, &priv->state); ++} ++ ++static irqreturn_t dwxpcs_interrupt(int irq, void *dev_id) ++{ ++ struct dwxpcs_priv *priv = (struct dwxpcs_priv *)dev_id; ++ ++ /* Handle the clearing of AN status outside of interrupt context ++ * as it involves mdiobus_read() & mdiobus_write(). 
++ */ ++ if (!test_bit(__DWXPCS_REMOVING, &priv->state) && ++ !test_and_set_bit(__DWXPCS_TASK_SCHED, &priv->state)) { ++ queue_work(priv->int_wq, &priv->an_task); ++ ++ return IRQ_HANDLED; ++ } ++ ++ return IRQ_NONE; ++} ++ ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id dwxpcs_acpi_match[] = { ++ { "INTC1033" }, /* EHL Ethernet PCS */ ++ { "INTC1034" }, /* TGL Ethernet PCS */ ++ { }, ++}; ++ ++MODULE_DEVICE_TABLE(acpi, dwxpcs_acpi_match); ++#endif // CONFIG_ACPI ++ ++static int dwxpcs_probe(struct mdio_device *mdiodev) ++{ ++ struct device_node *phy_node; ++ struct dwxpcs_priv *priv; ++ struct device_node *np; ++ struct device *dev; ++ int ret; ++ ++ dev = &mdiodev->dev; ++ np = dev->of_node; ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ if (np) { ++ /* Handle mdio_device registered through devicetree */ ++ phy_node = of_parse_phandle(np, "phy-handle", 0); ++ if (!phy_node) { ++ dev_err(dev, "Couldn't parse phy-handle\n"); ++ return -ENODEV; ++ } ++ ++ priv->phy_dev = of_phy_find_device(phy_node); ++ of_node_put(phy_node); ++ if (!priv->phy_dev) { ++ dev_info(dev, "Couldn't find phydev\n"); ++ return -EPROBE_DEFER; ++ } ++ } else { ++ /* Handle mdio_device registered through mdio_board_info */ ++ priv->pdata = (struct dwxpcs_platform_data *)dev->platform_data; ++ ++ priv->phy_dev = mdiobus_get_phy(mdiodev->bus, ++ priv->pdata->ext_phy_addr); ++ } ++ ++ if (!priv->phy_dev) { ++ dev_info(dev, "Couldn't find phydev\n"); ++ return -EPROBE_DEFER; ++ } ++ ++ if (!priv->phy_dev->drv) { ++ dev_info(dev, "Attached phy not ready\n"); ++ return -EPROBE_DEFER; ++ } ++ ++ priv->mdiodev = mdiodev; ++ ++ /* Initialize DW XPCS */ ++ dwxpcs_init(priv); ++ ++ priv->phy_drv = priv->phy_dev->drv; ++ memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, ++ sizeof(struct phy_driver)); ++ priv->conv_phy_drv.read_status = dwxpcs_read_status; ++ /* Store a copy of phy_dev info for remove() later */ ++ priv->cached_phy_dev.priv = 
priv->phy_dev->priv; ++ priv->cached_phy_dev.drv = priv->phy_dev->drv; ++ priv->phy_dev->priv = priv; ++ priv->phy_dev->drv = &priv->conv_phy_drv; ++ ++ if (priv->pdata->irq > 0) { ++ char *int_name; ++ ++ INIT_WORK(&priv->an_task, dwxpcs_an_task); ++ clear_bit(__DWXPCS_TASK_SCHED, &priv->state); ++ ++ int_name = priv->int_name; ++ sprintf(int_name, "%s-%d", "dwxpcs", priv->mdiodev->dev.id); ++ priv->int_wq = create_singlethread_workqueue(int_name); ++ if (!priv->int_wq) { ++ dev_err(dev, "%s: Failed to create workqueue\n", ++ int_name); ++ return -ENOMEM; ++ } ++ ++ ret = request_irq(priv->pdata->irq, dwxpcs_interrupt, ++ IRQF_SHARED, int_name, priv); ++ if (unlikely(ret < 0)) { ++ destroy_workqueue(priv->int_wq); ++ dev_err(dev, "%s: Allocating DW XPCS IRQ %d (%d)\n", ++ __func__, priv->pdata->irq, ret); ++ return ret; ++ } ++ } ++ dev_info(dev, "%s: DW XPCS mdio device (IRQ: %d) probed successful\n", ++ __func__, priv->pdata->irq); ++ ++ mdiodev->priv = priv; ++ ++ return 0; ++} ++ ++static void dwxpcs_remove(struct mdio_device *mdiodev) ++{ ++ struct dwxpcs_priv *priv = (struct dwxpcs_priv *)mdiodev->priv; ++ ++ set_bit(__DWXPCS_REMOVING, &priv->state); ++ ++ /* Restore the original phy_device info */ ++ priv->phy_dev->priv = priv->cached_phy_dev.priv; ++ priv->phy_dev->drv = priv->cached_phy_dev.drv; ++ ++ free_irq(priv->pdata->irq, priv); ++ if (priv->int_wq) ++ destroy_workqueue(priv->int_wq); ++} ++ ++static struct mdio_driver dwxpcs_driver = { ++ .probe = dwxpcs_probe, ++ .remove = dwxpcs_remove, ++ .mdiodrv.driver = { ++ .name = "dwxpcs", ++ .acpi_match_table = ACPI_PTR(dwxpcs_acpi_match), ++ }, ++}; ++ ++mdio_module_driver(dwxpcs_driver); ++ ++MODULE_DESCRIPTION("DW xPCS converter driver"); ++MODULE_LICENSE("GPL"); +diff --git a/include/linux/dwxpcs.h b/include/linux/dwxpcs.h +new file mode 100644 +index 000000000000..2082e800ee04 +--- /dev/null ++++ b/include/linux/dwxpcs.h +@@ -0,0 +1,16 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef 
__LINUX_DWXPCS_H ++#define __LINUX_DWXPCS_H ++ ++enum dwxpcs_pcs_mode { ++ DWXPCS_MODE_SGMII_AN, ++ DWXPCS_MODE_1000BASEX_AN, ++}; ++ ++struct dwxpcs_platform_data { ++ int irq; ++ enum dwxpcs_pcs_mode mode; ++ int ext_phy_addr; ++}; ++ ++#endif +-- +2.17.1 + diff --git a/patches/0017-platform-x86-SOCPERF3-support-for-sep-socwatc.sep-socwatch b/patches/0017-platform-x86-SOCPERF3-support-for-sep-socwatc.sep-socwatch new file mode 100644 index 0000000000..270c7c8e48 --- /dev/null +++ b/patches/0017-platform-x86-SOCPERF3-support-for-sep-socwatc.sep-socwatch @@ -0,0 +1,11127 @@ +From 7600f50465503854501a25851d85e15d7b7173c5 Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Wed, 1 May 2019 17:21:53 -0700 +Subject: [PATCH 17/27] platform/x86: SOCPERF3 support for sep & socwatch + +SEP/SOCWATCH are now using version 3 of socperf driver + +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/Kconfig | 2 + + drivers/platform/x86/Makefile | 7 +- + drivers/platform/x86/sepdk/Kconfig | 1 + + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 6 +- + drivers/platform/x86/sepdk/sep/unc_sa.c | 6 - + .../x86/sepdk/sep/valleyview_sochap.c | 6 - + drivers/platform/x86/socperf/Kconfig | 10 + + drivers/platform/x86/socperf/Makefile | 12 + + drivers/platform/x86/socperf/control.c | 739 ++++++ + drivers/platform/x86/socperf/haswellunc_sa.c | 407 ++++ + drivers/platform/x86/socperf/inc/control.h | 467 ++++ + .../platform/x86/socperf/inc/ecb_iterators.h | 130 ++ + .../platform/x86/socperf/inc/haswellunc_sa.h | 79 + + drivers/platform/x86/socperf/inc/npk_uncore.h | 76 + + drivers/platform/x86/socperf/inc/pci.h | 103 + + drivers/platform/x86/socperf/inc/soc_uncore.h | 85 + + drivers/platform/x86/socperf/inc/socperfdrv.h | 191 ++ + drivers/platform/x86/socperf/inc/utility.h | 61 + + .../socperf/include/error_reporting_utils.h | 168 ++ + .../x86/socperf/include/lwpmudrv_chipset.h | 285 +++ + .../x86/socperf/include/lwpmudrv_defines.h | 502 ++++ + 
.../x86/socperf/include/lwpmudrv_ecb.h | 1095 +++++++++ + .../x86/socperf/include/lwpmudrv_ioctl.h | 343 +++ + .../x86/socperf/include/lwpmudrv_struct.h | 2014 +++++++++++++++++ + .../x86/socperf/include/lwpmudrv_types.h | 158 ++ + .../x86/socperf/include/lwpmudrv_version.h | 158 ++ + .../x86/socperf/include/rise_errors.h | 326 +++ + drivers/platform/x86/socperf/npk_uncore.c | 502 ++++ + drivers/platform/x86/socperf/pci.c | 188 ++ + drivers/platform/x86/socperf/soc_uncore.c | 901 ++++++++ + drivers/platform/x86/socperf/socperfdrv.c | 1560 +++++++++++++ + drivers/platform/x86/socperf/utility.c | 170 ++ + 32 files changed, 10741 insertions(+), 17 deletions(-) + create mode 100644 drivers/platform/x86/socperf/Kconfig + create mode 100644 drivers/platform/x86/socperf/Makefile + create mode 100644 drivers/platform/x86/socperf/control.c + create mode 100644 drivers/platform/x86/socperf/haswellunc_sa.c + create mode 100644 drivers/platform/x86/socperf/inc/control.h + create mode 100644 drivers/platform/x86/socperf/inc/ecb_iterators.h + create mode 100644 drivers/platform/x86/socperf/inc/haswellunc_sa.h + create mode 100644 drivers/platform/x86/socperf/inc/npk_uncore.h + create mode 100644 drivers/platform/x86/socperf/inc/pci.h + create mode 100644 drivers/platform/x86/socperf/inc/soc_uncore.h + create mode 100644 drivers/platform/x86/socperf/inc/socperfdrv.h + create mode 100644 drivers/platform/x86/socperf/inc/utility.h + create mode 100644 drivers/platform/x86/socperf/include/error_reporting_utils.h + create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_chipset.h + create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_defines.h + create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_ecb.h + create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_ioctl.h + create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_struct.h + create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_types.h + create mode 100644 
drivers/platform/x86/socperf/include/lwpmudrv_version.h + create mode 100644 drivers/platform/x86/socperf/include/rise_errors.h + create mode 100644 drivers/platform/x86/socperf/npk_uncore.c + create mode 100644 drivers/platform/x86/socperf/pci.c + create mode 100644 drivers/platform/x86/socperf/soc_uncore.c + create mode 100644 drivers/platform/x86/socperf/socperfdrv.c + create mode 100644 drivers/platform/x86/socperf/utility.c + +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig +index 52b941c5c2c8..51e3c9c5d5e8 100644 +--- a/drivers/platform/x86/Kconfig ++++ b/drivers/platform/x86/Kconfig +@@ -1343,6 +1343,8 @@ config PMC_ATOM + def_bool y + depends on PCI + select COMMON_CLK ++ ++source "drivers/platform/x86/socperf/Kconfig" + source "drivers/platform/x86/socwatch/Kconfig" + source "drivers/platform/x86/socwatchhv/Kconfig" + source "drivers/platform/x86/sepdk/Kconfig" +diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile +index f9fac98188c8..0799d4515449 100644 +--- a/drivers/platform/x86/Makefile ++++ b/drivers/platform/x86/Makefile +@@ -100,7 +100,10 @@ obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o + obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o + obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o + obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/ +-obj-$(CONFIG_INTEL_SOCWATCH) += socwatch/ ++ ++# sep and socwatch drivers ++obj-$(CONFIG_INTEL_SOCPERF) += socperf/ ++obj-$(CONFIG_INTEL_SOCWATCH) += socwatch/ socperf/ + obj-$(CONFIG_INTEL_SOCWATCH_HV) += socwatchhv/ +-obj-$(CONFIG_INTEL_SEP) += sepdk/ ++obj-$(CONFIG_INTEL_SEP) += sepdk/ socperf/ + +diff --git a/drivers/platform/x86/sepdk/Kconfig b/drivers/platform/x86/sepdk/Kconfig +index d7dc0f592a96..23a458958c6e 100755 +--- a/drivers/platform/x86/sepdk/Kconfig ++++ b/drivers/platform/x86/sepdk/Kconfig +@@ -13,6 +13,7 @@ config SEP + tristate "SEP kernel driver" + depends on INTEL_SEP + depends on ACPI && PCI ++ depends 
on SOCPERF + default m + + config SEP_ACRN +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +index e4b9a80efe9a..ab827edf3e7c 100755 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -223,7 +223,7 @@ static U8 *prev_set_CR4; + + wait_queue_head_t wait_exit; + +-// extern OS_STATUS SOCPERF_Switch_Group3 (void); ++extern OS_STATUS SOCPERF_Switch_Group3(void); + + #if !defined(DRV_USE_UNLOCKED_IOCTL) + #define MUTEX_INIT(lock) +@@ -1509,7 +1509,7 @@ static VOID lwpmudrv_Switch_To_Next_Group(void) + if (pecb_unc && + ECB_device_type(pecb_unc) == + DEVICE_UNC_SOCPERF) { +- // SOCPERF_Switch_Group3(); ++ SOCPERF_Switch_Group3(); + } + } + } +@@ -2134,7 +2134,7 @@ static OS_STATUS lwpmudrv_Uncore_Switch_Group(void) + (ECB_device_type(ecb_unc) == + DEVICE_UNC_SOCPERF) && + (j == 0)) { +- // SOCPERF_Switch_Group3(); ++ SOCPERF_Switch_Group3(); + } + // Post group switch + cur_grp = LWPMU_DEVICE_cur_group( +diff --git a/drivers/platform/x86/sepdk/sep/unc_sa.c b/drivers/platform/x86/sepdk/sep/unc_sa.c +index 7345807f9588..8691544e3c13 100755 +--- a/drivers/platform/x86/sepdk/sep/unc_sa.c ++++ b/drivers/platform/x86/sepdk/sep/unc_sa.c +@@ -33,12 +33,10 @@ + #include "inc/haswellunc_sa.h" + #include "inc/utility.h" + +-#if 0 + extern U64 *read_counter_info; + extern DRV_CONFIG drv_cfg; + + extern VOID SOCPERF_Read_Data3(PVOID data_buffer); +-#endif + + /*! 
+ * @fn static VOID hswunc_sa_Initialize(PVOID) +@@ -71,7 +69,6 @@ static VOID hswunc_sa_Initialize(VOID *param) + */ + static VOID hswunc_sa_Trigger_Read(PVOID param, U32 id) + { +-#if 0 + U64 *data = (U64 *)param; + U32 cur_grp; + ECB pecb; +@@ -90,7 +87,6 @@ static VOID hswunc_sa_Trigger_Read(PVOID param, U32 id) + SOCPERF_Read_Data3((void*)data); + + SEP_DRV_LOG_TRACE_OUT(""); +-#endif + } + + /* ------------------------------------------------------------------------- */ +@@ -106,7 +102,6 @@ static VOID hswunc_sa_Trigger_Read(PVOID param, U32 id) + */ + static VOID hswunc_sa_Read_PMU_Data(PVOID param) + { +-#if 0 + U32 j; + U64 *buffer = read_counter_info; + U32 dev_idx; +@@ -144,7 +139,6 @@ static VOID hswunc_sa_Read_PMU_Data(PVOID param) + END_FOR_EACH_PCI_DATA_REG_RAW; + + SEP_DRV_LOG_TRACE_OUT(""); +-#endif + } + + /* +diff --git a/drivers/platform/x86/sepdk/sep/valleyview_sochap.c b/drivers/platform/x86/sepdk/sep/valleyview_sochap.c +index 7e1e5eb9c65f..f092e376754c 100755 +--- a/drivers/platform/x86/sepdk/sep/valleyview_sochap.c ++++ b/drivers/platform/x86/sepdk/sep/valleyview_sochap.c +@@ -37,10 +37,8 @@ static U64 *uncore_current_data; + static U64 *uncore_to_read_data; + extern DRV_CONFIG drv_cfg; + +-#if 0 + extern U64 *read_counter_info; + extern VOID SOCPERF_Read_Data3(PVOID data_buffer); +-#endif + + /*! 
+ * @fn static VOID valleyview_VISA_Initialize(PVOID) +@@ -187,7 +185,6 @@ static VOID valleyview_VISA_Clean_Up(VOID *param) + */ + static VOID valleyview_VISA_Read_PMU_Data(PVOID param) + { +-#if 0 + U32 j; + U64 *buffer = read_counter_info; + U32 dev_idx; +@@ -236,7 +233,6 @@ static VOID valleyview_VISA_Read_PMU_Data(PVOID param) + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); +-#endif + } + + /* ------------------------------------------------------------------------- */ +@@ -252,7 +248,6 @@ static VOID valleyview_VISA_Read_PMU_Data(PVOID param) + */ + static VOID valleyview_Trigger_Read(PVOID param, U32 id) + { +-#if 0 + U64 *data = (U64 *)param; + U32 cur_grp; + ECB pecb; +@@ -271,7 +266,6 @@ static VOID valleyview_Trigger_Read(PVOID param, U32 id) + SOCPERF_Read_Data3((void*)data); + + SEP_DRV_LOG_TRACE_OUT(""); +-#endif + } + + /* +diff --git a/drivers/platform/x86/socperf/Kconfig b/drivers/platform/x86/socperf/Kconfig +new file mode 100644 +index 000000000000..f2edf457877e +--- /dev/null ++++ b/drivers/platform/x86/socperf/Kconfig +@@ -0,0 +1,10 @@ ++config INTEL_SOCPERF ++ bool "Socperf kernel driver" ++ depends on X86 || X86_64 ++ default y ++ ++config SOCPERF ++ tristate "Socperf kernel driver" ++ depends on INTEL_SOCPERF ++ depends on ACPI && PCI ++ default m +diff --git a/drivers/platform/x86/socperf/Makefile b/drivers/platform/x86/socperf/Makefile +new file mode 100644 +index 000000000000..a67e6a5c9e1f +--- /dev/null ++++ b/drivers/platform/x86/socperf/Makefile +@@ -0,0 +1,12 @@ ++ccflags-y := -I$(src)/include -I$(src)/inc ++ ++obj-$(CONFIG_SOCPERF) += socperf3.o ++ ++socperf3-y := socperfdrv.o \ ++ control.o \ ++ utility.o \ ++ pci.o \ ++ soc_uncore.o \ ++ haswellunc_sa.o \ ++ npk_uncore.o ++ +diff --git a/drivers/platform/x86/socperf/control.c b/drivers/platform/x86/socperf/control.c +new file mode 100644 +index 000000000000..f526dbb7717e +--- /dev/null ++++ b/drivers/platform/x86/socperf/control.c +@@ -0,0 +1,739 @@ ++/* 
*********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2011-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2011-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "rise_errors.h" ++#include "lwpmudrv_ecb.h" ++#include "socperfdrv.h" ++#include "control.h" ++#include ++ ++#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \ ++ smp_call_function((func), (ctx), (wait)) ++ ++/* ++ * Global State Nodes - keep here for now. Abstract out when necessary. ++ */ ++GLOBAL_STATE_NODE socperf_driver_state; ++static MEM_TRACKER mem_tr_head; // start of the mem tracker list ++static MEM_TRACKER mem_tr_tail; // end of mem tracker list ++static spinlock_t mem_tr_lock; // spinlock for mem tracker list ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn VOID SOCPERF_Invoke_Cpu (func, ctx, arg) ++ * ++ * @brief Set up a DPC call and insert it into the queue ++ * ++ * @param IN cpu_idx - the core id to dispatch this function to ++ * IN func - function to be invoked by the specified core(s) ++ * IN ctx - pointer to the parameter block for each function ++ * invocation ++ * ++ * @return None ++ * ++ * Special Notes: ++ * ++ */ ++VOID SOCPERF_Invoke_Cpu(int cpu_idx, VOID (*func)(PVOID), PVOID ctx) ++{ ++ SOCPERF_Invoke_Parallel(func, ctx); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID SOCPERF_Invoke_Parallel_Service(func, ctx, blocking, exclude) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * @param blocking - Wait for invoked function to complete ++ * @param exclude - exclude the current core from executing the code ++ * ++ * @returns None ++ * ++ * @brief Service routine to handle all kinds of parallel invoke on all CPU calls ++ * ++ * Special Notes: ++ * Invoke the function provided in parallel in either a blocking or ++ * non-blocking mode. The current core may be excluded if desired. ++ * NOTE - Do not call this function directly from source code. ++ * Use the aliases SOCPERF_Invoke_Parallel(), SOCPERF_Invoke_Parallel_NB(), ++ * or SOCPERF_Invoke_Parallel_XS(). 
++ * ++ */ ++VOID SOCPERF_Invoke_Parallel_Service(VOID (*func)(PVOID), PVOID ctx, ++ int blocking, int exclude) ++{ ++ GLOBAL_STATE_cpu_count(socperf_driver_state) = 0; ++ GLOBAL_STATE_dpc_count(socperf_driver_state) = 0; ++ ++ preempt_disable(); ++ SMP_CALL_FUNCTION(func, ctx, 0, blocking); ++ ++ if (!exclude) { ++ func(ctx); ++ } ++ preempt_enable(); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID control_Memory_Tracker_Delete_Node(mem_tr) ++ * ++ * @param IN mem_tr - memory tracker node to delete ++ * ++ * @returns None ++ * ++ * @brief Delete specified node in the memory tracker ++ * ++ * Special Notes: ++ * Assumes mem_tr_lock is already held while calling this function! ++ */ ++static VOID control_Memory_Tracker_Delete_Node(MEM_TRACKER mem_tr) ++{ ++ MEM_TRACKER prev_tr = NULL; ++ MEM_TRACKER next_tr = NULL; ++ U32 size = MEM_EL_MAX_ARRAY_SIZE * sizeof(MEM_EL_NODE); ++ ++ if (!mem_tr) { ++ return; ++ } ++ ++ // free the allocated mem_el array (if any) ++ if (MEM_TRACKER_mem(mem_tr)) { ++ if (size < MAX_KMALLOC_SIZE) { ++ kfree(MEM_TRACKER_mem(mem_tr)); ++ } else { ++ free_pages((unsigned long)MEM_TRACKER_mem(mem_tr), ++ get_order(size)); ++ } ++ } ++ ++ // update the linked list ++ prev_tr = MEM_TRACKER_prev(mem_tr); ++ next_tr = MEM_TRACKER_next(mem_tr); ++ if (prev_tr) { ++ MEM_TRACKER_next(prev_tr) = next_tr; ++ } ++ if (next_tr) { ++ MEM_TRACKER_prev(next_tr) = prev_tr; ++ } ++ ++ // free the mem_tracker node ++ kfree(mem_tr); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID control_Memory_Tracker_Create_Node(void) ++ * ++ * @param None - size of the memory to allocate ++ * ++ * @returns OS_SUCCESS if successful, otherwise error ++ * ++ * @brief Initialize the memory tracker ++ * ++ * Special Notes: ++ * Assumes mem_tr_lock is already held while calling this function! 
++ * ++ * Since this function can be called within either GFP_KERNEL or ++ * GFP_ATOMIC contexts, the most restrictive allocation is used ++ * (viz., GFP_ATOMIC). ++ */ ++static U32 control_Memory_Tracker_Create_Node(void) ++{ ++ U32 size = MEM_EL_MAX_ARRAY_SIZE * sizeof(MEM_EL_NODE); ++ PVOID location = NULL; ++ MEM_TRACKER mem_tr = NULL; ++ ++ // create a mem tracker node ++ mem_tr = (MEM_TRACKER)kmalloc(sizeof(MEM_TRACKER_NODE), GFP_ATOMIC); ++ if (!mem_tr) { ++ SOCPERF_PRINT_ERROR( ++ "%s: failed to allocate mem tracker node\n", __func__); ++ return OS_FAULT; ++ } ++ ++ // create an initial array of mem_el's inside the mem tracker node ++ if (size < MAX_KMALLOC_SIZE) { ++ location = (PVOID)kmalloc(size, GFP_ATOMIC); ++ SOCPERF_PRINT_DEBUG( ++ "%s: allocated small memory (0x%p, %d)\n", ++ __func__, location, (S32)size); ++ } else { ++ location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size)); ++ SOCPERF_PRINT_DEBUG( ++ "%s: allocated large memory (0x%p, %d)\n", ++ __func__, location, (S32)size); ++ } ++ ++ // initialize new mem tracker node ++ MEM_TRACKER_mem(mem_tr) = location; ++ MEM_TRACKER_prev(mem_tr) = NULL; ++ MEM_TRACKER_next(mem_tr) = NULL; ++ ++ // if mem_el array allocation failed, then remove node ++ if (!MEM_TRACKER_mem(mem_tr)) { ++ control_Memory_Tracker_Delete_Node(mem_tr); ++ SOCPERF_PRINT_ERROR( ++ "%s: failed to allocate mem_el array in tracker node ... 
deleting node\n", __func__); ++ return OS_FAULT; ++ } ++ ++ // initialize mem_tracker's mem_el array ++ MEM_TRACKER_max_size(mem_tr) = MEM_EL_MAX_ARRAY_SIZE; ++ memset(MEM_TRACKER_mem(mem_tr), 0, size); ++ ++ // update the linked list ++ if (!mem_tr_head) { ++ mem_tr_head = mem_tr; ++ } else { ++ MEM_TRACKER_prev(mem_tr) = mem_tr_tail; ++ MEM_TRACKER_next(mem_tr_tail) = mem_tr; ++ } ++ mem_tr_tail = mem_tr; ++ SOCPERF_PRINT_DEBUG( ++ "%s: allocating new node=0x%p, max_elements=%d, size=%d\n", ++ __func__, MEM_TRACKER_mem(mem_tr_tail), MEM_EL_MAX_ARRAY_SIZE, size); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID control_Memory_Tracker_Add(location, size, vmalloc_flag) ++ * ++ * @param IN location - memory location ++ * @param IN size - size of the memory to allocate ++ * @param IN vmalloc_flag - flag that indicates if the allocation was done with vmalloc ++ * ++ * @returns None ++ * ++ * @brief Keep track of allocated memory with memory tracker ++ * ++ * Special Notes: ++ * Starting from first mem_tracker node, the algorithm ++ * finds the first "hole" in the mem_tracker list and ++ * tracks the memory allocation there. 
++ */ ++static U32 control_Memory_Tracker_Add(PVOID location, ssize_t size, ++ DRV_BOOL vmalloc_flag) ++{ ++ S32 i, n; ++ U32 status; ++ DRV_BOOL found; ++ MEM_TRACKER mem_tr; ++ ++ spin_lock(&mem_tr_lock); ++ ++ // check if there is space in ANY of mem_tracker's nodes for the memory item ++ mem_tr = mem_tr_head; ++ found = FALSE; ++ status = OS_SUCCESS; ++ i = n = 0; ++ while (mem_tr && (!found)) { ++ for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) { ++ if (!MEM_TRACKER_mem_address(mem_tr, i)) { ++ SOCPERF_PRINT_DEBUG( ++ "%s: found index %d of %d available\n", ++ __func__, i, MEM_TRACKER_max_size(mem_tr) - 1); ++ n = i; ++ found = TRUE; ++ } ++ } ++ if (!found) { ++ mem_tr = MEM_TRACKER_next(mem_tr); ++ } ++ } ++ ++ if (!found) { ++ // extend into (i.e., create new) mem_tracker node ... ++ status = control_Memory_Tracker_Create_Node(); ++ if (status != OS_SUCCESS) { ++ SOCPERF_PRINT_ERROR( ++ "Unable to create mem tracker node\n"); ++ goto finish_add; ++ } ++ // use mem tracker tail node and first available entry in mem_el array ++ mem_tr = mem_tr_tail; ++ n = 0; ++ } ++ ++ // we now have a location in mem tracker to keep track of the memory item ++ MEM_TRACKER_mem_address(mem_tr, n) = location; ++ MEM_TRACKER_mem_size(mem_tr, n) = size; ++ MEM_TRACKER_mem_vmalloc(mem_tr, n) = vmalloc_flag; ++ SOCPERF_PRINT_DEBUG( ++ "%s: tracking (0x%p, %d) in node %d of %d\n", ++ __func__, location, (S32)size, n, MEM_TRACKER_max_size(mem_tr) - 1); ++ ++finish_add: ++ spin_unlock(&mem_tr_lock); ++ ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID SOCPERF_Memory_Tracker_Init(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Initializes Memory Tracker ++ * ++ * Special Notes: ++ * This should only be called when the driver is being loaded. 
++ */ ++VOID SOCPERF_Memory_Tracker_Init(VOID) ++{ ++ SOCPERF_PRINT_DEBUG( ++ "%s: initializing mem tracker\n", __func__); ++ ++ mem_tr_head = NULL; ++ mem_tr_tail = NULL; ++ ++ spin_lock_init(&mem_tr_lock); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID SOCPERF_Memory_Tracker_Free(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Frees memory used by Memory Tracker ++ * ++ * Special Notes: ++ * This should only be called when the driver is being unloaded. ++ */ ++VOID SOCPERF_Memory_Tracker_Free(VOID) ++{ ++ S32 i; ++ MEM_TRACKER temp; ++ ++ SOCPERF_PRINT_DEBUG( ++ "%s: destroying mem tracker\n", __func__); ++ ++ spin_lock(&mem_tr_lock); ++ ++ // check for any memory that was not freed, and free it ++ while (mem_tr_head) { ++ for (i = 0; i < MEM_TRACKER_max_size(mem_tr_head); i++) { ++ if (MEM_TRACKER_mem_address(mem_tr_head, i)) { ++ SOCPERF_PRINT_WARNING( ++ "%s: index %d of %d, not freed (0x%p, %d) ... freeing now\n", ++ __func__, i, ++ MEM_TRACKER_max_size(mem_tr_head) - 1, ++ MEM_TRACKER_mem_address(mem_tr_head, i), ++ MEM_TRACKER_mem_size(mem_tr_head, i)); ++ free_pages( ++ (unsigned long)MEM_TRACKER_mem_address( ++ mem_tr_head, i), ++ get_order(MEM_TRACKER_mem_size( ++ mem_tr_head, i))); ++ MEM_TRACKER_mem_address(mem_tr_head, i) = NULL; ++ MEM_TRACKER_mem_size(mem_tr_head, i) = 0; ++ MEM_TRACKER_mem_vmalloc(mem_tr_head, i) = FALSE; ++ } ++ } ++ temp = MEM_TRACKER_next(mem_tr_head); ++ control_Memory_Tracker_Delete_Node(mem_tr_head); ++ mem_tr_head = temp; ++ } ++ ++ spin_unlock(&mem_tr_lock); ++ ++ SOCPERF_PRINT_DEBUG( ++ "%s: mem tracker destruction complete\n", __func__); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn VOID SOCPERF_Memory_Tracker_Compaction(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Compacts the memory allocator if holes are detected ++ * ++ * Special Notes: ++ * The algorithm 
compacts mem_tracker nodes such that ++ * node entries are full starting from mem_tr_head ++ * up until the first empty node is detected, after ++ * which nodes up to mem_tr_tail will be empty. ++ * At end of collection (or at other safe sync point), ++ * we reclaim/compact space used by mem tracker. ++ */ ++VOID SOCPERF_Memory_Tracker_Compaction(void) ++{ ++ S32 i, j, n, m, c, d; ++ DRV_BOOL found, overlap; ++ MEM_TRACKER mem_tr1, mem_tr2; ++ ++ spin_lock(&mem_tr_lock); ++ ++ mem_tr1 = mem_tr_head; ++ mem_tr2 = mem_tr_tail; ++ ++ // if memory tracker was never used, then no need to compact ++ if (!mem_tr1 || !mem_tr2) { ++ goto finish_compact; ++ } ++ ++ i = j = n = c = d = 0; ++ m = MEM_TRACKER_max_size(mem_tr2) - 1; ++ overlap = FALSE; ++ while (!overlap) { ++ // find an empty node ++ found = FALSE; ++ while (!found && !overlap && mem_tr1) { ++ SOCPERF_PRINT_DEBUG( ++ "%s: looking at mem_tr1 0x%p, index=%d\n", ++ __func__, mem_tr1, n); ++ for (i = n; i < MEM_TRACKER_max_size(mem_tr1); i++) { ++ if (!MEM_TRACKER_mem_address(mem_tr1, i)) { ++ SOCPERF_PRINT_DEBUG( ++ "%s: found index %d of %d empty\n", ++ __func__, i, ++ MEM_TRACKER_max_size(mem_tr1) - ++ 1); ++ found = TRUE; ++ } ++ } ++ // check for overlap ++ overlap = (mem_tr1 == mem_tr2) && (i >= m); ++ ++ // if no overlap and an empty node was not found, then advance to next node ++ if (!found && !overlap) { ++ mem_tr1 = MEM_TRACKER_next(mem_tr1); ++ n = 0; ++ } ++ } ++ // all nodes going in forward direction are full, so exit ++ if (!found || overlap) { ++ goto finish_compact; ++ } ++ ++ // find a non-empty node ++ found = FALSE; ++ while (!found && !overlap && mem_tr2) { ++ SOCPERF_PRINT_DEBUG( ++ "%s: looking at mem_tr2 0x%p, index=%d\n", ++ __func__, mem_tr2, m); ++ for (j = m; j >= 0; j--) { ++ if (MEM_TRACKER_mem_address(mem_tr2, j)) { ++ SOCPERF_PRINT_DEBUG( ++ "%s: found index %d of %d non-empty\n", ++ __func__, j, ++ MEM_TRACKER_max_size(mem_tr2) - ++ 1); ++ found = TRUE; ++ } ++ } ++ // check for 
overlap ++ overlap = (mem_tr1 == mem_tr2) && (j <= i); ++ ++ // if no overlap and no non-empty node was found, then retreat to prev node ++ if (!found && !overlap) { ++ MEM_TRACKER empty_tr = ++ mem_tr2; // keep track of empty node ++ ++ mem_tr2 = MEM_TRACKER_prev(mem_tr2); ++ m = MEM_TRACKER_max_size(mem_tr2) - 1; ++ mem_tr_tail = mem_tr2; // keep track of new tail ++ // reclaim empty mem_tracker node ++ control_Memory_Tracker_Delete_Node(empty_tr); ++ // keep track of number of node deletions performed ++ d++; ++ } ++ } ++ // all nodes going in reverse direction are empty, so exit ++ if (!found || overlap) { ++ goto finish_compact; ++ } ++ ++ // swap empty node with non-empty node so that "holes" get bubbled towards the end of list ++ MEM_TRACKER_mem_address(mem_tr1, i) = ++ MEM_TRACKER_mem_address(mem_tr2, j); ++ MEM_TRACKER_mem_size(mem_tr1, i) = ++ MEM_TRACKER_mem_size(mem_tr2, j); ++ MEM_TRACKER_mem_vmalloc(mem_tr1, i) = ++ MEM_TRACKER_mem_vmalloc(mem_tr2, j); ++ ++ MEM_TRACKER_mem_address(mem_tr2, j) = NULL; ++ MEM_TRACKER_mem_size(mem_tr2, j) = 0; ++ MEM_TRACKER_mem_vmalloc(mem_tr2, j) = FALSE; ++ ++ // keep track of number of memory compactions performed ++ c++; ++ ++ // start new search starting from next element in mem_tr1 ++ n = i + 1; ++ ++ // start new search starting from prev element in mem_tr2 ++ m = j - 1; ++ } ++ ++finish_compact: ++ spin_unlock(&mem_tr_lock); ++ ++ SOCPERF_PRINT_DEBUG( ++ "%s: number of elements compacted = %d, nodes deleted = %d\n", ++ __func__, c, d); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn PVOID SOCPERF_Allocate_Memory(size) ++ * ++ * @param IN size - size of the memory to allocate ++ * ++ * @returns char* - pointer to the allocated memory block ++ * ++ * @brief Allocate and zero memory ++ * ++ * Special Notes: ++ * Allocate memory in the GFP_KERNEL pool. 
++ * ++ * Use this if memory is to be allocated within a context where ++ * the allocator can block the allocation (e.g., by putting ++ * the caller to sleep) while it tries to free up memory to ++ * satisfy the request. Otherwise, if the allocation must ++ * occur atomically (e.g., caller cannot sleep), then use ++ * SOCPERF_Allocate_KMemory instead. ++ */ ++PVOID SOCPERF_Allocate_Memory(size_t size) ++{ ++ U32 status; ++ PVOID location; ++ ++ if (size <= 0) { ++ return NULL; ++ } ++ ++ // determine whether to use mem_tracker or not ++ if (size < MAX_KMALLOC_SIZE) { ++ location = (PVOID)kmalloc(size, GFP_KERNEL); ++ SOCPERF_PRINT_DEBUG( ++ "%s: allocated small memory (0x%p, %d)\n", ++ __func__, location, (S32)size); ++ } else { ++ location = (PVOID)vmalloc(size); ++ if (location) { ++ status = control_Memory_Tracker_Add(location, size, ++ TRUE); ++ SOCPERF_PRINT_DEBUG( ++ "%s: - allocated *large* memory (0x%p, %d)\n", ++ __func__, location, (S32)size); ++ if (status != OS_SUCCESS) { ++ // failed to track in mem_tracker, so free up memory and return NULL ++ vfree(location); ++ SOCPERF_PRINT_ERROR( ++ "%s: - able to allocate, but failed to track via MEM_TRACKER ... freeing\n", ++ __func__); ++ return NULL; ++ } ++ } ++ } ++ ++ if (!location) { ++ SOCPERF_PRINT_ERROR( ++ "%s: failed for size %d bytes\n", ++ __func__, (S32)size); ++ return NULL; ++ } ++ ++ memset(location, 0, size); ++ ++ return location; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn PVOID SOCPERF_Allocate_KMemory(size) ++ * ++ * @param IN size - size of the memory to allocate ++ * ++ * @returns char* - pointer to the allocated memory block ++ * ++ * @brief Allocate and zero memory ++ * ++ * Special Notes: ++ * Allocate memory in the GFP_ATOMIC pool. 
++ * ++ * Use this if memory is to be allocated within a context where ++ * the allocator cannot block the allocation (e.g., by putting ++ * the caller to sleep) as it tries to free up memory to ++ * satisfy the request. Examples include interrupt handlers, ++ * process context code holding locks, etc. ++ */ ++PVOID SOCPERF_Allocate_KMemory(size_t size) ++{ ++ U32 status; ++ PVOID location; ++ ++ if (size <= 0) { ++ return NULL; ++ } ++ ++ if (size < MAX_KMALLOC_SIZE) { ++ location = (PVOID)kmalloc(size, GFP_ATOMIC); ++ SOCPERF_PRINT_DEBUG( ++ "%s: allocated small memory (0x%p, %d)\n", ++ __func__, location, (S32)size); ++ } else { ++ location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size)); ++ status = control_Memory_Tracker_Add(location, size, FALSE); ++ SOCPERF_PRINT_DEBUG( ++ "%s: allocated large memory (0x%p, %d)\n", ++ __func__, location, (S32)size); ++ if (status != OS_SUCCESS) { ++ // failed to track in mem_tracker, so free up memory and return NULL ++ free_pages((unsigned long)location, get_order(size)); ++ SOCPERF_PRINT_ERROR( ++ "%s: - able to allocate, but failed to track via MEM_TRACKER ... freeing\n", __func__); ++ return NULL; ++ } ++ } ++ ++ if (!location) { ++ SOCPERF_PRINT_ERROR( ++ "%s: failed for size %d bytes\n", ++ __func__, (S32)size); ++ return NULL; ++ } ++ ++ memset(location, 0, size); ++ ++ return location; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/* ++ * @fn PVOID SOCPERF_Free_Memory(location) ++ * ++ * @param IN location - size of the memory to allocate ++ * ++ * @returns pointer to the allocated memory block ++ * ++ * @brief Frees the memory block ++ * ++ * Special Notes: ++ * Does not try to free memory if fed with a NULL pointer ++ * Expected usage: ++ * ptr = SOCPERF_Free_Memory(ptr); ++ * Does not do compaction ... can have "holes" in ++ * mem_tracker list after this operation. 
++ */ ++PVOID SOCPERF_Free_Memory(PVOID location) ++{ ++ S32 i; ++ DRV_BOOL found; ++ MEM_TRACKER mem_tr; ++ ++ if (!location) { ++ return NULL; ++ } ++ ++ spin_lock(&mem_tr_lock); ++ ++ // scan through mem_tracker nodes for matching entry (if any) ++ mem_tr = mem_tr_head; ++ found = FALSE; ++ while (mem_tr) { ++ for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) { ++ if (location == MEM_TRACKER_mem_address(mem_tr, i)) { ++ SOCPERF_PRINT_DEBUG( ++ "%s: freeing large memory location 0x%p\n", ++ __func__, location); ++ found = TRUE; ++ if (MEM_TRACKER_mem_vmalloc(mem_tr, i)) { ++ vfree(location); ++ } else { ++ free_pages( ++ (unsigned long)location, ++ get_order(MEM_TRACKER_mem_size( ++ mem_tr, i))); ++ } ++ MEM_TRACKER_mem_address(mem_tr, i) = NULL; ++ MEM_TRACKER_mem_size(mem_tr, i) = 0; ++ MEM_TRACKER_mem_vmalloc(mem_tr, i) = FALSE; ++ goto finish_free; ++ } ++ } ++ mem_tr = MEM_TRACKER_next(mem_tr); ++ } ++ ++finish_free: ++ spin_unlock(&mem_tr_lock); ++ ++ // must have been of smaller than the size limit for mem tracker nodes ++ if (!found) { ++ SOCPERF_PRINT_DEBUG( ++ "%s: freeing small memory location 0x%p\n", ++ __func__, location); ++ kfree(location); ++ } ++ ++ return NULL; ++} +diff --git a/drivers/platform/x86/socperf/haswellunc_sa.c b/drivers/platform/x86/socperf/haswellunc_sa.c +new file mode 100644 +index 000000000000..9b487b25f101 +--- /dev/null ++++ b/drivers/platform/x86/socperf/haswellunc_sa.c +@@ -0,0 +1,407 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2011-2019 Intel Corporation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2011-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "socperfdrv.h" ++#include "control.h" ++#include "haswellunc_sa.h" ++#include "ecb_iterators.h" ++#include "inc/pci.h" ++ ++static U64 counter_virtual_address; ++static U32 counter_overflow[HSWUNC_SA_MAX_COUNTERS]; ++extern LWPMU_DEVICE device_uncore; ++static U32 device_id; ++ ++/*! ++ * @fn static VOID hswunc_sa_Write_PMU(VOID*) ++ * ++ * @brief Initial write of PMU registers ++ * Walk through the entries and write the value of the register accordingly. 
++ * When current_group = 0, then this is the first time this routine is called, ++ * ++ * @param param - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID hswunc_sa_Write_PMU(VOID *param) ++{ ++ U32 dev_idx = *((U32 *)param); ++ U32 cur_grp = LWPMU_DEVICE_cur_group(device_uncore); ++ ECB pecb = LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; ++ DRV_PCI_DEVICE_ENTRY dpden; ++ U32 pci_address; ++ U32 bar_lo; ++ U64 bar_hi; ++ U64 final_bar; ++ U64 physical_address; ++ U32 dev_index = 0; ++ S32 bar_list[HSWUNC_SA_MAX_PCI_DEVICES]; ++ U32 bar_index = 0; ++ U64 gdxc_bar = 0; ++ U32 map_size = 0; ++ U64 virtual_address = 0; ++ U64 mmio_offset = 0; ++ U32 bar_name = 0; ++ DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; ++ U32 next_bar_offset = 0; ++ U32 i = 0; ++ ++ for (dev_index = 0; dev_index < HSWUNC_SA_MAX_PCI_DEVICES; ++ dev_index++) { ++ bar_list[dev_index] = -1; ++ } ++ ++ device_id = dev_idx; ++ // initialize the CHAP per-counter overflow numbers ++ for (i = 0; i < HSWUNC_SA_MAX_COUNTERS; i++) { ++ counter_overflow[i] = 0; ++ socperf_pcb[0].last_uncore_count[i] = 0; ++ } ++ ++ ECB_pcidev_entry_list(pecb) = (DRV_PCI_DEVICE_ENTRY)( ++ (S8 *)pecb + ECB_pcidev_list_offset(pecb)); ++ dpden = ECB_pcidev_entry_list(pecb); ++ ++ if (counter_virtual_address) { ++ for (i = 0; i < ECB_num_entries(pecb); i++) { ++ writel(HSWUNC_SA_CHAP_STOP, ++ (void __iomem *)(((char *)(UIOP)counter_virtual_address) + ++ HSWUNC_SA_CHAP_CTRL_REG_OFFSET + ++ i * 0x10)); ++ } ++ } ++ ++ for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); ++ dev_index++) { ++ curr_pci_entry = &dpden[dev_index]; ++ mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ curr_pci_entry); ++ bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); ++ if (DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) == ++ UNC_PCICFG) { ++ pci_address = FORM_PCI_ADDR( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ 
DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ mmio_offset); ++ SOCPERF_PCI_Write_Ulong( ++ pci_address, ++ DRV_PCI_DEVICE_ENTRY_value(curr_pci_entry)); ++ continue; ++ } ++ // UNC_MMIO programming ++ if (bar_list[bar_name] != -1) { ++ bar_index = bar_list[bar_name]; ++ virtual_address = DRV_PCI_DEVICE_ENTRY_virtual_address( ++ &dpden[bar_index]); ++ DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = ++ DRV_PCI_DEVICE_ENTRY_virtual_address( ++ &dpden[bar_index]); ++ writel(DRV_PCI_DEVICE_ENTRY_value(curr_pci_entry), ++ (void __iomem *)(((char *)(UIOP)virtual_address) + ++ mmio_offset)); ++ continue; ++ } ++ if (bar_name == UNC_GDXCBAR) { ++ DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry) = ++ gdxc_bar; ++ } else { ++ pci_address = FORM_PCI_ADDR( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_bar_offset( ++ curr_pci_entry)); ++ bar_lo = SOCPERF_PCI_Read_Ulong(pci_address); ++ next_bar_offset = DRV_PCI_DEVICE_ENTRY_bar_offset( ++ curr_pci_entry) + ++ HSWUNC_SA_NEXT_ADDR_OFFSET; ++ pci_address = FORM_PCI_ADDR( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ next_bar_offset); ++ bar_hi = SOCPERF_PCI_Read_Ulong(pci_address); ++ final_bar = ++ (bar_hi << HSWUNC_SA_BAR_ADDR_SHIFT) | bar_lo; ++ final_bar &= HSWUNC_SA_BAR_ADDR_MASK; ++ ++ DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry) = ++ final_bar; ++ } ++ physical_address = ++ DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry); ++ ++ if (physical_address) { ++ if (bar_name == UNC_MCHBAR) { ++ map_size = HSWUNC_SA_MCHBAR_MMIO_PAGE_SIZE; ++ } else if (bar_name == UNC_PCIEXBAR) { ++ map_size = HSWUNC_SA_PCIEXBAR_MMIO_PAGE_SIZE; ++ } else { ++ map_size = HSWUNC_SA_OTHER_BAR_MMIO_PAGE_SIZE; ++ } ++ DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = ++ (U64)(UIOP)ioremap_nocache(physical_address, 
++ map_size); ++ virtual_address = DRV_PCI_DEVICE_ENTRY_virtual_address( ++ curr_pci_entry); ++ ++ if (!gdxc_bar && bar_name == UNC_MCHBAR) { ++ bar_lo = readl( ++ (void __iomem *)((char *)(UIOP)virtual_address + ++ HSWUNC_SA_GDXCBAR_OFFSET_LO)); ++ bar_hi = readl( ++ (void __iomem *)((char *)(UIOP)virtual_address + ++ HSWUNC_SA_GDXCBAR_OFFSET_HI)); ++ gdxc_bar = ++ (bar_hi << HSWUNC_SA_BAR_ADDR_SHIFT) | ++ bar_lo; ++ gdxc_bar = gdxc_bar & HSWUNC_SA_GDXCBAR_MASK; ++ } ++ writel((U32)DRV_PCI_DEVICE_ENTRY_value(curr_pci_entry), ++ (void __iomem *)(((char *)(UIOP)virtual_address) + ++ mmio_offset)); ++ bar_list[bar_name] = dev_index; ++ if (counter_virtual_address == 0 && ++ bar_name == UNC_CHAPADR) { ++ counter_virtual_address = virtual_address; ++ } ++ } ++ } ++} ++ ++/*! ++ * @fn static VOID hswunc_sa_Disable_PMU(PVOID) ++ * ++ * @brief Unmap the virtual address when sampling/driver stops ++ * ++ * @param param - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID hswunc_sa_Disable_PMU(PVOID param) ++{ ++ DRV_PCI_DEVICE_ENTRY dpden; ++ U32 dev_index = 0; ++ U32 cur_grp = LWPMU_DEVICE_cur_group(device_uncore); ++ ECB pecb = LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; ++ U32 i = 0; ++ ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_PREPARE_STOP) { ++ if (counter_virtual_address) { ++ for (i = 0; i < ECB_num_entries(pecb); i++) { ++ writel(HSWUNC_SA_CHAP_STOP, ++ (void __iomem *)(((char *)(UIOP) ++ counter_virtual_address) + ++ HSWUNC_SA_CHAP_CTRL_REG_OFFSET + ++ i * 0x10)); ++ } ++ } ++ ++ dpden = ECB_pcidev_entry_list(pecb); ++ for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); ++ dev_index++) { ++ if (DRV_PCI_DEVICE_ENTRY_config_type( ++ &dpden[dev_index]) == UNC_MMIO && ++ DRV_PCI_DEVICE_ENTRY_bar_address( ++ &dpden[dev_index]) != 0) { ++ iounmap((void __iomem *)(UIOP)( ++ DRV_PCI_DEVICE_ENTRY_virtual_address( ++ &dpden[dev_index]))); ++ } ++ } ++ counter_virtual_address = 0; ++ } ++} ++ 
++/*! ++ * @fn static VOID hswunc_sa_Initialize(PVOID) ++ * ++ * @brief Initialize any registers or addresses ++ * ++ * @param param ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID hswunc_sa_Initialize(VOID *param) ++{ ++ counter_virtual_address = 0; ++} ++ ++/*! ++ * @fn static VOID hswunc_sa_Clean_Up(PVOID) ++ * ++ * @brief Reset any registers or addresses ++ * ++ * @param param ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID hswunc_sa_Clean_Up(VOID *param) ++{ ++ counter_virtual_address = 0; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn hswunc_sa_Read_Data(param, id) ++ * ++ * @param data_buffer data buffer to read data into ++ * ++ * @return None No return needed ++ * ++ * @brief Read the Uncore count data and store into the buffer param; ++ * ++ */ ++static VOID hswunc_sa_Read_Data(PVOID data_buffer) ++{ ++ U32 event_id = 0; ++ U64 *data; ++ int data_index; ++ U32 data_val = 0; ++ U64 total_count = 0; ++ U32 cur_grp = LWPMU_DEVICE_cur_group(device_uncore); ++ ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_UNINITIALIZED || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_IDLE || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_RESERVED || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_PREPARE_STOP || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_STOPPED) { ++ SOCPERF_PRINT_ERROR("ERROR: RETURING EARLY from Read_Data\n"); ++ return; ++ } ++ if (data_buffer == NULL) { ++ return; ++ } ++ data = (U64 *)data_buffer; ++ data_index = 0; ++ // group id ++ data[data_index] = cur_grp + 1; ++ data_index++; ++ ++ FOR_EACH_PCI_DATA_REG_RAW(pecb, i, dev_idx) ++ { ++ //event_id = ECB_entries_event_id_index_local(pecb, i); ++ if (counter_virtual_address) { ++ writel(HSWUNC_SA_CHAP_SAMPLE_DATA, ++ (void __iomem *)(((char *)(UIOP)counter_virtual_address) + ++ 
HSWUNC_SA_CHAP_CTRL_REG_OFFSET + ++ i * 0x10)); ++ data_val = readl((void __iomem *) ++ ((char *)(UIOP)(counter_virtual_address) + ++ ECB_entries_reg_offset(pecb, i))); ++ } ++ ++ if (data_val < socperf_pcb[0].last_uncore_count[i]) { ++ counter_overflow[i]++; ++ } ++ socperf_pcb[0].last_uncore_count[i] = data_val; ++ ++ total_count = ++ data_val + counter_overflow[i] * HSWUNC_SA_MAX_COUNT; ++ data[data_index + event_id] = total_count; ++ SOCPERF_PRINT_DEBUG("DATA=%u\n", data_val); ++ event_id++; ++ } ++ END_FOR_EACH_PCI_DATA_REG_RAW; ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++DISPATCH_NODE socperf_hswunc_sa_dispatch = { ++ .init = hswunc_sa_Initialize, // initialize ++ .fini = NULL, // destroy ++ .write = hswunc_sa_Write_PMU, // write ++ .freeze = hswunc_sa_Disable_PMU, // freeze ++ .restart = NULL, // restart ++ .read_data = NULL, // read ++ .check_overflow = NULL, // check for overflow ++ .swap_group = NULL, ++ .read_lbrs = NULL, ++ .clean_up = hswunc_sa_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, //read_counts ++ .check_overflow_gp_errata = NULL, ++ .read_power = NULL, ++ .platform_info = NULL, ++ .trigger_read = NULL, ++ .read_current_data = hswunc_sa_Read_Data, ++ .create_mem = NULL, ++ .check_status = NULL, ++ .read_mem = NULL, ++ .stop_mem = NULL ++}; +diff --git a/drivers/platform/x86/socperf/inc/control.h b/drivers/platform/x86/socperf/inc/control.h +new file mode 100644 +index 000000000000..0f44b85d76dc +--- /dev/null ++++ b/drivers/platform/x86/socperf/inc/control.h +@@ -0,0 +1,467 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++ ++#ifndef _CONTROL_H_ ++#define _CONTROL_H_ ++ ++#include ++#include ++#if defined(DRV_IA32) ++#include ++#endif ++#include ++#if defined(DRV_IA32) ++#include ++#endif ++#include ++ ++#include "lwpmudrv_defines.h" ++#include "socperfdrv.h" ++#include "lwpmudrv_types.h" ++ ++// large memory allocation will be used if the requested size (in bytes) is ++// above this threshold ++#define MAX_KMALLOC_SIZE ((1 << 17) - 1) ++ ++// check whether Linux driver should use unlocked ioctls (not protected by BKL) ++#if defined(HAVE_UNLOCKED_IOCTL) ++#define DRV_USE_UNLOCKED_IOCTL ++#endif ++#if defined(DRV_USE_UNLOCKED_IOCTL) ++#define IOCTL_OP .unlocked_ioctl ++#define IOCTL_OP_TYPE long ++#define IOCTL_USE_INODE ++#else ++#define IOCTL_OP .ioctl ++#define IOCTL_OP_TYPE S32 ++#define IOCTL_USE_INODE struct inode *inode, ++#endif ++ ++// Information about the state of the driver ++typedef struct GLOBAL_STATE_NODE_S GLOBAL_STATE_NODE; ++typedef GLOBAL_STATE_NODE *GLOBAL_STATE; ++struct GLOBAL_STATE_NODE_S { ++ volatile S32 cpu_count; ++ volatile S32 dpc_count; ++ ++ S32 num_cpus; // Number of CPUs in the system ++ S32 active_cpus; // Number of active CPUs - some cores can be ++ // deactivated by the user / admin ++ S32 num_em_groups; ++ S32 num_descriptors; ++ volatile S32 current_phase; ++}; ++ ++// Access Macros ++#define 
GLOBAL_STATE_num_cpus(x) ((x).num_cpus) ++#define GLOBAL_STATE_active_cpus(x) ((x).active_cpus) ++#define GLOBAL_STATE_cpu_count(x) ((x).cpu_count) ++#define GLOBAL_STATE_dpc_count(x) ((x).dpc_count) ++#define GLOBAL_STATE_num_em_groups(x) ((x).num_em_groups) ++#define GLOBAL_STATE_num_descriptors(x) ((x).num_descriptors) ++#define GLOBAL_STATE_current_phase(x) ((x).current_phase) ++#define GLOBAL_STATE_sampler_id(x) ((x).sampler_id) ++ ++/* ++ * ++ * ++ * CPU State data structure and access macros ++ * ++ */ ++typedef struct CPU_STATE_NODE_S CPU_STATE_NODE; ++typedef CPU_STATE_NODE * CPU_STATE; ++struct CPU_STATE_NODE_S { ++ S32 apic_id; // Processor ID on the system bus ++ PVOID apic_linear_addr; // linear address of local apic ++ PVOID apic_physical_addr; // physical address of local apic ++ ++ PVOID idt_base; // local IDT base address ++ atomic_t in_interrupt; ++ ++#if defined(DRV_IA32) ++ U64 saved_ih; // saved perfvector to restore ++#endif ++#if defined(DRV_EM64T) ++ PVOID saved_ih; // saved perfvector to restore ++#endif ++ ++ S64 * em_tables; // holds the data that is saved/restored ++ // during event multiplexing ++ ++ struct timer_list *em_timer; ++ U32 current_group; ++ S32 trigger_count; ++ S32 trigger_event_num; ++ ++ DISPATCH dispatch; ++ PVOID lbr_area; ++ PVOID old_dts_buffer; ++ PVOID dts_buffer; ++ U32 initial_mask; ++ U32 accept_interrupt; ++ ++#if defined(BUILD_CHIPSET) ++ // Chipset counter stuff ++ U32 chipset_count_init; // flag to initialize the last MCH and ICH arrays below. ++ U64 last_mch_count[8]; ++ U64 last_ich_count[8]; ++ U64 last_gmch_count[MAX_CHIPSET_COUNTERS]; ++ U64 last_mmio_count ++ [32]; // it's only 9 now but the next generation may have 29. 
++#endif ++ ++ U64 *pmu_state; // holds PMU state (e.g., MSRs) that will be ++ // saved before and restored after collection ++ S32 socket_master; ++ S32 core_master; ++ S32 thr_master; ++ U64 num_samples; ++ U64 reset_mask; ++ U64 group_swap; ++ U64 last_uncore_count[16]; ++}; ++ ++#define CPU_STATE_apic_id(cpu) ((cpu)->apic_id) ++#define CPU_STATE_apic_linear_addr(cpu) ((cpu)->apic_linear_addr) ++#define CPU_STATE_apic_physical_addr(cpu) ((cpu)->apic_physical_addr) ++#define CPU_STATE_idt_base(cpu) ((cpu)->idt_base) ++#define CPU_STATE_in_interrupt(cpu) ((cpu)->in_interrupt) ++#define CPU_STATE_saved_ih(cpu) ((cpu)->saved_ih) ++#define CPU_STATE_saved_ih_hi(cpu) ((cpu)->saved_ih_hi) ++#define CPU_STATE_dpc(cpu) ((cpu)->dpc) ++#define CPU_STATE_em_tables(cpu) ((cpu)->em_tables) ++#define CPU_STATE_pmu_state(cpu) ((cpu)->pmu_state) ++#define CPU_STATE_em_dpc(cpu) ((cpu)->em_dpc) ++#define CPU_STATE_em_timer(cpu) ((cpu)->em_timer) ++#define CPU_STATE_current_group(cpu) ((cpu)->current_group) ++#define CPU_STATE_trigger_count(cpu) ((cpu)->trigger_count) ++#define CPU_STATE_trigger_event_num(cpu) ((cpu)->trigger_event_num) ++#define CPU_STATE_dispatch(cpu) ((cpu)->dispatch) ++#define CPU_STATE_lbr(cpu) ((cpu)->lbr) ++#define CPU_STATE_old_dts_buffer(cpu) ((cpu)->old_dts_buffer) ++#define CPU_STATE_dts_buffer(cpu) ((cpu)->dts_buffer) ++#define CPU_STATE_initial_mask(cpu) ((cpu)->initial_mask) ++#define CPU_STATE_accept_interrupt(cpu) ((cpu)->accept_interrupt) ++#define CPU_STATE_msr_value(cpu) ((cpu)->msr_value) ++#define CPU_STATE_msr_addr(cpu) ((cpu)->msr_addr) ++#define CPU_STATE_socket_master(cpu) ((cpu)->socket_master) ++#define CPU_STATE_core_master(cpu) ((cpu)->core_master) ++#define CPU_STATE_thr_master(cpu) ((cpu)->thr_master) ++#define CPU_STATE_num_samples(cpu) ((cpu)->num_samples) ++#define CPU_STATE_reset_mask(cpu) ((cpu)->reset_mask) ++#define CPU_STATE_group_swap(cpu) ((cpu)->group_swap) ++ ++/* ++ * For storing data for --read/--write-msr command line 
options ++ */ ++typedef struct MSR_DATA_NODE_S MSR_DATA_NODE; ++typedef MSR_DATA_NODE * MSR_DATA; ++struct MSR_DATA_NODE_S { ++ U64 value; // Used for emon, for read/write-msr value ++ U64 addr; ++}; ++ ++#define MSR_DATA_value(md) ((md)->value) ++#define MSR_DATA_addr(md) ((md)->addr) ++ ++/* ++ * Memory Allocation tracker ++ * ++ * Currently used to track large memory allocations ++ */ ++ ++typedef struct MEM_EL_NODE_S MEM_EL_NODE; ++typedef MEM_EL_NODE * MEM_EL; ++struct MEM_EL_NODE_S { ++ char *address; // pointer to piece of memory we're tracking ++ S32 size; // size (bytes) of the piece of memory ++ DRV_BOOL is_addr_vmalloc; // flag to check if the memory is allocated using vmalloc ++}; ++ ++// accessors for MEM_EL defined in terms of MEM_TRACKER below ++ ++#define MEM_EL_MAX_ARRAY_SIZE 32 // minimum is 1, nominal is 64 ++ ++typedef struct MEM_TRACKER_NODE_S MEM_TRACKER_NODE; ++typedef MEM_TRACKER_NODE * MEM_TRACKER; ++struct MEM_TRACKER_NODE_S { ++ S32 max_size; // number of elements in the array (default: MEM_EL_MAX_ARRAY_SIZE) ++ MEM_EL mem; // array of large memory items we're tracking ++ MEM_TRACKER prev, ++ next; // enables bi-directional scanning of linked list ++}; ++#define MEM_TRACKER_max_size(mt) ((mt)->max_size) ++#define MEM_TRACKER_mem(mt) ((mt)->mem) ++#define MEM_TRACKER_prev(mt) ((mt)->prev) ++#define MEM_TRACKER_next(mt) ((mt)->next) ++#define MEM_TRACKER_mem_address(mt, i) (MEM_TRACKER_mem(mt)[(i)].address) ++#define MEM_TRACKER_mem_size(mt, i) (MEM_TRACKER_mem(mt)[(i)].size) ++#define MEM_TRACKER_mem_vmalloc(mt, i) \ ++ (MEM_TRACKER_mem(mt)[(i)].is_addr_vmalloc) ++ ++/**************************************************************************** ++ ** Global State variables exported ++ ***************************************************************************/ ++extern CPU_STATE socperf_pcb; ++extern U64 *tsc_info; ++extern GLOBAL_STATE_NODE socperf_driver_state; ++extern MSR_DATA msr_data; ++extern U32 *core_to_package_map; ++extern U32 
num_packages; ++extern U64 *restore_bl_bypass; ++extern U32 **restore_ha_direct2core; ++extern U32 **restore_qpi_direct2core; ++/**************************************************************************** ++ ** Handy Short cuts ++ ***************************************************************************/ ++ ++/* ++ * SOCPERF_THIS_CPU() ++ * Parameters ++ * None ++ * Returns ++ * CPU number of the processor being executed on ++ * ++ */ ++#define SOCPERF_THIS_CPU() smp_processor_id() ++ ++/**************************************************************************** ++ ** Interface definitions ++ ***************************************************************************/ ++ ++/* ++ * Execution Control Functions ++ */ ++ ++VOID SOCPERF_Invoke_Cpu(S32 cpuid, VOID (*func)(PVOID), PVOID ctx); ++ ++/* ++ * @fn VOID SOCPERF_Invoke_Parallel_Service(func, ctx, blocking, exclude) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * @param blocking - Wait for invoked function to complete ++ * @param exclude - exclude the current core from executing the code ++ * ++ * @returns none ++ * ++ * @brief Service routine to handle all kinds of parallel invoke on all CPU calls ++ * ++ * Special Notes: ++ * Invoke the function provided in parallel in either a blocking/non-blocking mode. ++ * The current core may be excluded if desired. ++ * NOTE - Do not call this function directly from source code. Use the aliases ++ * SOCPERF_Invoke_Parallel(), SOCPERF_Invoke_Parallel_NB(), SOCPERF_Invoke_Parallel_XS(). 
++ * ++ */ ++extern VOID SOCPERF_Invoke_Parallel_Service(VOID (*func)(PVOID), PVOID ctx, ++ S32 blocking, S32 exclude); ++ ++/* ++ * @fn VOID SOCPERF_Invoke_Parallel(func, ctx) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * ++ * @returns none ++ * ++ * @brief Invoke the named function in parallel. Wait for all the functions to complete. ++ * ++ * Special Notes: ++ * Invoke the function named in parallel, including the CPU that the control is ++ * being invoked on ++ * Macro built on the service routine ++ * ++ */ ++#define SOCPERF_Invoke_Parallel(a, b) \ ++ SOCPERF_Invoke_Parallel_Service((a), (b), TRUE, FALSE) ++ ++/* ++ * @fn VOID SOCPERF_Invoke_Parallel_NB(func, ctx) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * ++ * @returns none ++ * ++ * @brief Invoke the named function in parallel. DO NOT Wait for all the functions to complete. ++ * ++ * Special Notes: ++ * Invoke the function named in parallel, including the CPU that the control is ++ * being invoked on ++ * Macro built on the service routine ++ * ++ */ ++#define SOCPERF_Invoke_Parallel_NB(a, b) \ ++ SOCPERF_Invoke_Parallel_Service((a), (b), FALSE, FALSE) ++ ++/* ++ * @fn VOID SOCPERF_Invoke_Parallel_XS(func, ctx) ++ * ++ * @param func - function to be invoked by each core in the system ++ * @param ctx - pointer to the parameter block for each function invocation ++ * ++ * @returns none ++ * ++ * @brief Invoke the named function in parallel. Wait for all the functions to complete. 
++ * ++ * Special Notes: ++ * Invoke the function named in parallel, excluding the CPU that the control is ++ * being invoked on ++ * Macro built on the service routine ++ * ++ */ ++#define SOCPERF_Invoke_Parallel_XS(a, b) \ ++ SOCPERF_Invoke_Parallel_Service((a), (b), TRUE, TRUE) ++ ++/* ++ * @fn VOID SOCPERF_Memory_Tracker_Init(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Initializes Memory Tracker ++ * ++ * Special Notes: ++ * This should only be called when the ++ * the driver is being loaded. ++ */ ++extern VOID SOCPERF_Memory_Tracker_Init(VOID); ++ ++/* ++ * @fn VOID SOCPERF_Memory_Tracker_Free(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Frees memory used by Memory Tracker ++ * ++ * Special Notes: ++ * This should only be called when the ++ * driver is being unloaded. ++ */ ++extern VOID SOCPERF_Memory_Tracker_Free(VOID); ++ ++/* ++ * @fn VOID SOCPERF_Memory_Tracker_Compaction(void) ++ * ++ * @param None ++ * ++ * @returns None ++ * ++ * @brief Compacts the memory allocator if holes are detected ++ * ++ * Special Notes: ++ * At end of collection (or at other safe sync point), ++ * reclaim/compact space used by mem tracker ++ */ ++extern VOID SOCPERF_Memory_Tracker_Compaction(void); ++ ++/* ++ * @fn PVOID SOCPERF_Allocate_Memory(size) ++ * ++ * @param IN size - size of the memory to allocate ++ * ++ * @returns char* - pointer to the allocated memory block ++ * ++ * @brief Allocate and zero memory ++ * ++ * Special Notes: ++ * Allocate memory in the GFP_KERNEL pool. ++ * ++ * Use this if memory is to be allocated within a context where ++ * the allocator can block the allocation (e.g., by putting ++ * the caller to sleep) while it tries to free up memory to ++ * satisfy the request. Otherwise, if the allocation must ++ * occur atomically (e.g., caller cannot sleep), then use ++ * SOCPERF_Allocate_KMemory instead. 
++ */ ++extern PVOID SOCPERF_Allocate_Memory(size_t size); ++ ++/* ++ * @fn PVOID SOCPERF_Allocate_KMemory(size) ++ * ++ * @param IN size - size of the memory to allocate ++ * ++ * @returns char* - pointer to the allocated memory block ++ * ++ * @brief Allocate and zero memory ++ * ++ * Special Notes: ++ * Allocate memory in the GFP_ATOMIC pool. ++ * ++ * Use this if memory is to be allocated within a context where ++ * the allocator cannot block the allocation (e.g., by putting ++ * the caller to sleep) as it tries to free up memory to ++ * satisfy the request. Examples include interrupt handlers, ++ * process context code holding locks, etc. ++ */ ++extern PVOID SOCPERF_Allocate_KMemory(size_t size); ++ ++/* ++ * @fn PVOID SOCPERF_Free_Memory(location) ++ * ++ * @param IN location - size of the memory to allocate ++ * ++ * @returns pointer to the allocated memory block ++ * ++ * @brief Frees the memory block ++ * ++ * Special Notes: ++ * Does not try to free memory if fed with a NULL pointer ++ * Expected usage: ++ * ptr = SOCPERF_Free_Memory(ptr); ++ */ ++extern PVOID SOCPERF_Free_Memory(PVOID location); ++ ++#endif +diff --git a/drivers/platform/x86/socperf/inc/ecb_iterators.h b/drivers/platform/x86/socperf/inc/ecb_iterators.h +new file mode 100644 +index 000000000000..564248909e99 +--- /dev/null ++++ b/drivers/platform/x86/socperf/inc/ecb_iterators.h +@@ -0,0 +1,130 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * *********************************************************************************************** ++ */ ++ ++ ++#ifndef _ECB_ITERATORS_H_ ++#define _ECB_ITERATORS_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++// ++// Loop macros to walk through the event control block ++// Use for access only in the kernel mode ++// To Do - Control access from kernel mode by a macro ++// ++ ++#define FOR_EACH_PCI_DATA_REG_RAW(pecb, i, device_idx) \ ++ { \ ++ U32(i) = 0; \ ++ U32(cur_grp) = LWPMU_DEVICE_cur_group(device_uncore); \ ++ ECB(pecb) = LWPMU_DEVICE_PMU_register_data( \ ++ device_uncore)[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((i) = ECB_operations_register_start( \ ++ pecb, PMU_OPERATION_READ); \ ++ (i) < ECB_operations_register_start( \ ++ pecb, PMU_OPERATION_READ) + \ ++ ECB_operations_register_len( \ ++ pecb, PMU_OPERATION_READ); \ ++ (i)++) { \ ++ if (ECB_entries_reg_offset((pecb), (i)) == \ ++ 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_PCI_DATA_REG_RAW \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_PCI_REG_RAW(pecb, i, device_idx) \ ++ { \ ++ U32(i) = 0; \ ++ U32(cur_grp) = LWPMU_DEVICE_cur_group(device_uncore); \ ++ ECB(pecb) = LWPMU_DEVICE_PMU_register_data( \ ++ device_uncore)[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((i) = 0; (i) < ECB_num_entries(pecb); (i)++) { \ ++ if (ECB_entries_reg_offset((pecb), (i)) == \ ++ 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_PCI_REG_RAW \ ++ } \ ++ } \ ++ } ++ ++#define FOR_EACH_REG_ENTRY_UNC(pecb, device_idx, idx) \ ++ { \ ++ U32(idx); \ ++ U32(cur_grp) = LWPMU_DEVICE_cur_group(device_uncore); \ ++ ECB(pecb) = LWPMU_DEVICE_PMU_register_data( \ ++ device_uncore)[(cur_grp)]; \ ++ if ((pecb)) { \ ++ for ((idx) = 0; (idx) < ECB_num_entries(pecb); \ ++ (idx)++) { \ ++ if (ECB_entries_bus_no((pecb), (idx)) == 0 && \ ++ ECB_entries_reg_id((pecb), (idx)) == 0) { \ ++ continue; \ ++ } ++ ++#define END_FOR_EACH_REG_ENTRY_UNC \ ++ } \ ++ } \ ++ } ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff 
--git a/drivers/platform/x86/socperf/inc/haswellunc_sa.h b/drivers/platform/x86/socperf/inc/haswellunc_sa.h +new file mode 100644 +index 000000000000..a5ad8e477170 +--- /dev/null ++++ b/drivers/platform/x86/socperf/inc/haswellunc_sa.h +@@ -0,0 +1,79 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2011-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2011-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++ ++#ifndef _HSWUNC_SA_H_INC_ ++#define _HSWUNC_SA_H_INC_ ++ ++/* ++ * Local to this architecture: Haswell uncore SA unit ++ * ++ */ ++#define HSWUNC_SA_DESKTOP_DID 0x000C04 ++#define HSWUNC_SA_NEXT_ADDR_OFFSET 4 ++#define HSWUNC_SA_BAR_ADDR_SHIFT 32 ++#define HSWUNC_SA_BAR_ADDR_MASK 0x0007FFFFFF000LL ++#define HSWUNC_SA_MAX_PCI_DEVICES 16 ++#define HSWUNC_SA_MAX_COUNT 0x00000000FFFFFFFFLL ++#define HSWUNC_SA_MAX_COUNTERS 8 ++ ++#define HSWUNC_SA_MCHBAR_MMIO_PAGE_SIZE (8 * 4096) ++#define HSWUNC_SA_PCIEXBAR_MMIO_PAGE_SIZE (57 * 4096) ++#define HSWUNC_SA_OTHER_BAR_MMIO_PAGE_SIZE 4096 ++#define HSWUNC_SA_GDXCBAR_OFFSET_LO 0x5420 ++#define HSWUNC_SA_GDXCBAR_OFFSET_HI 0x5424 ++#define HSWUNC_SA_GDXCBAR_MASK 0x7FFFFFF000LL ++#define HSWUNC_SA_CHAP_SAMPLE_DATA 0x00020000 ++#define HSWUNC_SA_CHAP_STOP 0x00040000 ++#define HSWUNC_SA_CHAP_CTRL_REG_OFFSET 0x0 ++ ++extern DISPATCH_NODE socperf_hswunc_sa_dispatch; ++ ++#endif +diff --git a/drivers/platform/x86/socperf/inc/npk_uncore.h b/drivers/platform/x86/socperf/inc/npk_uncore.h +new file mode 100644 +index 
000000000000..c70214136886 +--- /dev/null ++++ b/drivers/platform/x86/socperf/inc/npk_uncore.h +@@ -0,0 +1,76 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++ ++#ifndef _NPK_UNCORE_H_INC_ ++#define _NPK_UNCORE_H_INC_ ++ ++/* ++ * Local to this architecture: uncore SA unit ++ * ++ */ ++#define SOC_NPK_UNCORE_NEXT_ADDR_OFFSET 4 ++#define SOC_NPK_UNCORE_BAR_ADDR_SHIFT 32 ++#define SOC_NPK_UNCORE_BAR_ADDR_MASK 0x00FFFFF00000LL ++#define SOC_NPK_UNCORE_MAX_PCI_DEVICES 16 ++#define SOC_NPK_COUNTER_MAX_COUNTERS 16 ++#define SOC_NPK_COUNTER_MAX_COUNT 0x00000000FFFFFFFFLL ++#define SOC_NPK_UNCORE_MCHBAR_ADDR_MASK 0x7FFFFF8000LL ++ ++#define SOC_NPK_UNCORE_NPK_BAR_MMIO_PAGE_SIZE 0x100000 ++#define SOC_NPK_UNCORE_MCHBAR_MMIO_PAGE_SIZE (8 * 4096) ++#define SOC_NPK_UNCORE_SAMPLE_DATA 0x00120000 ++#define SOC_NPK_UNCORE_STOP 0x00040000 ++#define SOC_NPK_UNCORE_CHAP_START 0x00110000 ++#define SOC_NPK_UNCORE_CHAP_CTRL_REG_OFFSET 0x0 ++ ++extern DISPATCH_NODE npk_dispatch; ++ ++#endif +diff --git a/drivers/platform/x86/socperf/inc/pci.h b/drivers/platform/x86/socperf/inc/pci.h +new file mode 100644 +index 000000000000..3e67619815b5 +--- /dev/null ++++ b/drivers/platform/x86/socperf/inc/pci.h +@@ -0,0 +1,103 @@ ++/* 
*********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++ ++#ifndef _PCI_H_ ++#define _PCI_H_ ++ ++#include "lwpmudrv_defines.h" ++ ++/* ++ * PCI Config Address macros ++ */ ++#define PCI_ENABLE 0x80000000 ++ ++#define PCI_ADDR_IO 0xCF8 ++#define PCI_DATA_IO 0xCFC ++ ++#define BIT0 0x1 ++#define BIT1 0x2 ++ ++/* ++ * Macro for forming a PCI configuration address ++ */ ++#define FORM_PCI_ADDR(bus, dev, fun, off) \ ++ (((PCI_ENABLE)) | ((bus & 0xFF) << 16) | ((dev & 0x1F) << 11) | \ ++ ((fun & 0x07) << 8) | ((off & 0xFF) << 0)) ++ ++#define VENDOR_ID_MASK 0x0000FFFF ++#define DEVICE_ID_MASK 0xFFFF0000 ++#define DEVICE_ID_BITSHIFT 16 ++#define LOWER_4_BYTES_MASK 0x00000000FFFFFFFF ++#define MAX_BUSNO 256 ++#define NEXT_ADDR_OFFSET 4 ++#define NEXT_ADDR_SHIFT 32 ++#define DRV_IS_PCI_VENDOR_ID_INTEL 0x8086 ++ ++#define CHECK_IF_GENUINE_INTEL_DEVICE(value, vendor_id, device_id) \ ++ { \ ++ vendor_id = value & VENDOR_ID_MASK; \ ++ device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; \ ++ if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { \ ++ continue; \ ++ } \ ++ } ++ ++#if defined(DRV_IA32) || defined(DRV_EM64T) ++extern int SOCPERF_PCI_Read_From_Memory_Address(U32 addr, U32 *val); ++ ++extern int SOCPERF_PCI_Write_To_Memory_Address(U32 addr, U32 val); ++ ++extern int SOCPERF_PCI_Read_Ulong(U32 pci_address); ++ ++extern void SOCPERF_PCI_Write_Ulong(U32 pci_address, U32 
value); ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/socperf/inc/soc_uncore.h b/drivers/platform/x86/socperf/inc/soc_uncore.h +new file mode 100644 +index 000000000000..5cfe9695da79 +--- /dev/null ++++ b/drivers/platform/x86/socperf/inc/soc_uncore.h +@@ -0,0 +1,85 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++ ++#ifndef _SOC_UNCORE_H_INC_ ++#define _SOC_UNCORE_H_INC_ ++ ++/* ++ * Local to this architecture: SoC uncore unit ++ * ++ */ ++#define SOC_UNCORE_DESKTOP_DID 0x000C04 ++#define SOC_UNCORE_NEXT_ADDR_OFFSET 4 ++#define SOC_UNCORE_BAR_ADDR_SHIFT 32 ++#define SOC_UNCORE_BAR_ADDR_MASK 0x000FFFC00000LL ++#define SOC_UNCORE_MAX_PCI_DEVICES 16 ++#define SOC_UNCORE_MCR_REG_OFFSET 0xD0 ++#define SOC_UNCORE_MDR_REG_OFFSET 0xD4 ++#define SOC_UNCORE_MCRX_REG_OFFSET 0xD8 ++#define SOC_UNCORE_BYTE_ENABLES 0xF ++#define SOC_UNCORE_OP_CODE_SHIFT 24 ++#define SOC_UNCORE_PORT_ID_SHIFT 16 ++#define SOC_UNCORE_OFFSET_HI_MASK 0xFFFFFF00 ++#define SOC_UNCORE_OFFSET_LO_MASK 0xFF ++#define SOC_COUNTER_PORT_ID 23 ++#define SOC_COUNTER_WRITE_OP_CODE 1 ++#define SOC_COUNTER_READ_OP_CODE 0 ++#define UNCORE_MAX_COUNTERS 8 ++#define UNCORE_MAX_COUNT 0x00000000FFFFFFFFLL ++ ++#define SOC_UNCORE_OTHER_BAR_MMIO_PAGE_SIZE 4096 ++#define SOC_UNCORE_SAMPLE_DATA 0x00020000 ++#define SOC_UNCORE_STOP 0x00040000 ++#define SOC_UNCORE_CTRL_REG_OFFSET 0x0 ++ ++extern DISPATCH_NODE 
soc_uncore_dispatch; ++ ++#endif +diff --git a/drivers/platform/x86/socperf/inc/socperfdrv.h b/drivers/platform/x86/socperf/inc/socperfdrv.h +new file mode 100644 +index 000000000000..f90f344edb66 +--- /dev/null ++++ b/drivers/platform/x86/socperf/inc/socperfdrv.h +@@ -0,0 +1,191 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++#ifndef _SOCPERFDRV_H_ ++#define _SOCPERFDRV_H_ ++ ++#include ++#include ++#include ++#include ++#include "lwpmudrv_defines.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_version.h" ++ ++/* ++ * Print macros for driver messages ++ */ ++ ++#if defined(MYDEBUG) ++#define SOCPERF_PRINT_DEBUG(fmt, args...) \ ++ { \ ++ printk(KERN_INFO SOCPERF_MSG_PREFIX " [DEBUG] " fmt, ##args); \ ++ } ++#else ++#define SOCPERF_PRINT_DEBUG(fmt, args...) \ ++ { \ ++ ; \ ++ } ++#endif ++ ++#define SOCPERF_PRINT(fmt, args...) \ ++ { \ ++ printk(KERN_INFO SOCPERF_MSG_PREFIX " " fmt, ##args); \ ++ } ++ ++#define SOCPERF_PRINT_WARNING(fmt, args...) \ ++ { \ ++ printk(KERN_ALERT SOCPERF_MSG_PREFIX " [Warning] " fmt, \ ++ ##args); \ ++ } ++ ++#define SOCPERF_PRINT_ERROR(fmt, args...) 
\ ++ { \ ++ printk(KERN_CRIT SOCPERF_MSG_PREFIX " [ERROR] " fmt, ##args); \ ++ } ++ ++// Macro to return the thread group id ++#define GET_CURRENT_TGID() (current->tgid) ++ ++#if defined(DRV_IA32) || defined(DRV_EM64T) ++#define OVERFLOW_ARGS U64 *, U64 * ++#elif defined(DRV_IA64) ++#define OVERFLOW_ARGS U64 *, U64 *, U64 *, U64 *, U64 *, U64 * ++#endif ++ ++/* ++ * Dispatch table for virtualized functions. ++ * Used to enable common functionality for different ++ * processor microarchitectures ++ */ ++typedef struct DISPATCH_NODE_S DISPATCH_NODE; ++typedef DISPATCH_NODE *DISPATCH; ++ ++struct DISPATCH_NODE_S { ++ VOID (*init)(PVOID); ++ VOID (*fini)(PVOID); ++ VOID (*write)(PVOID); ++ VOID (*freeze)(PVOID); ++ VOID (*restart)(PVOID); ++ VOID (*read_data)(PVOID); ++ VOID (*check_overflow)(VOID); ++ VOID (*swap_group)(DRV_BOOL); ++ VOID (*read_lbrs)(PVOID); ++ VOID (*clean_up)(PVOID); ++ VOID (*hw_errata)(VOID); ++ VOID (*read_power)(PVOID); ++ U64 (*check_overflow_errata)(ECB, U32, U64); ++ VOID (*read_counts)(PVOID, U32); ++ U64 (*check_overflow_gp_errata)(ECB, U64 *); ++ VOID (*read_ro)(PVOID, U32, U32); ++ U64 (*platform_info)(VOID); ++ VOID (*trigger_read)(VOID); ++ // Counter reads triggered/initiated by User mode timer ++ VOID (*read_current_data)(PVOID); ++ VOID (*create_mem)(U32, U64 *); ++ VOID (*check_status)(U64 *, U32 *); ++ VOID (*read_mem)(U64, U64 *, U32); ++ VOID (*stop_mem)(VOID); ++}; ++ ++extern DISPATCH dispatch; ++ ++extern VOID **PMU_register_data; ++extern VOID **desc_data; ++extern U64 *prev_counter_data; ++extern U64 *cur_counter_data; ++ ++/*! 
++ * @struct LWPMU_DEVICE_NODE_S ++ * @brief Struct to hold fields per device ++ * PMU_register_data_unc - MSR info ++ * dispatch_unc - dispatch table ++ * em_groups_counts_unc - # groups ++ * pcfg_unc - config struct ++ */ ++typedef struct LWPMU_DEVICE_NODE_S LWPMU_DEVICE_NODE; ++typedef LWPMU_DEVICE_NODE * LWPMU_DEVICE; ++ ++struct LWPMU_DEVICE_NODE_S { ++ VOID **PMU_register_data_unc; ++ DISPATCH dispatch_unc; ++ S32 em_groups_count_unc; ++ VOID *pcfg_unc; ++ U64 **acc_per_thread; ++ U64 **prev_val_per_thread; ++ U64 counter_mask; ++ U64 num_events; ++ U32 num_units; ++ VOID *ec; ++ S32 cur_group; ++}; ++ ++#define LWPMU_DEVICE_PMU_register_data(dev) ((dev)->PMU_register_data_unc) ++#define LWPMU_DEVICE_dispatch(dev) ((dev)->dispatch_unc) ++#define LWPMU_DEVICE_em_groups_count(dev) ((dev)->em_groups_count_unc) ++#define LWPMU_DEVICE_pcfg(dev) ((dev)->pcfg_unc) ++#define LWPMU_DEVICE_acc_per_thread(dev) ((dev)->acc_per_thread) ++#define LWPMU_DEVICE_prev_val_per_thread(dev) ((dev)->prev_val_per_thread) ++#define LWPMU_DEVICE_counter_mask(dev) ((dev)->counter_mask) ++#define LWPMU_DEVICE_num_events(dev) ((dev)->num_events) ++#define LWPMU_DEVICE_num_units(dev) ((dev)->num_units) ++#define LWPMU_DEVICE_ec(dev) ((dev)->ec) ++#define LWPMU_DEVICE_cur_group(dev) ((dev)->cur_group) ++ ++extern U32 num_devices; ++extern U32 cur_devices; ++extern LWPMU_DEVICE device_uncore; ++extern U64 *pmu_state; ++ ++// Handy macro ++#define TSC_SKEW(this_cpu) (tsc_info[this_cpu] - tsc_info[0]) ++ ++#endif +diff --git a/drivers/platform/x86/socperf/inc/utility.h b/drivers/platform/x86/socperf/inc/utility.h +new file mode 100644 +index 000000000000..6b3bc07fc0ed +--- /dev/null ++++ b/drivers/platform/x86/socperf/inc/utility.h +@@ -0,0 +1,61 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. 
++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++#ifndef _UTILITY_H_ ++#define _UTILITY_H_ ++ ++extern void SOCPERF_UTILITY_Read_TSC(U64 *pTsc); ++ ++extern void SOCPERF_UTILITY_Read_Cpuid(U64 cpuid_function, U64 *rax_value, ++ U64 *rbx_value, U64 *rcx_value, ++ U64 *rdx_value); ++ ++extern DISPATCH SOCPERF_UTILITY_Configure_CPU(U32); ++ ++#endif +diff --git a/drivers/platform/x86/socperf/include/error_reporting_utils.h b/drivers/platform/x86/socperf/include/error_reporting_utils.h +new file mode 100644 +index 000000000000..4490303f7cf8 +--- /dev/null ++++ b/drivers/platform/x86/socperf/include/error_reporting_utils.h +@@ -0,0 +1,168 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2002-2019 Intel Corporation. All Rights Reserved. 
++ * ------------------------------------------------------------------------- ++***/ ++ ++#ifndef __ERROR_REPORTING_UTILS_H__ ++#define __ERROR_REPORTING_UTILS_H__ ++ ++#define DRV_ASSERT_N_RET_VAL(ret_val) \ ++ do { \ ++ DRV_ASSERT((ret_val) == VT_SUCCESS); \ ++ DRV_CHECK_N_RETURN_N_FAIL(ret_val); \ ++ } while (0) ++ ++ ++#define DRV_ASSERT_N_CONTINUE(ret_val) \ ++ do { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ LOG_ERR1(VTSA_T("Operation failed with error code "), \ ++ (ret_val)); \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_N_RETURN_N_FAIL(ret_val) \ ++ do { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ LOG_ERR1(VTSA_T("Operation failed with error code "), \ ++ (ret_val)); \ ++ return ret_val; \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_N_RETURN_NO_RETVAL(ret_val) \ ++ do { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ LOG_ERR1(VTSA_T("Operation failed with error code "), \ ++ (ret_val)); \ ++ return; \ ++ } \ ++ } while (0) ++ ++ ++#define DRV_CHECK_PTR_N_RET_VAL(ptr) \ ++ do { \ ++ if ((ptr) == NULL) { \ ++ LOG_ERR0(VTSA_T("Encountered null pointer")); \ ++ return VT_SAM_ERROR; \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_PTR_N_RET_NULL(ptr) \ ++ do { \ ++ if ((ptr) == NULL) { \ ++ LOG_ERR0(VTSA_T("Encountered null pointer")); \ ++ return NULL; \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr) \ ++ do { \ ++ if ((ptr) == NULL) { \ ++ LOG_ERR0(VTSA_T("Encountered null pointer")); \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_N_LOG_NO_RETURN(ret_val) \ ++ do { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ LOG_ERR1(VTSA_T("Operation failed with error code "), \ ++ (ret_val)); \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_N_RET_NEG_ONE(ret_val) \ ++ do { \ ++ if ((ret_val) == -1) { \ ++ LOG_ERR0(VTSA_T( \ ++ "Operation failed with error code = -1")); \ ++ return VT_SAM_ERROR; \ ++ } \ ++ } while (0) ++ ++#define DRV_REQUIRES_TRUE_COND_RET_N_FAIL(cond) \ ++ do { \ ++ if (!(cond)) { \ ++ LOG_ERR0(VTSA_T("Condition check failed")); \ ++ return 
VT_SAM_ERROR; \ ++ } \ ++ } while (0) ++ ++#define DRV_REQUIRES_TRUE_COND_RET_ASSIGNED_VAL(cond, ret_val) \ ++ do { \ ++ if (!(cond)) { \ ++ LOG_ERR0(VTSA_T("Condition check failed")); \ ++ return ret_val; \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_N_ERR_LOG_ERR_STRNG_N_RET(rise_err) \ ++ do { \ ++ if (rise_err != VT_SUCCESS) { \ ++ PVOID rise_ptr = NULL; \ ++ const VTSA_CHAR *error_str = NULL; \ ++ RISE_open(&rise_ptr); \ ++ RISE_translate_err_code(rise_ptr, rise_err, &error_str); \ ++ LogItW(LOG_LEVEL_ERROR | LOG_AREA_GENERAL, \ ++ L"Operation failed with error [ %d ] = %s\n", rise_err, \ ++ error_str); \ ++ RISE_close(rise_ptr); \ ++ return rise_err; \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_PTR_N_CLEANUP(ptr, gotolabel, ret_val) \ ++ do { \ ++ if ((ptr) == NULL) { \ ++ LOG_ERR0(VTSA_T("Encountered null pointer")); \ ++ ret_val = VT_SAM_ERROR; \ ++ goto gotolabel; \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_ON_FAIL_CLEANUP_N_RETURN(ret_val, gotolabel) \ ++ do { \ ++ if ((ret_val) != VT_SUCCESS) { \ ++ DRV_CHECK_N_LOG_NO_RETURN(ret_val); \ ++ goto gotolabel; \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_N_CLEANUP_N_RETURN_RET_NEG_ONE(ret_val, gotolabel) \ ++ do { \ ++ if ((ret_val) == -1) { \ ++ DRV_CHECK_N_LOG_NO_RETURN(ret_val); \ ++ goto gotolabel; \ ++ } \ ++ } while (0) ++ ++#define DRV_CHECK_PTR_ON_NULL_CLEANUP_N_RETURN(ptr, gotolabel) \ ++ do { \ ++ if ((ptr) == NULL) { \ ++ DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr); \ ++ goto gotolabel; \ ++ } \ ++ } while (0) ++ ++#define FREE_N_SET_NULL(ptr) \ ++ do { \ ++ if (ptr != NULL) { \ ++ free(ptr); \ ++ ptr = NULL; \ ++ } \ ++ } while (0) ++ ++#define DELETE_N_SET_NULL(ptr) \ ++ do { \ ++ delete ptr; \ ++ ptr = NULL; \ ++ } while (0) ++ ++ ++#endif +diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_chipset.h b/drivers/platform/x86/socperf/include/lwpmudrv_chipset.h +new file mode 100644 +index 000000000000..90cef28f08c2 +--- /dev/null ++++ b/drivers/platform/x86/socperf/include/lwpmudrv_chipset.h 
+@@ -0,0 +1,285 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2019 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++***/ ++ ++#ifndef _LWPMUDRV_CHIPSET_UTILS_H_ ++#define _LWPMUDRV_CHIPSET_UTILS_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define MAX_CHIPSET_EVENT_NAME 64 ++#define MAX_CHIPSET_COUNTERS 5 ++ // TODO: this covers 1 fixed counter \ ++ // plus 4 general counters on GMCH; \ ++ // for other chipset devices, this \ ++ // can vary from 8 to 32; might consider \ ++ // making this per-chipset-type since \ ++ // event-multiplexing is currently not \ ++ // supported for chipset collections ++ ++#if defined(_NTDDK_) ++#define CHIPSET_PHYS_ADDRESS PHYSICAL_ADDRESS ++#else ++#define CHIPSET_PHYS_ADDRESS U64 ++#endif ++ ++// possible values for whether chipset data is valid or not ++enum { DATA_IS_VALID, DATA_IS_INVALID, DATA_OUT_OF_RANGE }; ++ ++typedef struct CHIPSET_PCI_ARG_NODE_S CHIPSET_PCI_ARG_NODE; ++typedef CHIPSET_PCI_ARG_NODE * CHIPSET_PCI_ARG; ++ ++struct CHIPSET_PCI_ARG_NODE_S { ++ U32 address; ++ U32 value; ++}; ++ ++#define CHIPSET_PCI_ARG_address(chipset_pci) ((chipset_pci)->address) ++#define CHIPSET_PCI_ARG_value(chipset_pci) ((chipset_pci)->value) ++ ++typedef struct CHIPSET_PCI_SEARCH_ADDR_NODE_S CHIPSET_PCI_SEARCH_ADDR_NODE; ++typedef CHIPSET_PCI_SEARCH_ADDR_NODE * CHIPSET_PCI_SEARCH_ADDR; ++ ++struct CHIPSET_PCI_SEARCH_ADDR_NODE_S { ++ U32 start; ++ U32 stop; ++ U32 increment; ++ U32 addr; ++}; ++ ++#define CHIPSET_PCI_SEARCH_ADDR_start(pci_search_addr) ((pci_search_addr)->start) ++#define 
CHIPSET_PCI_SEARCH_ADDR_stop(pci_search_addr) ((pci_search_addr)->stop) ++#define CHIPSET_PCI_SEARCH_ADDR_increment(pci_search_addr) \ ++ ((pci_search_addr)->increment) ++#define CHIPSET_PCI_SEARCH_ADDR_address(pci_search_addr) ((pci_search_addr)->addr) ++ ++typedef struct CHIPSET_PCI_CONFIG_NODE_S CHIPSET_PCI_CONFIG_NODE; ++typedef CHIPSET_PCI_CONFIG_NODE * CHIPSET_PCI_CONFIG; ++ ++struct CHIPSET_PCI_CONFIG_NODE_S { ++ U32 bus; ++ U32 device; ++ U32 function; ++ U32 offset; ++ U32 value; ++}; ++ ++#define CHIPSET_PCI_CONFIG_bus(pci_config) ((pci_config)->bus) ++#define CHIPSET_PCI_CONFIG_device(pci_config) ((pci_config)->device) ++#define CHIPSET_PCI_CONFIG_function(pci_config) ((pci_config)->function) ++#define CHIPSET_PCI_CONFIG_offset(pci_config) ((pci_config)->offset) ++#define CHIPSET_PCI_CONFIG_value(pci_config) ((pci_config)->value) ++ ++typedef struct CHIPSET_MARKER_NODE_S CHIPSET_MARKER_NODE; ++typedef CHIPSET_MARKER_NODE * CHIPSET_MARKER; ++ ++struct CHIPSET_MARKER_NODE_S { ++ U32 processor_number; ++ U32 rsvd; ++ U64 tsc; ++}; ++ ++#define CHIPSET_MARKER_processor_number(chipset_marker) \ ++ ((pci_config)->processor_number) ++#define CHIPSET_MARKER_tsc(chipset_marker) ((pci_config)->tsc) ++ ++typedef struct CHAP_INTERFACE_NODE_S CHAP_INTERFACE_NODE; ++typedef CHAP_INTERFACE_NODE * CHAP_INTERFACE; ++ ++// CHAP chipset registers ++// The offsets for registers are command-0x00, event-0x04, status-0x08, data-0x0C ++struct CHAP_INTERFACE_NODE_S { ++ U32 command_register; ++ U32 event_register; ++ U32 status_register; ++ U32 data_register; ++}; ++ ++#define CHAP_INTERFACE_command_register(chap) ((chap)->command_register) ++#define CHAP_INTERFACE_event_register(chap) ((chap)->event_register) ++#define CHAP_INTERFACE_status_register(chap) ((chap)->status_register) ++#define CHAP_INTERFACE_data_register(chap) ((chap)->data_register) ++ ++/************************************************************************** ++ * GMCH Registers and Offsets ++ 
************************************************************************** ++ */ ++ ++// Counter registers - each counter has 4 registers ++#define GMCH_MSG_CTRL_REG 0xD0 // message control register (MCR) 0xD0-0xD3 ++#define GMCH_MSG_DATA_REG 0xD4 // message data register (MDR) 0xD4-0xD7 ++ ++// Counter register offsets ++#define GMCH_PMON_CAPABILITIES \ ++ 0x0005F0F0 // when read, bit 0 enabled means GMCH counters are available ++#define GMCH_PMON_GLOBAL_CTRL \ ++ 0x0005F1F0 // simultaneously enables or disables fixed and general counters ++ ++// Fixed counters (32-bit) ++#define GMCH_PMON_FIXED_CTR_CTRL \ ++ 0x0005F4F0 // enables and filters the fixed counters ++#define GMCH_PMON_FIXED_CTR0 \ ++ 0x0005E8F0 // 32-bit fixed counter for GMCH_CORE_CLKS event ++#define GMCH_PMON_FIXED_CTR_OVF_VAL \ ++ 0xFFFFFFFFLL // overflow value for GMCH fixed counters ++ ++// General counters (38-bit) ++// NOTE: lower order bits on GP counters must be read before the higher bits! ++#define GMCH_PMON_GP_CTR0_L 0x0005F8F0 // GMCH GP counter 0, low bits ++#define GMCH_PMON_GP_CTR0_H 0x0005FCF0 // GMCH GP counter 0, high bits ++#define GMCH_PMON_GP_CTR1_L 0x0005F9F0 ++#define GMCH_PMON_GP_CTR1_H 0x0005FDF0 ++#define GMCH_PMON_GP_CTR2_L 0x0005FAF0 ++#define GMCH_PMON_GP_CTR2_H 0x0005FEF0 ++#define GMCH_PMON_GP_CTR3_L 0x0005FBF0 ++#define GMCH_PMON_GP_CTR3_H 0x0005FFF0 ++#define GMCH_PMON_GP_CTR_OVF_VAL \ ++ 0x3FFFFFFFFFLL // overflow value for GMCH general counters ++ ++/* other counter register offsets ... 
++#define GMCH_PMON_GLOBAL_STATUS 0x0005F2F0 // bit 16 indicates overflow on fixed counter 0; bits 0-3 indicate overflows on GP counters 0-3 ++#define GMCH_PMON_GLOBAL_OVF_CTRL 0x0005F3F0 // on CDV, it is write-only psuedo-register that always returns 0 when read ++#define GMCH_PMON_PERFEVTSEL0 0x0005E0F0 // this is used for selecting which event in GP counter 0 to count ++#define GMCH_PMON_PERFEVTSEL1 0x0005E1F0 // this is used for selecting which event in GP counter 1 to count ++#define GMCH_PMON_PERFEVTSEL2 0x0005E2F0 // this is used for selecting which event in GP counter 2 to count ++#define GMCH_PMON_PERFEVTSEL3 0x0005E3F0 // this is used for selecting which event in GP counter 3 to count ++#define GMCH_PERF_ADDR_LIMIT_H 0x0001E8F0 // used for qualifying upper address limit for DRAM_PAGE_STATUS event ++#define GMCH_PERF_ADDR_LIMIT_L 0x0001E9F0 // used for qualifying lower address limit for DRAM_PAGE_STATUS event ++#define GMCH_PERF_BANK_SEL 0x0001EAF0 // used for addtional qualification of DRAM_PAGE_STATUS event ++*/ ++ ++// Register offsets for LNC ++#define LNC_GMCH_REGISTER_READ 0xD0000000 ++#define LNC_GMCH_REGISTER_WRITE 0xE0000000 ++ ++// Register offsets for SLT ++#define SLT_GMCH_REGISTER_READ 0x10000000 ++#define SLT_GMCH_REGISTER_WRITE 0x11000000 ++ ++// Register offsets for CDV ++#define CDV_GMCH_REGISTER_READ 0x10000000 ++#define CDV_GMCH_REGISTER_WRITE 0x11000000 ++ ++// possible values for whether chipset data is valid or not ++/*enum { ++ DATA_IS_VALID, ++ DATA_IS_INVALID, ++ DATA_OUT_OF_RANGE ++}; ++*/ ++typedef struct CHIPSET_EVENT_NODE_S CHIPSET_EVENT_NODE; ++typedef CHIPSET_EVENT_NODE * CHIPSET_EVENT; ++ ++//chipset event ++struct CHIPSET_EVENT_NODE_S { ++ U32 event_id; ++ U32 group_id; ++ char name[MAX_CHIPSET_EVENT_NAME]; ++ U32 pm; ++ U32 counter; ++}; ++ ++#define CHIPSET_EVENT_event_id(chipset_event) ((chipset_event)->event_id) ++#define CHIPSET_EVENT_group_id(chipset_event) ((chipset_event)->group_id) ++#define 
CHIPSET_EVENT_name(chipset_event) ((chipset_event)->name) ++#define CHIPSET_EVENT_pm(chipset_event) ((chipset_event)->pm) ++#define CHIPSET_EVENT_counter(chipset_event) ((chipset_event)->counter) ++ ++typedef struct CHIPSET_SEGMENT_NODE_S CHIPSET_SEGMENT_NODE; ++typedef CHIPSET_SEGMENT_NODE * CHIPSET_SEGMENT; ++ ++//chipset segment data ++struct CHIPSET_SEGMENT_NODE_S { ++ CHIPSET_PHYS_ADDRESS physical_address; ++ U64 virtual_address; ++ U16 size; ++ U16 number_of_counters; ++ U16 total_events; ++ U16 start_register; // (see driver for details) ++ U32 read_register; // read register offset (model dependent) ++ U32 write_register; // write register offset (model dependent) ++ CHIPSET_EVENT_NODE events[MAX_CHIPSET_COUNTERS]; ++}; ++ ++#define CHIPSET_SEGMENT_physical_address(chipset_segment) \ ++ ((chipset_segment)->physical_address) ++#define CHIPSET_SEGMENT_virtual_address(chipset_segment) \ ++ ((chipset_segment)->virtual_address) ++#define CHIPSET_SEGMENT_size(chipset_segment) ((chipset_segment)->size) ++#define CHIPSET_SEGMENT_num_counters(chipset_segment) \ ++ ((chipset_segment)->number_of_counters) ++#define CHIPSET_SEGMENT_total_events(chipset_segment) \ ++ ((chipset_segment)->total_events) ++#define CHIPSET_SEGMENT_start_register(chipset_segment) \ ++ ((chipset_segment)->start_register) ++#define CHIPSET_SEGMENT_read_register(chipset_segment) \ ++ ((chipset_segment)->read_register) ++#define CHIPSET_SEGMENT_write_register(chipset_segment) \ ++ ((chipset_segment)->write_register) ++#define CHIPSET_SEGMENT_events(chipset_segment) ((chipset_segment)->events) ++ ++typedef struct CHIPSET_CONFIG_NODE_S CHIPSET_CONFIG_NODE; ++typedef CHIPSET_CONFIG_NODE * CHIPSET_CONFIG; ++ ++//chipset struct used for communication between user mode and kernel ++struct CHIPSET_CONFIG_NODE_S { ++ U32 length; // length of this entire area ++ U32 major_version; ++ U32 minor_version; ++ U32 rsvd; ++ U64 cpu_counter_mask; ++ struct { ++ U64 processor : 1; // Processor PMU ++ U64 
mch_chipset : 1; // MCH Chipset ++ U64 ich_chipset : 1; // ICH Chipset ++ U64 motherboard_time_flag : 1; // Motherboard_Time requested. ++ U64 host_processor_run : 1; // Each processor should manage the MCH counts they see. ++ // Turn off for Gen 4 (NOA) runs. ++ U64 mmio_noa_registers : 1; // NOA ++ U64 bnb_chipset : 1; // BNB Chipset ++ U64 gmch_chipset : 1; // GMCH Chipset ++ U64 rsvd : 56; ++ } config_flags; ++ CHIPSET_SEGMENT_NODE mch; ++ CHIPSET_SEGMENT_NODE ich; ++ CHIPSET_SEGMENT_NODE mmio; ++ CHIPSET_SEGMENT_NODE bnb; ++ CHIPSET_SEGMENT_NODE gmch; ++}; ++ ++#define CHIPSET_CONFIG_length(chipset) ((chipset)->length) ++#define CHIPSET_CONFIG_major_version(chipset) ((chipset)->major_version) ++#define CHIPSET_CONFIG_minor_version(chipset) ((chipset)->minor_version) ++#define CHIPSET_CONFIG_cpu_counter_mask(chipset) ((chipset)->cpu_counter_mask) ++#define CHIPSET_CONFIG_processor(chipset) ((chipset)->config_flags.processor) ++#define CHIPSET_CONFIG_mch_chipset(chipset) ((chipset)->config_flags.mch_chipset) ++#define CHIPSET_CONFIG_ich_chipset(chipset) ((chipset)->config_flags.ich_chipset) ++#define CHIPSET_CONFIG_motherboard_time(chipset) \ ++ ((chipset)->config_flags.motherboard_time_flag) ++#define CHIPSET_CONFIG_host_proc_run(chipset) \ ++ ((chipset)->config_flags.host_processor_run) ++#define CHIPSET_CONFIG_noa_chipset(chipset) \ ++ ((chipset)->config_flags.mmio_noa_registers) ++#define CHIPSET_CONFIG_bnb_chipset(chipset) ((chipset)->config_flags.bnb_chipset) ++#define CHIPSET_CONFIG_gmch_chipset(chipset) \ ++ ((chipset)->config_flags.gmch_chipset) ++#define CHIPSET_CONFIG_mch(chipset) ((chipset)->mch) ++#define CHIPSET_CONFIG_ich(chipset) ((chipset)->ich) ++#define CHIPSET_CONFIG_noa(chipset) ((chipset)->mmio) ++#define CHIPSET_CONFIG_bnb(chipset) ((chipset)->bnb) ++#define CHIPSET_CONFIG_gmch(chipset) ((chipset)->gmch) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_defines.h 
b/drivers/platform/x86/socperf/include/lwpmudrv_defines.h +new file mode 100644 +index 000000000000..322c33b125c4 +--- /dev/null ++++ b/drivers/platform/x86/socperf/include/lwpmudrv_defines.h +@@ -0,0 +1,502 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2019 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++***/ ++ ++#ifndef _LWPMUDRV_DEFINES_H_ ++#define _LWPMUDRV_DEFINES_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++// ++// Start off with none of the OS'es are defined ++// ++#undef DRV_OS_WINDOWS ++#undef DRV_OS_LINUX ++#undef DRV_OS_SOLARIS ++#undef DRV_OS_MAC ++#undef DRV_OS_ANDROID ++#undef DRV_OS_UNIX ++ ++// ++// Make sure none of the architectures is defined here ++// ++#undef DRV_IA32 ++#undef DRV_EM64T ++ ++// ++// Make sure one (and only one) of the OS'es gets defined here ++// ++// Unfortunately entirex defines _WIN32 so we need to check for linux ++// first. The definition of these flags is one and only one ++// _OS_xxx is allowed to be defined. 
++// ++#if defined(__ANDROID__) ++#define DRV_OS_ANDROID ++#define DRV_OS_UNIX ++#elif defined(__linux__) ++#define DRV_OS_LINUX ++#define DRV_OS_UNIX ++#elif defined(sun) ++#define DRV_OS_SOLARIS ++#define DRV_OS_UNIX ++#elif defined(_WIN32) ++#define DRV_OS_WINDOWS ++#elif defined(__APPLE__) ++#define DRV_OS_MAC ++#define DRV_OS_UNIX ++#elif defined(__FreeBSD__) ++#define DRV_OS_FREEBSD ++#define DRV_OS_UNIX ++#else ++#error "Compiling for an unknown OS" ++#endif ++ ++// ++// Make sure one (and only one) architecture is defined here ++// as well as one (and only one) pointer__ size ++// ++#if defined(_M_IX86) || defined(__i386__) ++#define DRV_IA32 ++#elif defined(_M_AMD64) || defined(__x86_64__) ++#define DRV_EM64T ++#else ++#error "Unknown architecture for compilation" ++#endif ++ ++// ++// Add a well defined definition of compiling for release (free) vs. ++// debug (checked). Once again, don't assume these are the only two values, ++// always have an else clause in case we want to expand this. ++// ++#if defined(DRV_OS_UNIX) ++#define WINAPI ++#endif ++ ++/* ++ * Add OS neutral defines for file processing. 
This is needed in both ++ * the user code and the kernel code for cleanliness ++ */ ++#undef DRV_FILE_DESC ++#undef DRV_INVALID_FILE_DESC_VALUE ++#define DRV_ASSERT assert ++ ++#if defined(DRV_OS_WINDOWS) ++ ++#define DRV_FILE_DESC HANDLE ++#define DRV_INVALID_FILE_DESC_VALUE INVALID_HANDLE_VALUE ++ ++#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ ++ defined(DRV_OS_ANDROID) ++ ++#define DRV_IOCTL_FILE_DESC SIOP ++#define DRV_FILE_DESC SIOP ++#define DRV_INVALID_FILE_DESC_VALUE -1 ++ ++#elif defined(DRV_OS_FREEBSD) ++ ++#define DRV_IOCTL_FILE_DESC S64 ++#define DRV_FILE_DESC S64 ++#define DRV_INVALID_FILE_DESC_VALUE -1 ++ ++#elif defined(DRV_OS_MAC) ++#if defined __LP64__ ++#define DRV_IOCTL_FILE_DESC S64 ++#define DRV_FILE_DESC S64 ++#define DRV_INVALID_FILE_DESC_VALUE (S64)(-1) ++#else ++#define DRV_IOCTL_FILE_DESC S32 ++#define DRV_FILE_DESC S32 ++#define DRV_INVALID_FILE_DESC_VALUE (S32)(-1) ++#endif ++ ++#else ++ ++#error "Compiling for an unknown OS" ++ ++#endif ++ ++#define OUT ++#define IN ++#define INOUT ++ ++// ++// VERIFY_SIZEOF let's you insert a compile-time check that the size of a data ++// type (e.g. a struct) is what you think it should be. Usually it is ++// important to know what the actual size of your struct is, and to make sure ++// it is the same across all platforms. So this will prevent the code from ++// compiling if something happens that you didn't expect, whether it's because ++// you counted wring, or more often because the compiler inserted padding that ++// you don't want. ++// ++// NOTE: 'elem' and 'size' must both be identifier safe, e.g. matching the ++// regular expression /^[0-9a-zA-Z_]$/. ++// ++// Example: ++// typedef struct { void *ptr; int data; } mytype; ++// VERIFY_SIZEOF(mytype, 8); ++// ^-- this is correct on 32-bit platforms, but fails ++// on 64-bit platforms, indicating a possible ++// portability issue. 
++// ++#define VERIFY_SIZEOF(type, size) \ ++ { enum { sizeof_##type##_eq_##size = 1 / (int)(sizeof(type) == size) } } ++ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_DLLIMPORT __declspec(dllimport) ++#define DRV_DLLEXPORT __declspec(dllexport) ++#endif ++#if defined(DRV_OS_UNIX) ++#define DRV_DLLIMPORT ++#define DRV_DLLEXPORT ++#endif ++ ++#if defined(DRV_OS_WINDOWS) ++#define FSI64RAW "I64" ++#define DRV_PATH_SEPARATOR "\\" ++#define L_DRV_PATH_SEPARATOR L"\\" ++#endif ++ ++#if defined(DRV_OS_UNIX) ++#define FSI64RAW "ll" ++#define DRV_PATH_SEPARATOR "/" ++#define L_DRV_PATH_SEPARATOR L"/" ++#endif ++ ++#define FSS64 "%" FSI64RAW "d" ++#define FSU64 "%" FSI64RAW "u" ++#define FSX64 "%" FSI64RAW "x" ++ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_RTLD_NOW 0 ++#endif ++#if defined(DRV_OS_UNIX) ++#if defined(DRV_OS_FREEBSD) ++#define DRV_RTLD_NOW 0 ++#else ++#define DRV_RTLD_NOW RTLD_NOW ++#endif ++#endif ++ ++// #define DRV_STRLEN (U32)strlen ++// #define DRV_WCSLEN (U32)wcslen ++#define DRV_STRCSPN strcspn ++#define DRV_STRCHR strchr ++#define DRV_STRRCHR strrchr ++#define DRV_WCSRCHR wcsrchr ++ ++// #if defined(DRV_OS_WINDOWS) ++// #define DRV_STCHARLEN DRV_WCSLEN ++// #else ++// #define DRV_STCHARLEN DRV_STRLEN ++// #endif ++ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_STRCPY strcpy_s ++#define DRV_STRNCPY strncpy_s ++#define DRV_STRICMP _stricmp ++#define DRV_STRNCMP strncmp ++#define DRV_STRNICMP _strnicmp ++#define DRV_STRDUP _strdup ++#define DRV_WCSDUP _wcsdup ++#define DRV_STRCMP strcmp ++#define DRV_WCSCMP wcscmp ++#define DRV_SNPRINTF _snprintf_s ++#define DRV_SNWPRINTF _snwprintf_s ++#define DRV_VSNPRINTF _vsnprintf_s ++#define DRV_SSCANF sscanf_s ++#define DRV_STRCAT strcat_s ++#define DRV_STRNCAT strncat_s ++#define DRV_MEMCPY memcpy_s ++#define DRV_WMEMCPY wmemcpy_s ++#define DRV_STRTOK strtok_s ++#define DRV_STRTOUL strtoul ++#define DRV_STRTOULL _strtoui64 ++#define DRV_STRTOQ _strtoui64 ++#define DRV_FOPEN(fp, name, mode) fopen_s(&(fp), (name), 
(mode)) ++#define DRV_WFOPEN(fp, name, mode) _wfopen_s(&(fp), (name), (mode)) ++#define DRV_FCLOSE(fp) \ ++ do { \ ++ if ((fp) != NULL) { \ ++ fclose((fp)); \ ++ } \ ++ } while (0) ++#define DRV_WCSCPY wcscpy_s ++#define DRV_WCSNCPY wcsncpy_s ++#define DRV_WCSCAT wcscat_s ++#define DRV_WCSNCAT wcsncat_s ++#define DRV_WCSTOK wcstok_s ++#define DRV_WCSSTR wcsstr ++#define DRV_STRERROR strerror_s ++#define DRV_SPRINTF sprintf_s ++#define DRV_VSPRINTF vsprintf_s ++#define DRV_VSWPRINTF vswprintf_s ++#define DRV_GETENV_S getenv_s ++#define DRV_WGETENV_S wgetenv_s ++#define DRV_PUTENV(name) _putenv(name) ++#define DRV_USTRCMP(X, Y) DRV_WCSCMP(X, Y) ++#define DRV_USTRDUP(X) DRV_WCSDUP(X) ++#define DRV_ACCESS(X) _access_s(X, 4) ++#define DRV_STRSTR strstr ++ ++#define DRV_STCHAR_COPY DRV_WCSNCPY ++ ++#define DRV_GETENV(buf, buf_size, name) _dupenv_s(&(buf), &(buf_size), (name)) ++#define DRV_WGETENV(buf, buf_size, name) _wdupenv_s(&(buf), &(buf_size), (name)) ++#define DRV_SCLOSE(fp) _close(fp) ++#define DRV_WRITE(fp, buf, buf_size) _write(fp, buf, buf_size); ++#define DRV_SOPEN_S(fp, name, oflag, shflag, pmode) \ ++ _sopen_s((fp), (name), (oflag), (shflag), (pmode)) ++#endif ++ ++#if defined(DRV_OS_UNIX) ++/* ++ Note: Many of the following macros have a "size" as the second argument. Generally ++ speaking, this is for compatibility with the _s versions available on Windows. ++ On Linux/Solaris/Mac, it is ignored. On Windows, it is the size of the destination ++ buffer and is used wrt memory checking features available in the C runtime in debug ++ mode. Do not confuse it with the number of bytes to be copied, or such. ++ ++ On Windows, this size should correspond to the number of allocated characters ++ (char or wchar_t) pointed to by the first argument. See MSDN for more details. 
++*/ ++#define DRV_STRICMP strcasecmp ++#define DRV_STRDUP strdup ++#define DRV_STRNDUP strndup ++#define DRV_STRCMP strcmp ++#define DRV_STRNCMP strncmp ++#define DRV_STRSTR strstr ++#define DRV_SNPRINTF(buf, buf_size, length, args...) \ ++ snprintf((buf), (length), ##args) ++#define DRV_SNWPRINTF(buf, buf_size, length, args...) \ ++ snwprintf((buf), (length), ##args) ++#define DRV_VSNPRINTF(buf, buf_size, length, args...) \ ++ vsnprintf((buf), (length), ##args) ++#define DRV_SSCANF sscanf ++#define DRV_STRCPY(dst, dst_size, src) strcpy((dst), (src)) ++#define DRV_STRNCPY(dst, dst_size, src, n) strncpy((dst), (src), (n)) ++#define DRV_STRCAT(dst, dst_size, src) strcat((dst), (src)) ++#define DRV_STRNCAT(dst, dst_size, src, n) strncat((dst), (src), (n)) ++#define DRV_MEMCPY(dst, dst_size, src, n) memcpy((dst), (src), (n)) ++#define DRV_STRTOK(tok, delim, context) strtok((tok), (delim)) ++#define DRV_STRTOUL strtoul ++#define DRV_STRTOULL strtoull ++#define DRV_STRTOL strtol ++#define DRV_FOPEN(fp, name, mode) { (fp) = fopen((name), (mode)) } ++#define DRV_FCLOSE(fp) \ ++ do { \ ++ if ((fp) != NULL) { \ ++ fclose((fp)); \ ++ } \ ++ } while (0) ++#define DRV_WCSCPY(dst, dst_size, src) wcscpy((dst), (const wchar_t *)(src)) ++#define DRV_WCSNCPY(dst, dst_size, src, count) \ ++ wcsncpy((dst), (const wchar_t *)(src), (count)) ++#define DRV_WCSCAT(dst, dst_size, src) wcscat((dst), (const wchar_t *)(src)) ++#define DRV_WCSTOK(tok, delim, context) \ ++ wcstok((tok), (const wchar_t *)(delim), (context)) ++#define DRV_STRERROR strerror ++#define DRV_SPRINTF(dst, dst_size, args...) sprintf((dst), ##args) ++#define DRV_VSPRINTF(dst, dst_size, length, args...) \ ++ vsprintf((dst), (length), ##args) ++#define DRV_VSWPRINTF(dst, dst_size, length, args...) 
\ ++ vswprintf((dst), (length), ##args) ++#define DRV_GETENV_S(dst, dst_size) getenv(dst) ++#define DRV_WGETENV_S(dst, dst_size) wgetenv(dst) ++#define DRV_PUTENV(name) putenv(name) ++#define DRV_GETENV(buf, buf_size, name) ((buf) = getenv((name))) ++#define DRV_USTRCMP(X, Y) DRV_STRCMP(X, Y) ++#define DRV_USTRDUP(X) DRV_STRDUP(X) ++#define DRV_ACCESS(X) access(X, X_OK) ++ ++#define DRV_STCHAR_COPY DRV_STRNCPY ++#endif ++ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_STRTOK_R(tok, delim, context) strtok_s((tok), (delim), (context)) ++#else ++#define DRV_STRTOK_R(tok, delim, context) strtok_r((tok), (delim), (context)) ++#endif ++ ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) ++#define DRV_STRTOQ strtoq ++#endif ++ ++#if defined(DRV_OS_ANDROID) ++#define DRV_STRTOQ strtol ++#endif ++ ++#if defined(DRV_OS_SOLARIS) ++#define DRV_STRTOQ strtoll ++#endif ++ ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC) ++#define DRV_WCSDUP wcsdup ++#endif ++ ++#if defined(DRV_OS_SOLARIS) ++#define DRV_WCSDUP solaris_wcsdup ++#endif ++ ++#if defined(DRV_OS_ANDROID) ++#define DRV_WCSDUP android_wcsdup ++#endif ++ ++/* ++ * Windows uses wchar_t and linux uses char for strings. ++ * Need an extra level of abstraction to standardize it. ++ */ ++#if defined(DRV_OS_WINDOWS) ++#define DRV_STDUP DRV_WCSDUP ++#define DRV_FORMAT_STRING(x) L##x ++#define DRV_PRINT_STRING(stream, format, ...) \ ++ fwprintf((stream), (format), __VA_ARGS__) ++#else ++#define DRV_STDUP DRV_STRDUP ++#define DRV_FORMAT_STRING(x) x ++#define DRV_PRINT_STRING(stream, format, ...) 
\ ++ fprintf((stream), (format), __VA_ARGS__) ++#endif ++ ++/* ++ * OS return types ++ */ ++#if defined(DRV_OS_UNIX) ++#define OS_STATUS int ++#define OS_SUCCESS 0 ++#if defined(BUILD_DRV_ESX) ++#define OS_ILLEGAL_IOCTL -1 ++#define OS_NO_MEM -2 ++#define OS_FAULT -3 ++#define OS_INVALID -4 ++#define OS_NO_SYSCALL -5 ++#define OS_RESTART_SYSCALL -6 ++#define OS_IN_PROGRESS -7 ++#else ++#define OS_ILLEGAL_IOCTL -ENOTTY ++#define OS_NO_MEM -ENOMEM ++#define OS_FAULT -EFAULT ++#define OS_INVALID -EINVAL ++#define OS_NO_SYSCALL -ENOSYS ++#define OS_RESTART_SYSCALL -ERESTARTSYS ++#define OS_IN_PROGRESS -EALREADY ++#endif ++#endif ++#if defined(DRV_OS_WINDOWS) ++#define OS_STATUS NTSTATUS ++#define OS_SUCCESS STATUS_SUCCESS ++#define OS_ILLEGAL_IOCTL STATUS_UNSUCCESSFUL ++#define OS_NO_MEM STATUS_UNSUCCESSFUL ++#define OS_FAULT STATUS_UNSUCCESSFUL ++#define OS_INVALID STATUS_UNSUCCESSFUL ++#define OS_NO_SYSCALL STATUS_UNSUCCESSFUL ++#define OS_RESTART_SYSCALL STATUS_UNSUCCESSFUL ++#define OS_IN_PROGRESS STATUS_UNSUCCESSFUL ++#endif ++ ++/**************************************************************************** ++ ** Driver State definitions ++ ***************************************************************************/ ++#define DRV_STATE_UNINITIALIZED 0 ++#define DRV_STATE_RESERVED 1 ++#define DRV_STATE_IDLE 2 ++#define DRV_STATE_PAUSED 3 ++#define DRV_STATE_STOPPED 4 ++#define DRV_STATE_RUNNING 5 ++#define DRV_STATE_PAUSING 6 ++#define DRV_STATE_PREPARE_STOP 7 ++#define DRV_STATE_TERMINATING 8 ++ ++#define MATCHING_STATE_BIT(state) ((U32)1 << state) ++#define STATE_BIT_UNINITIALIZED MATCHING_STATE_BIT(DRV_STATE_UNINITIALIZED) ++#define STATE_BIT_RESERVED MATCHING_STATE_BIT(DRV_STATE_RESERVED) ++#define STATE_BIT_IDLE MATCHING_STATE_BIT(DRV_STATE_IDLE) ++#define STATE_BIT_PAUSED MATCHING_STATE_BIT(DRV_STATE_PAUSED) ++#define STATE_BIT_STOPPED MATCHING_STATE_BIT(DRV_STATE_STOPPED) ++#define STATE_BIT_RUNNING MATCHING_STATE_BIT(DRV_STATE_RUNNING) ++#define 
STATE_BIT_PAUSING MATCHING_STATE_BIT(DRV_STATE_PAUSING) ++#define STATE_BIT_PREPARE_STOP MATCHING_STATE_BIT(DRV_STATE_PREPARE_STOP) ++#define STATE_BIT_TERMINATING MATCHING_STATE_BIT(DRV_STATE_TERMINATING) ++#define STATE_BIT_ANY ((U32)-1) ++ ++#define IS_COLLECTING_STATE(state) \ ++ (!!(MATCHING_STATE_BIT(state) & \ ++ (STATE_BIT_RUNNING | STATE_BIT_PAUSING | STATE_BIT_PAUSED))) ++ ++/* ++ * Stop codes ++ */ ++#define DRV_STOP_BASE 0 ++#define DRV_STOP_NORMAL 1 ++#define DRV_STOP_ASYNC 2 ++#define DRV_STOP_CANCEL 3 ++ ++#define SEP_FREE(loc) \ ++ do { \ ++ if ((loc)) { \ ++ free(loc); \ ++ loc = NULL; \ ++ } \ ++ } while (0) ++ ++#define MAX_EVENTS 256 // Limiting maximum multiplexing events to 256. ++#if defined(DRV_OS_UNIX) ++#define UNREFERENCED_PARAMETER(p) ((p) = (p)) ++#endif ++ ++/* ++ * Global marker names ++ */ ++#define START_MARKER_NAME "SEP_START_MARKER" ++#define PAUSE_MARKER_NAME "SEP_PAUSE_MARKER" ++#define RESUME_MARKER_NAME "SEP_RESUME_MARKER" ++ ++#define DRV_SOC_STRING_LEN (100 + MAX_MARKER_LENGTH) ++ ++/* ++ * Temp path ++ */ ++#define SEP_TMPDIR "SEP_TMP_DIR" ++#if defined(DRV_OS_WINDOWS) ++#define OS_TMPDIR "TEMP" ++#define GET_DEFAULT_TMPDIR(dir, size) \ ++ { \ ++ GetTempPath((U32)size, dir); \ ++ } ++#else ++#define OS_TMPDIR "TMPDIR" ++/* ++ * Unix has default tmp dir ++ */ ++#if defined(DRV_OS_ANDROID) ++#define TEMP_PATH "/data" ++#else ++#define TEMP_PATH "/tmp" ++#endif ++#define GET_DEFAULT_TMPDIR(dir, size) \ ++ { \ ++ DRV_STRCPY((STCHAR *)dir, (U32)size, (STCHAR *)TEMP_PATH); \ ++ } ++#endif ++ ++#define OS_ID_UNKNOWN -1 ++#define OS_ID_NATIVE 0 ++#define OS_ID_VMM 0 ++#define OS_ID_MODEM 1 ++#define OS_ID_ANDROID 2 ++#define OS_ID_SECVM 3 ++#define OS_ID_ACORN ((U32)-1) ++ ++#define PERF_HW_VER4 (5) ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_ecb.h b/drivers/platform/x86/socperf/include/lwpmudrv_ecb.h +new file mode 100644 +index 000000000000..ac1c09f95214 +--- 
/dev/null ++++ b/drivers/platform/x86/socperf/include/lwpmudrv_ecb.h +@@ -0,0 +1,1095 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2019 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++***/ ++ ++#ifndef _LWPMUDRV_ECB_UTILS_H_ ++#define _LWPMUDRV_ECB_UTILS_H_ ++ ++#if defined(DRV_OS_WINDOWS) ++#pragma warning(disable : 4200) ++#endif ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++// control register types ++#define CCCR 1 // counter configuration control register ++#define ESCR 2 // event selection control register ++#define DATA 4 // collected as snapshot of current value ++#define DATA_RO_DELTA 8 // read-only counter collected as current-previous ++#define DATA_RO_SS \ ++ 16 // read-only counter collected as snapshot of current value ++#define METRICS 32 // hardware metrics ++ ++// event multiplexing modes ++#define EM_DISABLED -1 ++#define EM_TIMER_BASED 0 ++#define EM_EVENT_BASED_PROFILING 1 ++#define EM_TRIGGER_BASED 2 ++ ++// *************************************************************************** ++ ++/*!\struct EVENT_DESC_NODE ++ * \var sample_size - size of buffer in bytes to hold the sample + extras ++ * \var max_gp_events - max number of General Purpose events per EM group ++ * \var pebs_offset - offset in the sample to locate the pebs capture information ++ * \var lbr_offset - offset in the sample to locate the lbr information ++ * \var lbr_num_regs - offset in the sample to locate the number of lbr register information ++ * \var latency_offset_in_sample - offset in the sample to locate the latency information ++ * \var 
latency_size_in_sample - size of latency records in the sample ++ * \var latency_size_from_pebs_record - size of the latency data from pebs record in the sample ++ * \var latency_offset_in_pebs_record - offset in the sample to locate the latency information ++ * in pebs record ++ * \var power_offset_in_sample - offset in the sample to locate the power information ++ * \var ebc_offset - offset in the sample to locate the ebc count information ++ * \var uncore_ebc_offset - offset in the sample to locate the uncore ebc count information ++ * ++ * \var ro_offset - offset of RO data in the sample ++ * \var ro_count - total number of RO entries (including all of IEAR/DEAR/BTB/IPEAR) ++ * \var iear_offset - offset into RO data at which IEAR entries begin ++ * \var dear_offset - offset into RO data at which DEAR entries begin ++ * \var btb_offset - offset into RO data at which BTB entries begin (these use the same PMDs) ++ * \var ipear_offset - offset into RO data at which IPEAR entries begin (these use the same PMDs) ++ * \var iear_count - number of IEAR entries ++ * \var dear_count - number of DEAR entries ++ * \var btb_count - number of BTB entries ++ * \var ipear_count - number of IPEAR entries ++ * ++ * \var pwr_offset - offset in the sample to locate the pwr count information ++ * \var p_state_offset - offset in the sample to locate the p_state information (APERF/MPERF) ++ * ++ * \brief Data structure to describe the events and the mode ++ * ++ */ ++ ++typedef struct EVENT_DESC_NODE_S EVENT_DESC_NODE; ++typedef EVENT_DESC_NODE * EVENT_DESC; ++ ++struct EVENT_DESC_NODE_S { ++ U32 sample_size; ++ U32 pebs_offset; ++ U32 pebs_size; ++ U32 lbr_offset; ++ U32 lbr_num_regs; ++ U32 latency_offset_in_sample; ++ U32 latency_size_in_sample; ++ U32 latency_size_from_pebs_record; ++ U32 latency_offset_in_pebs_record; ++ U32 power_offset_in_sample; ++ U32 ebc_offset; ++ U32 uncore_ebc_offset; ++ U32 eventing_ip_offset; ++ U32 hle_offset; ++ U32 pwr_offset; ++ U32 
callstack_offset; ++ U32 callstack_size; ++ U32 p_state_offset; ++ U32 pebs_tsc_offset; ++ U32 perfmetrics_offset; ++ U32 perfmetrics_size; ++ /* ----------ADAPTIVE PEBS FIELDS --------- */ ++ U16 applicable_counters_offset; ++ U16 gpr_info_offset; ++ U16 gpr_info_size; ++ U16 xmm_info_offset; ++ U16 xmm_info_size; ++ U16 lbr_info_size; ++ /*------------------------------------------*/ ++ U32 reserved2; ++ U64 reserved3; ++}; ++ ++// ++// Accessor macros for EVENT_DESC node ++// ++#define EVENT_DESC_sample_size(ec) ((ec)->sample_size) ++#define EVENT_DESC_pebs_offset(ec) ((ec)->pebs_offset) ++#define EVENT_DESC_pebs_size(ec) ((ec)->pebs_size) ++#define EVENT_DESC_lbr_offset(ec) ((ec)->lbr_offset) ++#define EVENT_DESC_lbr_num_regs(ec) ((ec)->lbr_num_regs) ++#define EVENT_DESC_latency_offset_in_sample(ec) ((ec)->latency_offset_in_sample) ++#define EVENT_DESC_latency_size_from_pebs_record(ec) \ ++ ((ec)->latency_size_from_pebs_record) ++#define EVENT_DESC_latency_offset_in_pebs_record(ec) \ ++ ((ec)->latency_offset_in_pebs_record) ++#define EVENT_DESC_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) ++#define EVENT_DESC_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) ++#define EVENT_DESC_ebc_offset(ec) ((ec)->ebc_offset) ++#define EVENT_DESC_uncore_ebc_offset(ec) ((ec)->uncore_ebc_offset) ++#define EVENT_DESC_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) ++#define EVENT_DESC_hle_offset(ec) ((ec)->hle_offset) ++#define EVENT_DESC_pwr_offset(ec) ((ec)->pwr_offset) ++#define EVENT_DESC_callstack_offset(ec) ((ec)->callstack_offset) ++#define EVENT_DESC_callstack_size(ec) ((ec)->callstack_size) ++#define EVENT_DESC_perfmetrics_offset(ec) ((ec)->perfmetrics_offset) ++#define EVENT_DESC_perfmetrics_size(ec) ((ec)->perfmetrics_size) ++#define EVENT_DESC_p_state_offset(ec) ((ec)->p_state_offset) ++#define EVENT_DESC_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) ++#define EVENT_DESC_applicable_counters_offset(ec) \ ++ ((ec)->applicable_counters_offset) 
++#define EVENT_DESC_gpr_info_offset(ec) ((ec)->gpr_info_offset) ++#define EVENT_DESC_gpr_info_size(ec) ((ec)->gpr_info_size) ++#define EVENT_DESC_xmm_info_offset(ec) ((ec)->xmm_info_offset) ++#define EVENT_DESC_xmm_info_size(ec) ((ec)->xmm_info_size) ++#define EVENT_DESC_lbr_info_size(ec) ((ec)->lbr_info_size) ++ ++// *************************************************************************** ++ ++/*!\struct EVENT_CONFIG_NODE ++ * \var num_groups - The number of groups being programmed ++ * \var em_mode - Is EM valid? If so how? ++ * \var em_time_slice - EM valid? time slice in milliseconds ++ * \var sample_size - size of buffer in bytes to hold the sample + extras ++ * \var max_gp_events - Max number of General Purpose events per EM group ++ * \var pebs_offset - offset in the sample to locate the pebs capture information ++ * \var lbr_offset - offset in the sample to locate the lbr information ++ * \var lbr_num_regs - offset in the sample to locate the lbr information ++ * \var latency_offset_in_sample - offset in the sample to locate the latency information ++ * \var latency_size_in_sample - size of latency records in the sample ++ * \var latency_size_from_pebs_record - offset in the sample to locate the latency ++ * size from pebs record ++ * \var latency_offset_in_pebs_record - offset in the sample to locate the latency information ++ * in pebs record ++ * \var power_offset_in_sample - offset in the sample to locate the power information ++ * \var ebc_offset - offset in the sample to locate the ebc count information ++ * ++ * \var pwr_offset - offset in the sample to locate the pwr count information ++ * \var p_state_offset - offset in the sample to locate the p_state information (APERF/MPERF) ++ * ++ * \brief Data structure to describe the events and the mode ++ * ++ */ ++ ++typedef struct EVENT_CONFIG_NODE_S EVENT_CONFIG_NODE; ++typedef EVENT_CONFIG_NODE * EVENT_CONFIG; ++ ++struct EVENT_CONFIG_NODE_S { ++ U32 num_groups; ++ S32 em_mode; ++ S32 em_factor; ++ 
S32 em_event_num; ++ U32 sample_size; ++ U32 max_gp_events; ++ U32 max_fixed_counters; ++ U32 max_ro_counters; // maximum read-only counters ++ U32 pebs_offset; ++ U32 pebs_size; ++ U32 lbr_offset; ++ U32 lbr_num_regs; ++ U32 latency_offset_in_sample; ++ U32 latency_size_in_sample; ++ U32 latency_size_from_pebs_record; ++ U32 latency_offset_in_pebs_record; ++ U32 power_offset_in_sample; ++ U32 ebc_offset; ++ U32 num_groups_unc; ++ U32 ebc_offset_unc; ++ U32 sample_size_unc; ++ U32 eventing_ip_offset; ++ U32 hle_offset; ++ U32 pwr_offset; ++ U32 callstack_offset; ++ U32 callstack_size; ++ U32 p_state_offset; ++ U32 pebs_tsc_offset; ++ U64 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++// ++// Accessor macros for EVENT_CONFIG node ++// ++#define EVENT_CONFIG_num_groups(ec) ((ec)->num_groups) ++#define EVENT_CONFIG_mode(ec) ((ec)->em_mode) ++#define EVENT_CONFIG_em_factor(ec) ((ec)->em_factor) ++#define EVENT_CONFIG_em_event_num(ec) ((ec)->em_event_num) ++#define EVENT_CONFIG_sample_size(ec) ((ec)->sample_size) ++#define EVENT_CONFIG_max_gp_events(ec) ((ec)->max_gp_events) ++#define EVENT_CONFIG_max_fixed_counters(ec) ((ec)->max_fixed_counters) ++#define EVENT_CONFIG_max_ro_counters(ec) ((ec)->max_ro_counters) ++#define EVENT_CONFIG_pebs_offset(ec) ((ec)->pebs_offset) ++#define EVENT_CONFIG_pebs_size(ec) ((ec)->pebs_size) ++#define EVENT_CONFIG_lbr_offset(ec) ((ec)->lbr_offset) ++#define EVENT_CONFIG_lbr_num_regs(ec) ((ec)->lbr_num_regs) ++#define EVENT_CONFIG_latency_offset_in_sample(ec) ((ec)->latency_offset_in_sample) ++#define EVENT_CONFIG_latency_size_from_pebs_record(ec) \ ++ ((ec)->latency_size_from_pebs_record) ++#define EVENT_CONFIG_latency_offset_in_pebs_record(ec) \ ++ ((ec)->latency_offset_in_pebs_record) ++#define EVENT_CONFIG_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) ++#define EVENT_CONFIG_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) ++#define EVENT_CONFIG_ebc_offset(ec) ((ec)->ebc_offset) 
++#define EVENT_CONFIG_num_groups_unc(ec) ((ec)->num_groups_unc) ++#define EVENT_CONFIG_ebc_offset_unc(ec) ((ec)->ebc_offset_unc) ++#define EVENT_CONFIG_sample_size_unc(ec) ((ec)->sample_size_unc) ++#define EVENT_CONFIG_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) ++#define EVENT_CONFIG_hle_offset(ec) ((ec)->hle_offset) ++#define EVENT_CONFIG_pwr_offset(ec) ((ec)->pwr_offset) ++#define EVENT_CONFIG_callstack_offset(ec) ((ec)->callstack_offset) ++#define EVENT_CONFIG_callstack_size(ec) ((ec)->callstack_size) ++#define EVENT_CONFIG_p_state_offset(ec) ((ec)->p_state_offset) ++#define EVENT_CONFIG_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) ++ ++typedef enum { UNC_MUX = 1, UNC_COUNTER } UNC_SA_PROG_TYPE; ++ ++typedef enum { ++ UNC_PCICFG = 1, ++ UNC_MMIO, ++ UNC_STOP, ++ UNC_MEMORY, ++ UNC_STATUS ++} UNC_SA_CONFIG_TYPE; ++ ++typedef enum { ++ UNC_MCHBAR = 1, ++ UNC_DMIBAR, ++ UNC_PCIEXBAR, ++ UNC_GTTMMADR, ++ UNC_GDXCBAR, ++ UNC_CHAPADR, ++ UNC_SOCPCI, ++ UNC_NPKBAR ++} UNC_SA_BAR_TYPE; ++ ++typedef enum { UNC_OP_READ = 1, UNC_OP_WRITE, UNC_OP_RMW } UNC_SA_OPERATION; ++ ++typedef enum { ++ STATIC_COUNTER = 1, ++ FREERUN_COUNTER, ++ PROG_FREERUN_COUNTER ++} COUNTER_TYPES; ++ ++typedef enum { ++ PACKAGE_EVENT = 1, ++ MODULE_EVENT, ++ THREAD_EVENT, ++ SYSTEM_EVENT ++} EVENT_SCOPE_TYPES; ++ ++typedef enum { ++ DEVICE_CORE = 1, // CORE DEVICE ++ DEVICE_HETERO, ++ DEVICE_UNC_CBO = 10, // UNCORE DEVICES START ++ DEVICE_UNC_HA, ++ DEVICE_UNC_IMC, ++ DEVICE_UNC_IRP, ++ DEVICE_UNC_NCU, ++ DEVICE_UNC_PCU, ++ DEVICE_UNC_POWER, ++ DEVICE_UNC_QPI, ++ DEVICE_UNC_R2PCIE, ++ DEVICE_UNC_R3QPI, ++ DEVICE_UNC_SBOX, ++ DEVICE_UNC_GT, ++ DEVICE_UNC_UBOX, ++ DEVICE_UNC_WBOX, ++ DEVICE_UNC_COREI7, ++ DEVICE_UNC_CHA, ++ DEVICE_UNC_EDC, ++ DEVICE_UNC_IIO, ++ DEVICE_UNC_M2M, ++ DEVICE_UNC_EDRAM, ++ DEVICE_UNC_FPGA_CACHE, ++ DEVICE_UNC_FPGA_FAB, ++ DEVICE_UNC_FPGA_THERMAL, ++ DEVICE_UNC_FPGA_POWER, ++ DEVICE_UNC_FPGA_GB, ++ DEVICE_UNC_TELEMETRY = 150, // TELEMETRY DEVICE ++ DEVICE_UNC_CHAP = 
200, // CHIPSET DEVICES START ++ DEVICE_UNC_GMCH, ++ DEVICE_UNC_GFX, ++ DEVICE_UNC_SOCPERF = 300, // UNCORE VISA DEVICES START ++ DEVICE_UNC_HFI_RXE = 400, // STL HFI ++ DEVICE_UNC_HFI_TXE, ++} DEVICE_TYPES; ++ ++typedef enum { ++ LBR_ENTRY_TOS = 0, ++ LBR_ENTRY_FROM_IP, ++ LBR_ENTRY_TO_IP, ++ LBR_ENTRY_INFO ++} LBR_ENTRY_TYPE; ++ ++// *************************************************************************** ++ ++/*!\struct EVENT_REG_ID_NODE ++ * \var reg_id - MSR index to r/w ++ * \var pci_id PCI based register and its details to operate on ++ */ ++typedef struct EVENT_REG_ID_NODE_S EVENT_REG_ID_NODE; ++typedef EVENT_REG_ID_NODE * EVENT_REG_ID; ++ ++struct EVENT_REG_ID_NODE_S { ++ U32 reg_id; ++ U32 pci_bus_no; ++ U32 pci_dev_no; ++ U32 pci_func_no; ++ U32 data_size; ++ U32 bar_index; // Points to the index (MMIO_INDEX_LIST) ++ // of bar memory map list to be used in mmio_bar_list of ECB ++ U32 reserved1; ++ U32 reserved2; ++ U64 reserved3; ++}; ++ ++// *************************************************************************** ++ ++typedef enum { ++ PMU_REG_RW_READ = 1, ++ PMU_REG_RW_WRITE, ++ PMU_REG_RW_READ_WRITE, ++} PMU_REG_RW_TYPES; ++ ++typedef enum { ++ PMU_REG_PROG_MSR = 1, ++ PMU_REG_PROG_PCI, ++ PMU_REG_PROG_MMIO, ++} PMU_REG_PROG_TYPES; ++ ++typedef enum { ++ PMU_REG_GLOBAL_CTRL = 1, ++ PMU_REG_UNIT_CTRL, ++ PMU_REG_UNIT_STATUS, ++ PMU_REG_DATA, ++ PMU_REG_EVENT_SELECT, ++ PMU_REG_FILTER, ++ PMU_REG_FIXED_CTRL, ++} PMU_REG_TYPES; ++ ++/*!\struct EVENT_REG_NODE ++ * \var reg_type - register type ++ * \var event_id_index - event ID index ++ * \var event_reg_id - register ID/pci register details ++ * \var desc_id - desc ID ++ * \var flags - flags ++ * \var reg_value - register value ++ * \var max_bits - max bits ++ * \var scheduled - boolean to specify if this event node has been scheduled already ++ * \var bus_no - PCI bus number ++ * \var dev_no - PCI device number ++ * \var func_no - PCI function number ++ * \var counter_type - Event counter type - 
static/freerun ++ * \var event_scope - Event scope - package/module/thread ++ * \var reg_prog_type - Register Programming type ++ * \var reg_rw_type - Register Read/Write type ++ * \var reg_order - Register order in the programming sequence ++ * \var ++ * \brief Data structure to describe the event registers ++ * ++ */ ++ ++typedef struct EVENT_REG_NODE_S EVENT_REG_NODE; ++typedef EVENT_REG_NODE * EVENT_REG; ++ ++struct EVENT_REG_NODE_S { ++ U8 reg_type; ++ U8 unit_id; ++ U16 event_id_index; ++ U16 counter_event_offset; ++ U16 reserved1; ++ EVENT_REG_ID_NODE event_reg_id; ++ U64 reg_value; ++ U16 desc_id; ++ U16 flags; ++ U32 reserved2; ++ U64 max_bits; ++ U8 scheduled; ++ S8 secondary_pci_offset_shift; ++ U16 secondary_pci_offset_offset; // offset of the offset... ++ U32 counter_type; ++ U32 event_scope; ++ U8 reg_prog_type; ++ U8 reg_rw_type; ++ U8 reg_order; ++ U8 bit_position; ++ U64 secondary_pci_offset_mask; ++ U32 core_event_id; ++ U32 uncore_buffer_offset_in_package; ++ U32 uncore_buffer_offset_in_system; ++ U32 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++ U64 reserved6; ++}; ++ ++// ++// Accessor macros for EVENT_REG node ++// Note: the flags field is not directly addressible to prevent hackery ++// ++#define EVENT_REG_reg_type(x, i) ((x)[(i)].reg_type) ++#define EVENT_REG_event_id_index(x, i) ((x)[(i)].event_id_index) ++#define EVENT_REG_unit_id(x, i) ((x)[(i)].unit_id) ++#define EVENT_REG_counter_event_offset(x, i) ((x)[(i)].counter_event_offset) ++#define EVENT_REG_reg_id(x, i) ((x)[(i)].event_reg_id.reg_id) ++#define EVENT_REG_bus_no(x, i) ((x)[(i)].event_reg_id.pci_bus_no) ++#define EVENT_REG_dev_no(x, i) ((x)[(i)].event_reg_id.pci_dev_no) ++#define EVENT_REG_func_no(x, i) ((x)[(i)].event_reg_id.pci_func_no) ++#define EVENT_REG_offset(x, i) \ ++ ((x)[(i)].event_reg_id.reg_id) // points to the reg_id ++#define EVENT_REG_data_size(x, i) ((x)[(i)].event_reg_id.data_size) ++#define EVENT_REG_desc_id(x, i) ((x)[(i)].desc_id) ++#define 
EVENT_REG_flags(x, i) ((x)[(i)].flags) ++#define EVENT_REG_reg_value(x, i) ((x)[(i)].reg_value) ++#define EVENT_REG_max_bits(x, i) ((x)[(i)].max_bits) ++#define EVENT_REG_scheduled(x, i) ((x)[(i)].scheduled) ++#define EVENT_REG_secondary_pci_offset_shift(x, i) \ ++ ((x)[(i)].secondary_pci_offset_shift) ++#define EVENT_REG_secondary_pci_offset_offset(x, i) \ ++ ((x)[(i)].secondary_pci_offset_offset) ++#define EVENT_REG_secondary_pci_offset_mask(x, i) \ ++ ((x)[(i)].secondary_pci_offset_mask) ++ ++#define EVENT_REG_counter_type(x, i) ((x)[(i)].counter_type) ++#define EVENT_REG_event_scope(x, i) ((x)[(i)].event_scope) ++#define EVENT_REG_reg_prog_type(x, i) ((x)[(i)].reg_prog_type) ++#define EVENT_REG_reg_rw_type(x, i) ((x)[(i)].reg_rw_type) ++#define EVENT_REG_reg_order(x, i) ((x)[(i)].reg_order) ++#define EVENT_REG_bit_position(x, i) ((x)[(i)].bit_position) ++ ++#define EVENT_REG_core_event_id(x, i) ((x)[(i)].core_event_id) ++#define EVENT_REG_uncore_buffer_offset_in_package(x, i) \ ++ ((x)[(i)].uncore_buffer_offset_in_package) ++#define EVENT_REG_uncore_buffer_offset_in_system(x, i) \ ++ ((x)[(i)].uncore_buffer_offset_in_system) ++ ++// ++// Config bits ++// ++#define EVENT_REG_precise_bit 0x00000001 ++#define EVENT_REG_global_bit 0x00000002 ++#define EVENT_REG_uncore_bit 0x00000004 ++#define EVENT_REG_uncore_q_rst_bit 0x00000008 ++#define EVENT_REG_latency_bit 0x00000010 ++#define EVENT_REG_is_gp_reg_bit 0x00000020 ++#define EVENT_REG_clean_up_bit 0x00000040 ++#define EVENT_REG_em_trigger_bit 0x00000080 ++#define EVENT_REG_lbr_value_bit 0x00000100 ++#define EVENT_REG_fixed_reg_bit 0x00000200 ++#define EVENT_REG_multi_pkg_evt_bit 0x00001000 ++#define EVENT_REG_branch_evt_bit 0x00002000 ++ ++// ++// Accessor macros for config bits ++// ++#define EVENT_REG_precise_get(x, i) ((x)[(i)].flags & EVENT_REG_precise_bit) ++#define EVENT_REG_precise_set(x, i) ((x)[(i)].flags |= EVENT_REG_precise_bit) ++#define EVENT_REG_precise_clear(x, i) ((x)[(i)].flags &= 
~EVENT_REG_precise_bit) ++ ++#define EVENT_REG_global_get(x, i) ((x)[(i)].flags & EVENT_REG_global_bit) ++#define EVENT_REG_global_set(x, i) ((x)[(i)].flags |= EVENT_REG_global_bit) ++#define EVENT_REG_global_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_global_bit) ++ ++#define EVENT_REG_uncore_get(x, i) ((x)[(i)].flags & EVENT_REG_uncore_bit) ++#define EVENT_REG_uncore_set(x, i) ((x)[(i)].flags |= EVENT_REG_uncore_bit) ++#define EVENT_REG_uncore_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_uncore_bit) ++ ++#define EVENT_REG_uncore_q_rst_get(x, i) \ ++ ((x)[(i)].flags & EVENT_REG_uncore_q_rst_bit) ++#define EVENT_REG_uncore_q_rst_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_uncore_q_rst_bit) ++#define EVENT_REG_uncore_q_rst_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_uncore_q_rst_bit) ++ ++#define EVENT_REG_latency_get(x, i) ((x)[(i)].flags & EVENT_REG_latency_bit) ++#define EVENT_REG_latency_set(x, i) ((x)[(i)].flags |= EVENT_REG_latency_bit) ++#define EVENT_REG_latency_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_latency_bit) ++ ++#define EVENT_REG_is_gp_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_is_gp_reg_bit) ++#define EVENT_REG_is_gp_reg_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_is_gp_reg_bit) ++#define EVENT_REG_is_gp_reg_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_is_gp_reg_bit) ++ ++#define EVENT_REG_lbr_value_get(x, i) ((x)[(i)].flags & EVENT_REG_lbr_value_bit) ++#define EVENT_REG_lbr_value_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_lbr_value_bit) ++#define EVENT_REG_lbr_value_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_lbr_value_bit) ++ ++#define EVENT_REG_fixed_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_fixed_reg_bit) ++#define EVENT_REG_fixed_reg_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_fixed_reg_bit) ++#define EVENT_REG_fixed_reg_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_fixed_reg_bit) ++ ++#define EVENT_REG_multi_pkg_evt_bit_get(x, i) \ ++ ((x)[(i)].flags & EVENT_REG_multi_pkg_evt_bit) ++#define EVENT_REG_multi_pkg_evt_bit_set(x, i) \ ++ 
((x)[(i)].flags |= EVENT_REG_multi_pkg_evt_bit) ++#define EVENT_REG_multi_pkg_evt_bit_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_multi_pkg_evt_bit) ++ ++#define EVENT_REG_clean_up_get(x, i) ((x)[(i)].flags & EVENT_REG_clean_up_bit) ++#define EVENT_REG_clean_up_set(x, i) ((x)[(i)].flags |= EVENT_REG_clean_up_bit) ++#define EVENT_REG_clean_up_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_clean_up_bit) ++ ++#define EVENT_REG_em_trigger_get(x, i) \ ++ ((x)[(i)].flags & EVENT_REG_em_trigger_bit) ++#define EVENT_REG_em_trigger_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_em_trigger_bit) ++#define EVENT_REG_em_trigger_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_em_trigger_bit) ++ ++#define EVENT_REG_branch_evt_get(x, i) \ ++ ((x)[(i)].flags & EVENT_REG_branch_evt_bit) ++#define EVENT_REG_branch_evt_set(x, i) \ ++ ((x)[(i)].flags |= EVENT_REG_branch_evt_bit) ++#define EVENT_REG_branch_evt_clear(x, i) \ ++ ((x)[(i)].flags &= ~EVENT_REG_branch_evt_bit) ++ ++// *************************************************************************** ++ ++/*!\struct DRV_PCI_DEVICE_ENTRY_NODE_S ++ * \var bus_no - PCI bus no to read ++ * \var dev_no - PCI device no to read ++ * \var func_no PCI device no to read ++ * \var bar_offset BASE Address Register offset of the PCI based PMU ++ * \var bit_offset Bit offset of the same ++ * \var size size of read/write ++ * \var bar_address the actual BAR present ++ * \var enable_offset Offset info to enable/disable ++ * \var enabled Status of enable/disable ++ * \brief Data structure to describe the PCI Device ++ * ++ */ ++ ++typedef struct DRV_PCI_DEVICE_ENTRY_NODE_S DRV_PCI_DEVICE_ENTRY_NODE; ++typedef DRV_PCI_DEVICE_ENTRY_NODE * DRV_PCI_DEVICE_ENTRY; ++ ++struct DRV_PCI_DEVICE_ENTRY_NODE_S { ++ U32 bus_no; ++ U32 dev_no; ++ U32 func_no; ++ U32 bar_offset; ++ U64 bar_mask; ++ U32 bit_offset; ++ U32 size; ++ U64 bar_address; ++ U32 enable_offset; ++ U32 enabled; ++ U32 base_offset_for_mmio; ++ U32 operation; ++ U32 bar_name; ++ U32 prog_type; 
++ U32 config_type; ++ S8 bar_shift; // positive shifts right, negative shifts left ++ U8 reserved0; ++ U16 reserved1; ++ U64 value; ++ U64 mask; ++ U64 virtual_address; ++ U32 port_id; ++ U32 op_code; ++ U32 device_id; ++ U16 bar_num; ++ U16 feature_id; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++// ++// Accessor macros for DRV_PCI_DEVICE_NODE node ++// ++#define DRV_PCI_DEVICE_ENTRY_bus_no(x) ((x)->bus_no) ++#define DRV_PCI_DEVICE_ENTRY_dev_no(x) ((x)->dev_no) ++#define DRV_PCI_DEVICE_ENTRY_func_no(x) ((x)->func_no) ++#define DRV_PCI_DEVICE_ENTRY_bar_offset(x) ((x)->bar_offset) ++#define DRV_PCI_DEVICE_ENTRY_bar_mask(x) ((x)->bar_mask) ++#define DRV_PCI_DEVICE_ENTRY_bit_offset(x) ((x)->bit_offset) ++#define DRV_PCI_DEVICE_ENTRY_size(x) ((x)->size) ++#define DRV_PCI_DEVICE_ENTRY_bar_address(x) ((x)->bar_address) ++#define DRV_PCI_DEVICE_ENTRY_enable_offset(x) ((x)->enable_offset) ++#define DRV_PCI_DEVICE_ENTRY_enable(x) ((x)->enabled) ++#define DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(x) ((x)->base_offset_for_mmio) ++#define DRV_PCI_DEVICE_ENTRY_operation(x) ((x)->operation) ++#define DRV_PCI_DEVICE_ENTRY_bar_name(x) ((x)->bar_name) ++#define DRV_PCI_DEVICE_ENTRY_prog_type(x) ((x)->prog_type) ++#define DRV_PCI_DEVICE_ENTRY_config_type(x) ((x)->config_type) ++#define DRV_PCI_DEVICE_ENTRY_bar_shift(x) ((x)->bar_shift) ++#define DRV_PCI_DEVICE_ENTRY_value(x) ((x)->value) ++#define DRV_PCI_DEVICE_ENTRY_mask(x) ((x)->mask) ++#define DRV_PCI_DEVICE_ENTRY_virtual_address(x) ((x)->virtual_address) ++#define DRV_PCI_DEVICE_ENTRY_port_id(x) ((x)->port_id) ++#define DRV_PCI_DEVICE_ENTRY_op_code(x) ((x)->op_code) ++#define DRV_PCI_DEVICE_ENTRY_device_id(x) ((x)->device_id) ++#define DRV_PCI_DEVICE_ENTRY_bar_num(x) ((x)->bar_num) ++#define DRV_PCI_DEVICE_ENTRY_feature_id(x) ((x)->feature_id) ++ ++// *************************************************************************** ++typedef enum { ++ PMU_OPERATION_INITIALIZE = 0, ++ PMU_OPERATION_WRITE, ++ 
PMU_OPERATION_ENABLE, ++ PMU_OPERATION_DISABLE, ++ PMU_OPERATION_READ, ++ PMU_OPERATION_CLEANUP, ++ PMU_OPERATION_READ_LBRS, ++ PMU_OPERATION_GLOBAL_REGS, ++ PMU_OPERATION_CTRL_GP, ++ PMU_OPERATION_DATA_FIXED, ++ PMU_OPERATION_DATA_GP, ++ PMU_OPERATION_OCR, ++ PMU_OPERATION_HW_ERRATA, ++ PMU_OPERATION_CHECK_OVERFLOW_GP_ERRATA, ++ PMU_OPERATION_CHECK_OVERFLOW_ERRATA, ++ PMU_OPERATION_ALL_REG, ++ PMU_OPERATION_DATA_ALL, ++ PMU_OPERATION_GLOBAL_STATUS, ++ PMU_OPERATION_METRICS, ++} PMU_OPERATION_TYPES; ++#define MAX_OPERATION_TYPES 32 ++ ++/*!\struct PMU_OPERATIONS_NODE ++ * \var operation_type - Type of operation from enumeration PMU_OPERATION_TYPES ++ * \var register_start - Start index of the registers for a specific operation ++ * \var register_len - Number of registers for a specific operation ++ * ++ * \brief ++ * Structure for defining start and end indices in the ECB entries array for ++ * each type of operation performed in the driver ++ * initialize, write, read, enable, disable, etc. 
++ */ ++typedef struct PMU_OPERATIONS_NODE_S PMU_OPERATIONS_NODE; ++typedef PMU_OPERATIONS_NODE * PMU_OPERATIONS; ++struct PMU_OPERATIONS_NODE_S { ++ U32 operation_type; ++ U32 register_start; ++ U32 register_len; ++ U32 reserved1; ++ U32 reserved2; ++ U32 reserved3; ++}; ++#define PMU_OPERATIONS_operation_type(x) ((x)->operation_type) ++#define PMU_OPERATIONS_register_start(x) ((x)->register_start) ++#define PMU_OPERATIONS_register_len(x) ((x)->register_len) ++#define PMU_OPER_operation_type(x, i) ((x)[(i)].operation_type) ++#define PMU_OPER_register_start(x, i) ((x)[(i)].register_start) ++#define PMU_OPER_register_len(x, i) ((x)[(i)].register_len) ++ ++typedef enum { ++ ECB_MMIO_BAR1 = 1, ++ ECB_MMIO_BAR2 = 2, ++ ECB_MMIO_BAR3 = 3, ++ ECB_MMIO_BAR4 = 4, ++ ECB_MMIO_BAR5 = 5, ++ ECB_MMIO_BAR6 = 6, ++ ECB_MMIO_BAR7 = 7, ++ ECB_MMIO_BAR8 = 8, ++} MMIO_INDEX_LIST; ++#define MAX_MMIO_BARS 8 ++ ++/*!\struct MMIO_BAR_INFO_NODE ++ */ ++typedef struct MMIO_BAR_INFO_NODE_S MMIO_BAR_INFO_NODE; ++typedef MMIO_BAR_INFO_NODE * MMIO_BAR_INFO; ++ ++struct MMIO_BAR_INFO_NODE_S { ++ U32 bus_no; ++ U32 dev_no; ++ U32 func_no; ++ U32 offset; ++ U32 addr_size; ++ U32 map_size; ++ S8 bar_shift; ++ U8 reserved1; ++ U16 reserved2; ++ U32 reserved3; ++ U32 reserved4; ++ U32 reserved5; ++ U64 bar_mask; ++ U64 base_mmio_offset; ++ U64 physical_address; ++ U64 virtual_address; ++ U64 reserved6; ++ U64 reserved7; ++}; ++ ++/*!\struct ECB_NODE_S ++ * \var num_entries - Total number of entries in "entries". ++ * \var group_id - Group ID. ++ * \var num_events - Number of events in this group. ++ * \var cccr_start - Starting index of counter configuration control registers in "entries". ++ * \var cccr_pop - Number of counter configuration control registers in "entries". ++ * \var escr_start - Starting index of event selection control registers in "entries". ++ * \var escr_pop - Number of event selection control registers in "entries". 
++ * \var data_start - Starting index of data registers in "entries". ++ * \var data_pop - Number of data registers in "entries". ++ * \var pcidev_entry_node PCI device details for one device ++ * \var entries - . All the register nodes required for programming ++ * ++ * \brief ++ */ ++ ++typedef struct ECB_NODE_S ECB_NODE; ++typedef ECB_NODE * ECB; ++ ++struct ECB_NODE_S { ++ U8 version; ++ U8 reserved1; ++ U16 reserved2; ++ U32 num_entries; ++ U32 group_id; ++ U32 num_events; ++ U32 cccr_start; ++ U32 cccr_pop; ++ U32 escr_start; ++ U32 escr_pop; ++ U32 data_start; ++ U32 data_pop; ++ U16 flags; ++ U8 pmu_timer_interval; ++ U8 reserved3; ++ U32 size_of_allocation; ++ U32 group_offset; ++ U32 reserved4; ++ DRV_PCI_DEVICE_ENTRY_NODE pcidev_entry_node; ++ U32 num_pci_devices; ++ U32 pcidev_list_offset; ++ DRV_PCI_DEVICE_ENTRY pcidev_entry_list; ++ U32 device_type; ++ U32 dev_node; ++ PMU_OPERATIONS_NODE operations[MAX_OPERATION_TYPES]; ++ U32 descriptor_id; ++ U32 reserved5; ++ U32 metric_start; ++ U32 metric_pop; ++ MMIO_BAR_INFO_NODE mmio_bar_list[MAX_MMIO_BARS]; ++ U64 reserved6; ++ U64 reserved7; ++ U64 reserved8; ++ EVENT_REG_NODE entries[]; ++}; ++ ++// ++// Accessor macros for ECB node ++// ++#define ECB_version(x) ((x)->version) ++#define ECB_num_entries(x) ((x)->num_entries) ++#define ECB_group_id(x) ((x)->group_id) ++#define ECB_num_events(x) ((x)->num_events) ++#define ECB_cccr_start(x) ((x)->cccr_start) ++#define ECB_cccr_pop(x) ((x)->cccr_pop) ++#define ECB_escr_start(x) ((x)->escr_start) ++#define ECB_escr_pop(x) ((x)->escr_pop) ++#define ECB_data_start(x) ((x)->data_start) ++#define ECB_data_pop(x) ((x)->data_pop) ++#define ECB_metric_start(x) ((x)->metric_start) ++#define ECB_metric_pop(x) ((x)->metric_pop) ++#define ECB_pcidev_entry_node(x) ((x)->pcidev_entry_node) ++#define ECB_num_pci_devices(x) ((x)->num_pci_devices) ++#define ECB_pcidev_list_offset(x) ((x)->pcidev_list_offset) ++#define ECB_pcidev_entry_list(x) ((x)->pcidev_entry_list) ++#define 
ECB_flags(x) ((x)->flags) ++#define ECB_pmu_timer_interval(x) ((x)->pmu_timer_interval) ++#define ECB_size_of_allocation(x) ((x)->size_of_allocation) ++#define ECB_group_offset(x) ((x)->group_offset) ++#define ECB_device_type(x) ((x)->device_type) ++#define ECB_dev_node(x) ((x)->dev_node) ++#define ECB_operations(x) ((x)->operations) ++#define ECB_descriptor_id(x) ((x)->descriptor_id) ++#define ECB_entries(x) ((x)->entries) ++ ++// for flag bit field ++#define ECB_direct2core_bit 0x0001 ++#define ECB_bl_bypass_bit 0x0002 ++#define ECB_pci_id_offset_bit 0x0003 ++#define ECB_pcu_ccst_debug 0x0004 ++ ++#define ECB_VERSION 2 ++ ++#define ECB_CONSTRUCT(x, num_entries, group_id, cccr_start, escr_start, \ ++ data_start, size_of_allocation) \ ++ { \ ++ ECB_num_entries((x)) = (num_entries); \ ++ ECB_group_id((x)) = (group_id); \ ++ ECB_cccr_start((x)) = (cccr_start); \ ++ ECB_cccr_pop((x)) = 0; \ ++ ECB_escr_start((x)) = (escr_start); \ ++ ECB_escr_pop((x)) = 0; \ ++ ECB_data_start((x)) = (data_start); \ ++ ECB_data_pop((x)) = 0; \ ++ ECB_metric_start((x)) = 0; \ ++ ECB_metric_pop((x)) = 0; \ ++ ECB_num_pci_devices((x)) = 0; \ ++ ECB_version((x)) = ECB_VERSION; \ ++ ECB_size_of_allocation((x)) = (size_of_allocation); \ ++ } ++ ++#define ECB_CONSTRUCT2(x, num_entries, group_id, size_of_allocation) \ ++ { \ ++ ECB_num_entries((x)) = (num_entries); \ ++ ECB_group_id((x)) = (group_id); \ ++ ECB_num_pci_devices((x)) = 0; \ ++ ECB_version((x)) = ECB_VERSION; \ ++ ECB_size_of_allocation((x)) = (size_of_allocation); \ ++ } ++ ++ ++#define ECB_CONSTRUCT1(x, num_entries, group_id, cccr_start, escr_start, \ ++ data_start, num_pci_devices, size_of_allocation) \ ++ { \ ++ ECB_num_entries((x)) = (num_entries); \ ++ ECB_group_id((x)) = (group_id); \ ++ ECB_cccr_start((x)) = (cccr_start); \ ++ ECB_cccr_pop((x)) = 0; \ ++ ECB_escr_start((x)) = (escr_start); \ ++ ECB_escr_pop((x)) = 0; \ ++ ECB_data_start((x)) = (data_start); \ ++ ECB_data_pop((x)) = 0; \ ++ ECB_metric_start((x)) = 0; \ ++ 
ECB_metric_pop((x)) = 0; \ ++ ECB_num_pci_devices((x)) = (num_pci_devices); \ ++ ECB_version((x)) = ECB_VERSION; \ ++ ECB_size_of_allocation((x)) = (size_of_allocation); \ ++ } ++ ++ ++// ++// Accessor macros for ECB node entries ++// ++#define ECB_entries_reg_type(x, i) EVENT_REG_reg_type((ECB_entries(x)), (i)) ++#define ECB_entries_event_id_index(x, i) \ ++ EVENT_REG_event_id_index((ECB_entries(x)), (i)) ++#define ECB_entries_unit_id(x, i) EVENT_REG_unit_id((ECB_entries(x)), (i)) ++#define ECB_entries_counter_event_offset(x, i) \ ++ EVENT_REG_counter_event_offset((ECB_entries(x)), (i)) ++#define ECB_entries_reg_id(x, i) EVENT_REG_reg_id((ECB_entries(x)), (i)) ++#define ECB_entries_reg_prog_type(x, i) \ ++ EVENT_REG_reg_prog_type((ECB_entries(x)), (i)) ++#define ECB_entries_reg_offset(x, i) EVENT_REG_offset((ECB_entries(x)), (i)) ++#define ECB_entries_reg_data_size(x, i) \ ++ EVENT_REG_data_size((ECB_entries(x)), (i)) ++#define ECB_entries_desc_id(x, i) EVENT_REG_desc_id((ECB_entries(x)), i) ++#define ECB_entries_flags(x, i) EVENT_REG_flags((ECB_entries(x)), i) ++#define ECB_entries_reg_order(x, i) EVENT_REG_reg_order((ECB_entries(x)), i) ++#define ECB_entries_reg_value(x, i) EVENT_REG_reg_value((ECB_entries(x)), (i)) ++#define ECB_entries_max_bits(x, i) EVENT_REG_max_bits((ECB_entries(x)), (i)) ++#define ECB_entries_scheduled(x, i) EVENT_REG_scheduled((ECB_entries(x)), (i)) ++#define ECB_entries_counter_event_offset(x, i) \ ++ EVENT_REG_counter_event_offset((ECB_entries(x)), (i)) ++#define ECB_entries_bit_position(x, i) \ ++ EVENT_REG_bit_position((ECB_entries(x)), (i)) ++// PCI config-specific fields ++#define ECB_entries_bus_no(x, i) EVENT_REG_bus_no((ECB_entries(x)), (i)) ++#define ECB_entries_dev_no(x, i) EVENT_REG_dev_no((ECB_entries(x)), (i)) ++#define ECB_entries_func_no(x, i) EVENT_REG_func_no((ECB_entries(x)), (i)) ++#define ECB_entries_counter_type(x, i) \ ++ EVENT_REG_counter_type((ECB_entries(x)), (i)) ++#define ECB_entries_event_scope(x, i) \ ++ 
EVENT_REG_event_scope((ECB_entries(x)), (i)) ++#define ECB_entries_precise_get(x, i) \ ++ EVENT_REG_precise_get((ECB_entries(x)), (i)) ++#define ECB_entries_global_get(x, i) EVENT_REG_global_get((ECB_entries(x)), (i)) ++#define ECB_entries_uncore_get(x, i) EVENT_REG_uncore_get((ECB_entries(x)), (i)) ++#define ECB_entries_uncore_q_rst_get(x, i) \ ++ EVENT_REG_uncore_q_rst_get((ECB_entries(x)), (i)) ++#define ECB_entries_is_gp_reg_get(x, i) \ ++ EVENT_REG_is_gp_reg_get((ECB_entries(x)), (i)) ++#define ECB_entries_lbr_value_get(x, i) \ ++ EVENT_REG_lbr_value_get((ECB_entries(x)), (i)) ++#define ECB_entries_fixed_reg_get(x, i) \ ++ EVENT_REG_fixed_reg_get((ECB_entries(x)), (i)) ++#define ECB_entries_is_multi_pkg_bit_set(x, i) \ ++ EVENT_REG_multi_pkg_evt_bit_get((ECB_entries(x)), (i)) ++#define ECB_entries_clean_up_get(x, i) \ ++ EVENT_REG_clean_up_get((ECB_entries(x)), (i)) ++#define ECB_entries_em_trigger_get(x, i) \ ++ EVENT_REG_em_trigger_get((ECB_entries(x)), (i)) ++#define ECB_entries_branch_evt_get(x, i) \ ++ EVENT_REG_branch_evt_get((ECB_entries(x)), (i)) ++#define ECB_entries_reg_rw_type(x, i) \ ++ EVENT_REG_reg_rw_type((ECB_entries(x)), (i)) ++#define ECB_entries_secondary_pci_offset_offset(x, i) \ ++ EVENT_REG_secondary_pci_offset_offset((ECB_entries(x)), (i)) ++#define ECB_entries_secondary_pci_offset_shift(x, i) \ ++ EVENT_REG_secondary_pci_offset_shift((ECB_entries(x)), (i)) ++#define ECB_entries_secondary_pci_offset_mask(x, i) \ ++ EVENT_REG_secondary_pci_offset_mask((ECB_entries(x)), (i)) ++#define ECB_operations_operation_type(x, i) \ ++ PMU_OPER_operation_type((ECB_operations(x)), (i)) ++#define ECB_operations_register_start(x, i) \ ++ PMU_OPER_register_start((ECB_operations(x)), (i)) ++#define ECB_operations_register_len(x, i) \ ++ PMU_OPER_register_len((ECB_operations(x)), (i)) ++ ++#define ECB_entries_core_event_id(x, i) \ ++ EVENT_REG_core_event_id((ECB_entries(x)), (i)) ++#define ECB_entries_uncore_buffer_offset_in_package(x, i) \ ++ 
EVENT_REG_uncore_buffer_offset_in_package((ECB_entries(x)), (i)) ++#define ECB_entries_uncore_buffer_offset_in_system(x, i) \ ++ EVENT_REG_uncore_buffer_offset_in_system((ECB_entries(x)), (i)) ++ ++#define ECB_SET_OPERATIONS(x, operation_type, start, len) \ ++ { \ ++ ECB_operations_operation_type(x, operation_type) = operation_type; \ ++ ECB_operations_register_start(x, operation_type) = start; \ ++ ECB_operations_register_len(x, operation_type) = len; \ ++ } ++ ++ ++// *************************************************************************** ++ ++/*!\struct LBR_ENTRY_NODE_S ++ * \var etype TOS = 0; FROM = 1; TO = 2 ++ * \var type_index ++ * \var reg_id ++ */ ++ ++typedef struct LBR_ENTRY_NODE_S LBR_ENTRY_NODE; ++typedef LBR_ENTRY_NODE * LBR_ENTRY; ++ ++struct LBR_ENTRY_NODE_S { ++ U16 etype; ++ U16 type_index; ++ U32 reg_id; ++}; ++ ++// ++// Accessor macros for LBR entries ++// ++#define LBR_ENTRY_NODE_etype(lentry) ((lentry).etype) ++#define LBR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) ++#define LBR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct LBR_NODE_S ++ * \var num_entries - The number of entries ++ * \var entries - The entries in the list ++ * ++ * \brief Data structure to describe the LBR registers that need to be read ++ * ++ */ ++ ++typedef struct LBR_NODE_S LBR_NODE; ++typedef LBR_NODE * LBR; ++ ++struct LBR_NODE_S { ++ U32 size; ++ U32 num_entries; ++ LBR_ENTRY_NODE entries[]; ++}; ++ ++// ++// Accessor macros for LBR node ++// ++#define LBR_size(lbr) ((lbr)->size) ++#define LBR_num_entries(lbr) ((lbr)->num_entries) ++#define LBR_entries_etype(lbr, idx) ((lbr)->entries[idx].etype) ++#define LBR_entries_type_index(lbr, idx) ((lbr)->entries[idx].type_index) ++#define LBR_entries_reg_id(lbr, idx) ((lbr)->entries[idx].reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct PWR_ENTRY_NODE_S ++ * 
\var etype none as yet ++ * \var type_index ++ * \var reg_id ++ */ ++ ++typedef struct PWR_ENTRY_NODE_S PWR_ENTRY_NODE; ++typedef PWR_ENTRY_NODE * PWR_ENTRY; ++ ++struct PWR_ENTRY_NODE_S { ++ U16 etype; ++ U16 type_index; ++ U32 reg_id; ++}; ++ ++// ++// Accessor macros for PWR entries ++// ++#define PWR_ENTRY_NODE_etype(lentry) ((lentry).etype) ++#define PWR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) ++#define PWR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct PWR_NODE_S ++ * \var num_entries - The number of entries ++ * \var entries - The entries in the list ++ * ++ * \brief Data structure to describe the PWR registers that need to be read ++ * ++ */ ++ ++typedef struct PWR_NODE_S PWR_NODE; ++typedef PWR_NODE * PWR; ++ ++struct PWR_NODE_S { ++ U32 size; ++ U32 num_entries; ++ PWR_ENTRY_NODE entries[]; ++}; ++ ++// ++// Accessor macros for PWR node ++// ++#define PWR_size(lentry) ((lentry)->size) ++#define PWR_num_entries(lentry) ((lentry)->num_entries) ++#define PWR_entries_etype(lentry, idx) ((lentry)->entries[idx].etype) ++#define PWR_entries_type_index(lentry, idx) ((lentry)->entries[idx].type_index) ++#define PWR_entries_reg_id(lentry, idx) ((lentry)->entries[idx].reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct RO_ENTRY_NODE_S ++ * \var type - DEAR, IEAR, BTB. ++ */ ++ ++typedef struct RO_ENTRY_NODE_S RO_ENTRY_NODE; ++typedef RO_ENTRY_NODE * RO_ENTRY; ++ ++struct RO_ENTRY_NODE_S { ++ U32 reg_id; ++}; ++ ++// ++// Accessor macros for RO entries ++// ++#define RO_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) ++ ++// *************************************************************************** ++ ++/*!\struct RO_NODE_S ++ * \var size - The total size including header and entries. ++ * \var num_entries - The number of entries. ++ * \var entries - The entries in the list. 
++ * ++ * \brief Data structure to describe the RO registers that need to be read. ++ * ++ */ ++ ++typedef struct RO_NODE_S RO_NODE; ++typedef RO_NODE * RO; ++ ++struct RO_NODE_S { ++ U32 size; ++ U32 num_entries; ++ RO_ENTRY_NODE entries[]; ++}; ++ ++// ++// Accessor macros for RO node ++// ++#define RO_size(ro) ((ro)->size) ++#define RO_num_entries(ro) ((ro)->num_entries) ++#define RO_entries_reg_id(ro, idx) ((ro)->entries[idx].reg_id) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_ioctl.h b/drivers/platform/x86/socperf/include/lwpmudrv_ioctl.h +new file mode 100644 +index 000000000000..0b1ee130c8b5 +--- /dev/null ++++ b/drivers/platform/x86/socperf/include/lwpmudrv_ioctl.h +@@ -0,0 +1,343 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2007-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2007-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * *********************************************************************************************** ++ */ ++ ++ ++#ifndef _LWPMUDRV_IOCTL_H_ ++#define _LWPMUDRV_IOCTL_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++//SEP Driver Operation defines ++// ++#define DRV_OPERATION_START 1 ++#define DRV_OPERATION_STOP 2 ++#define DRV_OPERATION_INIT_PMU 3 ++#define DRV_OPERATION_GET_NORMALIZED_TSC 4 ++#define DRV_OPERATION_TSC_SKEW_INFO 5 ++#define DRV_OPERATION_PAUSE 6 ++#define DRV_OPERATION_RESUME 7 ++#define DRV_OPERATION_TERMINATE 8 ++#define DRV_OPERATION_RESERVE 9 ++#define DRV_OPERATION_VERSION 10 ++#define DRV_OPERATION_SWITCH_GROUP 11 ++#define DRV_OPERATION_GET_DRIVER_STATE 12 ++#define DRV_OPERATION_INIT_UNCORE 13 ++#define DRV_OPERATION_EM_GROUPS_UNCORE 14 ++#define DRV_OPERATION_EM_CONFIG_NEXT_UNCORE 15 ++#define DRV_OPERATION_READ_UNCORE_DATA 16 ++#define DRV_OPERATION_STOP_MEM 17 ++#define DRV_OPERATION_CREATE_MEM 18 ++#define DRV_OPERATION_READ_MEM 19 ++#define DRV_OPERATION_CHECK_STATUS 20 ++#define DRV_OPERATION_TIMER_TRIGGER_READ 21 ++#define DRV_OPERATION_INIT_DRIVER 22 ++ ++// IOCTL_SETUP ++// ++ ++#if defined(DRV_OS_WINDOWS) ++ ++// ++// NtDeviceIoControlFile IoControlCode values for this device. ++// ++// Warning: Remember that the low two bits of the code specify how the ++// buffers are passed to the driver! ++// ++// 16 bit device type. 
12 bit function codes ++#define LWPMUDRV_IOCTL_DEVICE_TYPE \ ++ 0xA000 // values 0-32768 reserved for Microsoft ++#define LWPMUDRV_IOCTL_FUNCTION 0x0A00 // values 0-2047 reserved for Microsoft ++ ++// ++// Basic CTL CODE macro to reduce typographical errors ++// Use for FILE_READ_ACCESS ++// ++#define LWPMUDRV_CTL_READ_CODE(x) \ ++ CTL_CODE(LWPMUDRV_IOCTL_DEVICE_TYPE, LWPMUDRV_IOCTL_FUNCTION + (x), \ ++ METHOD_BUFFERED, FILE_READ_ACCESS) ++ ++#define LWPMUDRV_IOCTL_START LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_START) ++#define LWPMUDRV_IOCTL_STOP LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_STOP) ++#define LWPMUDRV_IOCTL_INIT_PMU LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_INIT_PMU) ++#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_GET_NORMALIZED_TSC) ++#define LWPMUDRV_IOCTL_TSC_SKEW_INFO \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_TSC_SKEW_INFO) ++#define LWPMUDRV_IOCTL_PAUSE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_PAUSE) ++#define LWPMUDRV_IOCTL_RESUME LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_RESUME) ++#define LWPMUDRV_IOCTL_TERMINATE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_TERMINATE) ++#define LWPMUDRV_IOCTL_RESERVE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_RESERVE) ++#define LWPMUDRV_IOCTL_VERSION LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_VERSION) ++#define LWPMUDRV_IOCTL_SWITCH_GROUP \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_SWITCH_GROUP) ++#define LWPMUDRV_IOCTL_GET_DRIVER_STATE \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_GET_DRIVER_STATE) ++#define LWPMUDRV_IOCTL_INIT_UNCORE \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_INIT_UNCORE) ++#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_EM_GROUPS_UNCORE) ++#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_EM_CONFIG_NEXT_UNCORE) ++#define LWPMUDRV_IOCTL_READ_UNCORE_DATA \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_READ_UNCORE_DATA) ++#define LWPMUDRV_IOCTL_STOP_MEM LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_STOP_MEM) ++#define LWPMUDRV_IOCTL_CREATE_MEM \ ++ 
LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_CREATE_MEM) ++#define LWPMUDRV_IOCTL_READ_MEM LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_READ_MEM) ++#define LWPMUDRV_IOCTL_CHECK_STATUS \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_CHECK_STATUS) ++#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_TIMER_TRIGGER_READ) ++#define LWPMUDRV_IOCTL_INIT_DRIVER \ ++ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_INIT_DRIVER) ++ ++#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ ++ defined(DRV_OS_ANDROID) ++// IOCTL_ARGS ++typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE; ++typedef IOCTL_ARGS_NODE * IOCTL_ARGS; ++struct IOCTL_ARGS_NODE_S { ++ U64 len_drv_to_usr; ++ U64 len_usr_to_drv; ++ char *buf_drv_to_usr; ++ char *buf_usr_to_drv; ++}; ++ ++// COMPAT IOCTL_ARGS ++#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) ++typedef struct IOCTL_COMPAT_ARGS_NODE_S IOCTL_COMPAT_ARGS_NODE; ++typedef IOCTL_COMPAT_ARGS_NODE * IOCTL_COMPAT_ARGS; ++struct IOCTL_COMPAT_ARGS_NODE_S { ++ U64 len_drv_to_usr; ++ U64 len_usr_to_drv; ++ compat_uptr_t buf_drv_to_usr; ++ compat_uptr_t buf_usr_to_drv; ++}; ++#endif ++ ++#define LWPMU_IOC_MAGIC 99 ++ ++// IOCTL_SETUP ++// ++#define LWPMUDRV_IOCTL_START _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_START) ++#define LWPMUDRV_IOCTL_STOP _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_STOP) ++#define LWPMUDRV_IOCTL_INIT_PMU \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_PMU, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_NORMALIZED_TSC, int) ++#define LWPMUDRV_IOCTL_TSC_SKEW_INFO \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_TSC_SKEW_INFO, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_PAUSE _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_PAUSE) ++#define LWPMUDRV_IOCTL_RESUME _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_RESUME) ++#define LWPMUDRV_IOCTL_TERMINATE _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_TERMINATE) ++#define LWPMUDRV_IOCTL_RESERVE \ ++ _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_RESERVE, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_VERSION \ ++ 
_IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_VERSION, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_SWITCH_GROUP \ ++ _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_SWITCH_GROUP) ++#define LWPMUDRV_IOCTL_GET_DRIVER_STATE \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_DRIVER_STATE, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_INIT_UNCORE \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_UNCORE, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_EM_GROUPS_UNCORE, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_EM_CONFIG_NEXT_UNCORE, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_READ_UNCORE_DATA \ ++ _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_UNCORE_DATA, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_STOP_MEM _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_STOP_MEM) ++#define LWPMUDRV_IOCTL_CREATE_MEM \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_CREATE_MEM, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_READ_MEM \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_MEM, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_CHECK_STATUS \ ++ _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_CHECK_STATUS, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ \ ++ _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_TIMER_TRIGGER_READ) ++#define LWPMUDRV_IOCTL_INIT_DRIVER \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_DRIVER, IOCTL_ARGS) ++ ++#elif defined(DRV_OS_FREEBSD) ++ ++// IOCTL_ARGS ++typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE; ++typedef IOCTL_ARGS_NODE * IOCTL_ARGS; ++struct IOCTL_ARGS_NODE_S { ++ U64 len_drv_to_usr; ++ char *buf_drv_to_usr; ++ U64 len_usr_to_drv; ++ char *buf_usr_to_drv; ++}; ++ ++// IOCTL_SETUP ++// ++#define LWPMU_IOC_MAGIC 99 ++ ++/* FreeBSD is very strict about IOR/IOW/IOWR specifications on IOCTLs. 
++ * Since these IOCTLs all pass down the real read/write buffer lengths ++ * and addresses inside of an IOCTL_ARGS_NODE data structure, we ++ * need to specify all of these as _IOW so that the kernel will ++ * view it as userspace passing the data to the driver, rather than ++ * the reverse. There are also some cases where Linux is passing ++ * a smaller type than IOCTL_ARGS_NODE, even though its really ++ * passing an IOCTL_ARGS_NODE. These needed to be fixed for FreeBSD. ++ */ ++#define LWPMUDRV_IOCTL_START _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_START) ++#define LWPMUDRV_IOCTL_STOP _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_STOP) ++#define LWPMUDRV_IOCTL_INIT_PMU _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_PMU) ++#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_NORMALIZED_TSC, IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_TSC_SKEW_INFO \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_TSC_SKEW_INFO, IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_PAUSE _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_PAUSE) ++#define LWPMUDRV_IOCTL_RESUME _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_RESUME) ++#define LWPMUDRV_IOCTL_TERMINATE _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_TERMINATE) ++#define LWPMUDRV_IOCTL_RESERVE \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_RESERVE, IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_VERSION \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_VERSION, IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_SWITCH_GROUP \ ++ _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_SWITCH_GROUP) ++#define LWPMUDRV_IOCTL_GET_DRIVER_STATE \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_DRIVER_STATE, IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_INIT_UNCORE \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_UNCORE, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_EM_GROUPS_UNCORE, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_EM_CONFIG_NEXT_UNCORE, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_READ_UNCORE_DATA \ ++ _IOR(LWPMU_IOC_MAGIC, 
DRV_OPERATION_READ_UNCORE_DATA, IOCTL_ARGS) ++#define LWPMUDRV_IOCTL_STOP_MEM _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_STOP_MEM) ++#define LWPMUDRV_IOCTL_CREATE_MEM \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_CREATE_MEM, IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_READ_MEM \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_MEM, IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_CHECK_STATUS \ ++ _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_CHECK_STATUS, IOCTL_ARGS_NODE) ++#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ \ ++ _IO(LWPMU_IOC_MAGIC, DRV_OPERATION_TIMER_TRIGGER_READ) ++#define LWPMUDRV_IOCTL_INIT_DRIVER \ ++ _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_DRIVER, IOCTL_ARGS) ++ ++#elif defined(DRV_OS_MAC) ++ ++// IOCTL_ARGS ++typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE; ++typedef IOCTL_ARGS_NODE * IOCTL_ARGS; ++struct IOCTL_ARGS_NODE_S { ++ U64 len_drv_to_usr; ++ char *buf_drv_to_usr; ++ U64 len_usr_to_drv; ++ char *buf_usr_to_drv; ++ U32 command; ++}; ++ ++typedef struct CPU_ARGS_NODE_S CPU_ARGS_NODE; ++typedef CPU_ARGS_NODE * CPU_ARGS; ++struct CPU_ARGS_NODE_S { ++ U64 len_drv_to_usr; ++ char *buf_drv_to_usr; ++ U32 command; ++ U32 CPU_ID; ++ U32 BUCKET_ID; ++}; ++ ++// IOCTL_SETUP ++// ++#define LWPMU_IOC_MAGIC 99 ++#define OS_SUCCESS 0 ++#define OS_STATUS int ++#define OS_ILLEGAL_IOCTL -ENOTTY ++#define OS_NO_MEM -ENOMEM ++#define OS_FAULT -EFAULT ++ ++// Task file Opcodes. ++// keeping the definitions as IOCTL but in MAC OSX ++// these are really OpCodes consumed by Execute command. 
++#define LWPMUDRV_IOCTL_START DRV_OPERATION_START ++#define LWPMUDRV_IOCTL_STOP DRV_OPERATION_STOP ++#define LWPMUDRV_IOCTL_INIT_PMU DRV_OPERATION_INIT_PMU ++#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC DRV_OPERATION_GET_NORMALIZED_TSC ++#define LWPMUDRV_IOCTL_TSC_SKEW_INFO DRV_OPERATION_TSC_SKEW_INFO ++#define LWPMUDRV_IOCTL_PAUSE DRV_OPERATION_PAUSE ++#define LWPMUDRV_IOCTL_RESUME DRV_OPERATION_RESUME ++#define LWPMUDRV_IOCTL_TERMINATE DRV_OPERATION_TERMINATE ++#define LWPMUDRV_IOCTL_RESERVE DRV_OPERATION_RESERVE ++#define LWPMUDRV_IOCTL_VERSION DRV_OPERATION_VERSION ++#define LWPMUDRV_IOCTL_SWITCH_GROUP DRV_OPERATION_SWITCH_GROUP ++#define LWPMUDRV_IOCTL_GET_DRIVER_STATE DRV_OPERATION_GET_DRIVER_STATE ++#define LWPMUDRV_IOCTL_INIT_UNCORE DRV_OPERATION_INIT_UNCORE ++#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE DRV_OPERATION_EM_GROUPS_UNCORE ++#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE DRV_OPERATION_EM_CONFIG_NEXT_UNCORE ++#define LWPMUDRV_IOCTL_READ_UNCORE_DATA DRV_OPERATION_READ_UNCORE_DATA ++#define LWPMUDRV_IOCTL_STOP_MEM DRV_OPERATION_STOP_MEM ++#define LWPMUDRV_IOCTL_CREATE_MEM DRV_OPERATION_CREATE_MEM ++#define LWPMUDRV_IOCTL_READ_MEM DRV_OPERATION_READ_MEM ++#define LWPMUDRV_IOCTL_CHECK_STATUS DRV_OPERATION_CHECK_STATUS ++#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ DRV_OPERATION_TIMER_TRIGGER_READ ++#define LWPMUDRV_IOCTL_INIT_DRIVER DRV_OPERATION_INIT_DRIVER ++ ++// This is only for MAC OSX ++#define LWPMUDRV_IOCTL_SET_OSX_VERSION 998 ++#define LWPMUDRV_IOCTL_PROVIDE_FUNCTION_PTRS 999 ++ ++#else ++#error "unknown OS in lwpmudrv_ioctl.h" ++#endif ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_struct.h b/drivers/platform/x86/socperf/include/lwpmudrv_struct.h +new file mode 100644 +index 000000000000..1966e6282149 +--- /dev/null ++++ b/drivers/platform/x86/socperf/include/lwpmudrv_struct.h +@@ -0,0 +1,2014 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * 
INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2019 Intel Corporation. All Rights Reserved. ++ * ------------------------------------------------------------------------- ++***/ ++ ++#ifndef _LWPMUDRV_STRUCT_UTILS_H_ ++#define _LWPMUDRV_STRUCT_UTILS_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++// processor execution modes ++#define MODE_UNKNOWN 99 ++// the following defines must start at 0 ++#define MODE_64BIT 3 ++#define MODE_32BIT 2 ++#define MODE_16BIT 1 ++#define MODE_V86 0 ++ ++// sampling methods ++#define SM_RTC 2020 // real time clock ++#define SM_VTD 2021 // OS Virtual Timer Device ++#define SM_NMI 2022 // non-maskable interrupt time based ++#define SM_EBS 2023 // event based sampling ++#define SM_EBC 2024 // event based counting ++ ++// sampling mechanism bitmap definitions ++#define INTERRUPT_RTC 0x1 ++#define INTERRUPT_VTD 0x2 ++#define INTERRUPT_NMI 0x4 ++#define INTERRUPT_EBS 0x8 ++ ++// Device types ++#define DEV_CORE 0x01 ++#define DEV_UNC 0x02 ++ ++// eflags defines ++#define EFLAGS_VM 0x00020000 // V86 mode ++#define EFLAGS_IOPL0 0 ++#define EFLAGS_IOPL1 0x00001000 ++#define EFLAGS_IOPL2 0x00002000 ++#define EFLAGS_IOPL3 0x00003000 ++#define MAX_EMON_GROUPS 1000 ++#define MAX_PCI_BUSNO 256 ++#define MAX_DEVICES 30 ++#define MAX_REGS 64 ++#define MAX_EMON_GROUPS 1000 ++#define MAX_PCI_DEVNO 32 ++#define MAX_PCI_FUNCNO 8 ++#define MAX_PCI_DEVUNIT 16 ++#define MAX_TURBO_VALUES 32 ++#define REG_BIT_MASK 0xFFFFFFFFFFFFFFFFULL ++ ++extern float freq_multiplier; ++ ++// Enumeration for invoking dispatch on multiple cpus or not ++typedef enum { DRV_MULTIPLE_INSTANCE = 0, DRV_SINGLE_INSTANCE } DRV_PROG_TYPE; ++ ++typedef struct DRV_CONFIG_NODE_S DRV_CONFIG_NODE; ++typedef 
DRV_CONFIG_NODE * DRV_CONFIG; ++ ++struct DRV_CONFIG_NODE_S { ++ U32 size; ++ U16 version; ++ U16 reserved1; ++ U32 num_events; ++ U32 num_chipset_events; ++ U32 chipset_offset; ++ S32 seed_name_len; ++ union { ++ S8 *seed_name; ++ U64 dummy1; ++ } u1; ++ union { ++ S8 *cpu_mask; ++ U64 dummy2; ++ } u2; ++ union { ++ U64 collection_config; ++ struct { ++ U64 start_paused : 1; ++ U64 counting_mode : 1; ++ U64 enable_chipset : 1; ++ U64 enable_gfx : 1; ++ U64 enable_pwr : 1; ++ U64 emon_mode : 1; ++ U64 debug_inject : 1; ++ U64 virt_phys_translation : 1; ++ U64 enable_p_state : 1; ++ U64 enable_cp_mode : 1; ++ U64 read_pstate_msrs : 1; ++ U64 use_pcl : 1; ++ U64 enable_ebc : 1; ++ U64 enable_tbc : 1; ++ U64 ds_area_available : 1; ++ U64 per_cpu_tsc : 1; ++ U64 reserved_field1 : 48; ++ } s1; ++ } u3; ++ U64 target_pid; ++ U32 os_of_interest; ++ U16 unc_timer_interval; ++ U16 unc_em_factor; ++ S32 p_state_trigger_index; ++ DRV_BOOL multi_pebs_enabled; ++ U32 reserved2; ++ U32 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++ U64 reserved6; ++}; ++ ++#define DRV_CONFIG_size(cfg) ((cfg)->size) ++#define DRV_CONFIG_version(cfg) ((cfg)->version) ++#define DRV_CONFIG_num_events(cfg) ((cfg)->num_events) ++#define DRV_CONFIG_num_chipset_events(cfg) ((cfg)->num_chipset_events) ++#define DRV_CONFIG_chipset_offset(cfg) ((cfg)->chipset_offset) ++ ++#define DRV_CONFIG_seed_name(cfg) ((cfg)->u1.seed_name) ++#define DRV_CONFIG_seed_name_len(cfg) ((cfg)->seed_name_len) ++#define DRV_CONFIG_cpu_mask(cfg) ((cfg)->u2.cpu_mask) ++#define DRV_CONFIG_start_paused(cfg) ((cfg)->u3.s1.start_paused) ++#define DRV_CONFIG_counting_mode(cfg) ((cfg)->u3.s1.counting_mode) ++#define DRV_CONFIG_enable_chipset(cfg) ((cfg)->u3.s1.enable_chipset) ++#define DRV_CONFIG_enable_gfx(cfg) ((cfg)->u3.s1.enable_gfx) ++#define DRV_CONFIG_enable_pwr(cfg) ((cfg)->u3.s1.enable_pwr) ++#define DRV_CONFIG_emon_mode(cfg) ((cfg)->u3.s1.emon_mode) ++#define DRV_CONFIG_debug_inject(cfg) ((cfg)->u3.s1.debug_inject) 
++#define DRV_CONFIG_virt_phys_translation(cfg) ((cfg)->u3.s1.virt_phys_translation) ++#define DRV_CONFIG_enable_p_state(cfg) ((cfg)->u3.s1.enable_p_state) ++#define DRV_CONFIG_enable_cp_mode(cfg) ((cfg)->u3.s1.enable_cp_mode) ++#define DRV_CONFIG_read_pstate_msrs(cfg) ((cfg)->u3.s1.read_pstate_msrs) ++#define DRV_CONFIG_use_pcl(cfg) ((cfg)->u3.s1.use_pcl) ++#define DRV_CONFIG_event_based_counts(cfg) ((cfg)->u3.s1.enable_ebc) ++#define DRV_CONFIG_timer_based_counts(cfg) ((cfg)->u3.s1.enable_tbc) ++#define DRV_CONFIG_ds_area_available(cfg) ((cfg)->u3.s1.ds_area_available) ++#define DRV_CONFIG_per_cpu_tsc(cfg) ((cfg)->u3.s1.per_cpu_tsc) ++#define DRV_CONFIG_target_pid(cfg) ((cfg)->target_pid) ++#define DRV_CONFIG_os_of_interest(cfg) ((cfg)->os_of_interest) ++#define DRV_CONFIG_unc_timer_interval(cfg) ((cfg)->unc_timer_interval) ++#define DRV_CONFIG_unc_em_factor(cfg) ((cfg)->unc_em_factor) ++#define DRV_CONFIG_p_state_trigger_index(cfg) ((cfg)->p_state_trigger_index) ++#define DRV_CONFIG_multi_pebs_enabled(cfg) ((cfg)->multi_pebs_enabled) ++ ++#define DRV_CONFIG_VERSION 1 ++ ++typedef struct DEV_CONFIG_NODE_S DEV_CONFIG_NODE; ++typedef DEV_CONFIG_NODE * DEV_CONFIG; ++ ++struct DEV_CONFIG_NODE_S { ++ U16 size; ++ U16 version; ++ U32 dispatch_id; ++ U32 pebs_mode; ++ U32 pebs_record_num; ++ U32 results_offset; // this is to store the offset for this device's results ++ U32 max_gp_counters; ++ U32 device_type; ++ U32 core_type; ++ union { ++ U64 enable_bit_fields; ++ struct { ++ U64 pebs_capture : 1; ++ U64 collect_lbrs : 1; ++ U64 collect_callstacks : 1; ++ U64 collect_kernel_callstacks : 1; ++ U64 latency_capture : 1; ++ U64 power_capture : 1; ++ U64 htoff_mode : 1; ++ U64 eventing_ip_capture : 1; ++ U64 hle_capture : 1; ++ U64 precise_ip_lbrs : 1; ++ U64 store_lbrs : 1; ++ U64 tsc_capture : 1; ++ U64 enable_perf_metrics : 1; ++ U64 enable_adaptive_pebs : 1; ++ U64 apebs_collect_mem_info : 1; ++ U64 apebs_collect_gpr : 1; ++ U64 apebs_collect_xmm : 1; ++ U64 
apebs_collect_lbrs : 1; ++ U64 collect_fixed_counter_pebs : 1; ++ U64 collect_os_callstacks : 1; ++ U64 reserved_field1 : 44; ++ } s1; ++ } u1; ++ U32 emon_unc_offset[MAX_EMON_GROUPS]; ++ U32 ebc_group_id_offset; ++ U8 num_perf_metrics; ++ U8 apebs_num_lbr_entries; ++ U16 emon_perf_metrics_offset; ++ U32 device_scope; ++ U32 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DEV_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id) ++#define DEV_CONFIG_pebs_mode(cfg) ((cfg)->pebs_mode) ++#define DEV_CONFIG_pebs_record_num(cfg) ((cfg)->pebs_record_num) ++#define DEV_CONFIG_results_offset(cfg) ((cfg)->results_offset) ++#define DEV_CONFIG_max_gp_counters(cfg) ((cfg)->max_gp_counters) ++ ++#define DEV_CONFIG_device_type(cfg) ((cfg)->device_type) ++#define DEV_CONFIG_core_type(cfg) ((cfg)->core_type) ++ ++#define DEV_CONFIG_pebs_capture(cfg) ((cfg)->u1.s1.pebs_capture) ++#define DEV_CONFIG_collect_lbrs(cfg) ((cfg)->u1.s1.collect_lbrs) ++#define DEV_CONFIG_collect_callstacks(cfg) ((cfg)->u1.s1.collect_callstacks) ++#define DEV_CONFIG_collect_kernel_callstacks(cfg) \ ++ ((cfg)->u1.s1.collect_kernel_callstacks) ++#define DEV_CONFIG_latency_capture(cfg) ((cfg)->u1.s1.latency_capture) ++#define DEV_CONFIG_power_capture(cfg) ((cfg)->u1.s1.power_capture) ++#define DEV_CONFIG_htoff_mode(cfg) ((cfg)->u1.s1.htoff_mode) ++#define DEV_CONFIG_eventing_ip_capture(cfg) ((cfg)->u1.s1.eventing_ip_capture) ++#define DEV_CONFIG_hle_capture(cfg) ((cfg)->u1.s1.hle_capture) ++#define DEV_CONFIG_precise_ip_lbrs(cfg) ((cfg)->u1.s1.precise_ip_lbrs) ++#define DEV_CONFIG_store_lbrs(cfg) ((cfg)->u1.s1.store_lbrs) ++#define DEV_CONFIG_tsc_capture(cfg) ((cfg)->u1.s1.tsc_capture) ++#define DEV_CONFIG_enable_perf_metrics(cfg) ((cfg)->u1.s1.enable_perf_metrics) ++#define DEV_CONFIG_enable_adaptive_pebs(cfg) ((cfg)->u1.s1.enable_adaptive_pebs) ++#define DEV_CONFIG_apebs_collect_mem_info(cfg) \ ++ ((cfg)->u1.s1.apebs_collect_mem_info) ++#define DEV_CONFIG_apebs_collect_gpr(cfg) 
((cfg)->u1.s1.apebs_collect_gpr) ++#define DEV_CONFIG_apebs_collect_xmm(cfg) ((cfg)->u1.s1.apebs_collect_xmm) ++#define DEV_CONFIG_apebs_collect_lbrs(cfg) ((cfg)->u1.s1.apebs_collect_lbrs) ++#define DEV_CONFIG_collect_fixed_counter_pebs(cfg) \ ++ ((cfg)->u1.s1.collect_fixed_counter_pebs) ++#define DEV_CONFIG_collect_os_callstacks(cfg) ((cfg)->u1.s1.collect_os_callstacks) ++#define DEV_CONFIG_enable_bit_fields(cfg) ((cfg)->u1.enable_bit_fields) ++#define DEV_CONFIG_emon_unc_offset(cfg, grp_num) ((cfg)->emon_unc_offset[grp_num]) ++#define DEV_CONFIG_ebc_group_id_offset(cfg) ((cfg)->ebc_group_id_offset) ++#define DEV_CONFIG_num_perf_metrics(cfg) ((cfg)->num_perf_metrics) ++#define DEV_CONFIG_apebs_num_lbr_entries(cfg) ((cfg)->apebs_num_lbr_entries) ++#define DEV_CONFIG_emon_perf_metrics_offset(cfg) ((cfg)->emon_perf_metrics_offset) ++#define DEV_CONFIG_device_scope(cfg) ((cfg)->device_scope) ++ ++typedef struct DEV_UNC_CONFIG_NODE_S DEV_UNC_CONFIG_NODE; ++typedef DEV_UNC_CONFIG_NODE * DEV_UNC_CONFIG; ++ ++struct DEV_UNC_CONFIG_NODE_S { ++ U16 size; ++ U16 version; ++ U32 dispatch_id; ++ U32 results_offset; ++ U32 device_type; ++ U32 device_scope; ++ U32 reserved1; ++ U32 emon_unc_offset[MAX_EMON_GROUPS]; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DEV_UNC_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id) ++#define DEV_UNC_CONFIG_results_offset(cfg) ((cfg)->results_offset) ++#define DEV_UNC_CONFIG_emon_unc_offset(cfg, grp_num) \ ++ ((cfg)->emon_unc_offset[grp_num]) ++#define DEV_UNC_CONFIG_device_type(cfg) ((cfg)->device_type) ++#define DEV_UNC_CONFIG_device_scope(cfg) ((cfg)->device_scope) ++ ++/* ++ * X86 processor code descriptor ++ */ ++typedef struct CodeDescriptor_s { ++ union { ++ U32 lowWord; // low dword of descriptor ++ struct { // low broken out by fields ++ U16 limitLow; // segment limit 15:00 ++ U16 baseLow; // segment base 15:00 ++ } s1; ++ } u1; ++ union { ++ U32 highWord; // high word of descriptor ++ struct { // high broken out 
by bit fields ++ U32 baseMid : 8; // base 23:16 ++ U32 accessed : 1; // accessed ++ U32 readable : 1; // readable ++ U32 conforming : 1; // conforming code segment ++ U32 oneOne : 2; // always 11 ++ U32 dpl : 2; // Dpl ++ U32 pres : 1; // present bit ++ U32 limitHi : 4; // limit 19:16 ++ U32 sys : 1; // available for use by system ++ U32 reserved_0 : 1; // reserved, always 0 ++ U32 default_size : 1; // default operation size (1=32bit, 0=16bit) ++ U32 granularity : 1; // granularity (1=32 bit, 0=20 bit) ++ U32 baseHi : 8; // base hi 31:24 ++ } s2; ++ } u2; ++} CodeDescriptor; ++ ++/* ++ * Module record. These are emitted whenever a DLL or EXE is loaded or unloaded. ++ * The filename fields may be 0 on an unload. The records reperesent a module for a ++ * certain span of time, delineated by the load / unload samplecounts. ++ * Note: ++ * The structure contains 64 bit fields which may cause the compiler to pad the ++ * length of the structure to an 8 byte boundary. ++ */ ++typedef struct ModuleRecord_s { ++ U16 recLength; // total length of this record (including this length, ++ // always U32 multiple) output from sampler is variable ++ // length (pathname at end of record) sampfile builder moves ++ // path names to a separate "literal pool" area ++ // so that these records become fixed length, and can be treated ++ // as an array see modrecFixedLen in header ++ ++ U16 segmentType : 2; // V86, 16, 32, 64 (see MODE_ defines), maybe inaccurate for Win95 ++ // .. 
a 16 bit module may become a 32 bit module, inferred by ++ // ..looking at 1st sample record that matches the module selector ++ U16 loadEvent : 1; // 0 for load, 1 for unload ++ U16 processed : 1; // 0 for load, 1 for unload ++ U16 reserved0 : 12; ++ ++ U16 selector; // code selector or V86 segment ++ U16 segmentNameLength; // length of the segment name if the segmentNameSet bit is set ++ U32 segmentNumber; // segment number, Win95 (and now Java) can have multiple pieces for one module ++ union { ++ U32 flags; // all the flags as one dword ++ struct { ++ U32 exe : 1; // this module is an exe ++ U32 globalModule : 1; // globally loaded module. There may be multiple ++ // module records for a global module, but the samples ++ // will only point to the 1st one, the others will be ++ // ignored. NT's Kernel32 is an example of this. ++ // REVISIT this?? ++ U32 bogusWin95 : 1; // "bogus" win95 module. By bogus, we mean a ++ // module that has a pid of 0, no length and no base. ++ // Selector actually used as a 32 bit module. 
++ U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid ++ U32 sampleFound : 1; // at least one sample referenced this module ++ U32 tscUsed : 1; // tsc set when record written ++ U32 duplicate : 1; // 1st pass analysis has determined this is a ++ // duplicate load ++ U32 globalModuleTB5 : 1; // module mapped into all processes on system ++ U32 segmentNameSet : 1; // set if the segment name was collected ++ // (initially done for xbox collections) ++ U32 firstModuleRecInProcess : 1; // if the pidCreatesTrackedInModuleRecs flag is set ++ // in the SampleHeaderEx struct and this flag ++ // is set, the associated module indicates ++ // the beginning of a new process ++ U32 source : 1; // 0 for path in target system, 1 for path in host system (offloaded) ++ U32 unknownLoadAddress : 1; // for 0 valid loadAddr64 value, 1 for invalid loadAddr64 value ++ U32 reserved1 : 20; ++ } s1; ++ } u2; ++ U64 length64; // module length ++ U64 loadAddr64; // load address ++ U32 pidRecIndex; // process ID rec index (index into start of pid record section). ++ // .. (see pidRecIndexRaw). If pidRecIndex == 0 and pidRecIndexRaw == 1 ++ // ..then this is a kernel or global module. Can validly ++ // ..be 0 if not raw (array index). 
Use ReturnPid() to access this ++ // ..field ++ U32 osid; // OS identifier ++ U64 unloadTsc; // TSC collected on an unload event ++ U32 path; // module path name (section offset on disk) ++ // ..when initally written by sampler name is at end of this ++ // ..struct, when merged with main file names are pooled at end ++ // ..of ModuleRecord Section so ModulesRecords can be ++ // ..fixed length ++ U16 pathLength; // path name length (inludes terminating \0) ++ U16 filenameOffset; // offset into path name of base filename ++ U32 segmentName; // offset to the segmentName from the beginning of the ++ // module section in a processed module section ++ // (s/b 0 in a raw module record) ++ // in a raw module record, the segment name will follow the ++ // module name and the module name's terminating NULL char ++ U32 page_offset_high; ++ U64 tsc; // time stamp counter module event occurred ++ U32 parent_pid; // Parent PID of the process ++ U32 page_offset_low; ++} ModuleRecord; ++ ++#define MR_unloadTscSet(x, y) ((x)->unloadTsc = (y)) ++#define MR_unloadTscGet(x) ((x)->unloadTsc) ++ ++#define MR_page_offset_Set(x, y) \ ++{ \ ++ (x)->page_offset_low = (y)&0xFFFFFFFF; \ ++ (x)->page_offset_high = ((y) >> 32) & 0xFFFFFFFF; \ ++} ++ ++#define MR_page_offset_Get(x) \ ++ ((((U64)(x)->page_offset_high) << 32) | (x)->page_offset_low) ++ ++// Accessor macros for ModuleRecord ++#define MODULE_RECORD_rec_length(x) ((x)->recLength) ++#define MODULE_RECORD_segment_type(x) ((x)->segmentType) ++#define MODULE_RECORD_load_event(x) ((x)->loadEvent) ++#define MODULE_RECORD_processed(x) ((x)->processed) ++#define MODULE_RECORD_selector(x) ((x)->selector) ++#define MODULE_RECORD_segment_name_length(x) ((x)->segmentNameLength) ++#define MODULE_RECORD_segment_number(x) ((x)->segmentNumber) ++#define MODULE_RECORD_flags(x) ((x)->u2.flags) ++#define MODULE_RECORD_exe(x) ((x)->u2.s1.exe) ++#define MODULE_RECORD_global_module(x) ((x)->u2.s1.globalModule) ++#define MODULE_RECORD_bogus_win95(x) 
((x)->u2.s1.bogusWin95) ++#define MODULE_RECORD_pid_rec_index_raw(x) ((x)->u2.s1.pidRecIndexRaw) ++#define MODULE_RECORD_sample_found(x) ((x)->u2.s1.sampleFound) ++#define MODULE_RECORD_tsc_used(x) ((x)->u2.s1.tscUsed) ++#define MODULE_RECORD_duplicate(x) ((x)->u2.s1.duplicate) ++#define MODULE_RECORD_global_module_tb5(x) ((x)->u2.s1.globalModuleTB5) ++#define MODULE_RECORD_segment_name_set(x) ((x)->u2.s1.segmentNameSet) ++#define MODULE_RECORD_first_module_rec_in_process(x) \ ++ ((x)->u2.s1.firstModuleRecInProcess) ++#define MODULE_RECORD_source(x) ((x)->u2.s1.source) ++#define MODULE_RECORD_unknown_load_address(x) ((x)->u2.s1.unknownLoadAddress) ++#define MODULE_RECORD_length64(x) ((x)->length64) ++#define MODULE_RECORD_load_addr64(x) ((x)->loadAddr64) ++#define MODULE_RECORD_pid_rec_index(x) ((x)->pidRecIndex) ++#define MODULE_RECORD_load_sample_count(x) ((x)->u5.s2.loadSampleCount) ++#define MODULE_RECORD_unload_sample_count(x) ((x)->u5.s2.unloadSampleCount) ++#define MODULE_RECORD_unload_tsc(x) ((x)->unloadTsc) ++#define MODULE_RECORD_path(x) ((x)->path) ++#define MODULE_RECORD_path_length(x) ((x)->pathLength) ++#define MODULE_RECORD_filename_offset(x) ((x)->filenameOffset) ++#define MODULE_RECORD_segment_name(x) ((x)->segmentName) ++#define MODULE_RECORD_tsc(x) ((x)->tsc) ++#define MODULE_RECORD_parent_pid(x) ((x)->parent_pid) ++#define MODULE_RECORD_osid(x) ((x)->osid) ++ ++/* ++ * Sample record. Size can be determined by looking at the header record. ++ * There can be up to 3 sections. The SampleFileHeader defines the presence ++ * of sections and their offsets. Within a sample file, all of the sample ++ * records have the same number of sections and the same size. However, ++ * different sample record sections and sizes can exist in different ++ * sample files. Since recording counters and the time stamp counter for ++ * each sample can be space consuming, the user can determine whether or not ++ * this information is kept at sample collection time. 
++ */ ++ ++typedef struct SampleRecordPC_s { // Program Counter section ++ U32 descriptor_id; ++ U32 osid; // OS identifier ++ union { ++ struct { ++ U64 iip; // IA64 interrupt instruction pointer ++ U64 ipsr; // IA64 interrupt processor status register ++ } s1; ++ struct { ++ U32 eip; // IA32 instruction pointer ++ U32 eflags; // IA32 eflags ++ CodeDescriptor csd; // IA32 code seg descriptor (8 bytes) ++ } s2; ++ } u1; ++ U16 cs; // IA32 cs (0 for IA64) ++ union { ++ U16 cpuAndOS; // cpu and OS info as one word ++ struct { // cpu and OS info broken out ++ U16 cpuNum : 12; // cpu number (0 - 4096) ++ U16 notVmid0 : 1; // win95, vmid0 flag (1 means NOT vmid 0) ++ U16 codeMode : 2; // processor mode, see MODE_ defines ++ U16 uncore_valid : 1; // identifies if the uncore count is valid ++ } s3; ++ } u2; ++ U32 tid; // OS thread ID (may get reused, see tidIsRaw) ++ U32 pidRecIndex; // process ID rec index (index into start of pid ++ // record section) .. can validly be 0 if not raw ++ // (array index). Use ReturnPid() to ++ // ..access this field .. (see pidRecIndexRaw) ++ union { ++ U32 bitFields2; ++ struct { ++ U32 mrIndex : 20; // module record index (index into start of ++ // module rec section) .. 
(see mrIndexNone) ++ U32 eventIndex : 8; // index into the Events section ++ U32 tidIsRaw : 1; // tid is raw OS tid ++ U32 IA64PC : 1; // TRUE=this is a IA64 PC sample record ++ U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid ++ U32 mrIndexNone : 1; // no mrIndex (unknown module) ++ } s4; ++ } u3; ++ U64 tsc; // processor timestamp counter ++} SampleRecordPC, *PSampleRecordPC; ++ ++#define SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id) ++#define SAMPLE_RECORD_osid(x) ((x)->osid) ++#define SAMPLE_RECORD_iip(x) ((x)->u1.s1.iip) ++#define SAMPLE_RECORD_ipsr(x) ((x)->u1.s1.ipsr) ++#define SAMPLE_RECORD_eip(x) ((x)->u1.s2.eip) ++#define SAMPLE_RECORD_eflags(x) ((x)->u1.s2.eflags) ++#define SAMPLE_RECORD_csd(x) ((x)->u1.s2.csd) ++#define SAMPLE_RECORD_cs(x) ((x)->cs) ++#define SAMPLE_RECORD_cpu_and_os(x) ((x)->u2.cpuAndOS) ++#define SAMPLE_RECORD_cpu_num(x) ((x)->u2.s3.cpuNum) ++#define SAMPLE_RECORD_uncore_valid(x) ((x)->u2.s3.uncore_valid) ++#define SAMPLE_RECORD_not_vmid0(x) ((x)->u2.s3.notVmid0) ++#define SAMPLE_RECORD_code_mode(x) ((x)->u2.s3.codeMode) ++#define SAMPLE_RECORD_tid(x) ((x)->tid) ++#define SAMPLE_RECORD_pid_rec_index(x) ((x)->pidRecIndex) ++#define SAMPLE_RECORD_bit_fields2(x) ((x)->u3.bitFields2) ++#define SAMPLE_RECORD_mr_index(x) ((x)->u3.s4.mrIndex) ++#define SAMPLE_RECORD_event_index(x) ((x)->u3.s4.eventIndex) ++#define SAMPLE_RECORD_tid_is_raw(x) ((x)->u3.s4.tidIsRaw) ++#define SAMPLE_RECORD_ia64_pc(x) ((x)->u3.s4.IA64PC) ++#define SAMPLE_RECORD_pid_rec_index_raw(x) ((x)->u3.s4.pidRecIndexRaw) ++#define SAMPLE_RECORD_mr_index_none(x) ((x)->u3.s4.mrIndexNone) ++#define SAMPLE_RECORD_tsc(x) ((x)->tsc) ++ ++// end of SampleRecord sections ++ ++/* Uncore Sample Record definition. This is a skinny sample record used by uncore boxes ++ to record samples. 
The sample record consists of a descriptor id, cpu info and timestamp.*/ ++ ++typedef struct UncoreSampleRecordPC_s { ++ U32 descriptor_id; ++ U32 osid; ++ U16 cpuNum; ++ U16 pkgNum; ++ union { ++ U32 flags; ++ struct { ++ U32 uncore_valid : 1; // identifies if the uncore count is valid ++ U32 reserved1 : 31; ++ } s1; ++ } u1; ++ U64 reserved2; ++ U64 tsc; // processor timestamp counter ++} UncoreSampleRecordPC, *PUnocreSampleRecordPC; ++ ++#define UNCORE_SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id) ++#define UNCORE_SAMPLE_RECORD_osid(x) ((x)->osid) ++#define UNCORE_SAMPLE_RECORD_cpu_num(x) ((x)->cpuNum) ++#define UNCORE_SAMPLE_RECORD_pkg_num(x) ((x)->pkgNum) ++#define UNCORE_SAMPLE_RECORD_uncore_valid(x) ((x)->u1.s1.uncore_valid) ++#define UNCORE_SAMPLE_RECORD_tsc(x) ((x)->tsc) ++ ++// end of UncoreSampleRecord section ++ ++// Definitions for user markers data ++// The instances of these structures will be written to the user markers temp file. ++#define MARKER_DEFAULT_TYPE "Default_Marker" ++#define MARKER_DEFAULT_ID 0 ++#define MAX_MARKER_LENGTH 136 ++ ++#define MARK_ID 4 ++#define MARK_DATA 2 ++#define THREAD_INFO 8 ++ ++/* do not use it at ths moment ++typedef enum { ++ SMRK_USER_DEFINED = 0, ++ SMRK_THREAD_NAME, ++ SMRK_WALLCLOCK, ++ SMRK_TEXT, ++ SMRK_TYPE_ID ++} SMRK_TYPE; ++*/ ++ ++/* ++ * Common Register descriptions ++ */ ++ ++/* ++ * Bits used in the debug control register ++ */ ++#define DEBUG_CTL_LBR 0x0000001 ++#define DEBUG_CTL_BTF 0x0000002 ++#define DEBUG_CTL_TR 0x0000040 ++#define DEBUG_CTL_BTS 0x0000080 ++#define DEBUG_CTL_BTINT 0x0000100 ++#define DEBUG_CTL_BT_OFF_OS 0x0000200 ++#define DEBUG_CTL_BTS_OFF_USR 0x0000400 ++#define DEBUG_CTL_FRZ_LBR_ON_PMI 0x0000800 ++#define DEBUG_CTL_FRZ_PMON_ON_PMI 0x0001000 ++#define DEBUG_CTL_ENABLE_UNCORE_PMI_BIT 0x0002000 ++ ++#define DEBUG_CTL_NODE_lbr_get(reg) ((reg) & DEBUG_CTL_LBR) ++#define DEBUG_CTL_NODE_lbr_set(reg) ((reg) |= DEBUG_CTL_LBR) ++#define DEBUG_CTL_NODE_lbr_clear(reg) ((reg) &= 
~DEBUG_CTL_LBR) ++ ++#define DEBUG_CTL_NODE_btf_get(reg) ((reg) & DEBUG_CTL_BTF) ++#define DEBUG_CTL_NODE_btf_set(reg) ((reg) |= DEBUG_CTL_BTF) ++#define DEBUG_CTL_NODE_btf_clear(reg) ((reg) &= ~DEBUG_CTL_BTF) ++ ++#define DEBUG_CTL_NODE_tr_get(reg) ((reg) & DEBUG_CTL_TR) ++#define DEBUG_CTL_NODE_tr_set(reg) ((reg) |= DEBUG_CTL_TR) ++#define DEBUG_CTL_NODE_tr_clear(reg) ((reg) &= ~DEBUG_CTL_TR) ++ ++#define DEBUG_CTL_NODE_bts_get(reg) ((reg) & DEBUG_CTL_BTS) ++#define DEBUG_CTL_NODE_bts_set(reg) ((reg) |= DEBUG_CTL_BTS) ++#define DEBUG_CTL_NODE_bts_clear(reg) ((reg) &= ~DEBUG_CTL_BTS) ++ ++#define DEBUG_CTL_NODE_btint_get(reg) ((reg) & DEBUG_CTL_BTINT) ++#define DEBUG_CTL_NODE_btint_set(reg) ((reg) |= DEBUG_CTL_BTINT) ++#define DEBUG_CTL_NODE_btint_clear(reg) ((reg) &= ~DEBUG_CTL_BTINT) ++ ++#define DEBUG_CTL_NODE_bts_off_os_get(reg) ((reg) & DEBUG_CTL_BTS_OFF_OS) ++#define DEBUG_CTL_NODE_bts_off_os_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_OS) ++#define DEBUG_CTL_NODE_bts_off_os_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_OS) ++ ++#define DEBUG_CTL_NODE_bts_off_usr_get(reg) ((reg) & DEBUG_CTL_BTS_OFF_USR) ++#define DEBUG_CTL_NODE_bts_off_usr_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_USR) ++#define DEBUG_CTL_NODE_bts_off_usr_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_USR) ++ ++#define DEBUG_CTL_NODE_frz_lbr_on_pmi_get(reg) ((reg) & DEBUG_CTL_FRZ_LBR_ON_PMI) ++#define DEBUG_CTL_NODE_frz_lbr_on_pmi_set(reg) ((reg) |= DEBUG_CTL_FRZ_LBR_ON_PMI) ++#define DEBUG_CTL_NODE_frz_lbr_on_pmi_clear(reg) \ ++ ((reg) &= ~DEBUG_CTL_FRZ_LBR_ON_PMI) ++ ++#define DEBUG_CTL_NODE_frz_pmon_on_pmi_get(reg) \ ++ ((reg) & DEBUG_CTL_FRZ_PMON_ON_PMI) ++#define DEBUG_CTL_NODE_frz_pmon_on_pmi_set(reg) \ ++ ((reg) |= DEBUG_CTL_FRZ_PMON_ON_PMI) ++#define DEBUG_CTL_NODE_frz_pmon_on_pmi_clear(reg) \ ++ ((reg) &= ~DEBUG_CTL_FRZ_PMON_ON_PMI) ++ ++#define DEBUG_CTL_NODE_enable_uncore_pmi_get(reg) \ ++ ((reg) & DEBUG_CTL_ENABLE_UNCORE_PMI) ++#define DEBUG_CTL_NODE_enable_uncore_pmi_set(reg) \ ++ ((reg) |= 
DEBUG_CTL_ENABLE_UNCORE_PMI) ++#define DEBUG_CTL_NODE_enable_uncore_pmi_clear(reg) \ ++ ((reg) &= ~DEBUG_CTL_ENABLE_UNCORE_PMI) ++ ++/* ++ * @macro SEP_VERSION_NODE_S ++ * @brief ++ * This structure supports versioning in Sep. The field major indicates the major version, ++ * minor indicates the minor version and api indicates the api version for the current ++ * sep build. This structure is initialized at the time when the driver is loaded. ++ */ ++ ++typedef struct SEP_VERSION_NODE_S SEP_VERSION_NODE; ++typedef SEP_VERSION_NODE * SEP_VERSION; ++ ++struct SEP_VERSION_NODE_S { ++ union { ++ U32 sep_version; ++ struct { ++ S32 major : 8; ++ S32 minor : 8; ++ S32 api : 8; ++ S32 update : 8; ++ } s1; ++ } u1; ++}; ++ ++#define SEP_VERSION_NODE_sep_version(version) ((version)->u1.sep_version) ++#define SEP_VERSION_NODE_major(version) ((version)->u1.s1.major) ++#define SEP_VERSION_NODE_minor(version) ((version)->u1.s1.minor) ++#define SEP_VERSION_NODE_api(version) ((version)->u1.s1.api) ++#define SEP_VERSION_NODE_update(version) ((version)->u1.s1.update) ++ ++/* ++ * The VTSA_SYS_INFO_STRUCT information that is shared across kernel mode ++ * and user mode code, very specifically for tb5 file generation ++ */ ++ ++typedef enum { ++ GT_UNK = 0, ++ GT_PER_CPU, ++ GT_PER_CHIPSET, ++ GT_CPUID, ++ GT_NODE, ++ GT_SYSTEM, ++ GT_SAMPLE_RECORD_INFO ++} GEN_ENTRY_TYPES; ++ ++typedef enum { ++ GST_UNK = 0, ++ GST_X86, ++ GST_ITANIUM, ++ GST_SA, //strong arm ++ GST_XSC, ++ GST_EM64T, ++ GST_CS860 ++} GEN_ENTRY_SUBTYPES; ++ ++typedef struct __fixed_size_pointer { ++ union { ++ U64 fs_force_alignment; ++ struct { ++ U32 fs_unused; ++ U32 is_ptr : 1; ++ } s1; ++ } u1; ++ union { ++ U64 fs_offset; ++ void *fs_ptr; ++ } u2; ++} VTSA_FIXED_SIZE_PTR; ++ ++#define VTSA_FIXED_SIZE_PTR_is_ptr(fsp) ((fsp)->u1.s1.is_ptr) ++#define VTSA_FIXED_SIZE_PTR_fs_offset(fsp) ((fsp)->u2.fs_offset) ++#define VTSA_FIXED_SIZE_PTR_fs_ptr(fsp) ((fsp)->u2.fs_ptr) ++ ++typedef struct __generic_array_header { ++ 
// ++ // Information realted to the generic header ++ // ++ U32 hdr_size; // size of this generic header ++ // (for versioning and real data starts ++ // after the header) ++ ++ U32 next_field_hdr_padding; // make sure the next field is 8-byte aligned ++ ++ // ++ // VTSA_FIXED_SIZE_PTR should always be on an 8-byte boundary... ++ // ++ // pointer to the next generic header if there is one ++ // ++ VTSA_FIXED_SIZE_PTR hdr_next_gen_hdr; ++ ++ U32 hdr_reserved[7]; // padding for future use - force to 64 bytes... ++ ++ // ++ // Information related to the array this header is describing ++ // ++ U32 array_num_entries; ++ U32 array_entry_size; ++ U16 array_type; // from the GEN_ENTRY_TYPES enumeration ++ U16 array_subtype; // from the GEN_ENTRY_SUBTYPES enumeration ++} VTSA_GEN_ARRAY_HDR; ++ ++#define VTSA_GEN_ARRAY_HDR_hdr_size(gah) ((gah)->hdr_size) ++#define VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gah) ((gah)->hdr_next_gen_hdr) ++#define VTSA_GEN_ARRAY_HDR_array_num_entries(gah) ((gah)->array_num_entries) ++#define VTSA_GEN_ARRAY_HDR_array_entry_size(gah) ((gah)->array_entry_size) ++#define VTSA_GEN_ARRAY_HDR_array_type(gah) ((gah)->array_type) ++#define VTSA_GEN_ARRAY_HDR_array_subtype(gah) ((gah)->array_subtype) ++ ++typedef struct __cpuid_x86 { ++ U32 cpuid_eax_input; ++ U32 cpuid_eax; ++ U32 cpuid_ebx; ++ U32 cpuid_ecx; ++ U32 cpuid_edx; ++} VTSA_CPUID_X86; ++ ++#define VTSA_CPUID_X86_cpuid_eax_input(cid) ((cid)->cpuid_eax_input) ++#define VTSA_CPUID_X86_cpuid_eax(cid) ((cid)->cpuid_eax) ++#define VTSA_CPUID_X86_cpuid_ebx(cid) ((cid)->cpuid_ebx) ++#define VTSA_CPUID_X86_cpuid_ecx(cid) ((cid)->cpuid_ecx) ++#define VTSA_CPUID_X86_cpuid_edx(cid) ((cid)->cpuid_edx) ++ ++typedef struct __cpuid_ipf { ++ U64 cpuid_select; ++ U64 cpuid_val; ++} VTSA_CPUID_IPF; ++ ++#define VTSA_CPUID_IPF_cpuid_select(cid) ((cid)->cpuid_select) ++#define VTSA_CPUID_IPF_cpuid_val(cid) ((cid)->cpuid_val) ++ ++typedef struct __generic_per_cpu { ++ // ++ // per cpu information ++ // ++ U32 
cpu_number; // cpu number (as defined by the OS) ++ U32 cpu_speed_mhz; // cpu speed (in Mhz) ++ U32 cpu_fsb_mhz; // Front Side Bus speed (in Mhz) (if known) ++ U32 cpu_cache_L2; // ??? USER: cpu L2 (marketing definition) cache size (if known) ++ ++ // ++ // And pointer to other structures. Keep this on an 8-byte boundary ++ // ++ // "pointer" to generic array header that should contain ++ // cpuid information for this cpu ++ // ++ VTSA_FIXED_SIZE_PTR cpu_cpuid_array; ++ ++ S64 cpu_tsc_offset; // TSC offset from CPU 0 computed as (TSC CPU N - TSC CPU 0) ++ // ++ // intel processor number (from mkting). ++ // Currently 3 decimal digits (3xx, 5xx and 7xx) ++ // ++ U32 cpu_intel_processor_number; ++ ++ U32 cpu_cache_L3; // ??? USER: cpu L3 (marketing definition) cache size (if known) ++ ++ U64 platform_id; ++ ++ // ++ // package/mapping information ++ // ++ // The hierarchy for uniquely identifying a logical processor ++ // in a system is node number/id (from the node structure), ++ // package number, core number, and thread number. ++ // Core number is for identifying a core within a package. ++ // ++ // Actually, on Itanium getting all this information is ++ // pretty involved with complicated algorithm using PAL calls. ++ // I don't know how important all this stuff is to the user. ++ // Maybe we can just have the place holder now and figure out ++ // how to fill them later. 
++ // ++ U16 cpu_package_num; // package number for this cpu (if known) ++ U16 cpu_core_num; // core number (if known) ++ U16 cpu_hw_thread_num; // hw thread number inside the core (if known) ++ ++ U16 cpu_threads_per_core; // total number of h/w threads per core (if known) ++ U16 cpu_module_id; // Processor module number ++ U16 cpu_num_modules; // Number of processor modules ++ U32 cpu_core_type; // Core type for hetero ++ U32 arch_perfmon_ver; ++ U32 num_gp_counters; ++ U32 num_fixed_counters; ++ U32 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ ++} VTSA_GEN_PER_CPU; ++ ++#define VTSA_GEN_PER_CPU_cpu_number(p_cpu) ((p_cpu)->cpu_number) ++#define VTSA_GEN_PER_CPU_cpu_speed_mhz(p_cpu) ((p_cpu)->cpu_speed_mhz) ++#define VTSA_GEN_PER_CPU_cpu_fsb_mhz(p_cpu) ((p_cpu)->cpu_fsb_mhz) ++#define VTSA_GEN_PER_CPU_cpu_cache_L2(p_cpu) ((p_cpu)->cpu_cache_L2) ++#define VTSA_GEN_PER_CPU_cpu_cpuid_array(p_cpu) ((p_cpu)->cpu_cpuid_array) ++#define VTSA_GEN_PER_CPU_cpu_tsc_offset(p_cpu) ((p_cpu)->cpu_tsc_offset) ++#define VTSA_GEN_PER_CPU_cpu_intel_processor_number(p_cpu) \ ++ ((p_cpu)->cpu_intel_processor_number) ++#define VTSA_GEN_PER_CPU_cpu_cache_L3(p_cpu) ((p_cpu)->cpu_cache_L3) ++#define VTSA_GEN_PER_CPU_platform_id(p_cpu) ((p_cpu)->platform_id) ++#define VTSA_GEN_PER_CPU_cpu_package_num(p_cpu) ((p_cpu)->cpu_package_num) ++#define VTSA_GEN_PER_CPU_cpu_core_num(p_cpu) ((p_cpu)->cpu_core_num) ++#define VTSA_GEN_PER_CPU_cpu_hw_thread_num(p_cpu) ((p_cpu)->cpu_hw_thread_num) ++#define VTSA_GEN_PER_CPU_cpu_threads_per_core(p_cpu) \ ++ ((p_cpu)->cpu_threads_per_core) ++#define VTSA_GEN_PER_CPU_cpu_module_num(p_cpu) ((p_cpu)->cpu_module_id) ++#define VTSA_GEN_PER_CPU_cpu_num_modules(p_cpu) ((p_cpu)->cpu_num_modules) ++#define VTSA_GEN_PER_CPU_cpu_core_type(p_cpu) ((p_cpu)->cpu_core_type) ++#define VTSA_GEN_PER_CPU_arch_perfmon_ver(p_cpu) ((p_cpu)->arch_perfmon_ver) ++#define VTSA_GEN_PER_CPU_num_gp_counters(p_cpu) ((p_cpu)->num_gp_counters) ++#define 
VTSA_GEN_PER_CPU_num_fixed_counters(p_cpu) ((p_cpu)->num_fixed_counters) ++ ++typedef struct __node_info { ++ U32 node_type_from_shell; ++ U32 node_id; // The node number/id (if known) ++ ++ U32 node_num_available; // total number cpus on this node ++ U32 node_num_used; // USER: number used based on cpu mask at time of run ++ ++ U64 node_physical_memory; // amount of physical memory (bytes) on this node ++ ++ // ++ // pointer to the first generic header that ++ // contains the per-cpu information ++ // ++ // Keep the VTSA_FIXED_SIZE_PTR on an 8-byte boundary... ++ // ++ VTSA_FIXED_SIZE_PTR node_percpu_array; ++ ++ U32 node_reserved[2]; // leave some space ++ ++} VTSA_NODE_INFO; ++ ++#define VTSA_NODE_INFO_node_type_from_shell(vni) ((vni)->node_type_from_shell) ++#define VTSA_NODE_INFO_node_id(vni) ((vni)->node_id) ++#define VTSA_NODE_INFO_node_num_available(vni) ((vni)->node_num_available) ++#define VTSA_NODE_INFO_node_num_used(vni) ((vni)->node_num_used) ++#define VTSA_NODE_INFO_node_physical_memory(vni) ((vni)->node_physical_memory) ++#define VTSA_NODE_INFO_node_percpu_array(vni) ((vni)->node_percpu_array) ++ ++typedef struct __sys_info { ++ // ++ // Keep this on an 8-byte boundary ++ // ++ VTSA_FIXED_SIZE_PTR node_array; // the per-node information ++ ++ U64 min_app_address; // USER: lower allowed user space address (if known) ++ U64 max_app_address; // USER: upper allowed user space address (if known) ++ U32 page_size; // Current page size ++ U32 allocation_granularity; // USER: Granularity of allocation requests (if known) ++ U32 reserved1; // added for future fields ++ U32 reserved2; // alignment purpose ++ U64 reserved3[3]; // added for future fields ++ ++} VTSA_SYS_INFO; ++ ++#define VTSA_SYS_INFO_node_array(sys_info) ((sys_info)->node_array) ++#define VTSA_SYS_INFO_min_app_address(sys_info) ((sys_info)->min_app_address) ++#define VTSA_SYS_INFO_max_app_address(sys_info) ((sys_info)->max_app_address) ++#define VTSA_SYS_INFO_page_size(sys_info) 
((sys_info)->page_size) ++#define VTSA_SYS_INFO_allocation_granularity(sys_info) \ ++ ((sys_info)->allocation_granularity) ++ ++typedef struct DRV_TOPOLOGY_INFO_NODE_S DRV_TOPOLOGY_INFO_NODE; ++typedef DRV_TOPOLOGY_INFO_NODE * DRV_TOPOLOGY_INFO; ++ ++struct DRV_TOPOLOGY_INFO_NODE_S { ++ U32 cpu_number; // cpu number (as defined by the OS) ++ U16 cpu_package_num; // package number for this cpu (if known) ++ U16 cpu_core_num; // core number (if known) ++ U16 cpu_hw_thread_num; // T0 or T1 if HT enabled ++ U16 reserved1; ++ S32 socket_master; ++ S32 core_master; ++ S32 thr_master; ++ U32 cpu_module_num; ++ U32 cpu_module_master; ++ U32 cpu_num_modules; ++ U32 cpu_core_type; ++ U32 arch_perfmon_ver; ++ U32 num_gp_counters; ++ U32 num_fixed_counters; ++ U32 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DRV_TOPOLOGY_INFO_cpu_number(dti) ((dti)->cpu_number) ++#define DRV_TOPOLOGY_INFO_cpu_package_num(dti) ((dti)->cpu_package_num) ++#define DRV_TOPOLOGY_INFO_cpu_core_num(dti) ((dti)->cpu_core_num) ++#define DRV_TOPOLOGY_INFO_socket_master(dti) ((dti)->socket_master) ++#define DRV_TOPOLOGY_INFO_core_master(dti) ((dti)->core_master) ++#define DRV_TOPOLOGY_INFO_thr_master(dti) ((dti)->thr_master) ++#define DRV_TOPOLOGY_INFO_cpu_hw_thread_num(dti) ((dti)->cpu_hw_thread_num) ++#define DRV_TOPOLOGY_INFO_cpu_module_num(dti) ((dti)->cpu_module_num) ++#define DRV_TOPOLOGY_INFO_cpu_module_master(dti) ((dti)->cpu_module_master) ++#define DRV_TOPOLOGY_INFO_cpu_num_modules(dti) ((dti)->cpu_num_modules) ++#define DRV_TOPOLOGY_INFO_cpu_core_type(dti) ((dti)->cpu_core_type) ++#define DRV_TOPOLOGY_INFO_arch_perfmon_ver(dti) ((dti)->arch_perfmon_ver) ++#define DRV_TOPOLOGY_INFO_num_gp_counters(dti) ((dti)->num_gp_counters) ++#define DRV_TOPOLOGY_INFO_num_fixed_counters(dti) ((dti)->num_fixed_counters) ++ ++#define VALUE_TO_BE_DISCOVERED 0 ++ ++// dimm information ++typedef struct DRV_DIMM_INFO_NODE_S DRV_DIMM_INFO_NODE; ++typedef DRV_DIMM_INFO_NODE * DRV_DIMM_INFO; ++ 
++struct DRV_DIMM_INFO_NODE_S { ++ U32 platform_id; ++ U32 channel_num; ++ U32 rank_num; ++ U32 value; ++ U8 mc_num; ++ U8 dimm_valid; ++ U8 valid_value; ++ U8 rank_value; ++ U8 density_value; ++ U8 width_value; ++ U16 socket_num; ++ U64 reserved1; ++ U64 reserved2; ++}; ++ ++#define DRV_DIMM_INFO_platform_id(di) ((di)->platform_id) ++#define DRV_DIMM_INFO_channel_num(di) ((di)->channel_num) ++#define DRV_DIMM_INFO_rank_num(di) ((di)->rank_num) ++#define DRV_DIMM_INFO_value(di) ((di)->value) ++#define DRV_DIMM_INFO_mc_num(di) ((di)->mc_num) ++#define DRV_DIMM_INFO_dimm_valid(di) ((di)->dimm_valid) ++#define DRV_DIMM_INFO_valid_value(di) ((di)->valid_value) ++#define DRV_DIMM_INFO_rank_value(di) ((di)->rank_value) ++#define DRV_DIMM_INFO_density_value(di) ((di)->density_value) ++#define DRV_DIMM_INFO_width_value(di) ((di)->width_value) ++#define DRV_DIMM_INFO_socket_num(di) ((di)->socket_num) ++ ++//platform information. need to get from driver ++#define MAX_PACKAGES 16 ++#define MAX_CHANNELS 8 ++#define MAX_RANKS 3 ++ ++typedef struct DRV_PLATFORM_INFO_NODE_S DRV_PLATFORM_INFO_NODE; ++typedef DRV_PLATFORM_INFO_NODE * DRV_PLATFORM_INFO; ++ ++struct DRV_PLATFORM_INFO_NODE_S { ++ U64 info; // platform info ++ U64 ddr_freq_index; // freq table index ++ U8 misc_valid; // misc enabled valid bit ++ U8 reserved1; // added for alignment purpose ++ U16 reserved2; ++ U32 vmm_timer_freq; // timer frequency from VMM on SoFIA (in HZ) ++ U64 misc_info; // misc enabled info ++ U64 ufs_freq; // ufs frequency (HSX only) ++ DRV_DIMM_INFO_NODE dimm_info[MAX_PACKAGES * MAX_CHANNELS * MAX_RANKS]; ++ U64 energy_multiplier; // Value of energy multiplier ++ U64 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++ U64 reserved6; ++}; ++ ++#define DRV_PLATFORM_INFO_info(data) ((data)->info) ++#define DRV_PLATFORM_INFO_ddr_freq_index(data) ((data)->ddr_freq_index) ++#define DRV_PLATFORM_INFO_misc_valid(data) ((data)->misc_valid) ++#define DRV_PLATFORM_INFO_misc_info(data) ((data)->misc_info) 
++#define DRV_PLATFORM_INFO_ufs_freq(data) ((data)->ufs_freq) ++#define DRV_PLATFORM_INFO_dimm_info(data) ((data)->dimm_info) ++#define DRV_PLATFORM_INFO_energy_multiplier(data) ((data)->energy_multiplier) ++#define DRV_PLATFORM_INFO_vmm_timer_freq(data) ((data)->vmm_timer_freq) ++ ++//platform information. need to get from Platform picker ++typedef struct PLATFORM_FREQ_INFO_NODE_S PLATFORM_FREQ_INFO_NODE; ++typedef PLATFORM_FREQ_INFO_NODE * PLATFORM_FREQ_INFO; ++ ++struct PLATFORM_FREQ_INFO_NODE_S { ++ float multiplier; // freq multiplier ++ double *table; // freq table ++ U32 table_size; // freq table size ++ U64 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++#define PLATFORM_FREQ_INFO_multiplier(data) ((data)->multiplier) ++#define PLATFORM_FREQ_INFO_table(data) ((data)->table) ++#define PLATFORM_FREQ_INFO_table_size(data) ((data)->table_size) ++ ++typedef struct DEVICE_INFO_NODE_S DEVICE_INFO_NODE; ++typedef DEVICE_INFO_NODE * DEVICE_INFO; //NEEDED in PP ++ ++struct DEVICE_INFO_NODE_S { ++ S8 *dll_name; ++ PVOID dll_handle; ++ S8 *cpu_name; ++ S8 *pmu_name; ++ DRV_STCHAR *event_db_file_name; ++ //PLATFORM_IDENTITY plat_identity; // this is undefined right now. Please take this as structure containing U64 ++ U32 plat_type; // device type (e.g., DEVICE_INFO_CORE, etc. ... see enum below) ++ U32 plat_sub_type; // cti_type (e.g., CTI_Sandybridge, etc., ... see env_info_types.h) ++ S32 dispatch_id; // this will be set in user mode dlls and will be unique across all IPF, IA32 (including MIDS). 
++ ECB *ecb; ++ EVENT_CONFIG ec; ++ DEV_CONFIG pcfg; ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 num_of_groups; ++ U32 size_of_alloc; // size of each event control block ++ PVOID drv_event; ++ U32 num_events; ++ U32 event_id_index; // event id index of device (basically how many events processed before this device) ++ U32 num_counters; ++ U32 group_index; ++ U32 num_packages; ++ U32 num_units; ++ U32 device_type; ++ U32 core_type; ++ U32 pmu_clone_id; // cti_type of platform to impersonate in device DLLs ++ U32 device_scope; ++ U32 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++}; ++ ++#define MAX_EVENT_NAME_LENGTH 256 ++ ++#define DEVICE_INFO_dll_name(pdev) ((pdev)->dll_name) ++#define DEVICE_INFO_dll_handle(pdev) ((pdev)->dll_handle) ++#define DEVICE_INFO_cpu_name(pdev) ((pdev)->cpu_name) ++#define DEVICE_INFO_pmu_name(pdev) ((pdev)->pmu_name) ++#define DEVICE_INFO_event_db_file_name(pdev) ((pdev)->event_db_file_name) ++#define DEVICE_INFO_plat_type(pdev) ((pdev)->plat_type) ++#define DEVICE_INFO_plat_sub_type(pdev) ((pdev)->plat_sub_type) ++#define DEVICE_INFO_pmu_clone_id(pdev) ((pdev)->pmu_clone_id) ++#define DEVICE_INFO_dispatch_id(pdev) ((pdev)->dispatch_id) ++#define DEVICE_INFO_ecb(pdev) ((pdev)->ecb) ++#define DEVICE_INFO_ec(pdev) ((pdev)->ec) ++#define DEVICE_INFO_pcfg(pdev) ((pdev)->pcfg) ++#define DEVICE_INFO_pcfg_unc(pdev) ((pdev)->pcfg_unc) ++#define DEVICE_INFO_num_groups(pdev) ((pdev)->num_of_groups) ++#define DEVICE_INFO_size_of_alloc(pdev) ((pdev)->size_of_alloc) ++#define DEVICE_INFO_drv_event(pdev) ((pdev)->drv_event) ++#define DEVICE_INFO_num_events(pdev) ((pdev)->num_events) ++#define DEVICE_INFO_event_id_index(pdev) ((pdev)->event_id_index) ++#define DEVICE_INFO_num_counters(pdev) ((pdev)->num_counters) ++#define DEVICE_INFO_group_index(pdev) ((pdev)->group_index) ++#define DEVICE_INFO_num_packages(pdev) ((pdev)->num_packages) ++#define DEVICE_INFO_num_units(pdev) ((pdev)->num_units) ++#define DEVICE_INFO_device_type(pdev) ((pdev)->device_type) 
++#define DEVICE_INFO_core_type(pdev) ((pdev)->core_type) ++#define DEVICE_INFO_device_scope(pdev) ((pdev)->device_scope) ++ ++typedef struct DEVICE_INFO_DATA_NODE_S DEVICE_INFO_DATA_NODE; ++typedef DEVICE_INFO_DATA_NODE * DEVICE_INFO_DATA; //NEEDED in PP ++ ++struct DEVICE_INFO_DATA_NODE_S { ++ DEVICE_INFO pdev_info; ++ U32 num_elements; ++ U32 num_allocated; ++ U64 reserved1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DEVICE_INFO_DATA_pdev_info(d) ((d)->pdev_info) ++#define DEVICE_INFO_DATA_num_elements(d) ((d)->num_elements) ++#define DEVICE_INFO_DATA_num_allocated(d) ((d)->num_allocated) ++ ++typedef enum { ++ DEVICE_INFO_CORE = 0, ++ DEVICE_INFO_UNCORE = 1, ++ DEVICE_INFO_CHIPSET = 2, ++ DEVICE_INFO_GFX = 3, ++ DEVICE_INFO_PWR = 4, ++ DEVICE_INFO_TELEMETRY = 5 ++} DEVICE_INFO_TYPE; ++ ++typedef enum { ++ INVALID_TERMINATE_TYPE = 0, ++ STOP_TERMINATE, ++ CANCEL_TERMINATE ++} ABNORMAL_TERMINATE_TYPE; ++ ++typedef enum { ++ DEVICE_SCOPE_PACKAGE = 0, ++ DEVICE_SCOPE_SYSTEM = 1 ++} DEVICE_SCOPE_TYPE; ++ ++typedef struct PCIFUNC_INFO_NODE_S PCIFUNC_INFO_NODE; ++typedef PCIFUNC_INFO_NODE * PCIFUNC_INFO; ++ ++struct PCIFUNC_INFO_NODE_S { ++ U32 valid; ++ U32 num_entries; // the number of entries found with same but difference bus_no. 
++ U64 deviceId; ++ U64 reserved1; ++ U64 reserved2; ++}; ++ ++#define PCIFUNC_INFO_NODE_funcno(x) ((x)->funcno) ++#define PCIFUNC_INFO_NODE_valid(x) ((x)->valid) ++#define PCIFUNC_INFO_NODE_deviceId(x) ((x)->deviceId) ++#define PCIFUNC_INFO_NODE_num_entries(x) ((x)->num_entries) ++ ++typedef struct PCIDEV_INFO_NODE_S PCIDEV_INFO_NODE; ++typedef PCIDEV_INFO_NODE * PCIDEV_INFO; ++ ++struct PCIDEV_INFO_NODE_S { ++ PCIFUNC_INFO_NODE func_info[MAX_PCI_FUNCNO]; ++ U32 valid; ++ U32 dispatch_id; ++ U64 reserved1; ++ U64 reserved2; ++}; ++ ++#define PCIDEV_INFO_NODE_func_info(x, i) ((x).func_info[i]) ++#define PCIDEV_INFO_NODE_valid(x) ((x).valid) ++ ++typedef struct UNCORE_PCIDEV_NODE_S UNCORE_PCIDEV_NODE; ++ ++struct UNCORE_PCIDEV_NODE_S { ++ PCIDEV_INFO_NODE pcidev[MAX_PCI_DEVNO]; ++ U32 dispatch_id; ++ U32 scan; ++ U32 num_uncore_units; ++ U32 num_deviceid_entries; ++ U8 dimm_device1; ++ U8 dimm_device2; ++ U16 reserved1; ++ U32 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++ U32 deviceid_list[MAX_PCI_DEVNO]; ++}; ++ ++// Structure used to perform uncore device discovery ++ ++typedef struct UNCORE_TOPOLOGY_INFO_NODE_S UNCORE_TOPOLOGY_INFO_NODE; ++typedef UNCORE_TOPOLOGY_INFO_NODE * UNCORE_TOPOLOGY_INFO; ++ ++struct UNCORE_TOPOLOGY_INFO_NODE_S { ++ UNCORE_PCIDEV_NODE device[MAX_DEVICES]; ++}; ++ ++#define UNCORE_TOPOLOGY_INFO_device(x, dev_index) ((x)->device[dev_index]) ++#define UNCORE_TOPOLOGY_INFO_device_dispatch_id(x, dev_index) \ ++ ((x)->device[dev_index].dispatch_id) ++#define UNCORE_TOPOLOGY_INFO_device_scan(x, dev_index) \ ++ ((x)->device[dev_index].scan) ++#define UNCORE_TOPOLOGY_INFO_pcidev_valid(x, dev_index, devno) \ ++ ((x)->device[dev_index].pcidev[devno].valid) ++#define UNCORE_TOPOLOGY_INFO_pcidev_dispatch_id(x, dev_index, devno) \ ++ ((x)->device[dev_index].pcidev[devno].dispatch_id) ++#define UNCORE_TOPOLOGY_INFO_pcidev(x, dev_index, devno) \ ++ ((x)->device[dev_index].pcidev[devno]) ++#define UNCORE_TOPOLOGY_INFO_num_uncore_units(x, dev_index) \ 
++ ((x)->device[dev_index].num_uncore_units) ++#define UNCORE_TOPOLOGY_INFO_num_deviceid_entries(x, dev_index) \ ++ ((x)->device[dev_index].num_deviceid_entries) ++#define UNCORE_TOPOLOGY_INFO_dimm_device1(x, dev_index) \ ++ ((x)->device[dev_index].dimm_device1) ++#define UNCORE_TOPOLOGY_INFO_dimm_device2(x, dev_index) \ ++ ((x)->device[dev_index].dimm_device2) ++#define UNCORE_TOPOLOGY_INFO_deviceid(x, dev_index, deviceid_idx) \ ++ ((x)->device[dev_index].deviceid_list[deviceid_idx]) ++#define UNCORE_TOPOLOGY_INFO_pcidev_set_funcno_valid(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].valid = 1) ++#define UNCORE_TOPOLOGY_INFO_pcidev_is_found_in_platform(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries) ++#define UNCORE_TOPOLOGY_INFO_pcidev_is_devno_funcno_valid(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].valid ? TRUE : \ ++ FALSE) ++#define UNCORE_TOPOLOGY_INFO_pcidev_is_device_found(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries > 0) ++ ++#define UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(x, dev_index, devno, \ ++ funcno) \ ++ ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries) ++ ++typedef enum { ++ CORE_TOPOLOGY_NODE = 0, ++ UNCORE_TOPOLOGY_NODE_IMC = 1, ++ UNCORE_TOPOLOGY_NODE_UBOX = 2, ++ UNCORE_TOPOLOGY_NODE_QPI = 3, ++ MAX_TOPOLOGY_DEV = 4, ++ // When you adding new topo node to this enum, make sue MAX_TOPOLOGY_DEV is always the last one. 
++} UNCORE_TOPOLOGY_NODE_INDEX_TYPE; ++ ++typedef struct PLATFORM_TOPOLOGY_REG_NODE_S PLATFORM_TOPOLOGY_REG_NODE; ++typedef PLATFORM_TOPOLOGY_REG_NODE * PLATFORM_TOPOLOGY_REG; ++ ++struct PLATFORM_TOPOLOGY_REG_NODE_S { ++ U32 bus; ++ U32 device; ++ U32 function; ++ U32 reg_id; ++ U64 reg_mask; ++ U64 reg_value[MAX_PACKAGES]; ++ U8 reg_type; ++ U8 device_valid; ++ U16 reserved1; ++ U32 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define PLATFORM_TOPOLOGY_REG_bus(x, i) ((x)[(i)].bus) ++#define PLATFORM_TOPOLOGY_REG_device(x, i) ((x)[(i)].device) ++#define PLATFORM_TOPOLOGY_REG_function(x, i) ((x)[(i)].function) ++#define PLATFORM_TOPOLOGY_REG_reg_id(x, i) ((x)[(i)].reg_id) ++#define PLATFORM_TOPOLOGY_REG_reg_mask(x, i) ((x)[(i)].reg_mask) ++#define PLATFORM_TOPOLOGY_REG_reg_type(x, i) ((x)[(i)].reg_type) ++#define PLATFORM_TOPOLOGY_REG_device_valid(x, i) ((x)[(i)].device_valid) ++#define PLATFORM_TOPOLOGY_REG_reg_value(x, i, package_no) \ ++ ((x)[(i)].reg_value[package_no]) ++ ++typedef struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S ++ PLATFORM_TOPOLOGY_DISCOVERY_NODE; ++typedef PLATFORM_TOPOLOGY_DISCOVERY_NODE * PLATFORM_TOPOLOGY_DISCOVERY; ++ ++struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S { ++ U32 device_index; ++ U32 device_id; ++ U32 num_registers; ++ U8 scope; ++ U8 prog_valid; ++ U16 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++ PLATFORM_TOPOLOGY_REG_NODE topology_regs[MAX_REGS]; ++}; ++ ++//Structure used to discover the uncore device topology_device ++ ++typedef struct PLATFORM_TOPOLOGY_PROG_NODE_S PLATFORM_TOPOLOGY_PROG_NODE; ++typedef PLATFORM_TOPOLOGY_PROG_NODE * PLATFORM_TOPOLOGY_PROG; ++ ++struct PLATFORM_TOPOLOGY_PROG_NODE_S { ++ U32 num_devices; ++ PLATFORM_TOPOLOGY_DISCOVERY_NODE topology_device[MAX_TOPOLOGY_DEV]; ++}; ++ ++#define PLATFORM_TOPOLOGY_PROG_num_devices(x) ((x)->num_devices) ++#define PLATFORM_TOPOLOGY_PROG_topology_device(x, dev_index) \ ++ ((x)->topology_device[dev_index]) ++#define 
PLATFORM_TOPOLOGY_PROG_topology_device_device_index(x, dev_index) \ ++ ((x)->topology_device[dev_index].device_index) ++#define PLATFORM_TOPOLOGY_PROG_topology_device_device_id(x, dev_index) \ ++ ((x)->topology_device[dev_index].device_id) ++#define PLATFORM_TOPOLOGY_PROG_topology_device_scope(x, dev_index) \ ++ ((x)->topology_device[dev_index].scope) ++#define PLATFORM_TOPOLOGY_PROG_topology_device_num_registers(x, dev_index) \ ++ ((x)->topology_device[dev_index].num_registers) ++#define PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid(x, dev_index) \ ++ ((x)->topology_device[dev_index].prog_valid) ++#define PLATFORM_TOPOLOGY_PROG_topology_topology_regs(x, dev_index) \ ++ ((x)->topology_device[dev_index].topology_regs) ++ ++typedef struct FPGA_GB_DISCOVERY_NODE_S FPGA_GB_DISCOVERY_NODE; ++ ++struct FPGA_GB_DISCOVERY_NODE_S { ++ U16 bar_num; ++ U16 feature_id; ++ U32 device_id; ++ U64 afu_id_l; ++ U64 afu_id_h; ++ U32 feature_offset; ++ U32 feature_len; ++ U8 scan; ++ U8 valid; ++ U16 reserved1; ++ U32 reserved2; ++}; ++ ++typedef struct FPGA_GB_DEV_NODE_S FPGA_GB_DEV_NODE; ++typedef FPGA_GB_DEV_NODE * FPGA_GB_DEV; ++ ++struct FPGA_GB_DEV_NODE_S { ++ U32 num_devices; ++ FPGA_GB_DISCOVERY_NODE fpga_gb_device[MAX_DEVICES]; ++}; ++ ++#define FPGA_GB_DEV_num_devices(x) ((x)->num_devices) ++#define FPGA_GB_DEV_device(x, dev_index) ((x)->fpga_gb_device[dev_index]) ++#define FPGA_GB_DEV_bar_num(x, dev_index) ((x)->fpga_gb_device[dev_index].bar_num) ++#define FPGA_GB_DEV_feature_id(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].feature_id) ++#define FPGA_GB_DEV_device_id(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].device_id) ++#define FPGA_GB_DEV_afu_id_low(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].afu_id_l) ++#define FPGA_GB_DEV_afu_id_high(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].afu_id_h) ++#define FPGA_GB_DEV_feature_offset(x, dev_index) \ ++ ((x)->fpga_gb_device[dev_index].feature_offset) ++#define FPGA_GB_DEV_feature_len(x, dev_index) \ 
++ ((x)->fpga_gb_device[dev_index].feature_len) ++#define FPGA_GB_DEV_scan(x, dev_index) ((x)->fpga_gb_device[dev_index].scan) ++#define FPGA_GB_DEV_valid(x, dev_index) ((x)->fpga_gb_device[dev_index].valid) ++ ++typedef enum { ++ UNCORE_TOPOLOGY_INFO_NODE_IMC = 0, ++ UNCORE_TOPOLOGY_INFO_NODE_QPILL = 1, ++ UNCORE_TOPOLOGY_INFO_NODE_HA = 2, ++ UNCORE_TOPOLOGY_INFO_NODE_R3 = 3, ++ UNCORE_TOPOLOGY_INFO_NODE_R2 = 4, ++ UNCORE_TOPOLOGY_INFO_NODE_IRP = 5, ++ UNCORE_TOPOLOGY_INFO_NODE_IMC_UCLK = 6, ++ UNCORE_TOPOLOGY_INFO_NODE_EDC_ECLK = 7, ++ UNCORE_TOPOLOGY_INFO_NODE_EDC_UCLK = 8, ++ UNCORE_TOPOLOGY_INFO_NODE_M2M = 9, ++ UNCORE_TOPOLOGY_INFO_NODE_HFI_RXE = 10, ++ UNCORE_TOPOLOGY_INFO_NODE_HFI_TXE = 11, ++ UNCORE_TOPOLOGY_INFO_NODE_FPGA_CACHE = 12, ++ UNCORE_TOPOLOGY_INFO_NODE_FPGA_FAB = 13, ++ UNCORE_TOPOLOGY_INFO_NODE_FPGA_THERMAL = 14, ++ UNCORE_TOPOLOGY_INFO_NODE_FPGA_POWER = 15, ++} UNCORE_TOPOLOGY_INFO_NODE_INDEX_TYPE; ++ ++typedef struct SIDEBAND_INFO_NODE_S SIDEBAND_INFO_NODE; ++typedef SIDEBAND_INFO_NODE * SIDEBAND_INFO; ++ ++struct SIDEBAND_INFO_NODE_S { ++ U32 tid; ++ U32 pid; ++ U64 tsc; ++}; ++ ++#define SIDEBAND_INFO_pid(x) ((x)->pid) ++#define SIDEBAND_INFO_tid(x) ((x)->tid) ++#define SIDEBAND_INFO_tsc(x) ((x)->tsc) ++ ++typedef struct SAMPLE_DROP_NODE_S SAMPLE_DROP_NODE; ++typedef SAMPLE_DROP_NODE * SAMPLE_DROP; ++ ++struct SAMPLE_DROP_NODE_S { ++ U32 os_id; ++ U32 cpu_id; ++ U32 sampled; ++ U32 dropped; ++}; ++ ++#define SAMPLE_DROP_os_id(x) ((x)->os_id) ++#define SAMPLE_DROP_cpu_id(x) ((x)->cpu_id) ++#define SAMPLE_DROP_sampled(x) ((x)->sampled) ++#define SAMPLE_DROP_dropped(x) ((x)->dropped) ++ ++#define MAX_SAMPLE_DROP_NODES 20 ++ ++typedef struct SAMPLE_DROP_INFO_NODE_S SAMPLE_DROP_INFO_NODE; ++typedef SAMPLE_DROP_INFO_NODE * SAMPLE_DROP_INFO; ++ ++struct SAMPLE_DROP_INFO_NODE_S { ++ U32 size; ++ SAMPLE_DROP_NODE drop_info[MAX_SAMPLE_DROP_NODES]; ++}; ++ ++#define SAMPLE_DROP_INFO_size(x) ((x)->size) ++#define SAMPLE_DROP_INFO_drop_info(x, index) 
((x)->drop_info[index]) ++ ++#define IS_PEBS_SAMPLE_RECORD(sample_record) \ ++ ((SAMPLE_RECORD_pid_rec_index(sample_record) == (U32)-1) && \ ++ (SAMPLE_RECORD_tid(sample_record) == (U32)-1)) ++ ++/* ++ * VMM vendor information ++ */ ++#define KVM_SIGNATURE "KVMKVMKVM\0\0\0" ++#define XEN_SIGNATURE "XenVMMXenVMM" ++#define VMWARE_SIGNATURE "VMwareVMware" ++#define HYPERV_SIGNATURE "Microsoft Hv" ++ ++#define DRV_VMM_UNKNOWN 0 ++#define DRV_VMM_MOBILEVISOR 1 ++#define DRV_VMM_KVM 2 ++#define DRV_VMM_XEN 3 ++#define DRV_VMM_HYPERV 4 ++#define DRV_VMM_VMWARE 5 ++#define DRV_VMM_ACRN 6 ++ ++/* ++ * @macro DRV_SETUP_INFO_NODE_S ++ * @brief ++ * This structure supports driver information such as NMI profiling mode. ++ */ ++ ++typedef struct DRV_SETUP_INFO_NODE_S DRV_SETUP_INFO_NODE; ++typedef DRV_SETUP_INFO_NODE * DRV_SETUP_INFO; ++ ++struct DRV_SETUP_INFO_NODE_S { ++ union { ++ U64 modes; ++ struct { ++ U64 nmi_mode : 1; ++ U64 vmm_mode : 1; ++ U64 vmm_vendor : 8; ++ U64 vmm_guest_vm : 1; ++ U64 pebs_accessible : 1; ++ U64 cpu_hotplug_mode : 1; ++ U64 matrix_inaccessible : 1; ++ U64 page_table_isolation : 2; ++ U64 pebs_ignored_by_pti : 1; ++ U64 reserved1 : 47; ++ } s1; ++ } u1; ++ U64 reserved2; ++ U64 reserved3; ++ U64 reserved4; ++}; ++ ++#define DRV_SETUP_INFO_nmi_mode(info) ((info)->u1.s1.nmi_mode) ++#define DRV_SETUP_INFO_vmm_mode(info) ((info)->u1.s1.vmm_mode) ++#define DRV_SETUP_INFO_vmm_vendor(info) ((info)->u1.s1.vmm_vendor) ++#define DRV_SETUP_INFO_vmm_guest_vm(info) ((info)->u1.s1.vmm_guest_vm) ++#define DRV_SETUP_INFO_pebs_accessible(info) ((info)->u1.s1.pebs_accessible) ++#define DRV_SETUP_INFO_cpu_hotplug_mode(info) ((info)->u1.s1.cpu_hotplug_mode) ++#define DRV_SETUP_INFO_matrix_inaccessible(info) \ ++ ((info)->u1.s1.matrix_inaccessible) ++#define DRV_SETUP_INFO_page_table_isolation(info) \ ++ ((info)->u1.s1.page_table_isolation) ++#define DRV_SETUP_INFO_pebs_ignored_by_pti(info) \ ++ ((info)->u1.s1.pebs_ignored_by_pti) ++ ++#define 
DRV_SETUP_INFO_PTI_DISABLED 0 ++#define DRV_SETUP_INFO_PTI_KPTI 1 ++#define DRV_SETUP_INFO_PTI_KAISER 2 ++#define DRV_SETUP_INFO_PTI_VA_SHADOW 3 ++#define DRV_SETUP_INFO_PTI_UNKNOWN 4 ++ ++/* ++ Type: task_info_t ++ Description: ++ Represents the equivalent of a Linux Thread. ++ Fields: ++ o id: A unique identifier. May be `NULL_TASK_ID`. ++ o name: Human-readable name for this task ++ o executable_name: Literal path to the binary elf that this task's ++ entry point is executing from. ++ o address_space_id: The unique ID for the address space this task is ++ running in. ++ */ ++struct task_info_node_s { ++ U64 id; ++ char name[32]; ++ U64 address_space_id; ++}; ++ ++/* ++ Type: REMOTE_SWITCH ++ Description: ++ Collection switch set on target ++*/ ++typedef struct REMOTE_SWITCH_NODE_S REMOTE_SWITCH_NODE; ++typedef REMOTE_SWITCH_NODE * REMOTE_SWITCH; ++ ++struct REMOTE_SWITCH_NODE_S { ++ U32 auto_mode : 1; ++ U32 adv_hotspot : 1; ++ U32 lbr_callstack : 2; ++ U32 full_pebs : 1; ++ U32 uncore_supported : 1; ++ U32 agent_mode : 2; ++ U32 sched_switch_enabled : 1; ++ U32 data_transfer_mode : 1; ++ U32 reserved1 : 22; ++ U32 reserved2; ++}; ++ ++#define REMOTE_SWITCH_auto_mode(x) ((x).auto_mode) ++#define REMOTE_SWITCH_adv_hotspot(x) ((x).adv_hotspot) ++#define REMOTE_SWITCH_lbr_callstack(x) ((x).lbr_callstack) ++#define REMOTE_SWITCH_full_pebs(x) ((x).full_pebs) ++#define REMOTE_SWITCH_uncore_supported(x) ((x).uncore_supported) ++#define REMOTE_SWITCH_agent_mode(x) ((x).agent_mode) ++#define REMOTE_SWITCH_sched_switch_enabled(x) ((x).sched_switch_enabled) ++#define REMOTE_SWITCH_data_transfer_mode(x) ((x).data_transfer_mode) ++ ++/* ++ Type: REMOTE_OS_INFO ++ Description: ++ Remote target OS system information ++*/ ++#define OSINFOLEN 64 ++typedef struct REMOTE_OS_INFO_NODE_S REMOTE_OS_INFO_NODE; ++typedef REMOTE_OS_INFO_NODE * REMOTE_OS_INFO; ++ ++struct REMOTE_OS_INFO_NODE_S { ++ U32 os_family; ++ U32 reserved1; ++ S8 sysname[OSINFOLEN]; ++ S8 release[OSINFOLEN]; ++ S8 
version[OSINFOLEN]; ++}; ++ ++#define REMOTE_OS_INFO_os_family(x) ((x).os_family) ++#define REMOTE_OS_INFO_sysname(x) ((x).sysname) ++#define REMOTE_OS_INFO_release(x) ((x).release) ++#define REMOTE_OS_INFO_version(x) ((x).version) ++ ++/* ++ Type: REMOTE_HARDWARE_INFO ++ Description: ++ Remote target hardware information ++*/ ++typedef struct REMOTE_HARDWARE_INFO_NODE_S REMOTE_HARDWARE_INFO_NODE; ++typedef REMOTE_HARDWARE_INFO_NODE * REMOTE_HARDWARE_INFO; ++ ++struct REMOTE_HARDWARE_INFO_NODE_S { ++ U32 num_cpus; ++ U32 family; ++ U32 model; ++ U32 stepping; ++ U64 tsc_freq; ++ U64 reserved2; ++ U64 reserved3; ++}; ++ ++#define REMOTE_HARDWARE_INFO_num_cpus(x) ((x).num_cpus) ++#define REMOTE_HARDWARE_INFO_family(x) ((x).family) ++#define REMOTE_HARDWARE_INFO_model(x) ((x).model) ++#define REMOTE_HARDWARE_INFO_stepping(x) ((x).stepping) ++#define REMOTE_HARDWARE_INFO_tsc_frequency(x) ((x).tsc_freq) ++ ++/* ++ Type: SEP_AGENT_MODE ++ Description: ++ SEP mode on target agent ++*/ ++typedef enum { ++ NATIVE_AGENT = 0, ++ HOST_VM_AGENT, // Service OS in ACRN ++ GUEST_VM_AGENT // User OS in ACRN ++} SEP_AGENT_MODE; ++ ++/* ++ Type: DATA_TRANSFER_MODE ++ Description: ++ Data transfer mode from target agent to remote host ++*/ ++typedef enum { ++ IMMEDIATE_TRANSFER = 0, ++ DELAYED_TRANSFER // Send after collection is done ++} DATA_TRANSFER_MODE; ++ ++#define MAX_NUM_OS_ALLOWED 6 ++#define TARGET_IP_NAMELEN 64 ++ ++typedef struct TARGET_INFO_NODE_S TARGET_INFO_NODE; ++typedef TARGET_INFO_NODE * TARGET_INFO; ++ ++struct TARGET_INFO_NODE_S { ++ U32 num_of_agents; ++ U32 reserved; ++ U32 os_id[MAX_NUM_OS_ALLOWED]; ++ S8 ip_address[MAX_NUM_OS_ALLOWED][TARGET_IP_NAMELEN]; ++ REMOTE_OS_INFO_NODE os_info[MAX_NUM_OS_ALLOWED]; ++ REMOTE_HARDWARE_INFO_NODE hardware_info[MAX_NUM_OS_ALLOWED]; ++ REMOTE_SWITCH_NODE remote_switch[MAX_NUM_OS_ALLOWED]; ++}; ++ ++#define TARGET_INFO_num_of_agents(x) ((x)->num_of_agents) ++#define TARGET_INFO_os_id(x, i) ((x)->os_id[i]) ++#define 
TARGET_INFO_os_info(x, i) ((x)->os_info[i]) ++#define TARGET_INFO_ip_address(x, i) ((x)->ip_address[i]) ++#define TARGET_INFO_hardware_info(x, i) ((x)->hardware_info[i]) ++#define TARGET_INFO_remote_switch(x, i) ((x)->remote_switch[i]) ++ ++typedef struct CPU_MAP_TRACE_NODE_S CPU_MAP_TRACE_NODE; ++typedef CPU_MAP_TRACE_NODE * CPU_MAP_TRACE; ++ ++struct CPU_MAP_TRACE_NODE_S { ++ U64 tsc; ++ U32 os_id; ++ U32 vcpu_id; ++ U32 pcpu_id; ++ U8 is_static : 1; ++ U8 initial : 1; ++ U8 reserved1 : 6; ++ U8 reserved2; ++ U16 reserved3; ++ U64 reserved4; ++}; ++ ++#define CPU_MAP_TRACE_tsc(x) ((x)->tsc) ++#define CPU_MAP_TRACE_os_id(x) ((x)->os_id) ++#define CPU_MAP_TRACE_vcpu_id(x) ((x)->vcpu_id) ++#define CPU_MAP_TRACE_pcpu_id(x) ((x)->pcpu_id) ++#define CPU_MAP_TRACE_is_static(x) ((x)->is_static) ++#define CPU_MAP_TRACE_initial(x) ((x)->initial) ++ ++typedef struct VM_SWITCH_TRACE_NODE_S VM_SWITCH_TRACE_NODE; ++typedef VM_SWITCH_TRACE_NODE * VM_SWITCH_TRACE; ++ ++struct VM_SWITCH_TRACE_NODE_S { ++ U64 tsc; ++ U32 from_os_id; ++ U32 to_os_id; ++ U64 reason; ++ U64 reserved1; ++ U64 reserved2; ++}; ++ ++#define VM_SWITCH_TRACE_tsc(x) ((x)->tsc) ++#define VM_SWITCH_TRACE_from_os_id(x) ((x)->from_os_id) ++#define VM_SWITCH_TRACE_to_os_id(x) ((x)->to_os_id) ++#define VM_SWITCH_TRACE_reason(x) ((x)->reason) ++ ++typedef struct EMON_BUFFER_DRIVER_HELPER_NODE_S EMON_BUFFER_DRIVER_HELPER_NODE; ++typedef EMON_BUFFER_DRIVER_HELPER_NODE * EMON_BUFFER_DRIVER_HELPER; ++ ++struct EMON_BUFFER_DRIVER_HELPER_NODE_S { ++ U32 num_entries_per_package; ++ U32 num_cpu; ++ U32 power_num_package_events; ++ U32 power_num_module_events; ++ U32 power_num_thread_events; ++ U32 power_device_offset_in_package; ++ U32 core_num_events; ++ U32 core_index_to_thread_offset_map[]; ++}; ++ ++#define EMON_BUFFER_DRIVER_HELPER_num_entries_per_package(x) \ ++ ((x)->num_entries_per_package) ++#define EMON_BUFFER_DRIVER_HELPER_num_cpu(x) ((x)->num_cpu) ++#define EMON_BUFFER_DRIVER_HELPER_power_num_package_events(x) 
\ ++ ((x)->power_num_package_events) ++#define EMON_BUFFER_DRIVER_HELPER_power_num_module_events(x) \ ++ ((x)->power_num_module_events) ++#define EMON_BUFFER_DRIVER_HELPER_power_num_thread_events(x) \ ++ ((x)->power_num_thread_events) ++#define EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package(x) \ ++ ((x)->power_device_offset_in_package) ++#define EMON_BUFFER_DRIVER_HELPER_core_num_events(x) ((x)->core_num_events) ++#define EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map(x) \ ++ ((x)->core_index_to_thread_offset_map) ++ ++// EMON counts buffer follow this hardware topology: package -> device -> unit/thread -> event ++ ++// Calculate the CORE thread offset ++// Using for initialization: calculate the cpu_index_to_thread_offset_map in emon_Create_Emon_Buffer_Descriptor() ++// EMON_BUFFER_CORE_THREAD_OFFSET = ++// package_id * num_entries_per_package + //package offset ++// device_offset_in_package + //device base offset ++// (core_id * threads_per_core + thread_id) * num_core_events + //thread offset ++#define EMON_BUFFER_CORE_THREAD_OFFSET(package_id, num_entries_per_package, \ ++ device_offset_in_package, core_id, \ ++ threads_per_core, thread_id, \ ++ num_core_events) \ ++ (package_id * num_entries_per_package + device_offset_in_package + \ ++ (core_id * threads_per_core + thread_id) * num_core_events) ++ ++// Take cpu_index and cpu_index_to_thread_offset_map to get thread_offset, and calculate the CORE event offset ++// Using for kernel and emon_output.c printing function ++// EMON_BUFFER_CORE_EVENT_OFFSET = ++// cpu_index_to_thread_offset + //thread offset ++// core_event_id //event_offset ++#define EMON_BUFFER_CORE_EVENT_OFFSET(cpu_index_to_thread_offset, \ ++ core_event_id) \ ++ (cpu_index_to_thread_offset + core_event_id) ++ ++// Calculate the device level to UNCORE event offset ++// Using for kernel and emon_output.c printing function ++// EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE = ++// device_offset_in_package + 
//device_offset_in_package ++// device_unit_id * num_unit_events + //unit_offset ++// device_event_id //event_offset ++#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE( \ ++ device_offset_in_package, device_unit_id, num_unit_events, \ ++ device_event_id) \ ++ (device_offset_in_package + device_unit_id * num_unit_events + \ ++ device_event_id) ++ ++// Take 'device level to UNCORE event offset' and package_id, calculate the UNCORE package level event offset ++// Using for emon_output.c printing function ++// EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET = ++// package_id * num_entries_per_package + //package_offset ++// uncore_offset_in_package; //offset_in_package ++#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( \ ++ package_id, num_entries_per_package, uncore_offset_in_package) \ ++ (package_id * num_entries_per_package + uncore_offset_in_package) ++ ++// Take 'device level to UNCORE event offset', calculate the UNCORE system level event offset ++// Using for emon_output.c printing function ++// EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET = ++// device_offset_in_system + //device_offset_in_system ++// device_unit_id * num_system_events + //device_unit_offset ++// device_event_id //event_offset ++#define EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET(device_offset_in_system, \ ++ device_unit_id, \ ++ num_system_events, \ ++ device_event_id) \ ++ (device_offset_in_system + device_unit_id * num_system_events + \ ++ device_event_id) ++ ++// Calculate the package level power event offset ++// Using for kernel and emon_output.c printing function ++// EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET = ++// package_id * num_entries_per_package + //package offset ++// device_offset_in_package + //device offset ++// package_event_offset //power package event offset ++#define EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( \ ++ package_id, num_entries_per_package, device_offset_in_package, \ ++ device_event_offset) \ ++ (package_id * num_entries_per_package + device_offset_in_package + 
\ ++ device_event_offset) ++ ++// Calculate the module level power event offset ++// Using for kernel and emon_output.c printing function ++// EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET = ++// package_id * num_entries_per_package + //package offset ++// device_offset_in_package + //device offset ++// num_package_events + //package event offset ++// module_id * num_module_events + //module offset ++// module_event_offset //power module event offset ++#define EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( \ ++ package_id, num_entries_per_package, device_offset_in_package, \ ++ num_package_events, module_id, num_module_events, device_event_offset) \ ++ (package_id * num_entries_per_package + device_offset_in_package + \ ++ num_package_events + module_id * num_module_events + \ ++ device_event_offset) ++ ++// Calculate the package level power event offset ++// Using for kernel and emon_output.c printing function ++// EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET = ++// package_id * num_entries_per_package + //package offset ++// device_offset_in_package + //device offset ++// num_package_events + //package offset ++// num_modules_per_package * num_module_events + //module offset ++// (core_id * threads_per_core + thread_id) * num_thread_events + //thread offset ++// thread_event_offset //power thread event offset ++#define EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( \ ++ package_id, num_entries_per_package, device_offset_in_package, \ ++ num_package_events, num_modules_per_package, num_module_events, \ ++ core_id, threads_per_core, thread_id, num_unit_events, \ ++ device_event_offset) \ ++ (package_id * num_entries_per_package + device_offset_in_package + \ ++ num_package_events + \ ++ num_modules_per_package * num_module_events + \ ++ (core_id * threads_per_core + thread_id) * num_unit_events + \ ++ device_event_offset) ++ ++/* ++ ************************************ ++ * DRIVER LOG BUFFER DECLARATIONS * ++ ************************************ ++ */ ++ ++#define 
DRV_MAX_NB_LOG_CATEGORIES 256 // Must be a multiple of 8 ++#define DRV_NB_LOG_CATEGORIES 14 ++#define DRV_LOG_CATEGORY_LOAD 0 ++#define DRV_LOG_CATEGORY_INIT 1 ++#define DRV_LOG_CATEGORY_DETECTION 2 ++#define DRV_LOG_CATEGORY_ERROR 3 ++#define DRV_LOG_CATEGORY_STATE_CHANGE 4 ++#define DRV_LOG_CATEGORY_MARK 5 ++#define DRV_LOG_CATEGORY_DEBUG 6 ++#define DRV_LOG_CATEGORY_FLOW 7 ++#define DRV_LOG_CATEGORY_ALLOC 8 ++#define DRV_LOG_CATEGORY_INTERRUPT 9 ++#define DRV_LOG_CATEGORY_TRACE 10 ++#define DRV_LOG_CATEGORY_REGISTER 11 ++#define DRV_LOG_CATEGORY_NOTIFICATION 12 ++#define DRV_LOG_CATEGORY_WARNING 13 ++ ++#define LOG_VERBOSITY_UNSET 0xFF ++#define LOG_VERBOSITY_DEFAULT 0xFE ++#define LOG_VERBOSITY_NONE 0 ++ ++#define LOG_CHANNEL_MEMLOG 0x1 ++#define LOG_CHANNEL_AUXMEMLOG 0x2 ++#define LOG_CHANNEL_PRINTK 0x4 ++#define LOG_CHANNEL_TRACEK 0x8 ++#define LOG_CHANNEL_MOSTWHERE \ ++ (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK) ++#define LOG_CHANNEL_EVERYWHERE \ ++ (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK | \ ++ LOG_CHANNEL_TRACEK) ++#define LOG_CHANNEL_MASK LOG_CATEGORY_VERBOSITY_EVERYWHERE ++ ++#define LOG_CONTEXT_REGULAR 0x10 ++#define LOG_CONTEXT_INTERRUPT 0x20 ++#define LOG_CONTEXT_NOTIFICATION 0x40 ++#define LOG_CONTEXT_ALL \ ++ (LOG_CONTEXT_REGULAR | LOG_CONTEXT_INTERRUPT | LOG_CONTEXT_NOTIFICATION) ++#define LOG_CONTEXT_MASK LOG_CONTEXT_ALL ++#define LOG_CONTEXT_SHIFT 4 ++ ++#define DRV_LOG_NOTHING 0 ++#define DRV_LOG_FLOW_IN 1 ++#define DRV_LOG_FLOW_OUT 2 ++ ++/* ++ * @macro DRV_LOG_ENTRY_NODE_S ++ * @brief ++ * This structure is used to store a log message from the driver. 
++ */ ++ ++#define DRV_LOG_MESSAGE_LENGTH 64 ++#define DRV_LOG_FUNCTION_NAME_LENGTH 32 ++ ++typedef struct DRV_LOG_ENTRY_NODE_S DRV_LOG_ENTRY_NODE; ++typedef DRV_LOG_ENTRY_NODE * DRV_LOG_ENTRY; ++struct DRV_LOG_ENTRY_NODE_S { ++ char function_name[DRV_LOG_FUNCTION_NAME_LENGTH]; ++ char message[DRV_LOG_MESSAGE_LENGTH]; ++ ++ U16 temporal_tag; ++ U16 integrity_tag; ++ ++ U8 category; ++ U8 secondary_info; // Secondary attribute: ++ // former driver state for STATE category ++ // 'ENTER' or 'LEAVE' for FLOW and TRACE categories ++ U16 processor_id; // NB: not guaranteed to be accurate (due to preemption / core migration) ++ ++ U64 tsc; ++ ++ U16 nb_active_interrupts; // never 100% accurate, merely indicative ++ U8 active_drv_operation; // only 100% accurate for IOCTL-called functions ++ U8 driver_state; ++ ++ U16 line_number; // as per the __LINE__ macro ++ ++ U16 nb_active_notifications; ++ ++ U64 reserved; // need padding to reach 128 bytes ++}; // this structure should be exactly 128-byte long ++ ++#define DRV_LOG_ENTRY_temporal_tag(ent) ((ent)->temporal_tag) ++#define DRV_LOG_ENTRY_integrity_tag(ent) ((ent)->integrity_tag) ++#define DRV_LOG_ENTRY_category(ent) ((ent)->category) ++#define DRV_LOG_ENTRY_secondary_info(ent) ((ent)->secondary_info) ++#define DRV_LOG_ENTRY_processor_id(ent) ((ent)->processor_id) ++#define DRV_LOG_ENTRY_tsc(ent) ((ent)->tsc) ++#define DRV_LOG_ENTRY_driver_state(ent) ((ent)->driver_state) ++#define DRV_LOG_ENTRY_active_drv_operation(ent) ((ent)->active_drv_operation) ++#define DRV_LOG_ENTRY_nb_active_interrupts(ent) ((ent)->nb_active_interrupts) ++#define DRV_LOG_ENTRY_nb_active_notifications(ent) \ ++ ((ent)->nb_active_notifications) ++#define DRV_LOG_ENTRY_line_number(ent) ((ent)->line_number) ++#define DRV_LOG_ENTRY_message(ent) ((ent)->message) ++#define DRV_LOG_ENTRY_function_name(ent) ((ent)->function_name) ++ ++/* ++ * @macro DRV_LOG_BUFFER_NODE_S ++ * @brief ++ * Circular buffer structure storing the latest DRV_LOG_MAX_NB_ENTRIES 
driver messages ++ */ ++ ++#define DRV_LOG_SIGNATURE_SIZE 8 // Must be a multiple of 8 ++#define DRV_LOG_SIGNATURE_0 'S' ++#define DRV_LOG_SIGNATURE_1 'e' ++#define DRV_LOG_SIGNATURE_2 'P' ++#define DRV_LOG_SIGNATURE_3 'd' ++#define DRV_LOG_SIGNATURE_4 'R' ++#define DRV_LOG_SIGNATURE_5 'v' ++#define DRV_LOG_SIGNATURE_6 '5' ++#define DRV_LOG_SIGNATURE_7 '\0' ++// The signature is "SePdRv4"; ++// not declared as string on purpose to avoid false positives when trying to identify the log buffer in a crash dump ++ ++#define DRV_LOG_VERSION 1 ++#define DRV_LOG_FILLER_BYTE 1 ++ ++#define DRV_LOG_DRIVER_VERSION_SIZE 64 // Must be a multiple of 8 ++#define DRV_LOG_MAX_NB_PRI_ENTRIES (8192 * 2) ++ // 2MB buffer [*HAS TO BE* a power of 2!] [8192 entries = 1 MB] ++#define DRV_LOG_MAX_NB_AUX_ENTRIES (8192) ++ // 1MB buffer [*HAS TO BE* a power of 2!] ++#define DRV_LOG_MAX_NB_ENTRIES \ ++ (DRV_LOG_MAX_NB_PRI_ENTRIES + DRV_LOG_MAX_NB_AUX_ENTRIES) ++ ++typedef struct DRV_LOG_BUFFER_NODE_S DRV_LOG_BUFFER_NODE; ++typedef DRV_LOG_BUFFER_NODE * DRV_LOG_BUFFER; ++struct DRV_LOG_BUFFER_NODE_S { ++ char header_signature[DRV_LOG_SIGNATURE_SIZE]; ++ // some signature to be able to locate the log even without -g; ASCII would help ++ // should we change the signature for each log's version instead of keeping it in a ++ // dedicated field? ++ ++ U32 log_size; // filled with sizeof(this structure) at init. ++ U32 max_nb_pri_entries; // filled with the driver's "DRV_LOG_MAX_NB_PRIM_ENTRIES" at init. ++ ++ U32 max_nb_aux_entries; // filled with the driver's "DRV_LOG_MAX_NB_AUX_ENTRIES" at init. ++ U32 reserved1; ++ ++ U64 init_time; // primary log disambiguator ++ ++ U32 disambiguator; ++ // used to differentiate the driver's version of the log when a full memory dump can contain some from userland ++ ++ U32 log_version; // 0 at first, increase when format changes? ++ ++ U32 pri_entry_index; ++ // should be incremented *atomically* as a means to (re)allocate the next primary log entry. 
++ ++ U32 aux_entry_index; ++ // should be incremented *atomically* as a means to (re)allocate the next auxiliary log entry. ++ ++ char driver_version[DRV_LOG_DRIVER_VERSION_SIZE]; ++ ++ U8 driver_state; ++ U8 active_drv_operation; ++ U16 reserved2; ++ U32 nb_drv_operations; ++ ++ U32 nb_interrupts; ++ U16 nb_active_interrupts; ++ U16 nb_active_notifications; ++ ++ U32 nb_notifications; ++ U32 nb_driver_state_transitions; ++ ++ U8 contiguous_physical_memory; ++ U8 reserved3; ++ U16 reserved4; ++ U32 reserved5; ++ ++ U8 verbosities[DRV_MAX_NB_LOG_CATEGORIES]; ++ ++ DRV_LOG_ENTRY_NODE entries[DRV_LOG_MAX_NB_ENTRIES]; ++ ++ char footer_signature[DRV_LOG_SIGNATURE_SIZE]; ++}; ++ ++#define DRV_LOG_BUFFER_pri_entry_index(log) ((log)->pri_entry_index) ++#define DRV_LOG_BUFFER_aux_entry_index(log) ((log)->aux_entry_index) ++#define DRV_LOG_BUFFER_header_signature(log) ((log)->header_signature) ++#define DRV_LOG_BUFFER_footer_signature(log) ((log)->footer_signature) ++#define DRV_LOG_BUFFER_log_size(log) ((log)->log_size) ++#define DRV_LOG_BUFFER_driver_version(log) ((log)->driver_version) ++#define DRV_LOG_BUFFER_driver_state(log) ((log)->driver_state) ++#define DRV_LOG_BUFFER_active_drv_operation(log) ((log)->active_drv_operation) ++#define DRV_LOG_BUFFER_nb_interrupts(log) ((log)->nb_interrupts) ++#define DRV_LOG_BUFFER_nb_active_interrupts(log) ((log)->nb_active_interrupts) ++#define DRV_LOG_BUFFER_nb_notifications(log) ((log)->nb_notifications) ++#define DRV_LOG_BUFFER_nb_active_notifications(log) \ ++ ((log)->nb_active_notifications) ++#define DRV_LOG_BUFFER_nb_driver_state_transitions(log) \ ++ ((log)->nb_driver_state_transitions) ++#define DRV_LOG_BUFFER_nb_drv_operations(log) ((log)->nb_drv_operations) ++#define DRV_LOG_BUFFER_max_nb_pri_entries(log) ((log)->max_nb_pri_entries) ++#define DRV_LOG_BUFFER_max_nb_aux_entries(log) ((log)->max_nb_aux_entries) ++#define DRV_LOG_BUFFER_init_time(log) ((log)->init_time) ++#define DRV_LOG_BUFFER_disambiguator(log) 
((log)->disambiguator) ++#define DRV_LOG_BUFFER_log_version(log) ((log)->log_version) ++#define DRV_LOG_BUFFER_entries(log) ((log)->entries) ++#define DRV_LOG_BUFFER_contiguous_physical_memory(log) \ ++ ((log)->contiguous_physical_memory) ++#define DRV_LOG_BUFFER_verbosities(log) ((log)->verbosities) ++ ++#define DRV_LOG_CONTROL_MAX_DATA_SIZE \ ++ DRV_MAX_NB_LOG_CATEGORIES // Must be a multiple of 8 ++ ++typedef struct DRV_LOG_CONTROL_NODE_S DRV_LOG_CONTROL_NODE; ++typedef DRV_LOG_CONTROL_NODE * DRV_LOG_CONTROL; ++ ++struct DRV_LOG_CONTROL_NODE_S { ++ U32 command; ++ U32 reserved1; ++ U8 data[DRV_LOG_CONTROL_MAX_DATA_SIZE]; ++ // only DRV_NB_LOG_CATEGORIES elements will be used, but let's plan for backwards compatibility ++ // if LOG_CATEGORY_UNSET, then READ instead of WRITE ++ ++ U64 reserved2; ++ // may later want to add support for resizing the buffer, or only log 100 first interrupts, etc. ++ ++ U64 reserved3; ++ U64 reserved4; ++ U64 reserved5; ++}; ++ ++#define DRV_LOG_CONTROL_command(x) ((x)->command) ++#define DRV_LOG_CONTROL_verbosities(x) ((x)->data) ++#define DRV_LOG_CONTROL_message(x) ((x)->data) ++ // Userland 'MARK' messages use the 'data' field too. 
++#define DRV_LOG_CONTROL_log_size(x) (*((U32 *)((x)->data))) ++ ++#define DRV_LOG_CONTROL_COMMAND_NONE 0 ++#define DRV_LOG_CONTROL_COMMAND_ADJUST_VERBOSITY 1 ++#define DRV_LOG_CONTROL_COMMAND_MARK 2 ++#define DRV_LOG_CONTROL_COMMAND_QUERY_SIZE 3 ++#define DRV_LOG_CONTROL_COMMAND_BENCHMARK 4 ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_types.h b/drivers/platform/x86/socperf/include/lwpmudrv_types.h +new file mode 100644 +index 000000000000..85f3d1a9e1e7 +--- /dev/null ++++ b/drivers/platform/x86/socperf/include/lwpmudrv_types.h +@@ -0,0 +1,158 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2007-2019 Intel Corporation. All Rights Reserved. 
++ * ------------------------------------------------------------------------- ++***/ ++ ++#ifndef _LWPMUDRV_TYPES_H_ ++#define _LWPMUDRV_TYPES_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#if defined(BUILD_DRV_ESX) ++//SR: added size_t def ++typedef unsigned long size_t; ++typedef unsigned long ssize_t; ++#endif ++ ++typedef unsigned char U8; ++typedef char S8; ++typedef short S16; ++typedef unsigned short U16; ++typedef unsigned int U32; ++typedef int S32; ++#if defined(DRV_OS_WINDOWS) ++typedef unsigned __int64 U64; ++typedef __int64 S64; ++#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ ++ defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || \ ++ defined(DRV_OS_FREEBSD) ++typedef unsigned long long U64; ++typedef long long S64; ++typedef unsigned long ULONG; ++typedef void VOID; ++typedef void *LPVOID; ++ ++#if defined(BUILD_DRV_ESX) ++//SR: added UWORD64 def ++typedef union _UWORD64 { ++ struct { ++ U32 low; ++ S32 hi; ++ } c; ++ S64 qword; ++} UWORD64, *PWORD64; ++#endif ++#else ++#error "Undefined OS" ++#endif ++ ++#if defined(DRV_IA32) ++typedef S32 SIOP; ++typedef U32 UIOP; ++#elif defined(DRV_EM64T) ++typedef S64 SIOP; ++typedef U64 UIOP; ++#else ++#error "Unexpected Architecture seen" ++#endif ++ ++typedef U32 DRV_BOOL; ++typedef void *PVOID; ++ ++#if !defined(__DEFINE_STCHAR__) ++#define __DEFINE_STCHAR__ ++#if defined(UNICODE) ++typedef wchar_t STCHAR; ++#define VTSA_T(x) L##x ++#else ++typedef char STCHAR; ++#define VTSA_T(x) x ++#endif ++#endif ++ ++#if defined(DRV_OS_WINDOWS) ++#include ++typedef wchar_t DRV_STCHAR; ++typedef wchar_t VTSA_CHAR; ++#else ++typedef char DRV_STCHAR; ++#endif ++ ++// ++// Handy Defines ++// ++typedef U32 DRV_STATUS; ++ ++#define MAX_STRING_LENGTH 1024 ++#define MAXNAMELEN 256 ++ ++#if defined(DRV_OS_WINDOWS) ++#define UNLINK _unlink ++#define RENAME rename ++#define WCSDUP _wcsdup ++#endif ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || \ ++ 
defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD) ++#define UNLINK unlink ++#define RENAME rename ++#endif ++ ++#if defined(DRV_OS_SOLARIS) && !defined(_KERNEL) ++//wcsdup is missing on Solaris ++#include ++#include ++ ++static inline wchar_t *solaris_wcsdup(const wchar_t *wc) ++{ ++ wchar_t *tmp = (wchar_t *)malloc((wcslen(wc) + 1) * sizeof(wchar_t)); ++ wcscpy(tmp, wc); ++ return tmp; ++} ++#define WCSDUP solaris_wcsdup ++#endif ++ ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC) ++#define WCSDUP wcsdup ++#endif ++ ++#if !defined(_WCHAR_T_DEFINED) ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || defined(DRV_OS_SOLARIS) ++#if !defined(_GNU_SOURCE) ++#define _GNU_SOURCE ++#endif ++#endif ++#endif ++ ++#if (defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID)) && !defined(__KERNEL__) ++#include ++typedef wchar_t VTSA_CHAR; ++#endif ++ ++#if (defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) || \ ++ defined(DRV_OS_SOLARIS)) && \ ++ !defined(_KERNEL) ++#include ++typedef wchar_t VTSA_CHAR; ++#endif ++ ++#define TRUE 1 ++#define FALSE 0 ++ ++#define ALIGN_4(x) (((x) + 3) & ~3) ++#define ALIGN_8(x) (((x) + 7) & ~7) ++#define ALIGN_16(x) (((x) + 15) & ~15) ++#define ALIGN_32(x) (((x) + 31) & ~31) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_version.h b/drivers/platform/x86/socperf/include/lwpmudrv_version.h +new file mode 100644 +index 000000000000..c8f709162a56 +--- /dev/null ++++ b/drivers/platform/x86/socperf/include/lwpmudrv_version.h +@@ -0,0 +1,158 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2010-2019 Intel Corporation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2010-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++ ++/* ++ * File : lwpmudrv_version.h ++ */ ++ ++#ifndef _LWPMUDRV_VERSION_H_ ++#define _LWPMUDRV_VERSION_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* ++ * @macro SOCPERF_VERSION_NODE_S ++ * @brief ++ * This structure supports versioning in Sep. The field major indicates the major version, ++ * minor indicates the minor version and api indicates the api version for the current ++ * sep build. This structure is initialized at the time when the driver is loaded. 
++ */ ++ ++typedef struct SOCPERF_VERSION_NODE_S SOCPERF_VERSION_NODE; ++typedef SOCPERF_VERSION_NODE * SOCPERF_VERSION; ++ ++struct SOCPERF_VERSION_NODE_S { ++ union { ++ U32 socperf_version; ++ struct { ++ S32 major : 8; ++ S32 minor : 8; ++ S32 api : 8; ++ S32 update : 8; ++ } s1; ++ } u1; ++}; ++ ++#define SOCPERF_VERSION_NODE_socperf_version(version) \ ++ ((version)->u1.socperf_version) ++#define SOCPERF_VERSION_NODE_major(version) ((version)->u1.s1.major) ++#define SOCPERF_VERSION_NODE_minor(version) ((version)->u1.s1.minor) ++#define SOCPERF_VERSION_NODE_api(version) ((version)->u1.s1.api) ++#define SEP_VERSION_NODE_update(version) ((version)->u1.s1.update) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++// SOCPERF VERSIONING ++ ++#define _STRINGIFY(x) #x ++#define STRINGIFY(x) _STRINGIFY(x) ++#define _STRINGIFY_W(x) L#x ++#define STRINGIFY_W(x) _STRINGIFY_W(x) ++ ++#define SOCPERF_MAJOR_VERSION 3 ++#define SOCPERF_MINOR_VERSION 0 ++#define SOCPERF_API_VERSION 0 ++#define SOCPERF_UPDATE_VERSION 0 ++#if SOCPERF_UPDATE_VERSION > 0 ++#define SOCPERF_UPDATE_STRING " Update " STRINGIFY(SOCPERF_UPDATE_VERSION) ++#else ++#define SOCPERF_UPDATE_STRING "" ++#endif ++ ++#define SOCPERF_PRODUCT_NAME "Sampling Enabling Product" ++#define PRODUCT_VERSION_DATE __DATE__ " at " __TIME__ ++#define PRODUCT_COPYRIGHT \ ++ "Copyright (C) 2011-2018 Intel Corporation. All rights reserved." ++#define PRODUCT_DISCLAIMER \ ++ "Warning: This computer program is protected under U.S. and international\n" \ ++ "copyright laws, and may only be used or copied in accordance with the terms\n" \ ++ "of the license agreement. Except as permitted by such license, no part\n" \ ++ "of this computer program may be reproduced, stored in a retrieval system,\n" \ ++ "or transmitted in any form or by any means without the express written consent\n" \ ++ "of Intel Corporation." 
++ ++#define PRODUCT_VERSION "5.0" ++ ++#define SOCPERF_NAME "socperf" ++#define SOCPERF_NAME_W L"socperf" ++ ++#define SOCPERF_MSG_PREFIX \ ++ SOCPERF_NAME "" STRINGIFY(SOCPERF_MAJOR_VERSION) "_" STRINGIFY( \ ++ SOCPERF_MINOR_VERSION) ":" ++#define SOCPERF_VERSION_STR \ ++ STRINGIFY(SOCPERF_MAJOR_VERSION) \ ++ "." STRINGIFY(SOCPERF_MINOR_VERSION) "." STRINGIFY( \ ++ SOCPERF_API_VERSION) ++ ++// #if defined(DRV_OS_WINDOWS) ++// #define SOCPERF_DRIVER_NAME SOCPERF_NAME STRINGIFY(SOCPERF_MAJOR_VERSION) ++// #define SOCPERF_DRIVER_NAME_W SOCPERF_NAME_W STRINGIFY_W(SOCPERF_MAJOR_VERSION) ++// #define SOCPERF_DEVICE_NAME SOCPERF_DRIVER_NAME ++// #endif ++ ++#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ ++ defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD) ++#define SOCPERF_DRIVER_NAME SOCPERF_NAME "" STRINGIFY(SOCPERF_MAJOR_VERSION) ++#define SOCPERF_SAMPLES_NAME SOCPERF_DRIVER_NAME "_s" ++#define SOCPERF_DEVICE_NAME "/dev/" SOCPERF_DRIVER_NAME ++#endif ++ ++// #if defined(DRV_OS_MAC) ++// #define SOCPERF_DRIVER_NAME SOCPERF_NAME "" STRINGIFY(SOCPERF_MAJOR_VERSION) ++// #define SOCPERF_SAMPLES_NAME SOCPERF_DRIVER_NAME "_s" ++// #define SOCPERF_DEVICE_NAME SOCPERF_DRIVER_NAME ++// #endif ++ ++#endif +diff --git a/drivers/platform/x86/socperf/include/rise_errors.h b/drivers/platform/x86/socperf/include/rise_errors.h +new file mode 100644 +index 000000000000..18e9561156e0 +--- /dev/null ++++ b/drivers/platform/x86/socperf/include/rise_errors.h +@@ -0,0 +1,326 @@ ++/*** ++ * ------------------------------------------------------------------------- ++ * INTEL CORPORATION PROPRIETARY INFORMATION ++ * This software is supplied under the terms of the accompanying license ++ * agreement or nondisclosure agreement with Intel Corporation and may not ++ * be copied or disclosed except in accordance with the terms of that ++ * agreement. ++ * Copyright(C) 2004-2019 Intel Corporation. All Rights Reserved. 
++ * ------------------------------------------------------------------------- ++***/ ++ ++#ifndef _RISE_ERRORS_H_ ++#define _RISE_ERRORS_H_ ++ ++// ++// NOTE: ++// ++// 1) Before adding an error code, first make sure the error code doesn't ++// already exist. If it does, use that, don't create a new one just because... ++// ++// 2) When adding an error code, add it to the end of the list. Don't insert ++// error numbers in the middle of the list! For backwards compatibility, ++// we don't want the numbers changing unless we really need them ++// to for some reason (like we want to switch to negative error numbers) ++// ++// 3) Change the VT_LAST_ERROR_CODE macro to point to the (newly added) ++// last error. This is done so SW can verify the number of error codes ++// possible matches the number of error strings it has ++// ++// 4) Don't forget to update the error string table to include your ++// error code (rise.c). Since the goal is something human readable ++// you don't need to use abbreviations in there (ie. 
don't say "bad param", ++// say "bad parameter" or "illegal parameter passed in") ++// ++// 5) Compile and run the test_rise app (in the test_rise directory) to ++// verify things are still working ++// ++// ++ ++#define VT_SUCCESS 0 ++#define VT_FAILURE -1 ++ ++/*************************************************************/ ++ ++#define VT_INVALID_MAX_SAMP 1 ++#define VT_INVALID_SAMP_PER_BUFF 2 ++#define VT_INVALID_SAMP_INTERVAL 3 ++#define VT_INVALID_PATH 4 ++#define VT_TB5_IN_USE 5 ++#define VT_INVALID_NUM_EVENTS 6 ++#define VT_INTERNAL_ERROR 8 ++#define VT_BAD_EVENT_NAME 9 ++#define VT_NO_SAMP_SESSION 10 ++#define VT_NO_EVENTS 11 ++#define VT_MULTIPLE_RUNS 12 ++#define VT_NO_SAM_PARAMS 13 ++#define VT_SDB_ALREADY_EXISTS 14 ++#define VT_SAMPLING_ALREADY_STARTED 15 ++#define VT_TBS_NOT_SUPPORTED 16 ++#define VT_INVALID_SAMPARAMS_SIZE 17 ++#define VT_INVALID_EVENT_SIZE 18 ++#define VT_ALREADY_PROCESSES 19 ++#define VT_INVALID_EVENTS_PATH 20 ++#define VT_INVALID_LICENSE 21 ++ ++/******************************************************/ ++//SEP error codes ++ ++#define VT_SAM_ERROR 22 ++#define VT_SAMPLE_FILE_ALREADY_MAPPED 23 ++#define VT_INVALID_SAMPLE_FILE 24 ++#define VT_UNKNOWN_SECTION_NUMBER 25 ++#define VT_NO_MEMORY 26 ++#define VT_ENV_VAR_NOT_FOUND 27 ++#define VT_SAMPLE_FILE_NOT_MAPPED 28 ++#define VT_BUFFER_OVERFLOW 29 ++#define VT_USER_OP_COMPLETED 30 ++#define VT_BINARY_NOT_FOUND 31 ++#define VT_ISM_NOT_INITIALIZED 32 ++#define VT_NO_SYMBOLS 33 ++#define VT_SAMPLE_FILE_MAPPING_ERROR 34 ++#define VT_BUFFER_NULL 35 ++#define VT_UNEXPECTED_NULL_PTR 36 ++#define VT_BINARY_LOAD_FAILED 37 ++#define VT_FUNCTION_NOT_FOUND_IN_BINARY 38 ++#define VT_ENTRY_NOT_FOUND 39 ++#define VT_SEP_SYNTAX_ERROR 40 ++#define VT_SEP_OPTIONS_ERROR 41 ++#define VT_BAD_EVENT_MODIFIER 42 ++#define VT_INCOMPATIBLE_PARAMS 43 ++#define VT_FILE_OPEN_FAILED 44 ++#define VT_EARLY_EXIT 45 ++#define VT_TIMEOUT_RETURN 46 ++#define VT_NO_CHILD_PROCESS 47 ++#define VT_DRIVER_RUNNING 48 ++#define 
VT_DRIVER_STOPPED 49 ++#define VT_MULTIPLE_RUNS_NEEDED 50 ++#define VT_QUIT_IMMEDIATE 51 ++#define VT_DRIVER_INIT_FAILED 52 ++#define VT_NO_TB5_CREATED 53 ++#define VT_NO_WRITE_PERMISSION 54 ++#define VT_DSA_INIT_FAILED 55 ++#define VT_INVALID_CPU_MASK 56 ++#define VT_SAMP_IN_RUNNING_STATE 57 ++#define VT_SAMP_IN_PAUSE_STATE 58 ++#define VT_SAMP_IN_STOP_STATE 59 ++#define VT_SAMP_NO_SESSION 60 ++#define VT_NOT_CONFIGURED 61 ++#define VT_LAUNCH_BUILD64_FAILED 62 ++#define VT_BAD_PARAMETER 63 ++#define VT_ISM_INIT_FAILED 64 ++#define VT_INVALID_STATE_TRANS 65 ++#define VT_EARLY_EXIT_N_CANCEL 66 ++#define VT_EVT_MGR_NOT_INIT 67 ++#define VT_ISM_SECTION_ENUM_FAILED 68 ++#define VT_VG_PARSER_ERROR 69 ++#define VT_MISSING_VALUE_FOR_TOKEN 70 ++#define VT_EMPTY_SAMPLE_FILE_NAME 71 ++#define VT_UNEXPECTED_VALUE 72 ++#define VT_NOT_IMPLEMENTED 73 ++#define VT_MISSING_COL_DEPNDNCIES 74 ++#define VT_DEP_COL_NOT_LIB_DEFINED 75 ++#define VT_COL_NOT_REG_WITH_LIB 76 ++#define VT_SECTION_ALREADY_IN_USE 77 ++#define VT_SECTION_NOT_EXIST 78 ++#define VT_STREAM_NOT_EXIST 79 ++#define VT_INVALID_STREAM 80 ++#define VT_STREAM_ALREADY_IN_USE 81 ++#define VT_DATA_DESC_NOT_EXIST 82 ++#define VT_INVALID_ERROR_CODE 83 ++#define VT_INCOMPATIBLE_VERSION 84 ++#define VT_LEGACY_DATA_NOT_EXIST 85 ++#define VT_INVALID_READ_START 86 ++#define VT_DRIVER_OPEN_FAILED 87 ++#define VT_DRIVER_IOCTL_FAILED 88 ++#define VT_SAMP_FILE_CREATE_FAILED 89 ++#define VT_MODULE_FILE_CREATE_FAILED 90 ++#define VT_INVALID_SAMPLE_FILE_NAME 91 ++#define VT_INVALID_MODULE_FILE_NAME 92 ++#define VT_FORK_CHILD_PROCESS_FAILED 93 ++#define VT_UNEXPECTED_MISMATCH_IN_STRING_TYPES 94 ++#define VT_INCOMPLETE_TB5_ENCOUNTERED 95 ++#define VT_ERR_CONVERSION_FROM_STRING_2_NUMBER 96 ++#define VT_INVALID_STRING 97 ++#define VT_UNSUPPORTED_DATA_SIZE 98 ++#define VT_TBRW_INIT_FAILED 99 ++#define VT_PLUGIN_UNLOAD 100 ++#define VT_PLUGIN_ENTRY_NULL 101 ++#define VT_UNKNOWN_PLUGIN 102 ++#define VT_BUFFER_TOO_SMALL 103 ++#define 
VT_CANNOT_MODIFY_COLUMN 104 ++#define VT_MULT_FILTERS_NOT_ALLOWED 105 ++#define VT_ADDRESS_IN_USE 106 ++#define VT_NO_MORE_MMAPS 107 ++#define VT_MAX_PAGES_IN_DS_EXCEEDED 108 ++#define VT_INVALID_COL_TYPE_IN_GROUP_INFO 109 ++#define VT_AGG_FN_ON_VARCHAR_NOT_SUPP 110 ++#define VT_INVALID_ACCESS_PERMS 111 ++#define VT_NO_DATA_TO_DISPLAY 112 ++#define VT_TB5_IS_NOT_BOUND 113 ++#define VT_MISSING_GROUP_BY_COLUMN 114 ++#define VT_SMRK_MAX_STREAMS_EXCEEDED 115 ++#define VT_SMRK_STREAM_NOT_CREATED 116 ++#define VT_SMRK_NOT_IMPL 117 ++#define VT_SMRK_TYPE_NOT_IMPL 118 ++#define VT_SMRK_TYPE_ALREADY_SET 119 ++#define VT_SMRK_NO_STREAM 120 ++#define VT_SMRK_INVALID_STREAM_TYPE 121 ++#define VT_SMRK_STREAM_NOT_FOUND 122 ++#define VT_SMRK_FAIL 123 ++#define VT_SECTION_NOT_READABLE 124 ++#define VT_SECTION_NOT_WRITEABLE 125 ++#define VT_GLOBAL_SECTION_NOT_CLOSED 126 ++#define VT_STREAM_SECTION_NOT_CLOSED 127 ++#define VT_STREAM_NOT_CLOSED 128 ++#define VT_STREAM_NOT_BOUND 129 ++#define VT_NO_COLS_SPECIFIED 130 ++#define VT_NOT_ALL_SECTIONS_CLOSED 131 ++#define VT_SMRK_INVALID_PTR 132 ++#define VT_UNEXPECTED_BIND_MISMATCH 133 ++#define VT_WIN_TIMER_ERROR 134 ++#define VT_ONLY_SNGL_DEPNDT_COL_ALLWD 135 ++#define VT_BAD_MODULE 136 ++#define VT_INPUT_SOURCE_INFO_NOT_SET 137 ++#define VT_UNSUPPORTED_TIME_GRAN 138 ++#define VT_NO_SAMPLES_COLLECTED 139 ++#define VT_INVALID_CPU_TYPE_VERSION 140 ++#define VT_BIND_UNEXPECTED_1STMODREC 141 ++#define VT_BIND_MODULES_NOT_SORTED 142 ++#define VT_UNEXPECTED_NUM_CPUIDS 143 ++#define VT_UNSUPPORTED_ARCH_TYPE 144 ++#define VT_NO_DATA_TO_WRITE 145 ++#define VT_EM_TIME_SLICE_TOO_SMALL 146 ++#define VT_EM_TOO_MANY_EVENT_GROUPS 147 ++#define VT_EM_ZERO_GROUPS 148 ++#define VT_EM_NOT_SUPPORTED 149 ++#define VT_PMU_IN_USE 150 ++#define VT_TOO_MANY_INTERRUPTS 151 ++#define VT_MAX_SAMPLES_REACHED 152 ++#define VT_MODULE_COLLECTION_FAILED 153 ++#define VT_INCOMPATIBLE_DRIVER 154 ++#define VT_UNABLE_LOCATE_TRIGGER_EVENT 155 ++#define 
VT_COMMAND_NOT_HANDLED 156 ++#define VT_DRIVER_VERSION_MISMATCH 157 ++#define VT_MAX_MARKERS 158 ++#define VT_DRIVER_COMM_FAILED 159 ++#define VT_CHIPSET_CONFIG_FAILED 160 ++#define VT_BAD_DATA_BASE 161 ++#define VT_PAX_SERVICE_NOT_CONNECTED 162 ++#define VT_PAX_SERVICE_ERROR 163 ++#define VT_PAX_PMU_RESERVE_FAILED 164 ++#define VT_INVALID_CPU_INFO_TYPE 165 ++#define VT_CACHE_DOESNT_EXIST 166 ++#define VT_UNSUPPORTED_UNCORE_ARCH_TYPE 167 ++#define VT_EXCEEDED_MAX_EVENTS 168 ++#define VT_MARKER_TIMER_FAILED 169 ++#define VT_PAX_PMU_UNRESERVE_FAILED 170 ++#define VT_MULTIPLE_PROCESSES_FOUND 171 ++#define VT_NO_SUCH_PROCESS_FOUND 172 ++#define VT_PCL_NOT_ENABLED 173 ++#define VT_PCL_UID_CHECK 174 ++#define VT_DEL_RESULTS_DIR_FAILED 175 ++#define VT_NO_VALID_EVENTS 176 ++#define VT_INVALID_EVENT 177 ++#define VT_EVENTS_COUNTED 178 ++#define VT_EVENTS_COLLECTED 179 ++#define VT_UNSUPPORTED_GFX_ARCH_TYPE 180 ++#define VT_GFX_CONFIG_FAILED 181 ++#define VT_UNSUPPORTED_NON_NATIVE_MODE 182 ++#define VT_INVALID_DEVICE 183 ++#define VT_ENV_SETUP_FAILED 184 ++#define VT_RESUME_NOT_RECEIVED 185 ++#define VT_UNSUPPORTED_PWR_ARCH_TYPE 186 ++#define VT_PWR_CONFIG_FAILED 187 ++#define VT_NMI_WATCHDOG_FOUND 188 ++#define VT_NO_PMU_RESOURCES 189 ++#define VT_MIC_CARD_NOT_ONLINE 190 ++#define VT_FREEZE_ON_PMI_NOT_AVAIL 191 ++#define VT_FLUSH_FAILED 192 ++#define VT_FLUSH_SUCCESS 193 ++#define VT_WRITE_ERROR 194 ++#define VT_NO_SPACE 195 ++#define VT_MSR_ACCESS_ERROR 196 ++#define VT_PEBS_NOT_SUPPORTED 197 ++#define VT_LUA_PARSE_ERROR 198 ++#define VT_COMM_CONNECTION_CLOSED_BY_REMOTE 199 ++#define VT_COMM_LISTEN_ERROR 200 ++#define VT_COMM_BIND_ERROR 201 ++#define VT_COMM_ACCEPT_ERROR 202 ++#define VT_COMM_SEND_ERROR 203 ++#define VT_COMM_RECV_ERROR 204 ++#define VT_COMM_SOCKET_ERROR 205 ++#define VT_COMM_CONNECT_ERROR 206 ++#define VT_TARGET_COLLECTION_MISMATCH 207 ++#define VT_INVALID_SEP_DRIVER_LOG 208 ++#define VT_COMM_PROTOCOL_VERSION_MISTMATCH 209 ++#define 
VT_SAMP_IN_UNEXPECTED_STATE 210 ++#define VT_COMM_RECV_BUF_RESIZE_ERROR 211 ++ ++/* ++ * define error code for checking on async marker request ++ */ ++#define VT_INVALID_MARKER_ID -1 ++ ++/* ++ * ************************************************************ ++ * NOTE: after adding new error code(s), remember to also ++ * update the following: ++ * 1) VT_LAST_ERROR_CODE below ++ * 2) viewer/sampling_utils/src/rise.c ++ * 3) collector/controller/sep_msg_catalog.xmc ++ * 4) qnx_kernel/sepdk/include/rise_errors.h ++ * ++ * ************************************************************ ++ */ ++ ++// ++// To make error checking easier, the special VT_LAST_ERROR_CODE ++// should be set to whatever is the last error on the list above ++// ++#define VT_LAST_ERROR_CODE VT_COMM_RECV_BUF_RESIZE_ERROR ++ ++// ++// Define a macro to determine success or failure. Users of this ++// error header file should use the macros instead of direct ++// checks so that we can change the error numbers in the future ++// (such as making negative numbers be an error indication and positive ++// numbers being a success with a value indication) ++// ++#define VTSA_SUCCESS(x) ((x) == VT_SUCCESS) ++#define VTSA_FAILED(x) (!VTSA_SUCCESS(x)) ++ ++// ++// These should be deprecated, but we'll keep them here just in case ++// ++#define SEP_IS_SUCCESS(x) VTSA_SUCCESS(x) ++#define SEP_IS_FAILED(x) VTSA_FAILED(x) ++ ++/************************************************************* ++ * API Error Codes ++ *************************************************************/ ++#define VTAPI_INVALID_MAX_SAMP VT_INVALID_MAX_SAMP ++#define VTAPI_INVALID_SAMP_PER_BUFF VT_INVALID_SAMP_PER_BUFF ++#define VTAPI_INVALID_SAMP_INTERVAL VT_INVALID_SAMP_INTERVAL ++#define VTAPI_INVALID_PATH VT_INVALID_PATH ++#define VTAPI_TB5_IN_USE VT_TB5_IN_USE ++#define VTAPI_INVALID_NUM_EVENTS VT_INVALID_NUM_EVENTS ++#define VTAPI_INTERNAL_ERROR VT_INTERNAL_ERROR ++#define VTAPI_BAD_EVENT_NAME VT_BAD_EVENT_NAME ++#define 
VTAPI_NO_SAMP_SESSION VT_NO_SAMP_SESSION ++#define VTAPI_NO_EVENTS VT_NO_EVENTS ++#define VTAPI_MULTIPLE_RUNS VT_MULTIPLE_RUNS ++#define VTAPI_NO_SAM_PARAMS VT_NO_SAM_PARAMS ++#define VTAPI_SDB_ALREADY_EXISTS VT_SDB_ALREADY_EXISTS ++#define VTAPI_SAMPLING_ALREADY_STARTED VT_SAMPLING_ALREADY_STARTED ++#define VTAPI_TBS_NOT_SUPPORTED VT_TBS_NOT_SUPPORTED ++#define VTAPI_INVALID_SAMPARAMS_SIZE VT_INVALID_SAMPARAMS_SIZE ++#define VTAPI_INVALID_EVENT_SIZE VT_INVALID_EVENT_SIZE ++#define VTAPI_ALREADY_PROCESSES VT_ALREADY_PROCESSES ++#define VTAPI_INVALID_EVENTS_PATH VT_INVALID_EVENTS_PATH ++#define VTAPI_INVALID_LICENSE VT_INVALID_LICENSE ++ ++typedef int RISE_ERROR; ++typedef void *RISE_PTR; ++ ++#endif +diff --git a/drivers/platform/x86/socperf/npk_uncore.c b/drivers/platform/x86/socperf/npk_uncore.c +new file mode 100644 +index 000000000000..d8b3bf040453 +--- /dev/null ++++ b/drivers/platform/x86/socperf/npk_uncore.c +@@ -0,0 +1,502 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * *********************************************************************************************** ++ */ ++ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "inc/socperfdrv.h" ++#include "inc/ecb_iterators.h" ++#include "inc/pci.h" ++#include "inc/control.h" ++#include "inc/npk_uncore.h" ++ ++extern LWPMU_DEVICE device_uncore; ++static U32 counter_overflow[SOC_NPK_COUNTER_MAX_COUNTERS]; ++static U64 counter_virtual_address; ++static U64 mchbar_virtual_address; ++static U64 mchbar_offset; ++ ++/*! ++ * @fn static ULONG read_From_Register(U64 bar_virtual_address, ++ U64 mmio_offset, ++ U32 *data_val) ++ * ++ * @brief Reads register programming info ++ * ++ * @param bar_virtual_address - memory address ++ * mmio_offset - offset of the register ++ * data_val - register value read ++ * ++ * @return data from the counter register ++ * ++ * Special Notes: ++ */ ++static void read_From_Register(U64 bar_virtual_address, U64 mmio_offset, ++ U32 *data_val) ++{ ++ if (data_val) { ++ *data_val = readl((void __iomem *)((char *)(UIOP)(bar_virtual_address) + ++ mmio_offset)); ++ } ++} ++ ++/*! ++ * @fn static ULONG write_To_Register(U64 bar_virtual_address, ++ U64 mmio_offset, ++ U32 value) ++ * ++ * @brief Write register programming info ++ * ++ * @param bar_virtual_address - memory address ++ * mmio_offset - offset of the register ++ * value - register value to be written ++ * ++ * @return none ++ * ++ * Special Notes: ++ */ ++static void write_To_Register(U64 bar_virtual_address, U64 mmio_offset, ++ ULONG value) ++{ ++ U32 read_reg = 0; ++ ++ writel(value, ++ (void __iomem *)(((char *)(UIOP)bar_virtual_address) + mmio_offset)); ++ read_From_Register(bar_virtual_address, mmio_offset, &read_reg); ++} ++ ++/*! 
++ * @fn static VOID uncore_Reset_Counters(U32 dev_idx) ++ * ++ * @brief Reset counters ++ * ++ * @param dev_idx - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Reset_Counters(U32 dev_idx) ++{ ++ U32 data_reg = 0; ++ ++ if (counter_virtual_address) { ++ FOR_EACH_PCI_REG_RAW(pecb, i, dev_idx) ++ { ++ if (ECB_entries_reg_type(pecb, i) == ++ PMU_REG_EVENT_SELECT) { ++ data_reg = ++ i + ECB_operations_register_len( ++ pecb, PMU_OPERATION_WRITE); ++ if (ECB_entries_reg_type(pecb, data_reg) == ++ PMU_REG_DATA) { ++ write_To_Register( ++ counter_virtual_address, ++ ECB_entries_reg_offset( ++ pecb, data_reg), ++ (ULONG)0); ++ } ++ write_To_Register(counter_virtual_address, ++ ECB_entries_reg_offset(pecb, ++ i), ++ (ULONG)SOC_NPK_UNCORE_STOP); ++ } ++ } ++ END_FOR_EACH_PCI_REG_RAW; ++ } ++} ++ ++/*! ++ * @fn static VOID uncore_Write_PMU(VOID*) ++ * ++ * @brief Initial write of PMU registers ++ * Walk through the entries and write the value of the register accordingly. 
++ * When current_group = 0, then this is the first time this routine is called, ++ * ++ * @param param - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Write_PMU(VOID *param) ++{ ++ U32 dev_idx = *((U32 *)param); ++ ECB pecb; ++ DRV_PCI_DEVICE_ENTRY dpden; ++ U32 pci_address; ++ U32 bar_lo; ++ U64 bar_hi; ++ U64 final_bar; ++ U64 physical_address; ++ U32 dev_index = 0; ++ S32 bar_list[SOC_NPK_UNCORE_MAX_PCI_DEVICES]; ++ U32 bar_index = 0; ++ U64 virtual_address = 0; ++ U32 bar_name = 0; ++ DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; ++ U32 next_bar_offset = 0; ++ U64 mmio_offset = 0; ++ U32 i = 0; ++ U32 map_size = 0; ++ U32 cur_grp; ++ ++ if (device_uncore == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); ++ return; ++ } ++ cur_grp = LWPMU_DEVICE_cur_group(device_uncore); ++ ++ pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; ++ if (pecb == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: null pecb!\n"); ++ return; ++ } ++ ++ for (dev_index = 0; dev_index < SOC_NPK_UNCORE_MAX_PCI_DEVICES; ++ dev_index++) { ++ bar_list[dev_index] = -1; ++ } ++ ++ // initialize the per-counter overflow numbers ++ for (i = 0; i < SOC_NPK_COUNTER_MAX_COUNTERS; i++) { ++ counter_overflow[i] = 0; ++ socperf_pcb[0].last_uncore_count[i] = 0; ++ } ++ ++ ECB_pcidev_entry_list(pecb) = (DRV_PCI_DEVICE_ENTRY)( ++ (S8 *)pecb + ECB_pcidev_list_offset(pecb)); ++ dpden = ECB_pcidev_entry_list(pecb); ++ ++ uncore_Reset_Counters(dev_idx); ++ ++ SOCPERF_PRINT_DEBUG( ++ "Inside VISA Driver Write PMU: Number of entries=%d\n", ++ ECB_num_pci_devices(pecb)); ++ for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); ++ dev_index++) { ++ curr_pci_entry = &dpden[dev_index]; ++ bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); ++ mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ curr_pci_entry); ++ ++ // UNC_MMIO programming ++ if (bar_list[bar_name] != -1) { ++ bar_index = bar_list[bar_name]; ++ virtual_address = 
DRV_PCI_DEVICE_ENTRY_virtual_address( ++ &dpden[bar_index]); ++ DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = ++ DRV_PCI_DEVICE_ENTRY_virtual_address( ++ &dpden[bar_index]); ++ write_To_Register(virtual_address, mmio_offset, ++ (U32)DRV_PCI_DEVICE_ENTRY_value( ++ curr_pci_entry)); ++ continue; ++ } ++ ++ pci_address = FORM_PCI_ADDR( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry)); ++ bar_lo = SOCPERF_PCI_Read_Ulong(pci_address); ++ SOCPERF_PRINT_DEBUG( ++ "The bus=%x device=%x function=%x offset=%x\n", ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry)); ++ next_bar_offset = ++ DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry) + ++ SOC_NPK_UNCORE_NEXT_ADDR_OFFSET; ++ pci_address = FORM_PCI_ADDR( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ next_bar_offset); ++ bar_hi = SOCPERF_PCI_Read_Ulong(pci_address); ++ SOCPERF_PRINT_DEBUG( ++ "The bus=%x device=%x function=%x offset=%x\n", ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ next_bar_offset); ++ final_bar = (bar_hi << SOC_NPK_UNCORE_BAR_ADDR_SHIFT) | bar_lo; ++ if (bar_name == UNC_MCHBAR) { ++ final_bar &= SOC_NPK_UNCORE_MCHBAR_ADDR_MASK; ++ map_size = SOC_NPK_UNCORE_MCHBAR_MMIO_PAGE_SIZE; ++ } else { ++ final_bar &= SOC_NPK_UNCORE_BAR_ADDR_MASK; ++ map_size = SOC_NPK_UNCORE_NPK_BAR_MMIO_PAGE_SIZE; ++ } ++ DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry) = final_bar; ++ physical_address = ++ DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry); ++ ++ if (physical_address) { ++ 
DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = ++ (U64)(UIOP)ioremap_nocache(physical_address, ++ map_size); ++ virtual_address = DRV_PCI_DEVICE_ENTRY_virtual_address( ++ curr_pci_entry); ++ ++ write_To_Register(virtual_address, mmio_offset, ++ (U32)DRV_PCI_DEVICE_ENTRY_value( ++ curr_pci_entry)); ++ bar_list[bar_name] = dev_index; ++ if (counter_virtual_address == 0) { ++ counter_virtual_address = virtual_address; ++ } ++ if (mchbar_virtual_address == 0 && ++ bar_name == UNC_MCHBAR) { ++ mchbar_virtual_address = virtual_address; ++ mchbar_offset = mmio_offset; ++ } ++ } ++ } ++} ++ ++/*! ++ * @fn static VOID uncore_Disable_PMU(PVOID) ++ * ++ * @brief Unmap the virtual address when sampling/driver stops ++ * ++ * @param param - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Disable_PMU(PVOID param) ++{ ++ U32 dev_idx = *((U32 *)param); ++ ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_PREPARE_STOP) { ++ uncore_Reset_Counters(dev_idx); ++ if (mchbar_virtual_address) { ++ write_To_Register(mchbar_virtual_address, mchbar_offset, ++ 0x0); ++ iounmap((void __iomem *)(UIOP)(mchbar_virtual_address)); ++ SOCPERF_PRINT_DEBUG("Unmapping MCHBAR address=%x\n", ++ mchbar_virtual_address); ++ } ++ if (counter_virtual_address) { ++ iounmap((void __iomem *)(UIOP)(counter_virtual_address)); ++ SOCPERF_PRINT_DEBUG("Unmapping NPKBAR address=%x\n", ++ counter_virtual_address); ++ } ++ counter_virtual_address = 0; ++ mchbar_virtual_address = 0; ++ mchbar_offset = 0; ++ } ++} ++ ++/*! ++ * @fn static VOID uncore_Initialize(PVOID) ++ * ++ * @brief Initialize any registers or addresses ++ * ++ * @param param ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Initialize(VOID *param) ++{ ++ counter_virtual_address = 0; ++ mchbar_virtual_address = 0; ++ mchbar_offset = 0; ++} ++ ++/*! 
++ * @fn static VOID uncore_Clean_Up(PVOID) ++ * ++ * @brief Reset any registers or addresses ++ * ++ * @param param ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Clean_Up(VOID *param) ++{ ++ counter_virtual_address = 0; ++ mchbar_virtual_address = 0; ++ mchbar_offset = 0; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn uncore_Read_Data() ++ * ++ * @param None ++ * ++ * @return None No return needed ++ * ++ * @brief Read the counters ++ * ++ */ ++static VOID uncore_Read_Data(PVOID data_buffer) ++{ ++ U32 event_id = 0; ++ U64 *data; ++ int data_index; ++ U32 data_val = 0; ++ U32 data_reg = 0; ++ U64 total_count = 0; ++ U32 event_index = 0; ++ U32 cur_grp; ++ ++ if (device_uncore == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); ++ return; ++ } ++ cur_grp = LWPMU_DEVICE_cur_group(device_uncore); ++ ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_UNINITIALIZED || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_IDLE || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_RESERVED || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_PREPARE_STOP || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_STOPPED) { ++ SOCPERF_PRINT_ERROR("ERROR: RETURING EARLY from Read_Data\n"); ++ return; ++ } ++ ++ if (data_buffer == NULL) { ++ return; ++ } ++ ++ data = (U64 *)data_buffer; ++ data_index = 0; ++ ++ // Write GroupID ++ data[data_index] = cur_grp + 1; ++ // Increment the data index as the event id starts from zero ++ data_index++; ++ ++ FOR_EACH_PCI_REG_RAW(pecb, i, dev_idx) ++ { ++ if (ECB_entries_reg_type(pecb, i) == PMU_REG_EVENT_SELECT) { ++ write_To_Register(counter_virtual_address, ++ ECB_entries_reg_offset(pecb, i), ++ (ULONG)SOC_NPK_UNCORE_SAMPLE_DATA); ++ ++ data_reg = i + ECB_operations_register_len( ++ pecb, PMU_OPERATION_WRITE); ++ if (ECB_entries_reg_type(pecb, data_reg) 
== ++ PMU_REG_DATA) { ++ read_From_Register( ++ counter_virtual_address, ++ ECB_entries_reg_offset(pecb, data_reg), ++ &data_val); ++ if (data_val < ++ socperf_pcb[0] ++ .last_uncore_count[event_index]) { ++ counter_overflow[event_index]++; ++ } ++ socperf_pcb[0].last_uncore_count[event_index] = ++ data_val; ++ total_count = data_val + ++ counter_overflow[event_index] * ++ SOC_NPK_COUNTER_MAX_COUNT; ++ event_index++; ++ data[data_index + event_id] = total_count; ++ SOCPERF_PRINT_DEBUG("DATA[%d]=%llu\n", event_id, ++ total_count); ++ event_id++; ++ } ++ } ++ } ++ END_FOR_EACH_PCI_REG_RAW; ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++DISPATCH_NODE npk_dispatch = { ++ .init = uncore_Initialize, // initialize ++ .fini = NULL, // destroy ++ .write = uncore_Write_PMU, // write ++ .freeze = uncore_Disable_PMU, // freeze ++ .restart = NULL, // restart ++ .read_data = NULL, // read ++ .check_overflow = NULL, // check for overflow ++ .swap_group = NULL, ++ .read_lbrs = NULL, ++ .clean_up = uncore_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, //read_counts ++ .check_overflow_gp_errata = NULL, ++ .read_power = NULL, ++ .platform_info = NULL, ++ .trigger_read = NULL, ++ .read_current_data = uncore_Read_Data, ++ .create_mem = NULL, ++ .check_status = NULL, ++ .read_mem = NULL, ++ .stop_mem = NULL ++}; +diff --git a/drivers/platform/x86/socperf/pci.c b/drivers/platform/x86/socperf/pci.c +new file mode 100644 +index 000000000000..c41fc4cfc20a +--- /dev/null ++++ b/drivers/platform/x86/socperf/pci.c +@@ -0,0 +1,188 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "rise_errors.h" ++#include "lwpmudrv_ecb.h" ++#include "socperfdrv.h" ++#include "pci.h" ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern int SOCPERF_PCI_Read_From_Memory_Address(addr, val) ++ * ++ * @param addr - physical address in mmio ++ * @param *value - value at this address ++ * ++ * @return status ++ * ++ * @brief Read memory mapped i/o physical location ++ * ++ */ ++int SOCPERF_PCI_Read_From_Memory_Address(U32 addr, U32 *val) ++{ ++ U32 aligned_addr, offset, value; ++ PVOID base; ++ ++ if (addr <= 0) { ++ return OS_INVALID; ++ } ++ ++ SOCPERF_PRINT_DEBUG( ++ "SOCPERF_PCI_Read_From_Memory_Address: reading physical address:%x\n", ++ addr); ++ offset = addr & ~PAGE_MASK; ++ aligned_addr = addr & PAGE_MASK; ++ SOCPERF_PRINT_DEBUG( ++ "SOCPERF_PCI_Read_From_Memory_Address: aligned physical address:%x,offset:%x\n", ++ aligned_addr, offset); ++ ++ base = (PVOID)ioremap_nocache(aligned_addr, PAGE_SIZE); ++ if (base == NULL) { ++ return OS_INVALID; ++ } ++ ++ value = readl((void __iomem *)(base + offset)); ++ *val = value; ++ SOCPERF_PRINT_DEBUG( ++ "SOCPERF_PCI_Read_From_Memory_Address: value at this physical address:%x\n", ++ value); ++ ++ 
iounmap((void __iomem *)base); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern int SOCPERF_PCI_Write_To_Memory_Address(addr, val) ++ * ++ * @param addr - physical address in mmio ++ * @param value - value to be written ++ * ++ * @return status ++ * ++ * @brief Write to memory mapped i/o physical location ++ * ++ */ ++int SOCPERF_PCI_Write_To_Memory_Address(U32 addr, U32 val) ++{ ++ U32 aligned_addr, offset; ++ PVOID base; ++ ++ if (addr <= 0) { ++ return OS_INVALID; ++ } ++ ++ SOCPERF_PRINT_DEBUG( ++ "SOCPERF_PCI_Write_To_Memory_Address: writing physical address:%x with value:%x\n", ++ addr, val); ++ offset = addr & ~PAGE_MASK; ++ aligned_addr = addr & PAGE_MASK; ++ SOCPERF_PRINT_DEBUG( ++ "SOCPERF_PCI_Write_To_Memory_Address: aligned physical address:%x,offset:%x\n", ++ aligned_addr, offset); ++ ++ base = (PVOID)ioremap_nocache(aligned_addr, PAGE_SIZE); ++ if (base == NULL) { ++ return OS_INVALID; ++ } ++ ++ writel(val, (void __iomem *)(base + offset)); ++ ++ iounmap((void __iomem *)base); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern int SOCPERF_PCI_Read_Ulong(pci_address) ++ * ++ * @param pci_address - PCI configuration address ++ * ++ * @return value at this location ++ * ++ * @brief Reads a ULONG from PCI configuration space ++ * ++ */ ++int SOCPERF_PCI_Read_Ulong(U32 pci_address) ++{ ++ U32 temp_ulong = 0; ++ ++ outl(pci_address, PCI_ADDR_IO); ++ temp_ulong = inl(PCI_DATA_IO); ++ ++ return temp_ulong; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn extern int SOCPERF_PCI_Write_Ulong(addr, val) ++ * ++ * @param pci_address - PCI configuration address ++ * @param value - Value to be written ++ * ++ * @return status ++ * ++ * @brief Writes a ULONG to PCI configuration space ++ * ++ */ ++void SOCPERF_PCI_Write_Ulong(U32 pci_address, U32 value) ++{ ++ outl(pci_address, PCI_ADDR_IO); ++ outl(value, PCI_DATA_IO); ++} +diff --git a/drivers/platform/x86/socperf/soc_uncore.c b/drivers/platform/x86/socperf/soc_uncore.c +new file mode 100644 +index 000000000000..8313dc754a08 +--- /dev/null ++++ b/drivers/platform/x86/socperf/soc_uncore.c +@@ -0,0 +1,901 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2013-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * *********************************************************************************************** ++ */ ++ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++ ++#include "socperfdrv.h" ++#include "control.h" ++#include "soc_uncore.h" ++#include "inc/ecb_iterators.h" ++#include "inc/pci.h" ++ ++#if defined(PCI_HELPERS_API) ++#include ++#elif defined(DRV_CHROMEOS) ++#include ++static struct pci_dev *pci_root = NULL; ++#define PCI_DEVFN(slot, func) ((((slot)&0x1f) << 3) | ((func)&0x07)) ++#endif ++ ++static U32 counter_overflow[UNCORE_MAX_COUNTERS]; ++static U32 counter_port_id; ++static U64 trace_virtual_address; ++ ++#if defined(DRV_CHROMEOS) ++/*! ++ * @fn static VOID get_pci_device_handle(U32 bus_no, ++ U32 dev_no, ++ U32 func_no) ++ * ++ * @brief Get PCI device handle to be able to read/write ++ * ++ * @param bus_no - bus number ++ * dev_no - device number ++ * func_no - function number ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static void get_pci_device_handle(U32 bus_no, U32 dev_no, U32 func_no) ++{ ++ if (!pci_root) { ++ pci_root = pci_get_bus_and_slot(bus_no, ++ PCI_DEVFN(dev_no, func_no)); ++ if (!pci_root) { ++ SOCPERF_PRINT_DEBUG("Unable to get pci device handle"); ++ } ++ } ++} ++#endif ++ ++/*! 
++ * @fn static VOID write_To_Register(U32 bus_no, ++ U32 dev_no, ++ U32 func_no, ++ U32 port_id, ++ U32 op_code, ++ U64 mmio_offset, ++ ULONG value) ++ * ++ * @brief Reads Uncore programming ++ * ++ * @param bus_no - bus number ++ * dev_no - device number ++ * func_no - function number ++ * port_id - port id ++ * op_code - operation code ++ * mmio_offset - mmio offset ++ * value - data to be written to the register ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static void write_To_Register(U32 bus_no, U32 dev_no, U32 func_no, U32 port_id, ++ U32 op_code, U64 mmio_offset, ULONG value) ++{ ++ U32 cmd = 0; ++ U32 mmio_offset_lo; ++ U32 mmio_offset_hi; ++#if !defined(DRV_CHROMEOS) && !defined(PCI_HELPERS_API) ++ U32 pci_address; ++#endif ++ ++ mmio_offset_hi = mmio_offset & SOC_UNCORE_OFFSET_HI_MASK; ++ mmio_offset_lo = mmio_offset & SOC_UNCORE_OFFSET_LO_MASK; ++ cmd = (op_code << SOC_UNCORE_OP_CODE_SHIFT) + ++ (port_id << SOC_UNCORE_PORT_ID_SHIFT) + (mmio_offset_lo << 8) + ++ (SOC_UNCORE_BYTE_ENABLES << 4); ++ SOCPERF_PRINT_DEBUG("write off=%llx value=%x\n", mmio_offset, value); ++ ++#if defined(PCI_HELPERS_API) ++ intel_mid_msgbus_write32_raw_ext(cmd, mmio_offset_hi, value); ++#elif defined(DRV_CHROMEOS) ++ if (!pci_root) { ++ get_pci_device_handle(bus_no, dev_no, func_no); ++ } ++ pci_write_config_dword(pci_root, SOC_UNCORE_MDR_REG_OFFSET, value); ++ pci_write_config_dword(pci_root, SOC_UNCORE_MCRX_REG_OFFSET, ++ mmio_offset_hi); ++ pci_write_config_dword(pci_root, SOC_UNCORE_MCR_REG_OFFSET, cmd); ++#else ++ pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, ++ SOC_UNCORE_MDR_REG_OFFSET); ++ SOCPERF_PCI_Write_Ulong((ULONG)pci_address, (ULONG)value); ++ pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, ++ SOC_UNCORE_MCRX_REG_OFFSET); ++ SOCPERF_PCI_Write_Ulong((ULONG)pci_address, mmio_offset_hi); ++ pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, ++ SOC_UNCORE_MCR_REG_OFFSET); ++ SOCPERF_PCI_Write_Ulong((ULONG)pci_address, cmd); ++#endif ++} ++ 
++/*! ++ * @fn static ULONG read_From_Register(U32 bus_no, ++ U32 dev_no, ++ U32 func_no, ++ U32 port_id, ++ U32 op_code, ++ U64 mmio_offset) ++ * ++ * @brief Reads Uncore programming info ++ * ++ * @param bus_no - bus number ++ * dev_no - device number ++ * func_no - function number ++ * port_id - port id ++ * op_code - operation code ++ * mmio_offset - mmio offset ++ * ++ * @return data from the counter ++ * ++ * Special Notes: ++ */ ++static void read_From_Register(U32 bus_no, U32 dev_no, U32 func_no, U32 port_id, ++ U32 op_code, U64 mmio_offset, U32 *data_val) ++{ ++ U32 data = 0; ++ U32 cmd = 0; ++ U32 mmio_offset_hi; ++ U32 mmio_offset_lo; ++#if !defined(DRV_CHROMEOS) && !defined(PCI_HELPERS_API) ++ U32 pci_address; ++#endif ++ ++ mmio_offset_hi = mmio_offset & SOC_UNCORE_OFFSET_HI_MASK; ++ mmio_offset_lo = mmio_offset & SOC_UNCORE_OFFSET_LO_MASK; ++ cmd = (op_code << SOC_UNCORE_OP_CODE_SHIFT) + ++ (port_id << SOC_UNCORE_PORT_ID_SHIFT) + (mmio_offset_lo << 8) + ++ (SOC_UNCORE_BYTE_ENABLES << 4); ++ ++#if defined(PCI_HELPERS_API) ++ data = intel_mid_msgbus_read32_raw_ext(cmd, mmio_offset_hi); ++#elif defined(DRV_CHROMEOS) ++ if (!pci_root) { ++ get_pci_device_handle(bus_no, dev_no, func_no); ++ } ++ pci_write_config_dword(pci_root, SOC_UNCORE_MCRX_REG_OFFSET, ++ mmio_offset_hi); ++ pci_write_config_dword(pci_root, SOC_UNCORE_MCR_REG_OFFSET, cmd); ++ pci_read_config_dword(pci_root, SOC_UNCORE_MDR_REG_OFFSET, &data); ++#else ++ pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, ++ SOC_UNCORE_MCRX_REG_OFFSET); ++ SOCPERF_PCI_Write_Ulong((ULONG)pci_address, mmio_offset_hi); ++ pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, ++ SOC_UNCORE_MCR_REG_OFFSET); ++ SOCPERF_PCI_Write_Ulong((ULONG)pci_address, cmd); ++ pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, ++ SOC_UNCORE_MDR_REG_OFFSET); ++ data = SOCPERF_PCI_Read_Ulong(pci_address); ++#endif ++ SOCPERF_PRINT_DEBUG("read off=%llx value=%x\n", mmio_offset, data); ++ if (data_val) { ++ *data_val = data; 
++ } ++} ++ ++/*! ++ * @fn static VOID uncore_Reset_Counters(U32 dev_idx) ++ * ++ * @brief Reset counters ++ * ++ * @param dev_idx - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Reset_Counters(U32 dev_idx) ++{ ++ U32 data_reg = 0; ++ ++ if (counter_port_id != 0) { ++ FOR_EACH_PCI_REG_RAW(pecb, i, dev_idx) ++ { ++ if (ECB_entries_reg_type(pecb, i) == ++ PMU_REG_EVENT_SELECT) { ++ data_reg = ++ i + ECB_operations_register_len( ++ pecb, PMU_OPERATION_WRITE); ++ if (ECB_entries_reg_type(pecb, data_reg) == ++ PMU_REG_DATA) { ++ write_To_Register( ++ ECB_entries_bus_no(pecb, ++ data_reg), ++ ECB_entries_dev_no(pecb, ++ data_reg), ++ ECB_entries_func_no(pecb, ++ data_reg), ++ counter_port_id, ++ SOC_COUNTER_WRITE_OP_CODE, ++ ECB_entries_reg_offset( ++ pecb, data_reg), ++ (ULONG)0); ++ } ++ write_To_Register(ECB_entries_bus_no(pecb, i), ++ ECB_entries_dev_no(pecb, i), ++ ECB_entries_func_no(pecb, i), ++ counter_port_id, ++ SOC_COUNTER_WRITE_OP_CODE, ++ ECB_entries_reg_offset(pecb, ++ i), ++ (ULONG)SOC_UNCORE_STOP); ++ } ++ } ++ END_FOR_EACH_PCI_REG_RAW; ++ } ++} ++ ++/*! ++ * @fn static VOID uncore_Write_PMU(VOID*) ++ * ++ * @brief Initial write of PMU registers ++ * Walk through the entries and write the value of the register accordingly. 
++ * When current_group = 0, then this is the first time this routine is called, ++ * ++ * @param param - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Write_PMU(VOID *param) ++{ ++ U32 dev_idx; ++ ECB pecb; ++ DRV_PCI_DEVICE_ENTRY dpden; ++ U32 pci_address; ++ U32 bar_lo; ++ U64 bar_hi; ++ U64 final_bar; ++ U64 physical_address; ++ U32 dev_index = 0; ++ S32 bar_list[SOC_UNCORE_MAX_PCI_DEVICES]; ++ U32 bar_index = 0; ++ U32 map_size = 0; ++ U64 virtual_address = 0; ++ U32 bar_name = 0; ++ DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; ++ U32 next_bar_offset = 0; ++ U64 mmio_offset = 0; ++ U64 map_base = 0; ++ U32 i = 0; ++ U32 cur_grp; ++ ++ dev_idx = *((U32 *)param); ++ if (device_uncore == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); ++ return; ++ } ++ cur_grp = LWPMU_DEVICE_cur_group(device_uncore); ++ ++ pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; ++ if (pecb == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: null pecb!\n"); ++ return; ++ } ++ ++ for (dev_index = 0; dev_index < SOC_UNCORE_MAX_PCI_DEVICES; ++ dev_index++) { ++ bar_list[dev_index] = -1; ++ } ++ ++ // initialize the per-counter overflow numbers ++ for (i = 0; i < UNCORE_MAX_COUNTERS; i++) { ++ counter_overflow[i] = 0; ++ socperf_pcb[0].last_uncore_count[i] = 0; ++ } ++ ++ ECB_pcidev_entry_list(pecb) = (DRV_PCI_DEVICE_ENTRY)( ++ (S8 *)pecb + ECB_pcidev_list_offset(pecb)); ++ dpden = ECB_pcidev_entry_list(pecb); ++ ++ uncore_Reset_Counters(dev_idx); ++ ++ for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); ++ dev_index++) { ++ curr_pci_entry = &dpden[dev_index]; ++ bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); ++ mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ curr_pci_entry); ++ ++ if (counter_port_id == 0 && ++ DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == ++ UNC_COUNTER) { ++ counter_port_id = ++ DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry); ++ uncore_Reset_Counters(dev_idx); ++ } ++ if 
(DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) == ++ UNC_PCICFG) { ++ if (bar_name == UNC_SOCPCI && ++ (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == ++ UNC_MUX || ++ DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == ++ UNC_COUNTER) && ++ DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) == ++ UNC_OP_WRITE) { ++ SOCPERF_PRINT_DEBUG( ++ "dev_index=%d OFFSET=%x VAL=%x\n", ++ dev_index, ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_value( ++ curr_pci_entry)); ++ write_To_Register( ++ DRV_PCI_DEVICE_ENTRY_bus_no( ++ curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no( ++ curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no( ++ curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_port_id( ++ curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_op_code( ++ curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ curr_pci_entry), ++ (ULONG)DRV_PCI_DEVICE_ENTRY_value( ++ curr_pci_entry)); ++ } ++ continue; ++ } ++ // UNC_MMIO programming ++ if (bar_list[bar_name] != -1) { ++ bar_index = bar_list[bar_name]; ++ virtual_address = DRV_PCI_DEVICE_ENTRY_virtual_address( ++ &dpden[bar_index]); ++ DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = ++ DRV_PCI_DEVICE_ENTRY_virtual_address( ++ &dpden[bar_index]); ++ writel(DRV_PCI_DEVICE_ENTRY_value(curr_pci_entry), ++ (void __iomem *)(((char *)(UIOP)virtual_address) + ++ mmio_offset)); ++ continue; ++ } ++ pci_address = FORM_PCI_ADDR( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry)); ++ bar_lo = SOCPERF_PCI_Read_Ulong(pci_address); ++ next_bar_offset = ++ DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry) + ++ SOC_UNCORE_NEXT_ADDR_OFFSET; ++ pci_address = FORM_PCI_ADDR( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ next_bar_offset); ++ bar_hi = 
SOCPERF_PCI_Read_Ulong(pci_address); ++ final_bar = (bar_hi << SOC_UNCORE_BAR_ADDR_SHIFT) | bar_lo; ++ final_bar &= SOC_UNCORE_BAR_ADDR_MASK; ++ DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry) = final_bar; ++ physical_address = ++ DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry); ++ if (physical_address) { ++ map_size = SOC_UNCORE_OTHER_BAR_MMIO_PAGE_SIZE; ++ map_base = (mmio_offset / map_size) * map_size; ++ if (mmio_offset > map_size) { ++ physical_address = physical_address + map_base; ++ } ++ } ++ } ++} ++ ++/*! ++ * @fn static VOID uncore_Disable_PMU(PVOID) ++ * ++ * @brief Unmap the virtual address when sampling/driver stops ++ * ++ * @param param - device index ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Disable_PMU(PVOID param) ++{ ++ U32 dev_idx = *((U32 *)param); ++ ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_PREPARE_STOP) { ++ uncore_Reset_Counters(dev_idx); ++ } ++} ++ ++/*! ++ * @fn static VOID uncore_Stop_Mem(VOID) ++ * ++ * @brief Stop trace ++ * ++ * @param param - None ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Stop_Mem(VOID) ++{ ++ ECB pecb; ++ DRV_PCI_DEVICE_ENTRY dpden; ++ U32 bar_name = 0; ++ DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; ++ U64 mmio_offset = 0; ++ U32 dev_index = 0; ++ U32 data_val = 0; ++ U32 cur_grp; ++ ++ if (device_uncore == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); ++ return; ++ } ++ cur_grp = LWPMU_DEVICE_cur_group(device_uncore); ++ ++ pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; ++ if (pecb == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: null pecb!\n"); ++ return; ++ } ++ ++ ECB_pcidev_entry_list(pecb) = (DRV_PCI_DEVICE_ENTRY)( ++ (S8 *)pecb + ECB_pcidev_list_offset(pecb)); ++ dpden = ECB_pcidev_entry_list(pecb); ++ ++ for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); ++ dev_index++) { ++ curr_pci_entry = &dpden[dev_index]; ++ bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); 
++ mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ curr_pci_entry); ++ ++ if (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == ++ UNC_STOP && ++ DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) == ++ UNC_PCICFG && ++ bar_name == UNC_SOCPCI && ++ DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) == ++ UNC_OP_READ) { ++ SOCPERF_PRINT_DEBUG( ++ "op=%d port=%d offset=%x val=%x\n", ++ DRV_PCI_DEVICE_ENTRY_op_code(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), ++ mmio_offset, data_val); ++ read_From_Register( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), ++ SOC_COUNTER_READ_OP_CODE, mmio_offset, ++ &data_val); ++ SOCPERF_PRINT_DEBUG( ++ "op=%d port=%d offset=%x val=%x\n", ++ DRV_PCI_DEVICE_ENTRY_op_code(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), ++ mmio_offset, data_val); ++ write_To_Register( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), ++ SOC_COUNTER_WRITE_OP_CODE, mmio_offset, ++ (ULONG)(data_val | 0x2000)); ++ } ++ } ++} ++ ++/*! ++ * @fn static VOID uncore_Initialize(PVOID) ++ * ++ * @brief Initialize any registers or addresses ++ * ++ * @param param ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Initialize(VOID *param) ++{ ++ return; ++} ++ ++/*! ++ * @fn static VOID uncore_Clean_Up(PVOID) ++ * ++ * @brief Reset any registers or addresses ++ * ++ * @param param ++ * ++ * @return None ++ * ++ * Special Notes: ++ */ ++static VOID uncore_Clean_Up(VOID *param) ++{ ++ if (trace_virtual_address) { ++ iounmap((void __iomem *)(UIOP)trace_virtual_address); ++ trace_virtual_address = 0; ++ } ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn uncore_Read_Data() ++ * ++ * @param None ++ * ++ * @return None No return needed ++ * ++ * @brief Read the counters ++ * ++ */ ++static VOID uncore_Read_Data(PVOID data_buffer) ++{ ++ U32 event_id = 0; ++ U64 *data; ++ int data_index; ++ U32 data_val = 0; ++ U32 data_reg = 0; ++ U64 total_count = 0; ++ U32 event_index = 0; ++ U32 cur_grp; ++ ++ if (device_uncore == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); ++ return; ++ } ++ cur_grp = LWPMU_DEVICE_cur_group(device_uncore); ++ ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_UNINITIALIZED || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_IDLE || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_RESERVED || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_PREPARE_STOP || ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_STOPPED) { ++ SOCPERF_PRINT_ERROR("ERROR: RETURING EARLY from Read_Data\n"); ++ return; ++ } ++ ++ data = data_buffer; ++ data_index = 0; ++ ++ preempt_disable(); ++ ++ // Write GroupID ++ data[data_index] = cur_grp + 1; ++ // Increment the data index as the event id starts from zero ++ data_index++; ++ ++ FOR_EACH_PCI_REG_RAW(pecb, i, dev_idx) ++ { ++ if (ECB_entries_reg_type(pecb, i) == PMU_REG_EVENT_SELECT) { ++ write_To_Register(ECB_entries_bus_no(pecb, i), ++ ECB_entries_dev_no(pecb, i), ++ ECB_entries_func_no(pecb, i), ++ counter_port_id, ++ SOC_COUNTER_WRITE_OP_CODE, ++ ECB_entries_reg_offset(pecb, i), ++ (ULONG)SOC_UNCORE_SAMPLE_DATA); ++ ++ data_reg = i + ECB_operations_register_len( ++ pecb, PMU_OPERATION_WRITE); ++ if (ECB_entries_reg_type(pecb, data_reg) == ++ PMU_REG_DATA) { ++ read_From_Register( ++ ECB_entries_bus_no(pecb, data_reg), ++ ECB_entries_dev_no(pecb, data_reg), ++ ECB_entries_func_no(pecb, data_reg), ++ counter_port_id, ++ SOC_COUNTER_READ_OP_CODE, ++ ECB_entries_reg_offset(pecb, data_reg), ++ &data_val); ++ if (data_val < ++ socperf_pcb[0] ++ 
.last_uncore_count[event_index]) { ++ counter_overflow[event_index]++; ++ } ++ socperf_pcb[0].last_uncore_count[event_index] = ++ data_val; ++ total_count = data_val + ++ counter_overflow[event_index] * ++ UNCORE_MAX_COUNT; ++ event_index++; ++ data[data_index + event_id] = total_count; ++ event_id++; ++ } ++ } ++ } ++ END_FOR_EACH_PCI_REG_RAW; ++ ++ preempt_enable(); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn uncore_Create_Mem() ++ * ++ * @param None ++ * ++ * @return None No return needed ++ * ++ * @brief Read the counters ++ * ++ */ ++static VOID uncore_Create_Mem(U32 memory_size, U64 *trace_buffer) ++{ ++ ECB pecb; ++ DRV_PCI_DEVICE_ENTRY dpden; ++ U32 bar_name = 0; ++ DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; ++ U64 mmio_offset = 0; ++ U32 dev_index = 0; ++ U32 data_val = 0; ++ U32 reg_index = 0; ++ U64 physical_high = 0; ++ U64 odla_physical_address = 0; ++ ++ if (device_uncore == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); ++ return; ++ } ++ pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[0]; ++ if (pecb == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: null pecb!\n"); ++ return; ++ } ++ ++ if (!trace_buffer) { ++ return; ++ } ++ ++ ECB_pcidev_entry_list(pecb) = (DRV_PCI_DEVICE_ENTRY)( ++ (S8 *)pecb + ECB_pcidev_list_offset(pecb)); ++ dpden = ECB_pcidev_entry_list(pecb); ++ ++ for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); ++ dev_index++) { ++ curr_pci_entry = &dpden[dev_index]; ++ bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); ++ mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ curr_pci_entry); ++ ++ if (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == ++ UNC_MEMORY && ++ DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) == ++ UNC_PCICFG && ++ bar_name == UNC_SOCPCI && ++ DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) == ++ UNC_OP_WRITE) { ++ read_From_Register( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ 
DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), ++ SOC_COUNTER_READ_OP_CODE, mmio_offset, ++ &data_val); ++ if (reg_index == 1) { ++ odla_physical_address = data_val; ++ } else if (reg_index == 2) { ++ physical_high = data_val; ++ odla_physical_address = odla_physical_address | ++ (physical_high << 32); ++ } ++ SOCPERF_PRINT_DEBUG( ++ "op=%d port=%d offset=%x val=%x\n", ++ DRV_PCI_DEVICE_ENTRY_op_code(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), ++ mmio_offset, data_val); ++ reg_index++; ++ } ++ continue; ++ } ++ SOCPERF_PRINT_DEBUG("Physical Address=%llx\n", odla_physical_address); ++ if (odla_physical_address) { ++ trace_virtual_address = (U64)(UIOP)ioremap_nocache( ++ odla_physical_address, 1024 * sizeof(U64)); ++ SOCPERF_PRINT_DEBUG("PHY=%llx ODLA VIRTUAL ADDRESS=%llx\n", ++ odla_physical_address, ++ trace_virtual_address); ++ if (trace_buffer) { ++ *trace_buffer = odla_physical_address; ++ } ++ } ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn uncore_Check_Status() ++ * ++ * @param None ++ * ++ * @return None No return needed ++ * ++ * @brief Read the counters ++ * ++ */ ++static VOID uncore_Check_Status(U64 *trace_buffer, U32 *num_entries) ++{ ++ U32 dev_index = 0; ++ ECB pecb; ++ DRV_PCI_DEVICE_ENTRY dpden; ++ U32 bar_name = 0; ++ DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; ++ U64 mmio_offset = 0; ++ U32 data_val = 0; ++ U32 data_index = 0; ++ ++ if (device_uncore == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); ++ return; ++ } ++ pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[0]; ++ if (pecb == NULL) { ++ SOCPERF_PRINT_ERROR("ERROR: null pecb!\n"); ++ return; ++ } ++ if (!trace_buffer) { ++ return; ++ } ++ ++ ECB_pcidev_entry_list(pecb) = (DRV_PCI_DEVICE_ENTRY)( ++ (S8 *)pecb + ECB_pcidev_list_offset(pecb)); ++ dpden = ECB_pcidev_entry_list(pecb); ++ ++ for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); ++ dev_index++) { ++ curr_pci_entry = &dpden[dev_index]; ++ bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); ++ mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( ++ curr_pci_entry); ++ ++ if (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == ++ UNC_STATUS && ++ DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) == ++ UNC_PCICFG && ++ bar_name == UNC_SOCPCI && ++ DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) == ++ UNC_OP_READ) { ++ read_From_Register( ++ DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), ++ DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), ++ SOC_COUNTER_READ_OP_CODE, mmio_offset, ++ &data_val); ++ SOCPERF_PRINT_DEBUG("TRACE STATUS=%x\n", data_val); ++ trace_buffer[data_index] = data_val; ++ data_index++; ++ continue; ++ } ++ } ++ ++ if (num_entries) { ++ *num_entries = data_index; ++ } ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn uncore_Read_Mem() ++ * ++ * @param None ++ * ++ * @return None No return needed ++ * ++ * @brief Read the counters ++ * ++ */ ++static VOID uncore_Read_Mem(U64 start_address, U64 *trace_buffer, ++ U32 num_entries) ++{ ++ U32 data_index = 0; ++ U32 data_value = 0; ++ ++ if (num_entries == 0 || !trace_buffer) { ++ return; ++ } ++ SOCPERF_PRINT_DEBUG( ++ "Reading memory for num_entries=%d from address=%llx\n", ++ num_entries, trace_virtual_address); ++ for (data_index = 0; data_index < num_entries; data_index++) { ++ if (trace_virtual_address) { ++ data_value = readl(((void __iomem *)((UIOP)trace_virtual_address + ++ data_index))); ++ ++ SOCPERF_PRINT_DEBUG("DATA VALUE=%llx\n", data_value); ++ *(trace_buffer + data_index) = data_value; ++ } ++ } ++ ++} ++ ++/* ++ * Initialize the dispatch table ++ */ ++DISPATCH_NODE soc_uncore_dispatch = { ++ .init = uncore_Initialize, // initialize ++ .fini = NULL, // destroy ++ .write = uncore_Write_PMU, // write ++ .freeze = uncore_Disable_PMU, // freeze ++ .restart = NULL, // restart ++ .read_data = NULL, // read ++ .check_overflow = NULL, // check for overflow ++ .swap_group = NULL, ++ .read_lbrs = NULL, ++ .clean_up = uncore_Clean_Up, ++ .hw_errata = NULL, ++ .read_power = NULL, ++ .check_overflow_errata = NULL, ++ .read_counts = NULL, //read_counts ++ .check_overflow_gp_errata = NULL, ++ .read_power = NULL, ++ .platform_info = NULL, ++ .trigger_read = NULL, ++ .read_current_data = uncore_Read_Data, ++ .create_mem = uncore_Create_Mem, ++ .check_status = uncore_Check_Status, ++ .read_mem = uncore_Read_Mem, ++ .stop_mem = uncore_Stop_Mem ++}; +diff --git a/drivers/platform/x86/socperf/socperfdrv.c b/drivers/platform/x86/socperf/socperfdrv.c +new file mode 100644 +index 000000000000..3a80764bbed4 +--- /dev/null ++++ b/drivers/platform/x86/socperf/socperfdrv.c +@@ -0,0 +1,1560 @@ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual 
BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * *********************************************************************************************** ++ */ ++ ++ ++#include "lwpmudrv_defines.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "rise_errors.h" ++#include "lwpmudrv_version.h" ++#include "lwpmudrv_ecb.h" ++#include "lwpmudrv_struct.h" ++#include "lwpmudrv_ioctl.h" ++#include "inc/ecb_iterators.h" ++#include "socperfdrv.h" ++#include "control.h" ++#include "inc/utility.h" ++ ++MODULE_AUTHOR("Copyright(C) 2007-2019 Intel Corporation"); ++MODULE_VERSION(SOCPERF_NAME "_" SOCPERF_VERSION_STR); ++MODULE_LICENSE("Dual BSD/GPL"); ++ ++typedef struct LWPMU_DEV_NODE_S LWPMU_DEV_NODE; ++typedef LWPMU_DEV_NODE * LWPMU_DEV; ++ ++struct LWPMU_DEV_NODE_S { ++ long buffer; ++ struct semaphore sem; ++ struct cdev cdev; ++}; ++ ++#define LWPMU_DEV_buffer(dev) ((dev)->buffer) ++#define LWPMU_DEV_sem(dev) ((dev)->sem) ++#define LWPMU_DEV_cdev(dev) ((dev)->cdev) ++ ++/* Global variables of the driver */ ++SOCPERF_VERSION_NODE socperf_drv_version; ++U64 *read_unc_ctr_info; ++DISPATCH dispatch_uncore; ++DRV_CONFIG socperf_drv_cfg; ++EVENT_CONFIG socperf_global_ec; ++volatile S32 socperf_abnormal_terminate; ++LWPMU_DEV socperf_control; ++ ++LWPMU_DEVICE device_uncore; ++CPU_STATE socperf_pcb; ++size_t 
socperf_pcb_size; ++ ++#if defined(DRV_USE_UNLOCKED_IOCTL) ++static struct mutex ioctl_lock; ++#endif ++ ++#define PMU_DEVICES 1 // pmu control ++ ++static dev_t lwpmu_DevNum; /* the major and minor parts for SOCPERF base */ ++ ++static struct class *pmu_class; ++ ++#define DRV_DEVICE_DELIMITER "!" ++ ++#if !defined(DRV_USE_UNLOCKED_IOCTL) ++#define MUTEX_INIT(lock) ++#define MUTEX_LOCK(lock) ++#define MUTEX_UNLOCK(lock) ++#else ++#define MUTEX_INIT(lock) mutex_init(&(lock)) ++#define MUTEX_LOCK(lock) mutex_lock(&(lock)) ++#define MUTEX_UNLOCK(lock) mutex_unlock(&(lock)) ++#endif ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Initialize_State(void) ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Allocates the memory needed at load time. Initializes all the ++ * @brief necessary state variables with the default values. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Initialize_State(VOID) ++{ ++ S32 i, max_cpu_id = 0; ++ ++ for_each_possible_cpu(i) { ++ if (cpu_present(i)) { ++ if (i > max_cpu_id) { ++ max_cpu_id = i; ++ } ++ } ++ } ++ max_cpu_id++; ++ ++ /* ++ * Machine Initializations ++ * Abstract this information away into a separate entry point ++ * ++ * Question: Should we allow for the use of Hot-cpu ++ * add/subtract functionality while the driver is executing? 
++ */ ++ if (max_cpu_id > num_present_cpus()) { ++ GLOBAL_STATE_num_cpus(socperf_driver_state) = max_cpu_id; ++ } else { ++ GLOBAL_STATE_num_cpus(socperf_driver_state) = ++ num_present_cpus(); ++ } ++ GLOBAL_STATE_active_cpus(socperf_driver_state) = num_online_cpus(); ++ GLOBAL_STATE_cpu_count(socperf_driver_state) = 0; ++ GLOBAL_STATE_dpc_count(socperf_driver_state) = 0; ++ GLOBAL_STATE_num_em_groups(socperf_driver_state) = 0; ++ GLOBAL_STATE_current_phase(socperf_driver_state) = ++ DRV_STATE_UNINITIALIZED; ++ ++ SOCPERF_PRINT_DEBUG( ++ "%s: num_cpus=%d, active_cpus=%d\n", ++ __func__, ++ GLOBAL_STATE_num_cpus(socperf_driver_state), ++ GLOBAL_STATE_active_cpus(socperf_driver_state)); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID SOCPERF_Read_Data ++ * ++ * @brief Reads counter data ++ * ++ * @param param data_buffer - buffer for reading counter data. ++ * ++ * @return None ++ * ++ * Special Notes: ++ * ++ */ ++extern VOID SOCPERF_Read_Data3(PVOID data_buffer) ++{ ++ if (dispatch_uncore && dispatch_uncore->read_current_data) { ++ dispatch_uncore->read_current_data(data_buffer); ++ } ++ SOCPERF_PRINT_DEBUG("%s called\n", __func__); ++} ++EXPORT_SYMBOL(SOCPERF_Read_Data3); ++ ++/********************************************************************* ++ * Internal Driver functions ++ * Should be called only from the lwpmudrv_DeviceControl routine ++ *********************************************************************/ ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMU_IOCTL_VERSION call. ++ * @brief Returns the version number of the kernel mode sampling. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg) ++{ ++ OS_STATUS status; ++ ++ // Check if enough space is provided for collecting the data ++ if ((arg->len_drv_to_usr != sizeof(U32)) || ++ (arg->buf_drv_to_usr == NULL)) { ++ return OS_FAULT; ++ } ++ ++ status = put_user( ++ SOCPERF_VERSION_NODE_socperf_version(&socperf_drv_version), ++ (U32 __user *)arg->buf_drv_to_usr); ++ ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static VOID lwpmudrv_Clean_Up(DRV_BOOL) ++ * ++ * @param DRV_BOOL finish - Flag to call finish ++ * ++ * @return VOID ++ * ++ * @brief Cleans up the memory allocation. ++ * ++ * Special Notes ++ */ ++static VOID lwpmudrv_Clean_Up(DRV_BOOL finish) ++{ ++ U32 i = 0; ++ ++ if (dispatch_uncore && dispatch_uncore->clean_up) { ++ dispatch_uncore->clean_up((VOID *)&i); ++ } ++ ++ if (device_uncore) { ++ EVENT_CONFIG ec; ++ ++ if (LWPMU_DEVICE_PMU_register_data(device_uncore)) { ++ ec = LWPMU_DEVICE_ec(device_uncore); ++ for (i = 0; i < EVENT_CONFIG_num_groups_unc(ec); i++) { ++ SOCPERF_Free_Memory( ++ LWPMU_DEVICE_PMU_register_data( ++ device_uncore)[i]); ++ } ++ } ++ LWPMU_DEVICE_pcfg(device_uncore) = ++ SOCPERF_Free_Memory(LWPMU_DEVICE_pcfg(device_uncore)); ++ LWPMU_DEVICE_ec(device_uncore) = ++ SOCPERF_Free_Memory(LWPMU_DEVICE_ec(device_uncore)); ++ device_uncore = SOCPERF_Free_Memory(device_uncore); ++ } ++ ++ socperf_pcb = SOCPERF_Free_Memory(socperf_pcb); ++ socperf_pcb_size = 0; ++ GLOBAL_STATE_num_em_groups(socperf_driver_state) = 0; ++ GLOBAL_STATE_num_descriptors(socperf_driver_state) = 0; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Initialize_Driver(PVOID buf_drv_to_usr, U32 len_drv_to_usr) ++ * ++ * @param buf_drv_to_usr - pointer to the input buffer ++ * @param len_drv_to_usr - size of the input buffer ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMU_IOCTL_INIT_DRIVER call. ++ * @brief Sets up the interrupt handler. ++ * @brief Set up the output buffers/files needed to make the driver ++ * @brief operational. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Initialize_Driver(PVOID buf_drv_to_usr, ++ U32 len_drv_to_usr) ++{ ++ if (buf_drv_to_usr == NULL) { ++ SOCPERF_PRINT_ERROR("buf_drv_to_usr ERROR!\n"); ++ return OS_FAULT; ++ } ++ ++ socperf_drv_cfg = SOCPERF_Allocate_Memory(len_drv_to_usr); ++ if (!socperf_drv_cfg) { ++ SOCPERF_PRINT_ERROR("Memory allocation failure for socperf_drv_cfg!\n"); ++ return OS_NO_MEM; ++ } ++ ++ if (copy_from_user(socperf_drv_cfg, (void __user *)buf_drv_to_usr, len_drv_to_usr)) { ++ SOCPERF_PRINT_ERROR("Failed to copy from user"); ++ return OS_FAULT; ++ } ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Initialize_Uncore(PVOID buf_drv_to_usr, U32 len_drv_to_usr) ++ * ++ * @param buf_drv_to_usr - pointer to the input buffer ++ * @param len_drv_to_usr - size of the input buffer ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMU_IOCTL_INIT call. ++ * @brief Sets up the interrupt handler. ++ * @brief Set up the output buffers/files needed to make the driver ++ * @brief operational. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Initialize_Uncore(PVOID buf_drv_to_usr, ++ U32 len_drv_to_usr) ++{ ++ DEV_UNC_CONFIG pcfg_unc; ++ U32 previous_state; ++ U32 i = 0; ++ ++ SOCPERF_PRINT_DEBUG("Entered %s\n", __func__); ++ previous_state = ++ cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state), ++ DRV_STATE_UNINITIALIZED, DRV_STATE_IDLE); ++ ++ if (previous_state != DRV_STATE_UNINITIALIZED) { ++ SOCPERF_PRINT_ERROR("OS_IN_PROGRESS error!\n"); ++ return OS_IN_PROGRESS; ++ } ++ /* ++ * Program State Initializations: ++ * Foreach device, copy over pcfg_unc and configure dispatch table ++ */ ++ if (buf_drv_to_usr == NULL) { ++ SOCPERF_PRINT_ERROR("in_buff ERROR!\n"); ++ return OS_FAULT; ++ } ++ if (len_drv_to_usr != sizeof(DEV_UNC_CONFIG_NODE)) { ++ SOCPERF_PRINT_ERROR( ++ "Got len_drv_to_usr=%d, expecting size=%d\n", ++ len_drv_to_usr, (int)sizeof(DEV_UNC_CONFIG_NODE)); ++ return OS_FAULT; ++ } ++ ++ device_uncore = SOCPERF_Allocate_Memory(sizeof(LWPMU_DEVICE_NODE)); ++ if (!device_uncore) { ++ SOCPERF_PRINT_ERROR( ++ "Memory allocation failure for device_uncore!\n"); ++ return OS_NO_MEM; ++ } ++ socperf_pcb_size = GLOBAL_STATE_num_cpus(socperf_driver_state) * ++ sizeof(CPU_STATE_NODE); ++ socperf_pcb = SOCPERF_Allocate_Memory(socperf_pcb_size); ++ if (!socperf_pcb) { ++ SOCPERF_PRINT_ERROR( ++ "Memory allocation failure for socperf_pcb!\n"); ++ return OS_NO_MEM; ++ } ++ ++ // allocate memory ++ LWPMU_DEVICE_pcfg(device_uncore) = ++ SOCPERF_Allocate_Memory(sizeof(DEV_UNC_CONFIG_NODE)); ++ if (!LWPMU_DEVICE_pcfg(device_uncore)) { ++ SOCPERF_PRINT_ERROR( ++ "Memory allocation failure for LWPMU_DEVICE_pcfg(device_uncore)!\n"); ++ return OS_NO_MEM; ++ } ++ // copy over pcfg_unc ++ if (copy_from_user(LWPMU_DEVICE_pcfg(device_uncore), (void __user *)buf_drv_to_usr, ++ len_drv_to_usr)) { ++ SOCPERF_PRINT_ERROR("Failed to copy from user"); ++ return OS_FAULT; ++ } ++ // configure dispatch from dispatch_id ++ pcfg_unc = 
(DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(device_uncore); ++ ++ LWPMU_DEVICE_dispatch(device_uncore) = SOCPERF_UTILITY_Configure_CPU( ++ DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); ++ if (LWPMU_DEVICE_dispatch(device_uncore) == NULL) { ++ SOCPERF_PRINT_ERROR("Unable to configure CPU"); ++ return OS_FAULT; ++ } ++ ++ LWPMU_DEVICE_em_groups_count(device_uncore) = 0; ++ LWPMU_DEVICE_cur_group(device_uncore) = 0; ++ SOCPERF_PRINT_DEBUG( ++ "SocPerf Driver Config : uncore dispatch id = %d\n", ++ DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); ++ dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); ++ if (dispatch_uncore && dispatch_uncore->init) { ++ dispatch_uncore->init((VOID *)&i); ++ } ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS socperf_Terminate(void) ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMUDRV_IOCTL_TERMINATE call. ++ * @brief Cleans up the interrupt handler and resets the PMU state. ++ * ++ * Special Notes ++ */ ++static OS_STATUS socperf_Terminate(VOID) ++{ ++ U32 previous_state; ++ ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_UNINITIALIZED) { ++ return OS_SUCCESS; ++ } ++ ++ previous_state = ++ cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state), ++ DRV_STATE_STOPPED, DRV_STATE_UNINITIALIZED); ++ if (previous_state != DRV_STATE_STOPPED) { ++ SOCPERF_PRINT_ERROR( ++ "%s: Sampling is in progress, cannot terminate.\n", __func__); ++ return OS_IN_PROGRESS; ++ } ++ ++ GLOBAL_STATE_current_phase(socperf_driver_state) = ++ DRV_STATE_UNINITIALIZED; ++ lwpmudrv_Clean_Up(TRUE); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Trigger_Read(void) ++ * ++ * @param - none ++ * ++ * @return - OS_STATUS ++ * ++ * @brief Read the Counter Data. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Trigger_Read(VOID) ++{ ++ dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); ++ if (dispatch_uncore && dispatch_uncore->trigger_read) { ++ dispatch_uncore->trigger_read(); ++ } ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Init_PMU(void) ++ * ++ * @param - none ++ * ++ * @return - OS_STATUS ++ * ++ * @brief Initialize the PMU and the driver state in preparation for data collection. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Init_PMU(VOID) ++{ ++ U32 i = 0; ++ ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) != ++ DRV_STATE_IDLE) { ++ return OS_IN_PROGRESS; ++ } ++ dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); ++ if (dispatch_uncore && dispatch_uncore->write) { ++ dispatch_uncore->write((VOID *)&i); ++ } ++ SOCPERF_PRINT_DEBUG( ++ "%s: IOCTL_Init_PMU - finished initial Write\n", __func__); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Set_EM_Config_UNC(IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Set the number of em groups in the global state node. ++ * @brief Also, copy the EVENT_CONFIG struct that has been passed in, ++ * @brief into a global location for now. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Set_EM_Config_Uncore(IOCTL_ARGS arg) ++{ ++ EVENT_CONFIG ec; ++ SOCPERF_PRINT_DEBUG("enter %s\n", __func__); ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) != ++ DRV_STATE_IDLE) { ++ return OS_IN_PROGRESS; ++ } ++ ++ if (arg->buf_usr_to_drv == NULL || arg->len_usr_to_drv == 0) { ++ return OS_INVALID; ++ } ++ // allocate memory ++ LWPMU_DEVICE_ec(device_uncore) = ++ SOCPERF_Allocate_Memory(sizeof(EVENT_CONFIG_NODE)); ++ if (!LWPMU_DEVICE_ec(device_uncore)) { ++ SOCPERF_PRINT_ERROR( ++ "Memory allocation failure for LWPMU_DEVICE_ec(device_uncore)!\n"); ++ return OS_NO_MEM; ++ } ++ if (copy_from_user(LWPMU_DEVICE_ec(device_uncore), (void __user *)arg->buf_usr_to_drv, ++ arg->len_usr_to_drv)) { ++ return OS_FAULT; ++ } ++ // configure num_groups from ec of the specific device ++ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(device_uncore); ++ LWPMU_DEVICE_PMU_register_data(device_uncore) = SOCPERF_Allocate_Memory( ++ EVENT_CONFIG_num_groups_unc(ec) * sizeof(VOID *)); ++ if (!LWPMU_DEVICE_PMU_register_data(device_uncore)) { ++ SOCPERF_PRINT_ERROR( ++ "Memory allocation failure for LWPMU_DEVICE_PMU_register_data(device_uncore)!\n"); ++ return OS_NO_MEM; ++ } ++ LWPMU_DEVICE_em_groups_count(device_uncore) = 0; ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS socperf_Configure_Events_Uncore (IOCTL_ARGS arg) ++ * ++ * @param arg - pointer to the IOCTL_ARGS structure ++ * ++ * @return OS_STATUS ++ * ++ * @brief Make a copy of the uncore registers that need to be programmed ++ * @brief for the next event set used for event multiplexing ++ * ++ * Special Notes ++ */ ++static OS_STATUS socperf_Configure_Events_Uncore(IOCTL_ARGS arg) ++{ ++ VOID **PMU_register_data_unc; ++ S32 em_groups_count_unc; ++ ECB ecb; ++ EVENT_CONFIG ec_unc; ++ U32 group_id = 0; ++ ECB in_ecb = NULL; ++ ++ if (GLOBAL_STATE_current_phase(socperf_driver_state) != ++ DRV_STATE_IDLE) { ++ return OS_IN_PROGRESS; ++ } ++ ++ em_groups_count_unc = LWPMU_DEVICE_em_groups_count(device_uncore); ++ PMU_register_data_unc = LWPMU_DEVICE_PMU_register_data(device_uncore); ++ ec_unc = LWPMU_DEVICE_ec(device_uncore); ++ ++ if (ec_unc == NULL) { ++ SOCPERF_PRINT_ERROR( ++ "%s: ec_unc is NULL!\n", __func__); ++ return OS_INVALID; ++ } ++ ++ if (em_groups_count_unc >= (S32)EVENT_CONFIG_num_groups_unc(ec_unc)) { ++ SOCPERF_PRINT_ERROR( ++ "%s: Number of Uncore EM groups exceeded the initial configuration.", __func__); ++ return OS_INVALID; ++ } ++ if (arg->buf_usr_to_drv == NULL || ++ arg->len_usr_to_drv < sizeof(ECB_NODE)) { ++ SOCPERF_PRINT_ERROR( ++ "%s: args are invalid.", __func__); ++ return OS_INVALID; ++ } ++ // size is in len_usr_to_drv, data is pointed to by buf_usr_to_drv ++ // ++ in_ecb = SOCPERF_Allocate_Memory(arg->len_usr_to_drv); ++ if (!in_ecb) { ++ SOCPERF_PRINT_ERROR( ++ "%s: ECB memory allocation failed\n", __func__); ++ return OS_NO_MEM; ++ } ++ if (copy_from_user(in_ecb, (void __user *)arg->buf_usr_to_drv, arg->len_usr_to_drv)) { ++ SOCPERF_PRINT_ERROR( ++ "%s: ECB copy failed\n", __func__); ++ in_ecb = SOCPERF_Free_Memory(in_ecb); ++ return OS_NO_MEM; ++ } ++ ++ group_id = ECB_group_id(in_ecb); ++ if (group_id >= EVENT_CONFIG_num_groups_unc(ec_unc)) { ++ SOCPERF_PRINT_ERROR( ++ "%s: group_id is larger than total number of 
groups\n", __func__); ++ in_ecb = SOCPERF_Free_Memory(in_ecb); ++ return OS_INVALID; ++ } ++ ++ PMU_register_data_unc[group_id] = in_ecb; ++ if (!PMU_register_data_unc[group_id]) { ++ SOCPERF_PRINT_ERROR( ++ "%s: ECB memory allocation failed\n", __func__); ++ in_ecb = SOCPERF_Free_Memory(in_ecb); ++ return OS_NO_MEM; ++ } ++ ++ // ++ // Make a copy of the data for global use. ++ // ++ if (copy_from_user(PMU_register_data_unc[group_id], (void __user *)arg->buf_usr_to_drv, ++ arg->len_usr_to_drv)) { ++ SOCPERF_PRINT_ERROR( ++ "%s: ECB copy failed\n", __func__); ++ in_ecb = SOCPERF_Free_Memory(in_ecb); ++ return OS_NO_MEM; ++ } ++ ++ // at this point, we know the number of uncore events for this device, ++ // so allocate the results buffer per thread for uncore only for event based uncore counting ++ if (em_groups_count_unc == 0) { ++ ecb = PMU_register_data_unc[0]; ++ if (ecb == NULL) { ++ in_ecb = SOCPERF_Free_Memory(in_ecb); ++ return OS_INVALID; ++ } ++ LWPMU_DEVICE_num_events(device_uncore) = ECB_num_events(ecb); ++ } ++ LWPMU_DEVICE_em_groups_count(device_uncore) = group_id + 1; ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS socperf_Start(void) ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMU_IOCTL_START call. ++ * @brief Set up the OS hooks for process/thread/load notifications. ++ * @brief Write the initial set of MSRs. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS socperf_Start(VOID) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ U32 previous_state; ++ U32 i = 0; ++ ++ /* ++ * To Do: Check for state == STATE_IDLE and only then enable sampling ++ */ ++ previous_state = ++ cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state), ++ DRV_STATE_IDLE, DRV_STATE_RUNNING); ++ if (previous_state != DRV_STATE_IDLE) { ++ SOCPERF_PRINT_ERROR( ++ "%s: Unable to start sampling - State is %d\n", ++ __func__, ++ GLOBAL_STATE_current_phase(socperf_driver_state)); ++ return OS_IN_PROGRESS; ++ } ++ ++ if (dispatch_uncore && dispatch_uncore->restart) { ++ dispatch_uncore->restart((VOID *)&i); ++ } ++ ++ return status; ++} ++ ++/* ++ * @fn lwpmudrv_Prepare_Stop(); ++ * ++ * @param NONE ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMUDRV_IOCTL_STOP call. ++ * @brief Cleans up the interrupt handler. ++ */ ++static OS_STATUS socperf_Prepare_Stop(VOID) ++{ ++ U32 i = 0; ++ U32 current_state = GLOBAL_STATE_current_phase(socperf_driver_state); ++ ++ SOCPERF_PRINT_DEBUG("%s: About to stop sampling\n", __func__); ++ GLOBAL_STATE_current_phase(socperf_driver_state) = ++ DRV_STATE_PREPARE_STOP; ++ ++ if (current_state == DRV_STATE_UNINITIALIZED) { ++ return OS_SUCCESS; ++ } ++ ++ if (dispatch_uncore && dispatch_uncore->freeze) { ++ dispatch_uncore->freeze((VOID *)&i); ++ } ++ ++ return OS_SUCCESS; ++} ++ ++/* ++ * @fn socperf_Finish_Stop(); ++ * ++ * @param NONE ++ * @return OS_STATUS ++ * ++ * @brief Local function that handles the LWPMUDRV_IOCTL_STOP call. ++ * @brief Cleans up the interrupt handler. ++ */ ++static OS_STATUS socperf_Finish_Stop(VOID) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ ++ GLOBAL_STATE_current_phase(socperf_driver_state) = DRV_STATE_STOPPED; ++ ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Pause(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Pause the collection ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Pause(VOID) ++{ ++ U32 previous_state; ++ U32 i = 0; ++ ++ previous_state = ++ cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state), ++ DRV_STATE_RUNNING, DRV_STATE_PAUSED); ++ if (previous_state == DRV_STATE_RUNNING) { ++ dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); ++ if (dispatch_uncore && dispatch_uncore->freeze) { ++ dispatch_uncore->freeze((VOID *)&i); ++ } ++ } else { ++ if (previous_state == DRV_STATE_PAUSED) { ++ return VT_SAMP_IN_PAUSE_STATE; ++ } ++ SOCPERF_PRINT_ERROR( ++ "There is no sampling collection running at this time\n"); ++ return VT_SAMP_IN_STOP_STATE; ++ } ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static NTSTATUS lwpmudrv_Resume(void) ++ * ++ * @param - none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Resume the sampling after a pause. Assumption, the pause duration ++ * @brief will be long enough for all interrupts to be processed and no ++ * @brief active sampling to occur. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Resume(VOID) ++{ ++ U32 previous_state; ++ U32 i = 0; ++ ++ previous_state = ++ cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state), ++ DRV_STATE_PAUSED, DRV_STATE_RUNNING); ++ ++ if (previous_state == DRV_STATE_PAUSED) { ++ dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); ++ if (dispatch_uncore && dispatch_uncore->restart) { ++ dispatch_uncore->restart((VOID *)&i); ++ } ++ SOCPERF_PRINT_DEBUG("Resuming the sampling collection...\n"); ++ } else { ++ SOCPERF_PRINT_DEBUG( ++ "There is no paused sampling collection at this time.\n"); ++ } ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn static OS_STATUS lwpmudrv_Read_Uncore_Counts(void buf_usr_to_drv, U32 len_usr_to_drv) ++ * ++ * @param - buf_usr_to_drv - output buffer ++ * len_usr_to_drv - output buffer length ++ * ++ * @return - OS_STATUS ++ * ++ * @brief Read the Counter Data. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Read_Uncore_Counts(PVOID buf_usr_to_drv, ++ U32 len_usr_to_drv) ++{ ++ if (buf_usr_to_drv == NULL) { ++ SOCPERF_PRINT_ERROR( ++ "%s: counter buffer is NULL\n", __func__); ++ return OS_FAULT; ++ } ++ ++ if (dispatch_uncore && dispatch_uncore->read_current_data) { ++ dispatch_uncore->read_current_data(buf_usr_to_drv); ++ } ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS SOCPERF_Switch_Group(void) ++ * ++ * @param none ++ * ++ * @return OS_STATUS ++ * ++ * @brief Switch the current uncore group that is being collected. ++ * ++ * Special Notes ++ * This routine is called from the user mode code to handle the multiple uncore group ++ * situation. 
4 distinct steps are taken: ++ * Step 1: Pause the sampling ++ * Step 2: Increment the current uncore group count ++ * Step 3: Write the new group to the uncore PMU ++ * Step 4: Resume sampling ++ */ ++extern OS_STATUS ++SOCPERF_Switch_Group3(VOID) ++{ ++ OS_STATUS status = OS_SUCCESS; ++ U32 current_state = GLOBAL_STATE_current_phase(socperf_driver_state); ++ U32 i = 0; ++ DEV_UNC_CONFIG pcfg_unc; ++ ++ SOCPERF_PRINT_DEBUG("Switching Uncore Group...\n"); ++ if (current_state != DRV_STATE_RUNNING && ++ current_state != DRV_STATE_PAUSED) { ++ return status; ++ } ++ status = lwpmudrv_Pause(); ++ LWPMU_DEVICE_cur_group(device_uncore)++; ++ LWPMU_DEVICE_cur_group(device_uncore) %= ++ LWPMU_DEVICE_em_groups_count(device_uncore); ++ dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); ++ if (dispatch_uncore && dispatch_uncore->write) { ++ dispatch_uncore->write((VOID *)&i); ++ } ++ ++ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(device_uncore); ++ if (pcfg_unc && (DRV_CONFIG_start_paused(socperf_drv_cfg) == FALSE)) { ++ status = lwpmudrv_Resume(); ++ } ++ ++ return status; ++} ++EXPORT_SYMBOL(SOCPERF_Switch_Group3); ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Create_Mem(IOCTL_ARGS arg) ++ * ++ * @param - none ++ * ++ * @return - OS_STATUS ++ * ++ * @brief Read the Counter Data. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Create_Mem(IOCTL_ARGS arg) ++{ ++ U32 memory_size = 0; ++ U64 trace_phys_address = 0; ++ ++ if (arg->buf_usr_to_drv == NULL || arg->len_usr_to_drv == 0) { ++ SOCPERF_PRINT_ERROR( ++ "%s: Counter buffer is NULL\n", __func__); ++ return OS_FAULT; ++ } ++ ++ if (copy_from_user(&memory_size, (U32 __user *)arg->buf_usr_to_drv, ++ sizeof(U32))) { ++ return OS_FAULT; ++ } ++ ++ if (arg->buf_drv_to_usr == NULL || arg->len_drv_to_usr == 0) { ++ SOCPERF_PRINT_ERROR( ++ "%s: output buffer is NULL\n", __func__); ++ return OS_FAULT; ++ } ++ SOCPERF_PRINT_DEBUG("Read size=%llx\n", arg->len_drv_to_usr); ++ SOCPERF_PRINT_DEBUG("Write size=%llx\n", arg->len_usr_to_drv); ++ if (arg->len_drv_to_usr != sizeof(U64)) { ++ return OS_FAULT; ++ } ++ ++ dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); ++ if (dispatch_uncore && dispatch_uncore->create_mem) { ++ dispatch_uncore->create_mem(memory_size, &trace_phys_address); ++ } else { ++ SOCPERF_PRINT_ERROR("dispatch table could not be called\n"); ++ } ++ ++ if (copy_to_user((void __user *)arg->buf_drv_to_usr, &trace_phys_address, ++ sizeof(U64))) { ++ return OS_FAULT; ++ } ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Check_Status( IOCTL_ARGS arg) ++ * ++ * @param - none ++ * ++ * @return - OS_STATUS ++ * ++ * @brief Read the Counter Data. 
++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Check_Status(IOCTL_ARGS arg) ++{ ++ U32 num_entries = 0; ++ U64 *status_data = 0; ++ ++ if ((arg->len_drv_to_usr == 0) || (arg->buf_drv_to_usr == NULL)) { ++ return OS_FAULT; ++ } ++ ++ status_data = SOCPERF_Allocate_Memory(arg->len_drv_to_usr); ++ if (dispatch_uncore && dispatch_uncore->check_status) { ++ dispatch_uncore->check_status(status_data, &num_entries); ++ } ++ ++ if (copy_to_user((void __user *)arg->buf_drv_to_usr, status_data, ++ num_entries * sizeof(U64))) { ++ SOCPERF_Free_Memory(status_data); ++ return OS_FAULT; ++ } ++ SOCPERF_Free_Memory(status_data); ++ ++ return OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static OS_STATUS lwpmudrv_Read_Mem( IOCTL_ARGS arg) ++ * ++ * @param - none ++ * ++ * @return - OS_STATUS ++ * ++ * @brief Read the Counter Data. ++ * ++ * Special Notes ++ */ ++static OS_STATUS lwpmudrv_Read_Mem(IOCTL_ARGS arg) ++{ ++ U64 start_address = 0; ++ U64 *mem_address = NULL; ++ U32 mem_size = 0; ++ U32 num_entries = 0; ++ ++ if (arg->buf_usr_to_drv == NULL || arg->len_usr_to_drv == 0) { ++ SOCPERF_PRINT_ERROR( ++ "%s: Counter buffer is NULL\n", __func__); ++ return OS_FAULT; ++ } ++ ++ if (copy_from_user(&start_address, (U64 __user *)arg->buf_usr_to_drv, ++ sizeof(U64))) { ++ return OS_FAULT; ++ } ++ ++ if ((arg->len_drv_to_usr == 0) || (arg->buf_drv_to_usr == NULL)) { ++ return OS_FAULT; ++ } ++ mem_size = (U32)arg->len_drv_to_usr; ++ mem_address = SOCPERF_Allocate_Memory(mem_size); ++ if (!mem_address) { ++ return OS_NO_MEM; ++ } ++ ++ num_entries = (U32)(mem_size / sizeof(U64)); ++ if (dispatch_uncore && dispatch_uncore->read_mem) { ++ dispatch_uncore->read_mem(start_address, mem_address, ++ num_entries); ++ } ++ if (copy_to_user((void __user *)arg->buf_drv_to_usr, mem_address, mem_size)) { ++ SOCPERF_Free_Memory(mem_address); ++ return OS_FAULT; ++ } ++ SOCPERF_Free_Memory(mem_address); ++ ++ return 
OS_SUCCESS; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static VOID lwpmudrv_Stop_Mem(void) ++ * ++ * @param - none ++ * ++ * @return - none ++ * ++ * @brief Stop Mem ++ * ++ * Special Notes ++ */ ++VOID lwpmudrv_Stop_Mem(VOID) ++{ ++ SOCPERF_PRINT_DEBUG("Entered %s\n", __func__); ++ ++ if (dispatch_uncore && dispatch_uncore->stop_mem) { ++ dispatch_uncore->stop_mem(); ++ } ++ ++ SOCPERF_PRINT_DEBUG("Exited %s\n", __func__); ++ ++} ++ ++/******************************************************************************* ++ * External Driver functions - Open ++ * This function is common to all drivers ++ *******************************************************************************/ ++ ++static int socperf_Open(struct inode *inode, struct file *filp) ++{ ++ SOCPERF_PRINT_DEBUG("lwpmu_Open called on maj:%d, min:%d\n", ++ imajor(inode), iminor(inode)); ++ filp->private_data = container_of(inode->i_cdev, LWPMU_DEV_NODE, cdev); ++ ++ return 0; ++} ++ ++/******************************************************************************* ++ * External Driver functions ++ * These functions are registered into the file operations table that ++ * controls this device. 
++ * Open, Close, Read, Write, Release ++ *******************************************************************************/ ++ ++static ssize_t socperf_Read(struct file *filp, char __user *buf, size_t count, ++ loff_t *f_pos) ++{ ++ unsigned long retval; ++ ++ /* Transferring data to user space */ ++ SOCPERF_PRINT_DEBUG("lwpmu_Read dispatched with count=%d\n", ++ (S32)count); ++ if (copy_to_user((void __user *)buf, &LWPMU_DEV_buffer(socperf_control), 1)) { ++ retval = OS_FAULT; ++ return retval; ++ } ++ /* Changing reading position as best suits */ ++ if (*f_pos == 0) { ++ *f_pos += 1; ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static ssize_t socperf_Write(struct file *filp, const char __user *buf, size_t count, ++ loff_t *f_pos) ++{ ++ unsigned long retval; ++ ++ SOCPERF_PRINT_DEBUG("lwpmu_Write dispatched with count=%d\n", ++ (S32)count); ++ if (copy_from_user(&LWPMU_DEV_buffer(socperf_control), (void __user *)(buf + count - 1), ++ 1)) { ++ retval = OS_FAULT; ++ return retval; ++ } ++ ++ return 1; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn extern IOCTL_OP_TYPE socperf_Service_IOCTL(IOCTL_USE_NODE, filp, cmd, arg) ++ * ++ * @param IOCTL_USE_INODE - Used for pre 2.6.32 kernels ++ * @param struct file *filp - file pointer ++ * @param unsigned int cmd - IOCTL command ++ * @param unsigned long arg - args to the IOCTL command ++ * ++ * @return OS_STATUS ++ * ++ * @brief Worker function that handles IOCTL requests from the user mode. 
++ * ++ * Special Notes ++ */ ++IOCTL_OP_TYPE socperf_Service_IOCTL(IOCTL_USE_INODE struct file *filp, ++ unsigned int cmd, ++ IOCTL_ARGS_NODE local_args) ++{ ++ int status = OS_SUCCESS; ++ ++ switch (cmd) { ++ /* ++ * Common IOCTL commands ++ */ ++ case DRV_OPERATION_VERSION: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_VERSION\n"); ++ status = lwpmudrv_Version(&local_args); ++ break; ++ ++ case DRV_OPERATION_RESERVE: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_RESERVE\n"); ++ break; ++ ++ case DRV_OPERATION_INIT_PMU: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_INIT_PMU\n"); ++ status = lwpmudrv_Init_PMU(); ++ break; ++ ++ case DRV_OPERATION_START: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_START\n"); ++ status = socperf_Start(); ++ break; ++ ++ case DRV_OPERATION_STOP: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_STOP\n"); ++ status = socperf_Prepare_Stop(); ++ break; ++ ++ case DRV_OPERATION_PAUSE: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_PAUSE\n"); ++ status = lwpmudrv_Pause(); ++ break; ++ ++ case DRV_OPERATION_RESUME: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_RESUME\n"); ++ status = lwpmudrv_Resume(); ++ break; ++ ++ case DRV_OPERATION_TERMINATE: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_TERMINATE\n"); ++ status = socperf_Terminate(); ++ break; ++ ++ case DRV_OPERATION_INIT_DRIVER: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_INIT_DRIVER\n"); ++ status = lwpmudrv_Initialize_Driver(local_args.buf_usr_to_drv, ++ local_args.len_usr_to_drv); ++ break; ++ ++ case DRV_OPERATION_INIT_UNCORE: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_INIT_UNCORE\n"); ++ status = lwpmudrv_Initialize_Uncore(local_args.buf_usr_to_drv, ++ local_args.len_usr_to_drv); ++ break; ++ case DRV_OPERATION_EM_GROUPS_UNCORE: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_EM_GROUPS_UNC\n"); ++ status = lwpmudrv_Set_EM_Config_Uncore(&local_args); ++ break; ++ ++ case DRV_OPERATION_EM_CONFIG_NEXT_UNCORE: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_EM_CONFIG_NEXT_UNC\n"); ++ status = socperf_Configure_Events_Uncore(&local_args); ++ break; ++ ++ case 
DRV_OPERATION_TIMER_TRIGGER_READ: ++ lwpmudrv_Trigger_Read(); ++ break; ++ ++ case DRV_OPERATION_READ_UNCORE_DATA: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_READ_UNCORE_DATA\n"); ++ status = lwpmudrv_Read_Uncore_Counts(local_args.buf_drv_to_usr, ++ local_args.len_drv_to_usr); ++ break; ++ ++ case DRV_OPERATION_CREATE_MEM: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_CREATE_MEM\n"); ++ lwpmudrv_Create_Mem(&local_args); ++ break; ++ ++ case DRV_OPERATION_READ_MEM: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_READ_MEM\n"); ++ lwpmudrv_Read_Mem(&local_args); ++ break; ++ ++ case DRV_OPERATION_CHECK_STATUS: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_CHECK_STATUS\n"); ++ lwpmudrv_Check_Status(&local_args); ++ break; ++ ++ case DRV_OPERATION_STOP_MEM: ++ SOCPERF_PRINT_DEBUG(" DRV_OPERATION_STOP_MEM\n"); ++ lwpmudrv_Stop_Mem(); ++ break; ++ ++ /* ++ * if none of the above, treat as unknown/illegal IOCTL command ++ */ ++ default: ++ SOCPERF_PRINT_ERROR("Unknown IOCTL magic:%d number:%d\n", ++ _IOC_TYPE(cmd), _IOC_NR(cmd)); ++ status = OS_ILLEGAL_IOCTL; ++ break; ++ } ++ ++ if (cmd == DRV_OPERATION_STOP && ++ GLOBAL_STATE_current_phase(socperf_driver_state) == ++ DRV_STATE_PREPARE_STOP) { ++ status = socperf_Finish_Stop(); ++ } ++ ++ return status; ++} ++ ++long socperf_Device_Control(IOCTL_USE_INODE struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ int status = OS_SUCCESS; ++ IOCTL_ARGS_NODE local_args; ++ ++#if !defined(DRV_USE_UNLOCKED_IOCTL) ++ SOCPERF_PRINT_DEBUG( ++ "lwpmu_DeviceControl(0x%x) called on inode maj:%d, min:%d\n", ++ cmd, imajor(inode), iminor(inode)); ++#endif ++ SOCPERF_PRINT_DEBUG("type: %d, subcommand: %d\n", _IOC_TYPE(cmd), ++ _IOC_NR(cmd)); ++ ++ if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) { ++ SOCPERF_PRINT_ERROR("Unknown IOCTL magic:%d\n", _IOC_TYPE(cmd)); ++ return OS_ILLEGAL_IOCTL; ++ } ++ ++ MUTEX_LOCK(ioctl_lock); ++ if (arg) { ++ status = copy_from_user(&local_args, (void __user *)arg, ++ sizeof(IOCTL_ARGS_NODE)); ++ } ++ ++ status = 
socperf_Service_IOCTL(IOCTL_USE_INODE filp, _IOC_NR(cmd), ++ local_args); ++ MUTEX_UNLOCK(ioctl_lock); ++ ++ return status; ++} ++ ++#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) ++long socperf_Device_Control_Compat(struct file *filp, unsigned int cmd, ++ unsigned long arg) ++{ ++ int status = OS_SUCCESS; ++ IOCTL_COMPAT_ARGS_NODE local_args_compat; ++ IOCTL_ARGS_NODE local_args; ++ ++ memset(&local_args_compat, 0, sizeof(IOCTL_COMPAT_ARGS_NODE)); ++ SOCPERF_PRINT_DEBUG("Compat: type: %d, subcommand: %d\n", ++ _IOC_TYPE(cmd), _IOC_NR(cmd)); ++ ++ if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) { ++ SOCPERF_PRINT_ERROR("Unknown IOCTL magic:%d\n", _IOC_TYPE(cmd)); ++ return OS_ILLEGAL_IOCTL; ++ } ++ ++ MUTEX_LOCK(ioctl_lock); ++ if (arg) { ++ status = copy_from_user(&local_args_compat, ++ (void __user *)arg, ++ sizeof(IOCTL_COMPAT_ARGS_NODE)); ++ } ++ local_args.len_drv_to_usr = local_args_compat.len_drv_to_usr; ++ local_args.len_usr_to_drv = local_args_compat.len_usr_to_drv; ++ local_args.buf_drv_to_usr = ++ (char *)compat_ptr(local_args_compat.buf_drv_to_usr); ++ local_args.buf_usr_to_drv = ++ (char *)compat_ptr(local_args_compat.buf_usr_to_drv); ++ ++ status = socperf_Service_IOCTL(filp, _IOC_NR(cmd), local_args); ++ MUTEX_UNLOCK(ioctl_lock); ++ ++ return status; ++} ++#endif ++ ++/* ++ * @fn SOCPERF_Abnormal_Terminate(void) ++ * ++ * @brief This routine is called from linuxos_Exit_Task_Notify if the user process has ++ * been killed by an uncatchable signal (example kill -9). The state variable ++ * abormal_terminate is set to 1 and the clean up routines are called. In this ++ * code path the OS notifier hooks should not be unloaded. 
++ * ++ * @param None ++ * ++ * @return OS_STATUS ++ * ++ * Special Notes: ++ * ++ */ ++int SOCPERF_Abnormal_Terminate(void) ++{ ++ int status = OS_SUCCESS; ++ ++ socperf_abnormal_terminate = 1; ++ SOCPERF_PRINT_DEBUG( ++ "Abnormal-Termination: Calling socperf_Prepare_Stop\n"); ++ status = socperf_Prepare_Stop(); ++ SOCPERF_PRINT_DEBUG( ++ "Abnormal-Termination: Calling socperf_Finish_Stop\n"); ++ status = socperf_Finish_Stop(); ++ SOCPERF_PRINT_DEBUG( ++ "Abnormal-Termination: Calling lwpmudrv_Terminate\n"); ++ status = socperf_Terminate(); ++ ++ return status; ++} ++ ++/***************************************************************************************** ++ * ++ * Driver Entry / Exit functions that will be called on when the driver is loaded and ++ * unloaded ++ * ++ ****************************************************************************************/ ++ ++/* ++ * Structure that declares the usual file access functions ++ * First one is for lwpmu_c, the control functions ++ */ ++static struct file_operations socperf_Fops = { ++ .owner = THIS_MODULE, ++ IOCTL_OP = socperf_Device_Control, ++#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) ++ .compat_ioctl = socperf_Device_Control_Compat, ++#endif ++ .read = socperf_Read, ++ .write = socperf_Write, ++ .open = socperf_Open, ++ .release = NULL, ++ .llseek = NULL, ++}; ++ ++/*! ++ * @fn static int lwpmudrv_setup_cdev(dev, fops, dev_number) ++ * ++ * @param LWPMU_DEV dev - pointer to the device object ++ * @param struct file_operations *fops - pointer to the file operations struct ++ * @param dev_t dev_number - major/monor device number ++ * ++ * @return OS_STATUS ++ * ++ * @brief Set up the device object. 
++ * ++ * Special Notes ++ */ ++static int lwpmu_setup_cdev(LWPMU_DEV dev, struct file_operations *fops, ++ dev_t dev_number) ++{ ++ cdev_init(&LWPMU_DEV_cdev(dev), fops); ++ LWPMU_DEV_cdev(dev).owner = THIS_MODULE; ++ LWPMU_DEV_cdev(dev).ops = fops; ++ ++ return cdev_add(&LWPMU_DEV_cdev(dev), dev_number, 1); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static int socperf_Load(void) ++ * ++ * @param none ++ * ++ * @return STATUS ++ * ++ * @brief Load the driver module into the kernel. Set up the driver object. ++ * @brief Set up the initial state of the driver and allocate the memory ++ * @brief needed to keep basic state information. ++ */ ++static int socperf_Load(VOID) ++{ ++ int num_cpus; ++ OS_STATUS status = OS_SUCCESS; ++ ++ SOCPERF_Memory_Tracker_Init(); ++ ++ /* Get one major device number and one minor number. */ ++ /* The result is formatted as major+minor(0) */ ++ /* One minor number is for control (lwpmu_c), */ ++ SOCPERF_PRINT("SocPerf Driver loading...\n"); ++ SOCPERF_PRINT("SocPerf Driver about to register chrdev...\n"); ++ ++ lwpmu_DevNum = MKDEV(0, 0); ++ status = alloc_chrdev_region(&lwpmu_DevNum, 0, PMU_DEVICES, ++ SOCPERF_DRIVER_NAME); ++ SOCPERF_PRINT("SocPerf Driver: result of alloc_chrdev_region is %d\n", ++ status); ++ if (status < 0) { ++ SOCPERF_PRINT_ERROR( ++ "SocPerf driver failed to alloc chrdev_region!\n"); ++ return status; ++ } ++ SOCPERF_PRINT("SocPerf Driver: major number is %d\n", ++ MAJOR(lwpmu_DevNum)); ++ status = lwpmudrv_Initialize_State(); ++ if (status < 0) { ++ SOCPERF_PRINT_ERROR( ++ "SocPerf driver failed to initialize state!\n"); ++ return status; ++ } ++ num_cpus = GLOBAL_STATE_num_cpus(socperf_driver_state); ++ SOCPERF_PRINT("SocPerf Driver: detected %d CPUs in lwpmudrv_Load\n", ++ num_cpus); ++ ++ /* Allocate memory for the control structures */ ++ socperf_control = SOCPERF_Allocate_Memory(sizeof(LWPMU_DEV_NODE)); ++ ++ if (!socperf_control) { ++ 
SOCPERF_Free_Memory(socperf_control); ++ return OS_NO_MEM; ++ } ++ ++ /* Register the file operations with the OS */ ++ ++ SOCPERF_PRINT("SocPerf Driver: creating device %s...\n", ++ SOCPERF_DRIVER_NAME DRV_DEVICE_DELIMITER "c"); ++ pmu_class = class_create(THIS_MODULE, SOCPERF_DRIVER_NAME); ++ if (IS_ERR(pmu_class)) { ++ SOCPERF_PRINT_ERROR( ++ "Error registering SocPerf control class\n"); ++ } ++ device_create(pmu_class, NULL, lwpmu_DevNum, NULL, ++ SOCPERF_DRIVER_NAME DRV_DEVICE_DELIMITER "c"); ++ ++ status = lwpmu_setup_cdev(socperf_control, &socperf_Fops, lwpmu_DevNum); ++ if (status) { ++ SOCPERF_PRINT_ERROR("Error %d adding lwpmu as char device\n", ++ status); ++ return status; ++ } ++ ++ MUTEX_INIT(ioctl_lock); ++ ++ /* ++ * Initialize the SocPerf driver version (done once at driver load time) ++ */ ++ SOCPERF_VERSION_NODE_major(&socperf_drv_version) = ++ SOCPERF_MAJOR_VERSION; ++ SOCPERF_VERSION_NODE_minor(&socperf_drv_version) = ++ SOCPERF_MINOR_VERSION; ++ SOCPERF_VERSION_NODE_api(&socperf_drv_version) = SOCPERF_API_VERSION; ++ // ++ // Display driver version information ++ // ++ SOCPERF_PRINT("SocPerf Driver v%d.%d.%d has been loaded.\n", ++ SOCPERF_VERSION_NODE_major(&socperf_drv_version), ++ SOCPERF_VERSION_NODE_minor(&socperf_drv_version), ++ SOCPERF_VERSION_NODE_api(&socperf_drv_version)); ++ ++ return status; ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn static int lwpmu_Unload(void) ++ * ++ * @param none ++ * ++ * @return none ++ * ++ * @brief Remove the driver module from the kernel. 
++ */ ++static VOID socperf_Unload(VOID) ++{ ++ SOCPERF_PRINT("SocPerf Driver unloading...\n"); ++ ++ socperf_pcb = SOCPERF_Free_Memory(socperf_pcb); ++ socperf_pcb_size = 0; ++ ++ unregister_chrdev(MAJOR(lwpmu_DevNum), SOCPERF_DRIVER_NAME); ++ device_destroy(pmu_class, lwpmu_DevNum); ++ device_destroy(pmu_class, lwpmu_DevNum + 1); ++ ++ cdev_del(&LWPMU_DEV_cdev(socperf_control)); ++ unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); ++ ++ class_destroy(pmu_class); ++ ++ socperf_control = SOCPERF_Free_Memory(socperf_control); ++ ++ SOCPERF_Memory_Tracker_Free(); ++ ++ // ++ // Display driver version information ++ // ++ SOCPERF_PRINT("SocPerf Driver v%d.%d.%d has been unloaded.\n", ++ SOCPERF_VERSION_NODE_major(&socperf_drv_version), ++ SOCPERF_VERSION_NODE_minor(&socperf_drv_version), ++ SOCPERF_VERSION_NODE_api(&socperf_drv_version)); ++ ++} ++ ++/* Declaration of the init and exit functions */ ++module_init(socperf_Load); ++module_exit(socperf_Unload); +diff --git a/drivers/platform/x86/socperf/utility.c b/drivers/platform/x86/socperf/utility.c +new file mode 100644 +index 000000000000..4d5c783b5a7d +--- /dev/null ++++ b/drivers/platform/x86/socperf/utility.c +@@ -0,0 +1,170 @@ ++/************************************************************************ ++/* *********************************************************************************************** ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(C) 2005-2019 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * *********************************************************************************************** ++ */ ++ ++ ++#include "lwpmudrv_defines.h" ++#include ++#include ++#include ++#include ++ ++#include "lwpmudrv_types.h" ++#include "rise_errors.h" ++#include "lwpmudrv_ecb.h" ++#include "socperfdrv.h" ++#include "utility.h" ++#if defined(DRV_SOFIA) ++#include "noc_uncore.h" ++#elif defined(DRV_BUTTER) ++#include "axi_uncore.h" ++#else ++#include "soc_uncore.h" ++#include "haswellunc_sa.h" ++#include "npk_uncore.h" ++#endif ++ ++volatile int config_done; ++ ++VOID SOCPERF_UTILITY_Read_TSC(U64 *pTsc) ++{ ++ *pTsc = rdtsc_ordered(); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID SOCPERF_UTILITY_Read_Cpuid ++ * ++ * @brief executes the cpuid_function of cpuid and returns values ++ * ++ * @param IN cpuid_function ++ * OUT rax - results of the cpuid instruction in the ++ * OUT rbx - corresponding registers ++ * OUT rcx ++ * OUT rdx ++ * ++ * @return none ++ * ++ * Special Notes: ++ * ++ * ++ */ ++VOID SOCPERF_UTILITY_Read_Cpuid(U64 cpuid_function, U64 *rax_value, ++ U64 *rbx_value, U64 *rcx_value, ++ U64 *rdx_value) ++{ ++ U32 function = (U32)cpuid_function; ++ U32 *eax = (U32 *)rax_value; ++ U32 *ebx = (U32 *)rbx_value; ++ U32 *ecx = (U32 *)rcx_value; ++ U32 *edx = (U32 *)rdx_value; ++ ++ *eax = function; ++ ++ __asm__("cpuid" ++ : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) ++ : "a"(function), "b"(*ebx), "c"(*ecx), "d"(*edx)); ++} ++ ++/* ------------------------------------------------------------------------- */ ++/*! ++ * @fn VOID SOCPERF_UTILITY_Configure_CPU ++ * ++ * @brief Reads the CPU information from the hardware ++ * ++ * @param param dispatch_id - The id of the dispatch table. 
++ * ++ * @return Pointer to the correct dispatch table for the CPU architecture ++ * ++ * Special Notes: ++ * ++ */ ++DISPATCH SOCPERF_UTILITY_Configure_CPU(U32 dispatch_id) ++{ ++ DISPATCH dispatch = NULL; ++ ++ switch (dispatch_id) { ++#if defined(DRV_SOFIA) ++ case 1000: ++ SOCPERF_PRINT_DEBUG( ++ "Set up the SoC Uncore NOC dispatch table\n"); ++ dispatch = &noc_dispatch; ++ break; ++#elif defined(DRV_BUTTER) ++ case 1100: ++ SOCPERF_PRINT_DEBUG( ++ "Set up the SoC Uncore AXI dispatch table\n"); ++ dispatch = &axi_dispatch; ++ break; ++#else ++ case 230: ++ SOCPERF_PRINT_DEBUG("Set up the Haswell SA dispatch table\n"); ++ dispatch = &socperf_hswunc_sa_dispatch; ++ break; ++ case 700: ++ SOCPERF_PRINT_DEBUG("Set up the SOC Uncore dispatch table\n"); ++ dispatch = &soc_uncore_dispatch; ++ break; ++ case 701: ++ SOCPERF_PRINT_DEBUG( ++ "Set up the SoC Uncore NPK dispatch table\n"); ++ dispatch = &npk_dispatch; ++ break; ++#endif ++ default: ++ dispatch = NULL; ++ SOCPERF_PRINT_ERROR( ++ "Architecture not supported (dispatch_id=%d)\n", ++ dispatch_id); ++ break; ++ } ++ ++ return dispatch; ++} +-- +2.17.1 + diff --git a/patches/0017-tools-rpmb-add-support-for-nvme-device.security b/patches/0017-tools-rpmb-add-support-for-nvme-device.security new file mode 100644 index 0000000000..2868b2c84a --- /dev/null +++ b/patches/0017-tools-rpmb-add-support-for-nvme-device.security @@ -0,0 +1,1156 @@ +From 7b29dc93aa671a20faa77004ace9b208ce4c3d37 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Mon, 30 Apr 2018 12:57:51 +0300 +Subject: [PATCH 17/65] tools/rpmb: add support for nvme device + +V9: 1. New in the series. 
+ +Change-Id: Iab3171a22d3f502b11beed1496959bfd8f47d568 +Signed-off-by: Tomas Winkler +--- + tools/rpmb/rpmb.c | 825 +++++++++++++++++++++++++++++++++++++--------- + 1 file changed, 664 insertions(+), 161 deletions(-) + +diff --git a/tools/rpmb/rpmb.c b/tools/rpmb/rpmb.c +index 6b0c2b74e8ce..43315fb8b11a 100644 +--- a/tools/rpmb/rpmb.c ++++ b/tools/rpmb/rpmb.c +@@ -25,13 +25,22 @@ + #include + #include + +-#include "linux/rpmb.h" ++#include ++ ++#ifndef __unused ++#define __unused __attribute__((__unused__)) ++#endif + + #define RPMB_KEY_SIZE 32 + #define RPMB_MAC_SIZE 32 + #define RPMB_NONCE_SIZE 16 + + static bool verbose; ++#define RPMB_FRAME_TYPE_JDEC 0 ++#define RPMB_FRAME_TYPE_NVME 1 ++#define RPMB_BLOCK_SIZE 256 ++#define RPMB_SECTOR_SIZE 512 ++ + #define rpmb_dbg(fmt, ARGS...) do { \ + if (verbose) \ + fprintf(stderr, "rpmb: " fmt, ##ARGS); \ +@@ -239,9 +248,11 @@ static ssize_t write_file(int fd, unsigned char *data, size_t size) + return ret; + } + +-static void dbg_dump_frame(const char *title, const struct rpmb_frame_jdec *f) ++static void dbg_dump_frame_jdec(const char *title, const void *f, ++ uint32_t __unused cnt) + { + uint16_t result, req_resp; ++ const struct rpmb_frame_jdec *frame = f; + + if (!verbose) + return; +@@ -249,37 +260,407 @@ static void dbg_dump_frame(const char *title, const struct rpmb_frame_jdec *f) + if (!f) + return; + +- result = be16toh(f->result); +- req_resp = be16toh(f->req_resp); ++ result = be16toh(frame->result); ++ req_resp = be16toh(frame->req_resp); + if (req_resp & 0xf00) + req_resp = RPMB_RESP2REQ(req_resp); + + fprintf(stderr, "--------------- %s ---------------\n", + title ? 
title : "start"); + fprintf(stderr, "ptr: %p\n", f); +- dump_hex_buffer("key_mac: ", f->key_mac, 32); +- dump_hex_buffer("data: ", f->data, 256); +- dump_hex_buffer("nonce: ", f->nonce, 16); +- fprintf(stderr, "write_counter: %u\n", be32toh(f->write_counter)); +- fprintf(stderr, "address: %0X\n", be16toh(f->addr)); +- fprintf(stderr, "block_count: %u\n", be16toh(f->block_count)); ++ dump_hex_buffer("key_mac: ", frame->key_mac, 32); ++ dump_hex_buffer("data: ", frame->data, 256); ++ dump_hex_buffer("nonce: ", frame->nonce, 16); ++ fprintf(stderr, "write_counter: %u\n", be32toh(frame->write_counter)); ++ fprintf(stderr, "address: %0X\n", be16toh(frame->addr)); ++ fprintf(stderr, "block_count: %u\n", be16toh(frame->block_count)); + fprintf(stderr, "result %s:%d\n", rpmb_result_str(result), result); + fprintf(stderr, "req_resp %s\n", rpmb_op_str(req_resp)); + fprintf(stderr, "--------------- End ---------------\n"); + } + +-static struct rpmb_frame_jdec *rpmb_alloc_frames(unsigned int cnt) ++static void dbg_dump_frame_nvme(const char *title, const void *f, uint32_t cnt) ++{ ++ uint16_t result, req_resp; ++ uint32_t keysize = 4; ++ uint32_t sector_count; ++ const struct rpmb_frame_nvme *frame = f; ++ ++ if (!verbose) ++ return; ++ ++ if (!f) ++ return; ++ ++ result = le16toh(frame->result); ++ req_resp = le16toh(frame->req_resp); ++ if (req_resp & 0xf00) ++ req_resp = RPMB_RESP2REQ(req_resp); ++ ++ sector_count = le32toh(frame->block_count); ++ ++ fprintf(stderr, "--------------- %s ---------------\n", ++ title ? 
title : "start"); ++ fprintf(stderr, "ptr: %p\n", f); ++ dump_hex_buffer("key_mac: ", &frame->key_mac[223 - keysize], keysize); ++ dump_hex_buffer("nonce: ", frame->nonce, 16); ++ fprintf(stderr, "rpmb_target: %u\n", frame->rpmb_target); ++ fprintf(stderr, "write_counter: %u\n", le32toh(frame->write_counter)); ++ fprintf(stderr, "address: %0X\n", le32toh(frame->addr)); ++ fprintf(stderr, "block_count: %u\n", sector_count); ++ fprintf(stderr, "result %s:%d\n", rpmb_result_str(result), result); ++ fprintf(stderr, "req_resp %s\n", rpmb_op_str(req_resp)); ++ dump_hex_buffer("data: ", frame->data, RPMB_SECTOR_SIZE * cnt); ++ fprintf(stderr, "--------------- End --------------\n"); ++} ++ ++static void dbg_dump_frame(uint8_t frame_type, const char *title, ++ const void *f, uint32_t cnt) ++{ ++ if (frame_type == RPMB_FRAME_TYPE_NVME) ++ dbg_dump_frame_nvme(title, f, cnt); ++ else ++ dbg_dump_frame_jdec(title, f, cnt); ++} ++ ++static int rpmb_frame_set_key_mac_jdec(void *f, uint32_t block_count, ++ uint8_t *key_mac, size_t key_mac_size) ++{ ++ struct rpmb_frame_jdec *frames = f; ++ ++ if (block_count == 0) ++ block_count = 1; ++ ++ memcpy(&frames[block_count - 1].key_mac, key_mac, key_mac_size); ++ ++ return 0; ++} ++ ++static int rpmb_frame_set_key_mac_nvme(void *f, ++ uint32_t __unused block_count, ++ uint8_t *key_mac, size_t key_mac_size) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ ++ memcpy(&frame->key_mac[223 - key_mac_size], key_mac, key_mac_size); ++ ++ return 0; ++} ++ ++static int rpmb_frame_set_key_mac(uint8_t frame_type, void *f, ++ uint32_t block_count, ++ uint8_t *key_mac, size_t key_mac_size) ++{ ++ if (frame_type == RPMB_FRAME_TYPE_NVME) ++ return rpmb_frame_set_key_mac_nvme(f, block_count, ++ key_mac, key_mac_size); ++ else ++ return rpmb_frame_set_key_mac_jdec(f, block_count, ++ key_mac, key_mac_size); ++} ++ ++static uint8_t *rpmb_frame_get_key_mac_ptr_jdec(void *f, uint32_t block_count, ++ size_t __unused key_size) ++{ ++ struct rpmb_frame_jdec *frame 
= f; ++ ++ if (block_count == 0) ++ block_count = 1; ++ ++ return frame[block_count - 1].key_mac; ++} ++ ++static uint8_t *rpmb_frame_get_key_mac_ptr_nvme(void *f, ++ uint32_t __unused block_count, ++ size_t key_size) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ ++ return &frame->key_mac[223 - key_size]; ++} ++ ++static uint8_t *rpmb_frame_get_key_mac_ptr(uint8_t frame_type, void *f, ++ uint32_t block_count, ++ size_t key_size) ++{ ++ if (frame_type == RPMB_FRAME_TYPE_NVME) ++ return rpmb_frame_get_key_mac_ptr_nvme(f, block_count, ++ key_size); ++ else ++ return rpmb_frame_get_key_mac_ptr_jdec(f, block_count, ++ key_size); ++} ++ ++static uint8_t *rpmb_frame_get_nonce_ptr_jdec(void *f) ++{ ++ struct rpmb_frame_jdec *frame = f; ++ ++ return frame->nonce; ++} ++ ++static uint8_t *rpmb_frame_get_nonce_ptr_nvme(void *f) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ ++ return frame->nonce; ++} ++ ++static uint8_t *rpmb_frame_get_nonce_ptr(uint8_t frame_type, void *f) ++{ ++ return frame_type == RPMB_FRAME_TYPE_NVME ? ++ rpmb_frame_get_nonce_ptr_nvme(f) : ++ rpmb_frame_get_nonce_ptr_jdec(f); ++} ++ ++static uint32_t rpmb_frame_get_write_counter_jdec(void *f) ++{ ++ struct rpmb_frame_jdec *frame = f; ++ ++ return be32toh(frame->write_counter); ++} ++ ++static uint32_t rpmb_frame_get_write_counter_nvme(void *f) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ ++ return le32toh(frame->write_counter); ++} ++ ++static uint32_t rpmb_frame_get_write_counter(uint8_t frame_type, void *f) ++{ ++ return (frame_type == RPMB_FRAME_TYPE_NVME) ? 
++ rpmb_frame_get_write_counter_nvme(f) : ++ rpmb_frame_get_write_counter_jdec(f); ++} ++ ++static uint32_t rpmb_frame_get_addr_jdec(void *f) ++{ ++ struct rpmb_frame_jdec *frame = f; ++ ++ return be16toh(frame->addr); ++} ++ ++static uint32_t rpmb_frame_get_addr_nvme(void *f) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ ++ return le32toh(frame->addr); ++} ++ ++static uint32_t rpmb_frame_get_addr(uint8_t frame_type, void *f) ++{ ++ return (frame_type == RPMB_FRAME_TYPE_NVME) ? ++ rpmb_frame_get_addr_nvme(f) : ++ rpmb_frame_get_addr_jdec(f); ++} ++ ++static uint16_t rpmb_frame_get_result_jdec(void *f) ++{ ++ struct rpmb_frame_jdec *frames = f; ++ uint16_t block_count = be16toh(frames[0].block_count); ++ ++ if (block_count == 0) ++ block_count = 1; ++ ++ return be16toh(frames[block_count - 1].result); ++} ++ ++static uint16_t rpmb_frame_get_result_nvme(void *f) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ ++ return le16toh(frame->result); ++} ++ ++static uint16_t rpmb_frame_get_result(uint8_t frame_type, void *f) ++{ ++ return (frame_type == RPMB_FRAME_TYPE_NVME) ? ++ rpmb_frame_get_result_nvme(f) : ++ rpmb_frame_get_result_jdec(f); ++} ++ ++static uint16_t rpmb_frame_get_req_resp_jdec(void *f) ++{ ++ struct rpmb_frame_jdec *frame = f; ++ ++ return be16toh(frame->req_resp); ++} ++ ++static uint16_t rpmb_frame_get_req_resp_nvme(void *f) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ ++ return le16toh(frame->req_resp); ++} ++ ++static uint16_t rpmb_frame_get_req_resp(uint8_t frame_type, void *f) ++{ ++ return frame_type == RPMB_FRAME_TYPE_NVME ? 
++ rpmb_frame_get_req_resp_nvme(f) : ++ rpmb_frame_get_req_resp_jdec(f); ++} ++ ++static int rpmb_frame_set_jdec(void *f, ++ uint16_t req_resp, uint32_t block_count, ++ uint32_t addr, uint32_t write_counter) ++{ ++ struct rpmb_frame_jdec *frames = f; ++ uint32_t i; ++ /* FIMXE: validate overflow */ ++ uint16_t __block_count = (uint16_t)block_count; ++ uint16_t __addr = (uint16_t)addr; ++ ++ for (i = 0; i < (block_count ?: 1); i++) { ++ frames[i].req_resp = htobe16(req_resp); ++ frames[i].block_count = htobe16(__block_count); ++ frames[i].addr = htobe16(__addr); ++ frames[i].write_counter = htobe32(write_counter); ++ } ++ ++ return 0; ++} ++ ++static int rpmb_frame_set_nvme(void *f, ++ uint16_t req_resp, uint32_t block_count, ++ uint32_t addr, uint32_t write_counter) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ ++ frame->req_resp = htole16(req_resp); ++ frame->block_count = htole32(block_count); ++ frame->addr = htole32(addr); ++ frame->write_counter = htole32(write_counter); ++ ++ return 0; ++} ++ ++static int rpmb_frame_set(uint8_t frame_type, void *f, ++ uint16_t req_resp, uint32_t block_count, ++ uint32_t addr, uint32_t write_counter) ++{ ++ if (frame_type == RPMB_FRAME_TYPE_NVME) { ++ return rpmb_frame_set_nvme(f, req_resp, block_count, ++ addr, write_counter); ++ } else { ++ return rpmb_frame_set_jdec(f, req_resp, block_count, ++ addr, write_counter); ++ } ++} ++ ++static int rpmb_frame_write_data_jdec(int fd, void *f) ++{ ++ struct rpmb_frame_jdec *frames = f; ++ uint16_t i, block_count = be16toh(frames[0].block_count); ++ ++ for (i = 0; i < block_count; i++) { ++ int ret; ++ ++ ret = write_file(fd, frames[i].data, sizeof(frames[i].data)); ++ if (ret < 0) ++ return ret; ++ } ++ return 0; ++} ++ ++static int rpmb_frame_write_data_nvme(int fd, void *f) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ uint32_t i, block_count = le32toh(frame->block_count); ++ ++ for (i = 0; i < block_count; i++) { ++ int ret; ++ ++ ret = write_file(fd, &frame->data[i], 
RPMB_SECTOR_SIZE); ++ if (ret < 0) ++ return ret; ++ } ++ return 0; ++} ++ ++static int rpmb_frame_write_data(uint8_t frame_type, int fd, void *f) ++{ ++ return frame_type == RPMB_FRAME_TYPE_NVME ? ++ rpmb_frame_write_data_nvme(fd, f) : ++ rpmb_frame_write_data_jdec(fd, f); ++} ++ ++static int rpmb_frame_read_data_jdec(int fd, void *f) + { +- return calloc(1, rpmb_ioc_frames_len_jdec(cnt)); ++ struct rpmb_frame_jdec *frames = f; ++ uint16_t i, block_count = be16toh(frames[0].block_count); ++ ++ for (i = 0; i < block_count; i++) { ++ int ret = read_file(fd, frames[i].data, ++ sizeof(frames[0].data)); ++ if (ret < 0) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int rpmb_frame_read_data_nvme(int fd, void *f) ++{ ++ struct rpmb_frame_nvme *frame = f; ++ uint32_t i, block_count = le32toh(frame->block_count); ++ ++ for (i = 0; i < block_count; i++) { ++ int ret; ++ ++ ret = read_file(fd, &frame->data[i], RPMB_SECTOR_SIZE); ++ if (ret < 0) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int rpmb_frame_read_data(uint8_t frame_type, int fd, void *f) ++{ ++ return frame_type == RPMB_FRAME_TYPE_NVME ? 
++ rpmb_frame_read_data_nvme(fd, f) : ++ rpmb_frame_read_data_jdec(fd, f); + } + + #if OPENSSL_VERSION_NUMBER < 0x10100000L +-static int rpmb_calc_hmac_sha256(struct rpmb_frame_jdec *frames, +- size_t blocks_cnt, +- const unsigned char key[], +- unsigned int key_size, +- unsigned char mac[], +- unsigned int mac_size) ++static int rpmb_calc_hmac_sha256_jdec(struct rpmb_frame_jdec *frames, ++ size_t blocks_cnt, ++ const unsigned char key[], ++ unsigned int key_size, ++ unsigned char mac[], ++ unsigned int mac_size) ++{ ++ HMAC_CTX ctx; ++ int ret; ++ unsigned int i; ++ ++ /* SSL returns 1 on success 0 on failure */ ++ ++ HMAC_CTX_init(&ctx); ++ ret = HMAC_Init_ex(&ctx, key, key_size, EVP_sha256(), NULL); ++ if (ret == 0) ++ goto out; ++ for (i = 0; i < block_count; i++) ++ HMAC_Update(&ctx, frames[i].data, rpmb_jdec_hmac_data_len); ++ ++ ret = HMAC_Final(&ctx, mac, &mac_size); ++ if (ret == 0) ++ goto out; ++ if (mac_size != RPMB_MAC_SIZE) ++ ret = 0; ++ ++ ret = 1; ++out: ++ HMAC_CTX_cleanup(&ctx); ++ return ret == 1 ? 0 : -1; ++} ++ ++static int rpmb_calc_hmac_sha256_nvme(struct rpmb_frame_nvme *frame, ++ size_t block_count, ++ const unsigned char key[], ++ unsigned int key_size, ++ unsigned char mac[], ++ unsigned int mac_size) + { + HMAC_CTX ctx; + int ret; +@@ -291,8 +672,10 @@ static int rpmb_calc_hmac_sha256(struct rpmb_frame_jdec *frames, + ret = HMAC_Init_ex(&ctx, key, key_size, EVP_sha256(), NULL); + if (ret == 0) + goto out; +- for (i = 0; i < blocks_cnt; i++) +- HMAC_Update(&ctx, frames[i].data, hmac_data_len); ++ ++ HMAC_Update(&ctx, &frame->rpmb_target, hmac_nvme_data_len); ++ for (i = 0; i < block_count; i++) ++ HMAC_Update(&ctx, frames->data[i], RPMB_SECTOR_SIZE); + + ret = HMAC_Final(&ctx, mac, &mac_size); + if (ret == 0) +@@ -306,12 +689,12 @@ static int rpmb_calc_hmac_sha256(struct rpmb_frame_jdec *frames, + return ret == 1 ? 
0 : -1; + } + #else +-static int rpmb_calc_hmac_sha256(struct rpmb_frame_jdec *frames, +- size_t blocks_cnt, +- const unsigned char key[], +- unsigned int key_size, +- unsigned char mac[], +- unsigned int mac_size) ++static int rpmb_calc_hmac_sha256_jdec(struct rpmb_frame_jdec *frames, ++ size_t blocks_cnt, ++ const unsigned char key[], ++ unsigned int key_size, ++ unsigned char mac[], ++ unsigned int mac_size) + { + HMAC_CTX *ctx; + int ret; +@@ -338,37 +721,83 @@ static int rpmb_calc_hmac_sha256(struct rpmb_frame_jdec *frames, + HMAC_CTX_free(ctx); + return ret == 1 ? 0 : -1; + } ++ ++static int rpmb_calc_hmac_sha256_nvme(struct rpmb_frame_nvme *frame, ++ size_t block_count, ++ const unsigned char key[], ++ unsigned int key_size, ++ unsigned char mac[], ++ unsigned int mac_size) ++{ ++ HMAC_CTX *ctx; ++ int ret; ++ unsigned int i; ++ ++ /* SSL returns 1 on success 0 on failure */ ++ ++ ctx = HMAC_CTX_new(); ++ ++ ret = HMAC_Init_ex(ctx, key, key_size, EVP_sha256(), NULL); ++ if (ret == 0) ++ goto out; ++ ++ HMAC_Update(ctx, &frame->rpmb_target, rpmb_nvme_hmac_data_len); ++ for (i = 0; i < block_count; i++) ++ HMAC_Update(ctx, &frame->data[i], RPMB_SECTOR_SIZE); ++ ++ ret = HMAC_Final(ctx, mac, &mac_size); ++ if (ret == 0) ++ goto out; ++ if (mac_size != RPMB_MAC_SIZE) ++ ret = 0; ++ ++ ret = 1; ++out: ++ HMAC_CTX_free(ctx); ++ return ret == 1 ? 
0 : -1; ++} + #endif + +-static int rpmb_check_req_resp(uint16_t req, struct rpmb_frame_jdec *frame_out) ++static int rpmb_calc_hmac_sha256(uint8_t frame_type, void *f, ++ size_t block_count, ++ const unsigned char key[], ++ unsigned int key_size, ++ unsigned char mac[], ++ unsigned int mac_size) + { +- if (RPMB_REQ2RESP(req) != be16toh(frame_out->req_resp)) { +- rpmb_err("RPMB response mismatch %04X != %04X\n.", +- RPMB_REQ2RESP(req), be16toh(frame_out->req_resp)); +- return -1; +- } +- return 0; ++ if (frame_type == RPMB_FRAME_TYPE_NVME) ++ return rpmb_calc_hmac_sha256_nvme(f, block_count, ++ key, key_size, ++ mac, mac_size); ++ else ++ return rpmb_calc_hmac_sha256_jdec(f, block_count, ++ key, key_size, ++ mac, mac_size); + } + +-static int rpmb_check_mac(const unsigned char *key, +- struct rpmb_frame_jdec *frames_out, +- unsigned int cnt_out) ++static int rpmb_check_mac(uint8_t frame_type, ++ const unsigned char *key, size_t key_size, ++ void *frames_out, unsigned int block_count) + { + unsigned char mac[RPMB_MAC_SIZE]; ++ unsigned char *mac_out; ++ int ret; + +- if (cnt_out == 0) { ++ if (block_count == 0) { + rpmb_err("RPMB 0 output frames.\n"); + return -1; + } + +- rpmb_calc_hmac_sha256(frames_out, cnt_out, +- key, RPMB_KEY_SIZE, +- mac, RPMB_MAC_SIZE); ++ ret = rpmb_calc_hmac_sha256(frame_type, frames_out, block_count, ++ key, key_size, mac, RPMB_MAC_SIZE); ++ if (ret) ++ return ret; + +- if (memcmp(mac, frames_out[cnt_out - 1].key_mac, RPMB_MAC_SIZE)) { ++ mac_out = rpmb_frame_get_key_mac_ptr(frame_type, frames_out, ++ block_count, RPMB_MAC_SIZE); ++ if (memcmp(mac, mac_out, RPMB_MAC_SIZE)) { + rpmb_err("RPMB hmac mismatch:\n"); +- dump_hex_buffer("Result MAC: ", +- frames_out[cnt_out - 1].key_mac, RPMB_MAC_SIZE); ++ dump_hex_buffer("Result MAC: ", mac_out, RPMB_MAC_SIZE); + dump_hex_buffer("Expected MAC: ", mac, RPMB_MAC_SIZE); + return -1; + } +@@ -376,23 +805,54 @@ static int rpmb_check_mac(const unsigned char *key, + return 0; + } + +-static int 
rpmb_ioctl(int fd, uint16_t req, +- const struct rpmb_frame_jdec *frames_in, +- unsigned int cnt_in, +- struct rpmb_frame_jdec *frames_out, +- unsigned int cnt_out) ++static int rpmb_check_req_resp(uint8_t frame_type, ++ uint16_t req, void *frame_out) ++{ ++ uint16_t req_resp = rpmb_frame_get_req_resp(frame_type, frame_out); ++ ++ if (RPMB_REQ2RESP(req) != req_resp) { ++ rpmb_err("RPMB response mismatch %04X != %04X\n.", ++ RPMB_REQ2RESP(req), req_resp); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static struct rpmb_frame_jdec *rpmb_frame_alloc_jdec(size_t block_count) ++{ ++ return calloc(1, rpmb_ioc_frames_len_jdec(block_count)); ++} ++ ++static struct rpmb_frame_nvme *rpmb_frame_alloc_nvme(size_t sector_count) ++{ ++ return calloc(1, rpmb_ioc_frames_len_nvme(sector_count)); ++} ++ ++static void *rpmb_frame_alloc(uint8_t type, size_t count) ++{ ++ if (type == RPMB_FRAME_TYPE_NVME) ++ return rpmb_frame_alloc_nvme(count); ++ else ++ return rpmb_frame_alloc_jdec(count); ++} ++ ++static int rpmb_ioctl(uint8_t frame_type, int fd, uint16_t req, ++ const void *frames_in, unsigned int cnt_in, ++ void *frames_out, unsigned int cnt_out) + { + int ret; + struct __packed { + struct rpmb_ioc_seq_cmd h; + struct rpmb_ioc_cmd cmd[3]; + } iseq = {}; +- struct rpmb_frame_jdec *frame_res = NULL; ++ ++ void *frame_res = NULL; + int i; + uint32_t flags; + + rpmb_dbg("RPMB OP: %s\n", rpmb_op_str(req)); +- dbg_dump_frame("In Frame: ", frames_in); ++ dbg_dump_frame(frame_type, "In Frame: ", frames_in, cnt_in); + + i = 0; + flags = RPMB_F_WRITE; +@@ -402,10 +862,11 @@ static int rpmb_ioctl(int fd, uint16_t req, + i++; + + if (req == RPMB_WRITE_DATA || req == RPMB_PROGRAM_KEY) { +- frame_res = rpmb_alloc_frames(0); ++ frame_res = rpmb_frame_alloc(frame_type, 0); + if (!frame_res) + return -ENOMEM; +- frame_res->req_resp = htobe16(RPMB_RESULT_READ); ++ rpmb_frame_set(frame_type, frame_res, ++ RPMB_RESULT_READ, 0, 0, 0); + rpmb_ioc_cmd_set(iseq.cmd[i], RPMB_F_WRITE, frame_res, 0); + i++; + 
} +@@ -418,10 +879,10 @@ static int rpmb_ioctl(int fd, uint16_t req, + if (ret < 0) + rpmb_err("ioctl failure %d: %s.\n", ret, strerror(errno)); + +- ret = rpmb_check_req_resp(req, frames_out); ++ ret = rpmb_check_req_resp(frame_type, req, frames_out); + +- dbg_dump_frame("Res Frame: ", frame_res); +- dbg_dump_frame("Out Frame: ", frames_out); ++ dbg_dump_frame(frame_type, "Res Frame: ", frame_res, 1); ++ dbg_dump_frame(frame_type, "Out Frame: ", frames_out, cnt_out); + free(frame_res); + return ret; + } +@@ -453,13 +914,61 @@ static int op_get_info(int nargs, char *argv[]) + return 0; + } + ++static int __rpmb_program_key(uint8_t frame_type, int dev_fd, ++ uint8_t *key, size_t key_size) ++{ ++ void *frame_in, *frame_out; ++ uint16_t req = RPMB_PROGRAM_KEY; ++ int ret; ++ ++ frame_in = rpmb_frame_alloc(frame_type, 0); ++ frame_out = rpmb_frame_alloc(frame_type, 0); ++ if (!frame_in || !frame_out) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ rpmb_frame_set(frame_type, frame_in, req, 0, 0, 0); ++ ++ ret = rpmb_frame_set_key_mac(frame_type, frame_in, 0, key, key_size); ++ if (ret) ++ goto out; ++ ++ ret = rpmb_ioctl(frame_type, dev_fd, req, frame_in, 1, frame_out, 1); ++ if (ret) ++ goto out; ++ ++ ret = rpmb_check_req_resp(frame_type, req, frame_out); ++ if (ret) ++ goto out; ++ ++ ret = rpmb_frame_get_result(frame_type, frame_out); ++ if (ret) ++ rpmb_err("RPMB operation %s failed, %s[0x%04x].\n", ++ rpmb_op_str(req), rpmb_result_str(ret), ret); ++ ++out: ++ free(frame_in); ++ free(frame_out); ++ ++ return 0; ++} ++ ++static uint8_t rpmb_cap_get_frame_type(struct rpmb_ioc_cap_cmd *cap) ++{ ++ if (cap->device_type == RPMB_TYPE_NVME) ++ return RPMB_FRAME_TYPE_NVME; ++ else ++ return RPMB_FRAME_TYPE_JDEC; ++} ++ + static int op_rpmb_program_key(int nargs, char *argv[]) + { + int ret; + int dev_fd = -1, key_fd = -1; +- uint16_t req = RPMB_PROGRAM_KEY; ++ uint8_t key[RPMB_KEY_SIZE]; ++ uint8_t frame_type; + struct rpmb_ioc_cap_cmd cap; +- struct rpmb_frame_jdec *frame_in 
= NULL, *frame_out = NULL; + + ret = -EINVAL; + if (nargs != 2) +@@ -475,87 +984,73 @@ static int op_rpmb_program_key(int nargs, char *argv[]) + goto out; + argv++; + +- frame_in = rpmb_alloc_frames(0); +- frame_out = rpmb_alloc_frames(0); +- if (!frame_in || !frame_out) { +- ret = -ENOMEM; +- goto out; +- } ++ read_file(key_fd, key, RPMB_KEY_SIZE); + +- frame_in->req_resp = htobe16(req); ++ frame_type = rpmb_cap_get_frame_type(&cap); + +- read_file(key_fd, frame_in->key_mac, RPMB_KEY_SIZE); +- +- ret = rpmb_ioctl(dev_fd, req, frame_in, 0, frame_out, 0); +- if (ret) +- goto out; +- +- if (RPMB_REQ2RESP(req) != be16toh(frame_out->req_resp)) { +- rpmb_err("RPMB response mismatch.\n"); +- ret = -1; +- goto out; +- } +- +- ret = be16toh(frame_out->result); +- if (ret) +- rpmb_err("RPMB operation %s failed, %s[0x%04x].\n", +- rpmb_op_str(req), rpmb_result_str(ret), ret); ++ ret = __rpmb_program_key(frame_type, dev_fd, key, RPMB_KEY_SIZE); + + out: +- free(frame_in); +- free(frame_out); + close_fd(dev_fd); + close_fd(key_fd); + + return ret; + } + +-static int rpmb_get_write_counter(int dev_fd, unsigned int *cnt, +- const unsigned char *key) ++static int rpmb_get_write_counter(uint8_t frame_type, int dev_fd, ++ unsigned int *cnt, const unsigned char *key) + { + int ret; + uint16_t res = 0x000F; + uint16_t req = RPMB_GET_WRITE_COUNTER; +- struct rpmb_frame_jdec *frame_in = NULL; +- struct rpmb_frame_jdec *frame_out = NULL; ++ void *frame_in = NULL; ++ void *frame_out = NULL; ++ uint8_t *nonce_in; ++ uint8_t *nonce_out; + +- frame_in = rpmb_alloc_frames(0); +- frame_out = rpmb_alloc_frames(0); ++ frame_in = rpmb_frame_alloc(frame_type, 0); ++ frame_out = rpmb_frame_alloc(frame_type, 0); + if (!frame_in || !frame_out) { + ret = -ENOMEM; + goto out; + } + +- frame_in->req_resp = htobe16(req); +- RAND_bytes(frame_in->nonce, RPMB_NONCE_SIZE); ++ rpmb_frame_set(frame_type, frame_in, req, 0, 0, 0); ++ nonce_in = rpmb_frame_get_nonce_ptr(frame_type, frame_in); ++ 
RAND_bytes(nonce_in, RPMB_NONCE_SIZE); + +- ret = rpmb_ioctl(dev_fd, req, frame_in, 0, frame_out, 0); ++ ret = rpmb_ioctl(frame_type, dev_fd, req, frame_in, 0, frame_out, 0); + if (ret) + goto out; + +- res = be16toh(frame_out->result); ++ ret = rpmb_check_req_resp(frame_type, req, frame_out); ++ if (ret) ++ goto out; ++ ++ res = rpmb_frame_get_result(frame_type, frame_out); + if (res != RPMB_ERR_OK) { + ret = -1; + goto out; + } + +- if (memcmp(&frame_in->nonce, &frame_out->nonce, RPMB_NONCE_SIZE)) { ++ nonce_out = rpmb_frame_get_nonce_ptr(frame_type, frame_out); ++ ++ if (memcmp(nonce_in, nonce_out, RPMB_NONCE_SIZE)) { + rpmb_err("RPMB NONCE mismatch\n"); +- dump_hex_buffer("Result NONCE:", +- &frame_out->nonce, RPMB_NONCE_SIZE); +- dump_hex_buffer("Expected NONCE: ", +- &frame_in->nonce, RPMB_NONCE_SIZE); ++ dump_hex_buffer("Result NONCE:", nonce_out, RPMB_NONCE_SIZE); ++ dump_hex_buffer("Expected NONCE: ", nonce_in, RPMB_NONCE_SIZE); + ret = -1; + goto out; + } + + if (key) { +- ret = rpmb_check_mac(key, frame_out, 1); ++ ret = rpmb_check_mac(frame_type, key, RPMB_KEY_SIZE, ++ frame_out, 1); + if (ret) + goto out; + } + +- *cnt = be32toh(frame_out->write_counter); ++ *cnt = rpmb_frame_get_write_counter(frame_type, frame_out); + + out: + if (ret) +@@ -573,7 +1068,8 @@ static int op_rpmb_get_write_counter(int nargs, char **argv) + bool has_key; + struct rpmb_ioc_cap_cmd cap; + unsigned char key[RPMB_KEY_SIZE]; +- unsigned int cnt; ++ unsigned int cnt = 0; ++ uint8_t frame_type; + + if (nargs == 2) + has_key = true; +@@ -588,6 +1084,8 @@ static int op_rpmb_get_write_counter(int nargs, char **argv) + return ret; + argv++; + ++ frame_type = rpmb_cap_get_frame_type(&cap); ++ + if (has_key) { + key_fd = open_rd_file(argv[0], "key file"); + if (key_fd < 0) +@@ -598,9 +1096,9 @@ static int op_rpmb_get_write_counter(int nargs, char **argv) + if (ret < 0) + goto out; + +- ret = rpmb_get_write_counter(dev_fd, &cnt, key); ++ ret = rpmb_get_write_counter(frame_type, dev_fd, 
&cnt, key); + } else { +- ret = rpmb_get_write_counter(dev_fd, &cnt, NULL); ++ ret = rpmb_get_write_counter(frame_type, dev_fd, &cnt, NULL); + } + + if (!ret) +@@ -614,17 +1112,18 @@ static int op_rpmb_get_write_counter(int nargs, char **argv) + + static int op_rpmb_read_blocks(int nargs, char **argv) + { +- int i, ret; ++ int ret; + int dev_fd = -1, data_fd = -1, key_fd = -1; + uint16_t req = RPMB_READ_DATA; +- uint16_t addr, blocks_cnt; ++ uint32_t addr, block_count; + unsigned char key[RPMB_KEY_SIZE]; ++ uint8_t *nonce_in; + unsigned long numarg; + bool has_key; + struct rpmb_ioc_cap_cmd cap; +- struct rpmb_frame_jdec *frame_in = NULL; +- struct rpmb_frame_jdec *frames_out = NULL; +- struct rpmb_frame_jdec *frame_out; ++ void *frame_in = NULL; ++ void *frames_out = NULL; ++ uint8_t frame_type; + + ret = -EINVAL; + if (nargs == 4) +@@ -641,23 +1140,23 @@ static int op_rpmb_read_blocks(int nargs, char **argv) + + errno = 0; + numarg = strtoul(argv[0], NULL, 0); +- if (errno || numarg > USHRT_MAX) { ++ if (errno || numarg > UINT_MAX) { + rpmb_err("wrong block address\n"); + goto out; + } +- addr = (uint16_t)numarg; ++ addr = (uint32_t)numarg; + argv++; + + errno = 0; + numarg = strtoul(argv[0], NULL, 0); +- if (errno || numarg > USHRT_MAX) { ++ if (errno || numarg > UINT_MAX) { + rpmb_err("wrong blocks count\n"); + goto out; + } +- blocks_cnt = (uint16_t)numarg; ++ block_count = (uint32_t)numarg; + argv++; + +- if (blocks_cnt == 0) { ++ if (block_count == 0) { + rpmb_err("wrong blocks count\n"); + goto out; + } +@@ -678,27 +1177,30 @@ static int op_rpmb_read_blocks(int nargs, char **argv) + goto out; + } + ++ frame_type = rpmb_cap_get_frame_type(&cap); ++ + ret = 0; +- frames_out = rpmb_alloc_frames(blocks_cnt); +- frame_in = rpmb_alloc_frames(0); ++ frames_out = rpmb_frame_alloc(frame_type, block_count); ++ frame_in = rpmb_frame_alloc(frame_type, 0); + if (!frames_out || !frame_in) { +- rpmb_err("Cannot allocate %d RPMB frames\n", blocks_cnt); ++ rpmb_err("Cannot 
allocate %d RPMB frames\n", block_count); + ret = -ENOMEM; + goto out; + } + +- frame_in->req_resp = htobe16(req); +- frame_in->addr = htobe16(addr); +- /* eMMc spec ask for 0 here this will be translated by the rpmb layer */ +- frame_in->block_count = htobe16(blocks_cnt); +- RAND_bytes(frame_in->nonce, RPMB_NONCE_SIZE); ++ /* eMMc spec ask for 0 block_count here ++ * this will be translated by the rpmb layer ++ */ ++ rpmb_frame_set(frame_type, frame_in, req, block_count, addr, 0); ++ nonce_in = rpmb_frame_get_nonce_ptr(frame_type, frame_in); ++ RAND_bytes(nonce_in, RPMB_NONCE_SIZE); + +- ret = rpmb_ioctl(dev_fd, req, frame_in, 0, frames_out, blocks_cnt); ++ ret = rpmb_ioctl(frame_type, dev_fd, req, frame_in, 0, ++ frames_out, block_count); + if (ret) + goto out; + +- frame_out = &frames_out[blocks_cnt - 1]; +- ret = be16toh(frame_out->result); ++ ret = rpmb_frame_get_result(frame_type, frames_out); + if (ret) { + rpmb_err("RPMB operation %s failed, %s[0x%04x]\n", + rpmb_op_str(req), rpmb_result_str(ret), ret); +@@ -706,17 +1208,13 @@ static int op_rpmb_read_blocks(int nargs, char **argv) + } + + if (has_key) { +- ret = rpmb_check_mac(key, frames_out, blocks_cnt); ++ ret = rpmb_check_mac(frame_type, key, RPMB_KEY_SIZE, ++ frames_out, block_count); + if (ret) + goto out; + } + +- for (i = 0; i < blocks_cnt; i++) { +- ret = write_file(data_fd, frames_out[i].data, +- sizeof(frames_out[i].data)); +- if (ret < 0) +- goto out; +- } ++ ret = rpmb_frame_write_data(frame_type, data_fd, frames_out); + + out: + free(frame_in); +@@ -732,16 +1230,17 @@ static int op_rpmb_write_blocks(int nargs, char **argv) + { + int ret; + int dev_fd = -1, key_fd = -1, data_fd = -1; +- int i; + uint16_t req = RPMB_WRITE_DATA; + unsigned char key[RPMB_KEY_SIZE]; + unsigned char mac[RPMB_MAC_SIZE]; + unsigned long numarg; +- uint16_t addr, blocks_cnt; +- uint32_t write_counter; + struct rpmb_ioc_cap_cmd cap; +- struct rpmb_frame_jdec *frames_in = NULL; +- struct rpmb_frame_jdec *frame_out = 
NULL; ++ uint16_t addr, block_count; ++ uint32_t write_counter = 0; ++ uint32_t write_counter_out = 0; ++ void *frames_in = NULL; ++ void *frame_out = NULL; ++ uint8_t frame_type; + + ret = -EINVAL; + if (nargs != 5) +@@ -767,10 +1266,10 @@ static int op_rpmb_write_blocks(int nargs, char **argv) + rpmb_err("wrong blocks count\n"); + goto out; + } +- blocks_cnt = (uint16_t)numarg; ++ block_count = (uint16_t)numarg; + argv++; + +- if (blocks_cnt == 0) { ++ if (block_count == 0) { + rpmb_err("wrong blocks count\n"); + goto out; + } +@@ -789,60 +1288,64 @@ static int op_rpmb_write_blocks(int nargs, char **argv) + if (ret < 0) + goto out; + +- frames_in = rpmb_alloc_frames(blocks_cnt); +- frame_out = rpmb_alloc_frames(0); ++ frame_type = rpmb_cap_get_frame_type(&cap); ++ ++ frames_in = rpmb_frame_alloc(frame_type, block_count); ++ frame_out = rpmb_frame_alloc(frame_type, 0); + if (!frames_in || !frame_out) { + rpmb_err("can't allocate memory for RPMB outer frames\n"); + ret = -ENOMEM; + goto out; + } + +- ret = rpmb_get_write_counter(dev_fd, &write_counter, key); ++ ret = rpmb_get_write_counter(frame_type, dev_fd, &write_counter, NULL); + if (ret) + goto out; + +- for (i = 0; i < blocks_cnt; i++) { +- frames_in[i].req_resp = htobe16(req); +- frames_in[i].block_count = htobe16(blocks_cnt); +- frames_in[i].addr = htobe16(addr); +- frames_in[i].write_counter = htobe32(write_counter); +- } ++ ret = rpmb_frame_set(frame_type, frames_in, ++ req, block_count, addr, write_counter); ++ if (ret) ++ goto out; + +- for (i = 0; i < blocks_cnt; i++) { +- ret = read_file(data_fd, frames_in[i].data, +- sizeof(frames_in[0].data)); +- if (ret < 0) +- goto out; +- } ++ ret = rpmb_frame_read_data(frame_type, data_fd, frames_in); ++ if (ret) ++ goto out; + +- rpmb_calc_hmac_sha256(frames_in, blocks_cnt, ++ rpmb_calc_hmac_sha256(frame_type, frames_in, ++ block_count, + key, RPMB_KEY_SIZE, + mac, RPMB_MAC_SIZE); +- memcpy(frames_in[blocks_cnt - 1].key_mac, mac, RPMB_MAC_SIZE); +- ret = 
rpmb_ioctl(dev_fd, req, frames_in, blocks_cnt, frame_out, 0); ++ ++ rpmb_frame_set_key_mac(frame_type, frames_in, block_count, ++ mac, RPMB_MAC_SIZE); ++ ++ ret = rpmb_ioctl(frame_type, dev_fd, req, ++ frames_in, block_count, ++ frame_out, 0); + if (ret != 0) + goto out; + +- ret = be16toh(frame_out->result); ++ ret = rpmb_frame_get_result(frame_type, frame_out); + if (ret) { + rpmb_err("RPMB operation %s failed, %s[0x%04x]\n", + rpmb_op_str(req), rpmb_result_str(ret), ret); + ret = -1; + } + +- if (be16toh(frame_out->addr) != addr) { ++ if (rpmb_frame_get_addr(frame_type, frame_out) != addr) { + rpmb_err("RPMB addr mismatchs res=%04x req=%04x\n", +- be16toh(frame_out->addr), addr); ++ rpmb_frame_get_addr(frame_type, frame_out), addr); + ret = -1; + } + +- if (be32toh(frame_out->write_counter) <= write_counter) { ++ write_counter_out = rpmb_frame_get_write_counter(frame_type, frame_out); ++ if (write_counter_out <= write_counter) { + rpmb_err("RPMB write counter not incremented res=%x req=%x\n", +- be32toh(frame_out->write_counter), write_counter); ++ write_counter_out, write_counter); + ret = -1; + } + +- ret = rpmb_check_mac(key, frame_out, 1); ++ /* TODO: check mac: spec is not clear what is computed by the device */ + out: + free(frames_in); + free(frame_out); +-- +2.17.1 + diff --git a/patches/0017-usb-typec-ucsi-Remove-all-bit-fields.usb-typec b/patches/0017-usb-typec-ucsi-Remove-all-bit-fields.usb-typec new file mode 100644 index 0000000000..e19f1f2d9f --- /dev/null +++ b/patches/0017-usb-typec-ucsi-Remove-all-bit-fields.usb-typec @@ -0,0 +1,360 @@ +From d9cd911a4100800a0f7fd1ad61e074b79fecc78d Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Thu, 26 Sep 2019 12:38:25 +0300 +Subject: [PATCH 17/18] usb: typec: ucsi: Remove all bit-fields + +We can't use bit fields with data that is received or send +to/from the device. 
+ +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/ucsi/trace.h | 12 ++--- + drivers/usb/typec/ucsi/ucsi.c | 52 +++++++++++-------- + drivers/usb/typec/ucsi/ucsi.h | 93 +++++++++++++++++----------------- + 3 files changed, 85 insertions(+), 72 deletions(-) + +diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h +index 2262229dae8e..a0d3a934d3d9 100644 +--- a/drivers/usb/typec/ucsi/trace.h ++++ b/drivers/usb/typec/ucsi/trace.h +@@ -56,13 +56,13 @@ DECLARE_EVENT_CLASS(ucsi_log_connector_status, + TP_fast_assign( + __entry->port = port - 1; + __entry->change = status->change; +- __entry->opmode = status->pwr_op_mode; +- __entry->connected = status->connected; +- __entry->pwr_dir = status->pwr_dir; +- __entry->partner_flags = status->partner_flags; +- __entry->partner_type = status->partner_type; ++ __entry->opmode = UCSI_CONSTAT_PWR_OPMODE(status->flags); ++ __entry->connected = !!(status->flags & UCSI_CONSTAT_CONNECTED); ++ __entry->pwr_dir = !!(status->flags & UCSI_CONSTAT_PWR_DIR); ++ __entry->partner_flags = UCSI_CONSTAT_PARTNER_FLAGS(status->flags); ++ __entry->partner_type = UCSI_CONSTAT_PARTNER_TYPE(status->flags); + __entry->request_data_obj = status->request_data_obj; +- __entry->bc_status = status->bc_status; ++ __entry->bc_status = UCSI_CONSTAT_BC_STATUS(status->pwr_status); + ), + TP_printk("port%d status: change=%04x, opmode=%x, connected=%d, " + "sourcing=%d, partner_flags=%x, partner_type=%x, " +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index 19892511120b..fbc0ae1851f9 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -392,7 +392,7 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient) + + static void ucsi_pwr_opmode_change(struct ucsi_connector *con) + { +- switch (con->status.pwr_op_mode) { ++ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) { + case UCSI_CONSTAT_PWR_OPMODE_PD: + typec_set_pwr_opmode(con->port, 
TYPEC_PWR_MODE_PD); + break; +@@ -410,6 +410,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con) + + static int ucsi_register_partner(struct ucsi_connector *con) + { ++ u8 pwr_opmode = UCSI_CONSTAT_PWR_OPMODE(con->status.flags); + struct typec_partner_desc desc; + struct typec_partner *partner; + +@@ -418,7 +419,7 @@ static int ucsi_register_partner(struct ucsi_connector *con) + + memset(&desc, 0, sizeof(desc)); + +- switch (con->status.partner_type) { ++ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) { + case UCSI_CONSTAT_PARTNER_TYPE_DEBUG: + desc.accessory = TYPEC_ACCESSORY_DEBUG; + break; +@@ -429,7 +430,7 @@ static int ucsi_register_partner(struct ucsi_connector *con) + break; + } + +- desc.usb_pd = con->status.pwr_op_mode == UCSI_CONSTAT_PWR_OPMODE_PD; ++ desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD; + + partner = typec_register_partner(con->port, &desc); + if (IS_ERR(partner)) { +@@ -461,7 +462,7 @@ static void ucsi_partner_change(struct ucsi_connector *con) + if (!con->partner) + return; + +- switch (con->status.partner_type) { ++ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) { + case UCSI_CONSTAT_PARTNER_TYPE_UFP: + typec_set_data_role(con->port, TYPEC_HOST); + break; +@@ -491,6 +492,7 @@ static void ucsi_handle_connector_change(struct work_struct *work) + struct ucsi_connector *con = container_of(work, struct ucsi_connector, + work); + struct ucsi *ucsi = con->ucsi; ++ enum typec_role role; + u64 command; + int ret; + +@@ -505,11 +507,13 @@ static void ucsi_handle_connector_change(struct work_struct *work) + goto out_unlock; + } + ++ role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR); ++ + if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE) + ucsi_pwr_opmode_change(con); + + if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) { +- typec_set_pwr_role(con->port, con->status.pwr_dir); ++ typec_set_pwr_role(con->port, role); + + /* Complete pending power role swap */ + if (!completion_done(&con->complete)) 
+@@ -517,9 +521,9 @@ static void ucsi_handle_connector_change(struct work_struct *work) + } + + if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) { +- typec_set_pwr_role(con->port, con->status.pwr_dir); ++ typec_set_pwr_role(con->port, role); + +- switch (con->status.partner_type) { ++ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) { + case UCSI_CONSTAT_PARTNER_TYPE_UFP: + typec_set_data_role(con->port, TYPEC_HOST); + break; +@@ -530,7 +534,7 @@ static void ucsi_handle_connector_change(struct work_struct *work) + break; + } + +- if (con->status.connected) ++ if (con->status.flags & UCSI_CONSTAT_CONNECTED) + ucsi_register_partner(con); + else + ucsi_unregister_partner(con); +@@ -649,6 +653,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command) + static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) + { + struct ucsi_connector *con = typec_get_drvdata(port); ++ u8 partner_type; + u64 command; + int ret = 0; + +@@ -659,9 +664,10 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) + goto out_unlock; + } + +- if ((con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP && ++ partner_type = UCSI_CONSTAT_PARTNER_TYPE(con->status.flags); ++ if ((partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP && + role == TYPEC_DEVICE) || +- (con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP && ++ (partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP && + role == TYPEC_HOST)) + goto out_unlock; + +@@ -685,6 +691,7 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) + static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) + { + struct ucsi_connector *con = typec_get_drvdata(port); ++ enum typec_role cur_role; + u64 command; + int ret = 0; + +@@ -695,7 +702,9 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) + goto out_unlock; + } + +- if (con->status.pwr_dir == role) ++ cur_role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR); ++ ++ if (cur_role == 
role) + goto out_unlock; + + command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num); +@@ -712,7 +721,8 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) + } + + /* Something has gone wrong while swapping the role */ +- if (con->status.pwr_op_mode != UCSI_CONSTAT_PWR_OPMODE_PD) { ++ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) != ++ UCSI_CONSTAT_PWR_OPMODE_PD) { + ucsi_reset_connector(con, true); + ret = -EPROTO; + } +@@ -767,11 +777,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + else if (con->cap.op_mode & UCSI_CONCAP_OPMODE_UFP) + cap->data = TYPEC_PORT_UFP; + +- if (con->cap.provider && con->cap.consumer) ++ if ((con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER) && ++ (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER)) + cap->type = TYPEC_PORT_DRP; +- else if (con->cap.provider) ++ else if (con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER) + cap->type = TYPEC_PORT_SRC; +- else if (con->cap.consumer) ++ else if (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER) + cap->type = TYPEC_PORT_SNK; + + cap->revision = ucsi->cap.typec_version; +@@ -807,10 +818,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + return 0; + } + +- ucsi_pwr_opmode_change(con); +- typec_set_pwr_role(con->port, con->status.pwr_dir); +- +- switch (con->status.partner_type) { ++ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) { + case UCSI_CONSTAT_PARTNER_TYPE_UFP: + typec_set_data_role(con->port, TYPEC_HOST); + break; +@@ -822,8 +830,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) + } + + /* Check if there is already something connected */ +- if (con->status.connected) ++ if (con->status.flags & UCSI_CONSTAT_CONNECTED) { ++ typec_set_pwr_role(con->port, ++ !!(con->status.flags & UCSI_CONSTAT_PWR_DIR)); ++ ucsi_pwr_opmode_change(con); + ucsi_register_partner(con); ++ } + + if (con->partner) { + ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP); +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h +index 
755c8936bff4..3affd5f11678 100644 +--- a/drivers/usb/typec/ucsi/ucsi.h ++++ b/drivers/usb/typec/ucsi/ucsi.h +@@ -144,8 +144,8 @@ struct ucsi_capability { + #define UCSI_CAP_ATTR_POWER_AC_SUPPLY BIT(8) + #define UCSI_CAP_ATTR_POWER_OTHER BIT(10) + #define UCSI_CAP_ATTR_POWER_VBUS BIT(14) +- u32 num_connectors:8; +- u32 features:24; ++ u8 num_connectors; ++ u8 features; + #define UCSI_CAP_SET_UOM BIT(0) + #define UCSI_CAP_SET_PDM BIT(1) + #define UCSI_CAP_ALT_MODE_DETAILS BIT(2) +@@ -154,8 +154,9 @@ struct ucsi_capability { + #define UCSI_CAP_CABLE_DETAILS BIT(5) + #define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS BIT(6) + #define UCSI_CAP_PD_RESET BIT(7) ++ u16 reserved_1; + u8 num_alt_modes; +- u8 reserved; ++ u8 reserved_2; + u16 bc_version; + u16 pd_version; + u16 typec_version; +@@ -172,9 +173,9 @@ struct ucsi_connector_capability { + #define UCSI_CONCAP_OPMODE_USB2 BIT(5) + #define UCSI_CONCAP_OPMODE_USB3 BIT(6) + #define UCSI_CONCAP_OPMODE_ALT_MODE BIT(7) +- u8 provider:1; +- u8 consumer:1; +- u8:6; /* reserved */ ++ u8 flags; ++#define UCSI_CONCAP_FLAG_PROVIDER BIT(0) ++#define UCSI_CONCAP_FLAG_CONSUMER BIT(1) + } __packed; + + struct ucsi_altmode { +@@ -186,18 +187,17 @@ struct ucsi_altmode { + struct ucsi_cable_property { + u16 speed_supported; + u8 current_capability; +- u8 vbus_in_cable:1; +- u8 active_cable:1; +- u8 directionality:1; +- u8 plug_type:2; +-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0 +-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1 +-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2 +-#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3 +- u8 mode_support:1; +- u8:2; /* reserved */ +- u8 latency:4; +- u8:4; /* reserved */ ++ u8 flags; ++#define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0) ++#define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1) ++#define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2) ++#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0)) ++#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0 ++#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1 ++#define 
UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2 ++#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3 ++#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5) ++ u8 latency; + } __packed; + + /* Data structure filled by PPM in response to GET_CONNECTOR_STATUS command. */ +@@ -214,35 +214,36 @@ struct ucsi_connector_status { + #define UCSI_CONSTAT_POWER_DIR_CHANGE BIT(12) + #define UCSI_CONSTAT_CONNECT_CHANGE BIT(14) + #define UCSI_CONSTAT_ERROR BIT(15) +- u16 pwr_op_mode:3; +-#define UCSI_CONSTAT_PWR_OPMODE_NONE 0 +-#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1 +-#define UCSI_CONSTAT_PWR_OPMODE_BC 2 +-#define UCSI_CONSTAT_PWR_OPMODE_PD 3 +-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4 +-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5 +- u16 connected:1; +- u16 pwr_dir:1; +- u16 partner_flags:8; +-#define UCSI_CONSTAT_PARTNER_FLAG_USB BIT(0) +-#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE BIT(1) +- u16 partner_type:3; +-#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1 +-#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2 +-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */ +-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */ +-#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5 +-#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6 ++ u16 flags; ++#define UCSI_CONSTAT_PWR_OPMODE(_f_) ((_f_) & GENMASK(2, 0)) ++#define UCSI_CONSTAT_PWR_OPMODE_NONE 0 ++#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1 ++#define UCSI_CONSTAT_PWR_OPMODE_BC 2 ++#define UCSI_CONSTAT_PWR_OPMODE_PD 3 ++#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4 ++#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5 ++#define UCSI_CONSTAT_CONNECTED BIT(3) ++#define UCSI_CONSTAT_PWR_DIR BIT(4) ++#define UCSI_CONSTAT_PARTNER_FLAGS(_f_) ((_f_) & GENMASK(12, 5) >> 5) ++#define UCSI_CONSTAT_PARTNER_FLAG_USB 1 ++#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE 2 ++#define UCSI_CONSTAT_PARTNER_TYPE(_f_) ((_f_) & GENMASK(15, 13) >> 13) ++#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1 ++#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2 ++#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */ ++#define 
UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */ ++#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5 ++#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6 + u32 request_data_obj; +- u8 bc_status:2; +-#define UCSI_CONSTAT_BC_NOT_CHARGING 0 +-#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1 +-#define UCSI_CONSTAT_BC_SLOW_CHARGING 2 +-#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3 +- u8 provider_cap_limit_reason:4; +-#define UCSI_CONSTAT_CAP_PWR_LOWERED 0 +-#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1 +- u8:2; /* reserved */ ++ u8 pwr_status; ++#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(2, 0)) ++#define UCSI_CONSTAT_BC_NOT_CHARGING 0 ++#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1 ++#define UCSI_CONSTAT_BC_SLOW_CHARGING 2 ++#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3 ++#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) ((_p_) & GENMASK(6, 3) >> 3) ++#define UCSI_CONSTAT_CAP_PWR_LOWERED 0 ++#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1 + } __packed; + + /* -------------------------------------------------------------------------- */ +-- +2.17.1 + diff --git a/patches/0018-ASoC-Intel-Skylake-Reuse-sst_dsp_new.audio b/patches/0018-ASoC-Intel-Skylake-Reuse-sst_dsp_new.audio new file mode 100644 index 0000000000..ad5473ce31 --- /dev/null +++ b/patches/0018-ASoC-Intel-Skylake-Reuse-sst_dsp_new.audio @@ -0,0 +1,144 @@ +From 3531585cd73ba415a8310788b904c347cc3d5f49 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 11:47:24 +0200 +Subject: [PATCH 018/193] ASoC: Intel: Skylake: Reuse sst_dsp_new + +skl_dsp_ctx_init is dumplication of sst_dsp_new and usage of such +bypasses natural DSP framework's flow. Remove it and reuse sst_dsp_new +constructor which invokes sst specific init internally so nothing is +missed. + +Skylake does not even define any sst_ops::init so portion of existing +skl_dsp_ctx_init can be regarded as DEADCODE. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 2 +- + sound/soc/intel/skylake/cnl-sst.c | 2 +- + sound/soc/intel/skylake/skl-sst-dsp.c | 28 ------------------------- + sound/soc/intel/skylake/skl-sst-dsp.h | 2 -- + sound/soc/intel/skylake/skl-sst-utils.c | 6 +++++- + sound/soc/intel/skylake/skl-sst.c | 2 +- + 6 files changed, 8 insertions(+), 34 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 29b59ce50816..65cbbe4fd57c 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -588,7 +588,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3); + skl->d0i3.state = SKL_DSP_D0I3_NONE; + +- return skl_dsp_acquire_irq(sst); ++ return 0; + } + EXPORT_SYMBOL_GPL(bxt_sst_dsp_init); + +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 5cdf4960617d..c89ae72b7ef4 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -459,7 +459,7 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + cnl->boot_complete = false; + init_waitqueue_head(&cnl->boot_wait); + +- return skl_dsp_acquire_irq(sst); ++ return 0; + } + EXPORT_SYMBOL_GPL(cnl_sst_dsp_init); + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c +index 1c4ecbcd7db7..9d8eb1af4798 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.c ++++ b/sound/soc/intel/skylake/skl-sst-dsp.c +@@ -418,34 +418,6 @@ int skl_dsp_sleep(struct sst_dsp *ctx) + } + EXPORT_SYMBOL_GPL(skl_dsp_sleep); + +-struct sst_dsp *skl_dsp_ctx_init(struct device *dev, +- struct sst_pdata *pdata, int irq) +-{ +- int ret; +- struct sst_dsp *sst; +- +- sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL); +- if (sst == NULL) +- return NULL; +- +- spin_lock_init(&sst->spinlock); +- mutex_init(&sst->mutex); +- sst->dev = dev; +- 
sst->pdata = pdata; +- sst->irq = irq; +- sst->ops = pdata->ops; +- sst->thread_context = pdata->dsp; +- +- /* Initialise SST Audio DSP */ +- if (sst->ops->init) { +- ret = sst->ops->init(sst, NULL); +- if (ret < 0) +- return NULL; +- } +- +- return sst; +-} +- + int skl_dsp_acquire_irq(struct sst_dsp *sst) + { + int ret; +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index 7f6e853a6f5c..840353cc35df 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -209,8 +209,6 @@ int skl_cldma_prepare(struct sst_dsp *ctx); + int skl_cldma_wait_interruptible(struct sst_dsp *ctx); + + void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state); +-struct sst_dsp *skl_dsp_ctx_init(struct device *dev, +- struct sst_pdata *pdata, int irq); + int skl_dsp_acquire_irq(struct sst_dsp *sst); + bool is_skl_dsp_running(struct sst_dsp *ctx); + +diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index c7eeba920534..25114257b5b8 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -6,6 +6,7 @@ + */ + + #include ++#include + #include + #include + #include "../common/sst-dsp.h" +@@ -400,10 +401,13 @@ int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, + struct skl_dev *skl = *dsp; + struct sst_dsp *sst; + ++ pdata->id = skl->pci->device; ++ pdata->irq = irq; ++ pdata->resindex_dma_base = -1; + skl->dev = dev; + pdata->dsp = skl; + INIT_LIST_HEAD(&skl->module_list); +- skl->dsp = skl_dsp_ctx_init(dev, pdata, irq); ++ skl->dsp = sst_dsp_new(dev, pdata); + if (!skl->dsp) { + dev_err(skl->dev, "%s: no device\n", __func__); + return -ENODEV; +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 598e76d2a3fc..8545ef58a1ec 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -550,7 +550,7 @@ int 
skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + + sst->fw_ops = skl_fw_ops; + +- return skl_dsp_acquire_irq(sst); ++ return 0; + } + EXPORT_SYMBOL_GPL(skl_sst_dsp_init); + +-- +2.17.1 + diff --git a/patches/0018-Fix-the-issue-for-tipc-test-case-closer1.trusty b/patches/0018-Fix-the-issue-for-tipc-test-case-closer1.trusty new file mode 100644 index 0000000000..7bc399811f --- /dev/null +++ b/patches/0018-Fix-the-issue-for-tipc-test-case-closer1.trusty @@ -0,0 +1,74 @@ +From f04e758ea0dc7ec60659a886adbf8bedb70c78e6 Mon Sep 17 00:00:00 2001 +From: weideng +Date: Mon, 20 Jun 2016 14:19:52 +0800 +Subject: [PATCH 18/63] Fix the issue for tipc test case closer1 + +If the server channel accept the connection and immediately close +the server channel, the client channel will receive one CONN_RSP +message and then immediately one DISCONN_REQ message. At this time, +channel status will maintain in CONNECTED status for one short time +and dn_wait_for_reply() cannot capture the channel status. This is +the reason why closer1 will fail. This patch will add one pulse +variable to capture channel CONNECTED status. And it can work well +for both UP and SMP mode. 
+ +Change-Id: I4aac5af714daa67d3095093907c0b9f26af4d76c +Signed-off-by: weideng +--- + drivers/trusty/trusty-ipc.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index 7d66e9f74220..d6765f1d4510 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -38,6 +38,9 @@ + #define REPLY_TIMEOUT 5000 + #define TXBUF_TIMEOUT 15000 + ++#define PULSE_ACTIVE 1 ++#define PULSE_DEACTIVE 0 ++ + #define MAX_SRV_NAME_LEN 256 + #define MAX_DEV_NAME_LEN 32 + +@@ -705,6 +708,7 @@ EXPORT_SYMBOL(tipc_chan_destroy); + /***************************************************************************/ + + struct tipc_dn_chan { ++ int pulse; + int state; + struct mutex lock; /* protects rx_msg_queue list and channel state */ + struct tipc_chan *chan; +@@ -729,9 +733,10 @@ static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout) + ret = -ETIMEDOUT; + } else { + /* got reply */ +- if (dn->state == TIPC_CONNECTED) ++ if (dn->pulse == PULSE_ACTIVE) { ++ dn->pulse = PULSE_DEACTIVE; + ret = 0; +- else if (dn->state == TIPC_DISCONNECTED) ++ } else if (dn->state == TIPC_DISCONNECTED) + if (!list_empty(&dn->rx_msg_queue)) + ret = 0; + else +@@ -775,6 +780,7 @@ static void dn_connected(struct tipc_dn_chan *dn) + { + mutex_lock(&dn->lock); + dn->state = TIPC_CONNECTED; ++ dn->pulse = PULSE_ACTIVE; + + /* complete all pending */ + complete(&dn->reply_comp); +@@ -883,6 +889,7 @@ static int tipc_open(struct inode *inode, struct file *filp) + INIT_LIST_HEAD(&dn->rx_msg_queue); + + dn->state = TIPC_DISCONNECTED; ++ dn->pulse = PULSE_DEACTIVE; + + dn->chan = vds_create_channel(vds, &_dn_ops, dn); + if (IS_ERR(dn->chan)) { +-- +2.17.1 + diff --git a/patches/0018-dmaengine-acpi-Provide-consumer-device-to-acpi_dma_xl.lpss b/patches/0018-dmaengine-acpi-Provide-consumer-device-to-acpi_dma_xl.lpss new file mode 100644 index 0000000000..90c4f4409e --- /dev/null +++ 
b/patches/0018-dmaengine-acpi-Provide-consumer-device-to-acpi_dma_xl.lpss @@ -0,0 +1,52 @@ +From 4c2c26baf1ad231100eba2752db5c68009e83b23 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 5 Aug 2019 15:50:09 +0300 +Subject: [PATCH 18/40] dmaengine: acpi: Provide consumer device to + ->acpi_dma_xlate() + +In the future ->acpi_dma_xlate() callback function may use the consumer +device pointer to be utilized for DMA crossbar programming. + +As a preparation step provide consumer device pointer to ->acpi_dma_xlate(). + +Signed-off-by: Andy Shevchenko +--- + drivers/dma/acpi-dma.c | 1 + + include/linux/acpi_dma.h | 2 ++ + 2 files changed, 3 insertions(+) + +diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c +index 50ddc8871a5f..f4ada8ff550d 100644 +--- a/drivers/dma/acpi-dma.c ++++ b/drivers/dma/acpi-dma.c +@@ -374,6 +374,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, + memset(&pdata, 0, sizeof(pdata)); + + /* Initial values for the request line and channel */ ++ dma_spec->consumer = dev; + dma_spec->index = index; + dma_spec->chan_id = -1; + dma_spec->slave_id = -1; +diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h +index 2caebb8fb158..3b97d0b702af 100644 +--- a/include/linux/acpi_dma.h ++++ b/include/linux/acpi_dma.h +@@ -18,6 +18,7 @@ + + /** + * struct acpi_dma_spec - slave device DMA resources ++ * @consumer: struct device of the DMA resources consumer + * @index: index of FixedDMA() resource + * @chan_id: channel unique id + * @slave_id: request line unique id +@@ -25,6 +26,7 @@ + * function + */ + struct acpi_dma_spec { ++ struct device *consumer; + size_t index; + int chan_id; + int slave_id; +-- +2.17.1 + diff --git a/patches/0018-drm-i915-Add-function-to-set-subslices.drm b/patches/0018-drm-i915-Add-function-to-set-subslices.drm new file mode 100644 index 0000000000..0bed666cb0 --- /dev/null +++ b/patches/0018-drm-i915-Add-function-to-set-subslices.drm @@ -0,0 +1,118 @@ +From 
49ab573a6dbc5039962d966d02530c3d25ec6d50 Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:03:02 -0700 +Subject: [PATCH 018/690] drm/i915: Add function to set subslices + +Add a new function to set a set of subslices for a given +slice. + +v2: Fix typo in subslice_mask assignment + +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-7-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/gt/intel_sseu.c | 6 ++++++ + drivers/gpu/drm/i915/gt/intel_sseu.h | 3 +++ + drivers/gpu/drm/i915/intel_device_info.c | 18 +++++++++++------- + 3 files changed, 20 insertions(+), 7 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c +index d52686a1afdc..3a5db0dbac72 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.c ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.c +@@ -32,6 +32,12 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) + return total; + } + ++void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, ++ u8 ss_mask) ++{ ++ sseu->subslice_mask[slice] = ss_mask; ++} ++ + unsigned int + intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) + { +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h +index 7f2355ce963d..7f600f50dedb 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.h ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h +@@ -78,6 +78,9 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu); + unsigned int + intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); + ++void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, ++ u8 ss_mask); ++ + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, + const struct intel_sseu *req_sseu); + +diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c +index 52515efe9f4e..1a45728ac712 100644 +--- 
a/drivers/gpu/drm/i915/intel_device_info.c ++++ b/drivers/gpu/drm/i915/intel_device_info.c +@@ -206,7 +206,10 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) + int ss; + + sseu->slice_mask |= BIT(s); +- sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask; ++ ++ intel_sseu_set_subslices(sseu, s, (ss_en >> ss_idx) & ++ ss_en_mask); ++ + for (ss = 0; ss < sseu->max_subslices; ss++) { + if (sseu->subslice_mask[s] & BIT(ss)) + sseu_set_eus(sseu, s, ss, eu_en); +@@ -274,8 +277,9 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) + * Slice0 can have up to 3 subslices, but there are only 2 in + * slice1/2. + */ +- sseu->subslice_mask[s] = s == 0 ? subslice_mask_with_eus : +- subslice_mask_with_eus & 0x3; ++ intel_sseu_set_subslices(sseu, s, s == 0 ? ++ subslice_mask_with_eus : ++ subslice_mask_with_eus & 0x3); + } + + sseu->eu_total = compute_eu_total(sseu); +@@ -330,7 +334,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) + sseu_set_eus(sseu, 0, 1, ~disabled_mask); + } + +- sseu->subslice_mask[0] = subslice_mask; ++ intel_sseu_set_subslices(sseu, 0, subslice_mask); + + sseu->eu_total = compute_eu_total(sseu); + +@@ -384,7 +388,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) + /* skip disabled slice */ + continue; + +- sseu->subslice_mask[s] = subslice_mask; ++ intel_sseu_set_subslices(sseu, s, subslice_mask); + + eu_disable = I915_READ(GEN9_EU_DISABLE(s)); + for (ss = 0; ss < sseu->max_subslices; ss++) { +@@ -491,7 +495,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) + /* skip disabled slice */ + continue; + +- sseu->subslice_mask[s] = subslice_mask; ++ intel_sseu_set_subslices(sseu, s, subslice_mask); + + for (ss = 0; ss < sseu->max_subslices; ss++) { + u8 eu_disabled_mask; +@@ -588,7 +592,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) + sseu->eu_per_subslice); + + for (s = 0; s < sseu->max_slices; s++) { +- 
sseu->subslice_mask[s] = subslice_mask; ++ intel_sseu_set_subslices(sseu, s, subslice_mask); + + for (ss = 0; ss < sseu->max_subslices; ss++) { + sseu_set_eus(sseu, s, ss, +-- +2.17.1 + diff --git a/patches/0018-hypercall-refine-hypercall-interfaces.acrn b/patches/0018-hypercall-refine-hypercall-interfaces.acrn new file mode 100644 index 0000000000..8a435865ba --- /dev/null +++ b/patches/0018-hypercall-refine-hypercall-interfaces.acrn @@ -0,0 +1,729 @@ +From 961a1dcc0db31a033ff4a27182306f20e4889562 Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:57 +0800 +Subject: [PATCH 018/150] hypercall: refine hypercall interfaces + +- HC_VM_SET_MEMMAP: remove MAP_UPDATE, refine API structure +- HC_NOTIFY_REQUEST_FINISH: use vcpu_id instead of vcpu_mask +- HC_VM_GPA2HPA: refine API structure +- HC_SET_IOREQ_BUFFER: refine API structure +- IC_XXXX_IRQLINE: refine API structure + +Change-Id: Ie9f6af005160a807335b2f266d7139abc06d8db9 +Tracked-On: 218445 +Signed-off-by: Jason Chen CJ +Signed-off-by: Edwin Zhai +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/char/vhm/vhm_dev.c | 5 +- + drivers/vbs/vbs_rng.c | 3 +- + drivers/vhm/vhm_hypercall.c | 4 +- + drivers/vhm/vhm_ioreq.c | 26 ++--- + drivers/vhm/vhm_mm.c | 29 ++--- + include/linux/vhm/acrn_common.h | 181 ++++++++++------------------- + include/linux/vhm/acrn_hv_defs.h | 63 +++++++--- + include/linux/vhm/acrn_vhm_ioreq.h | 6 +- + include/linux/vhm/acrn_vhm_mm.h | 21 +--- + include/linux/vhm/vhm_hypercall.h | 3 +- + include/linux/vhm/vhm_ioctl_defs.h | 5 + + 11 files changed, 152 insertions(+), 194 deletions(-) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 6236b524d4b9..22cc7797f664 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -287,14 +287,13 @@ static long vhm_dev_ioctl(struct file *filep, + } + + case IC_NOTIFY_REQUEST_FINISH: { +- struct acrn_ioreq_notify notify; ++ struct 
ioreq_notify notify; + + if (copy_from_user(¬ify, (void *)ioctl_param, + sizeof(notify))) + return -EFAULT; + +- ret = acrn_ioreq_complete_request(notify.client_id, +- notify.vcpu_mask); ++ ret = acrn_ioreq_complete_request(notify.client_id, notify.vcpu); + if (ret < 0) + return -EFAULT; + break; +diff --git a/drivers/vbs/vbs_rng.c b/drivers/vbs/vbs_rng.c +index ef6f8776e71c..f2234e73034d 100644 +--- a/drivers/vbs/vbs_rng.c ++++ b/drivers/vbs/vbs_rng.c +@@ -369,8 +369,7 @@ static int handle_kick(int client_id, int req_cnt) + req->reqs.pio_request.size, + req->reqs.pio_request.value); + req->processed = REQ_STATE_SUCCESS; +- acrn_ioreq_complete_request(client->vhm_client_id, +- 1 << i); ++ acrn_ioreq_complete_request(client->vhm_client_id, i); + } + } + +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index 11ca6b86baed..94a95933d51e 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -87,9 +87,9 @@ inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) + return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); + } + +-inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu_mask) ++inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu) + { +- return acrn_hypercall2(HC_NOTIFY_REQUEST_FINISH, vmid, vcpu_mask); ++ return acrn_hypercall2(HC_NOTIFY_REQUEST_FINISH, vmid, vcpu); + } + + inline long hcall_assert_irqline(unsigned long vmid, unsigned long irq) +diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c +index 6054e3d00eb2..08826c575780 100644 +--- a/drivers/vhm/vhm_ioreq.c ++++ b/drivers/vhm/vhm_ioreq.c +@@ -67,7 +67,7 @@ + + struct ioreq_range { + struct list_head list; +- enum request_type type; ++ uint32_t type; + long start; + long end; + }; +@@ -130,7 +130,7 @@ static DECLARE_BITMAP(client_bitmap, MAX_CLIENT); + + static void acrn_ioreq_notify_client(struct ioreq_client *client); + +-static inline bool is_range_type(enum request_type 
type) ++static inline bool is_range_type(uint32_t type) + { + return (type == REQ_MMIO || type == REQ_PORTIO || type == REQ_WP); + } +@@ -335,7 +335,7 @@ static void __attribute__((unused)) dump_iorange(struct ioreq_client *client) + * NOTE: here just add iorange entry directly, no check for the overlap.. + * please client take care of it + */ +-int acrn_ioreq_add_iorange(int client_id, enum request_type type, ++int acrn_ioreq_add_iorange(int client_id, uint32_t type, + long start, long end) + { + struct ioreq_client *client; +@@ -375,7 +375,7 @@ int acrn_ioreq_add_iorange(int client_id, enum request_type type, + return 0; + } + +-int acrn_ioreq_del_iorange(int client_id, enum request_type type, ++int acrn_ioreq_del_iorange(int client_id, uint32_t type, + long start, long end) + { + struct ioreq_client *client; +@@ -617,13 +617,6 @@ static bool req_in_range(struct ioreq_range *range, struct vhm_request *req) + ret = true; + break; + } +- case REQ_MSR: /*TODO: add bitmap for MSR range */ +- case REQ_CPUID: +- case REQ_EXIT: +- { +- ret = true; +- break; +- } + + default: + ret = false; +@@ -707,7 +700,7 @@ static int handle_cf8cfc(struct vhm_vm *vm, struct vhm_request *req, int vcpu) + + if (req_handled) { + req->processed = REQ_STATE_SUCCESS; +- if (hcall_notify_req_finish(vm->vmid, 1 << vcpu) < 0) { ++ if (hcall_notify_req_finish(vm->vmid, vcpu) < 0) { + pr_err("vhm-ioreq: failed to " + "notify request finished !\n"); + return -EFAULT; +@@ -815,7 +808,7 @@ int acrn_ioreq_distribute_request(struct vhm_vm *vm) + return 0; + } + +-int acrn_ioreq_complete_request(int client_id, uint64_t vcpu_mask) ++int acrn_ioreq_complete_request(int client_id, uint64_t vcpu) + { + struct ioreq_client *client; + int ret; +@@ -830,9 +823,8 @@ int acrn_ioreq_complete_request(int client_id, uint64_t vcpu_mask) + return -EINVAL; + } + +- atomic_sub(bitmap_weight((unsigned long *)&vcpu_mask, +- VHM_REQUEST_MAX), &client->req); +- ret = hcall_notify_req_finish(client->vmid, vcpu_mask); ++ 
atomic_dec(&client->req); ++ ret = hcall_notify_req_finish(client->vmid, vcpu); + if (ret < 0) { + pr_err("vhm-ioreq: failed to notify request finished !\n"); + return -EFAULT; +@@ -887,7 +879,7 @@ int acrn_ioreq_init(struct vhm_vm *vm, unsigned long vma) + vm->req_buf = page_address(page); + vm->pg = page; + +- set_buffer.req_buf = (long) page_to_phys(page); ++ set_buffer.req_buf = page_to_phys(page); + + ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(&set_buffer)); + if (ret < 0) { +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index 61ebb8c508d2..b475aa91a348 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -152,13 +152,14 @@ int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) + } + + static int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, int prot, int type) ++ unsigned long host_gpa, unsigned long len, ++ unsigned int prot, unsigned int type) + { + struct vm_set_memmap set_memmap; + + set_memmap.type = type; +- set_memmap.foreign_gpa = guest_gpa; +- set_memmap.hvm_gpa = host_gpa; ++ set_memmap.remote_gpa = guest_gpa; ++ set_memmap.vm0_gpa = host_gpa; + set_memmap.length = len; + set_memmap.prot = prot; + +@@ -177,24 +178,24 @@ static int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + } + + int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, int prot) ++ unsigned long host_gpa, unsigned long len, unsigned int prot) + { + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, + prot, MAP_MMIO); + } + + int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, int prot) ++ unsigned long host_gpa, unsigned long len, unsigned int prot) + { + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, + prot, MAP_UNMAP); + } + +-int update_mem_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, 
int prot) ++int update_mmio_map(unsigned long vmid, unsigned long guest_gpa, ++ unsigned long host_gpa, unsigned long len, unsigned int prot) + { + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, +- prot, MAP_UPDATE); ++ prot, MAP_MMIO); + } + + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) +@@ -217,18 +218,18 @@ int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + } + seg->prot = memmap->mem.prot; + set_memmap.type = MAP_MEM; +- set_memmap.foreign_gpa = seg->gpa; +- set_memmap.hvm_gpa = seg->base; ++ set_memmap.remote_gpa = seg->gpa; ++ set_memmap.vm0_gpa = seg->base; + set_memmap.length = seg->len; + set_memmap.prot = seg->prot; +- set_memmap.prot |= MMU_MEM_ATTR_WB_CACHE; ++ set_memmap.prot |= MEM_ATTR_WB_CACHE; + } else { + set_memmap.type = MAP_MMIO; +- set_memmap.foreign_gpa = memmap->mmio.gpa; +- set_memmap.hvm_gpa = memmap->mmio.hpa; ++ set_memmap.remote_gpa = memmap->mmio.gpa; ++ set_memmap.vm0_gpa = memmap->mmio.hpa; + set_memmap.length = memmap->mmio.len; + set_memmap.prot = memmap->mmio.prot; +- set_memmap.prot |= MMU_MEM_ATTR_UNCACHED; ++ set_memmap.prot |= MEM_ATTR_UNCACHED; + } + + /* hypercall to notify hv the guest EPT setting*/ +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 42e2c53e3a3a..ea6f77c017dc 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -1,5 +1,5 @@ + /* +- * virtio and hyperviosr service module (VHM): commom.h ++ * common definition + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+@@ -59,125 +59,89 @@ + #endif + + /* +- * Commmon structures for ACRN/VHM/DM ++ * Common structures for ACRN/VHM/DM + */ + +-/* ISA type +- * inject interrut to both PIC and IOAPIC +- */ +-enum interrupt_type { +- ACRN_INTR_TYPE_ISA, +- ACRN_INTR_TYPE_IOAPIC, +-} __attribute__((aligned(4))); +- + /* + * IO request + */ + #define VHM_REQUEST_MAX 16 + +-enum request_state { +- REQ_STATE_SUCCESS = 1, +- REQ_STATE_PENDING = 0, +- REQ_STATE_PROCESSING = 2, +- REQ_STATE_FAILED = -1, +-} __attribute__((aligned(4))); +- +-enum request_type { +- REQ_MSR, +- REQ_CPUID, +- REQ_PORTIO, +- REQ_MMIO, +- REQ_PCICFG, +- REQ_WP, +- REQ_EXIT, +- REQ_MAX, +-} __attribute__((aligned(4))); ++#define REQ_STATE_PENDING 0 ++#define REQ_STATE_SUCCESS 1 ++#define REQ_STATE_PROCESSING 2 ++#define REQ_STATE_FAILED -1 + +-enum request_direction { +- REQUEST_READ, +- REQUEST_WRITE, +- DIRECTION_MAX, +-} __attribute__((aligned(4))); +- +-/* +- * IRQ type for ptdev +- */ +-enum irq_type { +- IRQ_INTX, +- IRQ_MSI, +- IRQ_MSIX, +-} __attribute__((aligned(4))); +- +-struct msr_request { +- enum request_direction direction; +- long index; +- long value; +-} __attribute__((aligned(8))); ++#define REQ_PORTIO 0 ++#define REQ_MMIO 1 ++#define REQ_PCICFG 2 ++#define REQ_WP 3 + ++#define REQUEST_READ 0 ++#define REQUEST_WRITE 1 + + struct mmio_request { +- enum request_direction direction; +- long address; +- long size; +- long value; ++ uint32_t direction; ++ uint32_t reserved; ++ int64_t address; ++ int64_t size; ++ int64_t value; + } __attribute__((aligned(8))); + +-struct io_request { +- enum request_direction direction; +- long address; +- long size; +- int value; ++struct pio_request { ++ uint32_t direction; ++ uint32_t reserved; ++ int64_t address; ++ int64_t size; ++ int32_t value; + } __attribute__((aligned(8))); + + struct pci_request { +- enum request_direction direction; +- long reserve; /*io_request address*/ +- long size; +- int value; +- int bus; +- int dev; +- int func; +- int reg; ++ 
uint32_t direction; ++ uint32_t reserved[3];/* need keep same header fields with pio_request */ ++ int64_t size; ++ int32_t value; ++ int32_t bus; ++ int32_t dev; ++ int32_t func; ++ int32_t reg; + } __attribute__((aligned(8))); + + /* vhm_request are 256Bytes aligned */ + struct vhm_request { + /* offset: 0bytes - 63bytes */ + union { +- int exitcode; +- enum request_type type; +- unsigned long rip; +- int reserved0[16]; ++ uint32_t type; ++ int32_t reserved0[16]; + }; + /* offset: 64bytes-127bytes */ + union { +- struct msr_request msr_request; +- struct io_request pio_request; ++ struct pio_request pio_request; + struct pci_request pci_request; + struct mmio_request mmio_request; +- long reserved1[8]; ++ int64_t reserved1[8]; + } reqs; + + /* True: valid req which need VHM to process. + * ACRN write, VHM read only + **/ +- int valid; ++ int32_t valid; + + /* the client which is distributed to handle this request */ +- int client; ++ int32_t client; + + /* 1: VHM had processed and success + * 0: VHM had not yet processed + * -1: VHM failed to process. 
Invalid request + * VHM write, ACRN read only + **/ +- enum request_state processed; ++ int32_t processed; + } __attribute__((aligned(256))); + + struct vhm_request_buffer { + union { + struct vhm_request req_queue[VHM_REQUEST_MAX]; +- char reserved[4096]; ++ int8_t reserved[4096]; + }; + } __attribute__((aligned(4096))); + +@@ -188,69 +152,52 @@ struct acrn_create_vm { + } __attribute__((aligned(8))); + + struct acrn_create_vcpu { +- int vcpuid; /* IN: vcpu id */ ++ int vcpuid; /* IN: vcpu id */ + int pcpuid; /* IN: pcpu id */ + } __attribute__((aligned(8))); + + struct acrn_set_ioreq_buffer { +- long req_buf; /* IN: gpa of per VM request_buffer*/ ++ uint64_t req_buf; /* IN: gpa of per VM request_buffer*/ + } __attribute__((aligned(8))); + +-struct acrn_ioreq_notify { +- int client_id; +- unsigned long vcpu_mask; +-} __attribute__((aligned(8))); ++/* ++ * intr type ++ * IOAPIC: inject interrupt to IOAPIC ++ * ISA: inject interrupt to both PIC and IOAPIC ++ */ ++#define ACRN_INTR_TYPE_ISA 0 ++#define ACRN_INTR_TYPE_IOAPIC 1 + + /* For ISA, PIC, IOAPIC etc */ + struct acrn_irqline { +- enum interrupt_type intr_type; +- unsigned long pic_irq; /* IN: for ISA type */ +- unsigned long ioapic_irq; /* IN: for IOAPIC type, -1 don't inject */ ++ uint32_t intr_type; ++ uint32_t reserved; ++ uint64_t pic_irq; /* IN: for ISA type */ ++ uint64_t ioapic_irq; /* IN: for IOAPIC type, -1 don't inject */ + } __attribute__((aligned(8))); + + /* For MSI type inject */ + struct acrn_msi_entry { +- unsigned long msi_addr; /* IN: addr[19:12] with dest vcpu id */ +- unsigned long msi_data; /* IN: data[7:0] with vector */ ++ uint64_t msi_addr; /* IN: addr[19:12] with dest vcpu id */ ++ uint64_t msi_data; /* IN: data[7:0] with vector */ + } __attribute__((aligned(8))); + + /* For NMI inject */ + struct acrn_nmi_entry { +- unsigned long vcpuid; /* IN: -1 means vcpu0 */ +-} __attribute__((aligned(8))); +- +-struct acrn_ptdev_irq { +- enum irq_type type; +- unsigned short virt_bdf; /* IN: 
Device virtual BDF# */ +- unsigned short phys_bdf; /* IN: Device physical BDF# */ +- union { +- struct { +- int virt_pin; /* IN: virtual IOAPIC pin */ +- int phys_pin; /* IN: physical IOAPIC pin */ +- bool pic_pin; /* IN: pin from PIC? */ +- } intx; +- struct { +- int vector_cnt; /* IN: vector count of MSI/MSIX */ +- +- /* IN: physcial address of MSI-X table */ +- unsigned long table_paddr; +- +- /* IN: size of MSI-X table (round up to 4K) */ +- int table_size; +- } msix; +- }; ++ int64_t vcpuid; /* IN: -1 means vcpu0 */ + } __attribute__((aligned(8))); + + struct acrn_vm_pci_msix_remap { +- unsigned short virt_bdf; /* IN: Device virtual BDF# */ +- unsigned short phys_bdf; /* IN: Device physical BDF# */ +- unsigned short msi_ctl; /* IN: PCI MSI/x cap control data */ +- unsigned long msi_addr; /* IN/OUT: msi address to fix */ +- unsigned int msi_data; /* IN/OUT: msi data to fix */ +- int msix; /* IN: 0 - MSI, 1 - MSI-X */ +- int msix_entry_index; /* IN: MSI-X the entry table index */ ++ uint16_t virt_bdf; /* IN: Device virtual BDF# */ ++ uint16_t phys_bdf; /* IN: Device physical BDF# */ ++ uint16_t msi_ctl; /* IN: PCI MSI/x cap control data */ ++ uint16_t reserved; ++ uint64_t msi_addr; /* IN/OUT: msi address to fix */ ++ uint32_t msi_data; /* IN/OUT: msi data to fix */ ++ int32_t msix; /* IN: 0 - MSI, 1 - MSI-X */ ++ int32_t msix_entry_index; /* IN: MSI-X the entry table index */ + /* IN: Vector Control for MSI-X Entry, field defined in MSIX spec */ +- unsigned int vector_ctl; ++ uint32_t vector_ctl; + } __attribute__((aligned(8))); + + #endif /* ACRN_COMMON_H */ +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index eeac0e9b4e76..fa32243a6407 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -1,5 +1,5 @@ + /* +- * virtio and hyperviosr service module (VHM): hypercall header ++ * hypercall definition + * + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. +@@ -53,12 +53,12 @@ + #define ACRN_HV_DEFS_H + + /* +- * Commmon structures for ACRN/VHM/DM ++ * Common structures for ACRN/VHM/DM + */ + #include "acrn_common.h" + + /* +- * Commmon structures for HV/VHM ++ * Common structures for HV/VHM + */ + + #define _HC_ID(x, y) (((x)<<24)|(y)) +@@ -101,35 +101,59 @@ + #define HC_SET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x03) + #define HC_RESET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x04) + ++/* TRACE */ ++#define HC_ID_TRACE_BASE 0x600UL ++#define HC_ACRN_SBUF_SETUP _HC_ID(HC_ID, HC_ID_TRACE_BASE + 0x00) ++ + #define ACRN_DOM0_VMID (0UL) + #define ACRN_INVALID_VMID (-1UL) + #define ACRN_INVALID_HPA (-1UL) + +-enum vm_memmap_type { +- MAP_MEM = 0, +- MAP_MMIO, +- MAP_UNMAP, +- MAP_UPDATE, +-}; ++/* Generic memory attributes */ ++#define MEM_ATTR_READ 0x00000001 ++#define MEM_ATTR_WRITE 0x00000002 ++#define MEM_ATTR_EXECUTE 0x00000004 ++#define MEM_ATTR_USER 0x00000008 ++#define MEM_ATTR_WB_CACHE 0x00000040 ++#define MEM_ATTR_WT_CACHE 0x00000080 ++#define MEM_ATTR_UNCACHED 0x00000100 ++#define MEM_ATTR_WC 0x00000200 ++#define MEM_ATTR_WP 0x00000400 ++ ++#define MEM_ATTR_ALL 0x00000007 ++#define MEM_ATTR_WRITE_PROT 0x00000005 ++#define MEM_ATTR_ALL_WB 0x00000047 ++#define MEM_ATTR_ALL_WC 0x00000207 + + struct vm_set_memmap { +- enum vm_memmap_type type; ++#define MAP_MEM 0 ++#define MAP_MMIO 1 ++#define MAP_UNMAP 2 ++ uint32_t type; ++ uint32_t reserved; ++ + /* IN: beginning guest GPA to map */ +- unsigned long foreign_gpa; ++ uint64_t remote_gpa; + + /* IN: VM0's GPA which foreign gpa will be mapped to */ +- unsigned long hvm_gpa; ++ uint64_t vm0_gpa; + + /* IN: length of the range */ +- unsigned long length; ++ uint64_t length; + +- /* IN: not used right now */ +- int prot; ++ /* IN: mem attr */ ++ uint32_t prot; ++} __attribute__((aligned(8))); ++ ++struct sbuf_setup_param { ++ uint32_t pcpu_id; ++ uint32_t sbuf_id; ++ 
uint64_t gpa; + } __attribute__((aligned(8))); + + struct vm_gpa2hpa { +- unsigned long gpa; /* IN: gpa to translation */ +- unsigned long hpa; /* OUT: -1 means invalid gpa */ ++ uint64_t gpa; /* IN: gpa to translation */ ++ uint64_t hpa; /* OUT: -1 means invalid gpa */ + } __attribute__((aligned(8))); + + struct hc_ptdev_irq { +@@ -152,4 +176,9 @@ struct hc_ptdev_irq { + }; + } __attribute__((aligned(8))); + ++struct hc_api_version { ++ uint32_t major_version; ++ uint32_t minor_version; ++} __attribute__((aligned(8))); ++ + #endif /* ACRN_HV_DEFS_H */ +diff --git a/include/linux/vhm/acrn_vhm_ioreq.h b/include/linux/vhm/acrn_vhm_ioreq.h +index 0daf46dcf9f7..fcec2c1e2eac 100644 +--- a/include/linux/vhm/acrn_vhm_ioreq.h ++++ b/include/linux/vhm/acrn_vhm_ioreq.h +@@ -63,16 +63,16 @@ int acrn_ioreq_create_client(unsigned long vmid, ioreq_handler_t handler, + char *name); + void acrn_ioreq_destroy_client(int client_id); + +-int acrn_ioreq_add_iorange(int client_id, enum request_type type, ++int acrn_ioreq_add_iorange(int client_id, uint32_t type, + long start, long end); +-int acrn_ioreq_del_iorange(int client_id, enum request_type type, ++int acrn_ioreq_del_iorange(int client_id, uint32_t type, + long start, long end); + + struct vhm_request *acrn_ioreq_get_reqbuf(int client_id); + int acrn_ioreq_attach_client(int client_id, bool check_kthread_stop); + + int acrn_ioreq_distribute_request(struct vhm_vm *vm); +-int acrn_ioreq_complete_request(int client_id, uint64_t vcpu_mask); ++int acrn_ioreq_complete_request(int client_id, uint64_t vcpu); + + void acrn_ioreq_intercept_bdf(int client_id, int bus, int dev, int func); + void acrn_ioreq_unintercept_bdf(int client_id); +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index 1af6fd3aa11b..f0401ac6a942 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -57,19 +57,6 @@ + #include + #include + +-#define MMU_MEM_ATTR_READ 0x00000001 +-#define 
MMU_MEM_ATTR_WRITE 0x00000002 +-#define MMU_MEM_ATTR_EXECUTE 0x00000004 +-#define MMU_MEM_ATTR_WB_CACHE 0x00000040 +-#define MMU_MEM_ATTR_WT_CACHE 0x00000080 +-#define MMU_MEM_ATTR_UNCACHED 0x00000100 +-#define MMU_MEM_ATTR_WC 0x00000200 +- +-#define MMU_MEM_ATTR_ALL 0x00000007 +-#define MMU_MEM_ATTR_WP 0x00000005 +-#define MMU_MEM_ATTR_ALL_WB 0x00000047 +-#define MMU_MEM_ATTR_ALL_WC 0x00000207 +- + /* 1:1 mapping for service OS */ + static inline unsigned long acrn_hpa2gpa(unsigned long hpa) + { +@@ -79,11 +66,11 @@ static inline unsigned long acrn_hpa2gpa(unsigned long hpa) + void *map_guest_phys(unsigned long vmid, u64 uos_phys, size_t size); + int unmap_guest_phys(unsigned long vmid, u64 uos_phys); + int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, int prot); ++ unsigned long host_gpa, unsigned long len, unsigned int prot); + int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, int prot); +-int update_mem_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, int prot); ++ unsigned long host_gpa, unsigned long len, unsigned int prot); ++int update_mmio_map(unsigned long vmid, unsigned long guest_gpa, ++ unsigned long host_gpa, unsigned long len, unsigned int prot); + + int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma); + +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index 35bb48ae6cd3..5447e951bf4b 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -146,8 +146,7 @@ inline long hcall_set_memmap(unsigned long vmid, + unsigned long memmap); + inline long hcall_set_ioreq_buffer(unsigned long vmid, + unsigned long buffer); +-inline long hcall_notify_req_finish(unsigned long vmid, +- unsigned long vcpu_mask); ++inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu); + inline long 
hcall_assert_irqline(unsigned long vmid, unsigned long irq); + inline long hcall_deassert_irqline(unsigned long vmid, unsigned long irq); + inline long hcall_pulse_irqline(unsigned long vmid, unsigned long irq); +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index df07e3c93467..79d91a858226 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -156,4 +156,9 @@ struct ic_ptdev_irq { + }; + }; + ++struct ioreq_notify { ++ int32_t client_id; ++ uint32_t vcpu; ++}; ++ + #endif /* VHM_IOCTL_DEFS_H */ +-- +2.17.1 + diff --git a/patches/0018-net-stmmac-add-dwxpcs-boardinfo-for-mdio_devi.connectivity b/patches/0018-net-stmmac-add-dwxpcs-boardinfo-for-mdio_devi.connectivity new file mode 100644 index 0000000000..3c10297a28 --- /dev/null +++ b/patches/0018-net-stmmac-add-dwxpcs-boardinfo-for-mdio_devi.connectivity @@ -0,0 +1,225 @@ +From a4f3aa4bf616be109e24170b21b0790fc491dcca Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Mon, 8 Jul 2019 22:50:56 +0800 +Subject: [PATCH 018/108] net: stmmac: add dwxpcs boardinfo for mdio_device + registration + +For EHL & TGL Ethernet PCS, the mdio bus address is the same across all +TSN controller instances. External PHY is using default mdio bus address of +0x0. As Ethernet DW PCS is only applicable for SGMII interface, we only +register setup_intel_mgbe_phy_conv() for all TSN controller with SGMII +interface only. + +Also introduce callback for remove mdio_device for unloading driver. 
+ +Signed-off-by: Ong Boon Leong +Signed-off-by: Voon Weifeng +--- + drivers/net/ethernet/stmicro/stmmac/Kconfig | 1 + + drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2 + + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 27 +++++++++++ + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 46 ++++++++++++++++++- + include/linux/stmmac.h | 4 ++ + 5 files changed, 79 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig +index 338e25a6374e..3425d37b6ae9 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig ++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig +@@ -201,6 +201,7 @@ config STMMAC_PCI + tristate "STMMAC PCI bus support" + depends on STMMAC_ETH && PCI + depends on COMMON_CLK ++ select DWXPCS + ---help--- + This selects the platform specific bus support for the stmmac driver. + This driver was tested on XLINX XC2V3000 FF1152AMT0221 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +index d993fc7e82c3..47fc750585a8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -29,6 +29,7 @@ struct stmmac_resources { + int wol_irq; + int lpi_irq; + int irq; ++ int phy_conv_irq; + }; + + struct stmmac_tx_info { +@@ -213,6 +214,7 @@ struct stmmac_priv { + void __iomem *mmcaddr; + void __iomem *ptpaddr; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; ++ int phy_conv_irq; + + #ifdef CONFIG_DEBUG_FS + struct dentry *dbgfs_dir; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index ff93b270ef47..2c819c59d190 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2740,11 +2740,25 @@ static int stmmac_open(struct net_device *dev) + } + } + ++ /* Start phy converter after MDIO bus IRQ handling is up */ ++ if (priv->plat->setup_phy_conv) { 
++ ret = priv->plat->setup_phy_conv(priv->mii, priv->phy_conv_irq, ++ priv->plat->phy_addr); ++ ++ if (ret < 0) { ++ netdev_err(priv->dev, ++ "%s: ERROR: setup phy conv (error: %d)\n", ++ __func__, ret); ++ goto phy_conv_error; ++ } ++ } ++ + stmmac_enable_all_queues(priv); + stmmac_start_all_queues(priv); + + return 0; + ++phy_conv_error: + lpiirq_error: + if (priv->wol_irq != dev->irq) + free_irq(priv->wol_irq, dev); +@@ -2774,6 +2788,7 @@ static int stmmac_release(struct net_device *dev) + { + struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; ++ int ret; + + if (priv->eee_enabled) + del_timer_sync(&priv->eee_ctrl_timer); +@@ -2796,6 +2811,17 @@ static int stmmac_release(struct net_device *dev) + if (priv->lpi_irq > 0) + free_irq(priv->lpi_irq, dev); + ++ /* Start phy converter after MDIO bus IRQ handling is up */ ++ if (priv->plat->remove_phy_conv) { ++ ret = priv->plat->remove_phy_conv(priv->mii); ++ if (ret < 0) { ++ netdev_err(priv->dev, ++ "%s: ERROR: remove phy conv (error: %d)\n", ++ __func__, ret); ++ return 0; ++ } ++ } ++ + /* Stop TX/RX DMA and clear the descriptors */ + stmmac_stop_all_dma(priv); + +@@ -4452,6 +4478,7 @@ int stmmac_dvr_probe(struct device *device, + priv->dev->irq = res->irq; + priv->wol_irq = res->wol_irq; + priv->lpi_irq = res->lpi_irq; ++ priv->phy_conv_irq = res->phy_conv_irq; + + if (!IS_ERR_OR_NULL(res->mac)) + memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 92d090a17afd..c5626c989a87 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -10,9 +10,10 @@ + *******************************************************************************/ + + #include ++#include + #include + #include +- ++#include + #include "stmmac.h" + + /* +@@ -109,6 +110,43 @@ static const struct stmmac_pci_info stmmac_pci_info = { + .setup = stmmac_default_data, + }; 
+ ++static struct dwxpcs_platform_data intel_mgbe_pdata = { ++ .mode = DWXPCS_MODE_SGMII_AN, ++}; ++ ++static struct mdio_board_info intel_mgbe_bdinfo = { ++ .bus_id = "stmmac-1", ++ .modalias = "dwxpcs", ++ .mdio_addr = 0x16, ++ .platform_data = &intel_mgbe_pdata, ++}; ++ ++static int setup_intel_mgbe_phy_conv(struct mii_bus *bus, int irq, ++ int phy_addr) ++{ ++ struct dwxpcs_platform_data *pdata = &intel_mgbe_pdata; ++ ++ pdata->irq = irq; ++ pdata->ext_phy_addr = phy_addr; ++ ++ return mdiobus_create_device(bus, &intel_mgbe_bdinfo); ++} ++ ++static int remove_intel_mgbe_phy_conv(struct mii_bus *bus) ++{ ++ struct mdio_board_info *bdinfo = &intel_mgbe_bdinfo; ++ struct mdio_device *mdiodev; ++ ++ mdiodev = mdiobus_get_mdio_device(bus, bdinfo->mdio_addr); ++ ++ if (!mdiodev) ++ return -1; ++ ++ mdio_device_remove(mdiodev); ++ ++ return 0; ++} ++ + static int intel_mgbe_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) + { +@@ -198,6 +236,11 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + ++ if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) { ++ plat->setup_phy_conv = setup_intel_mgbe_phy_conv; ++ plat->remove_phy_conv = remove_intel_mgbe_phy_conv; ++ } ++ + return 0; + } + +@@ -573,6 +616,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev, + res.addr = pcim_iomap_table(pdev)[i]; + res.wol_irq = pdev->irq; + res.irq = pdev->irq; ++ res.phy_conv_irq = res.irq; + + return stmmac_dvr_probe(&pdev->dev, plat, &res); + } +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 848dbe22d648..0a99e79fd991 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -12,6 +12,7 @@ + #ifndef __STMMAC_PLATFORM_DATA + #define __STMMAC_PLATFORM_DATA + ++#include + #include + + #define MTL_MAX_RX_QUEUES 8 +@@ -164,6 +165,9 @@ struct plat_stmmacenet_data { + int (*init)(struct platform_device *pdev, void *priv); + void (*exit)(struct 
platform_device *pdev, void *priv); + struct mac_device_info *(*setup)(void *priv); ++ int (*setup_phy_conv)(struct mii_bus *bus, int irq, ++ int phy_addr); ++ int (*remove_phy_conv)(struct mii_bus *bus); + void *bsp_priv; + struct clk *stmmac_clk; + struct clk *pclk; +-- +2.17.1 + diff --git a/patches/0018-nvme-connect-to-rpmb-layer.security b/patches/0018-nvme-connect-to-rpmb-layer.security new file mode 100644 index 0000000000..d00c9bf285 --- /dev/null +++ b/patches/0018-nvme-connect-to-rpmb-layer.security @@ -0,0 +1,317 @@ +From b2867f29203ef73998807a4b522a94348e41dddc Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Wed, 25 Apr 2018 19:07:06 +0300 +Subject: [PATCH 18/65] nvme: connect to rpmb layer + +This patch covers rpmb storage operation +as defined in NVMe spec 1.3a in section 8.10. +It only covers standard RPMB storage API, the device +configuration is not covered. + +Change-Id: I35c9cc7aeec5a08041b9986d60fc9ee55c66dda7 +Signed-off-by: Tomas Winkler +--- + drivers/nvme/host/Kconfig | 1 + + drivers/nvme/host/Makefile | 1 + + drivers/nvme/host/core.c | 51 ++++++++++++---- + drivers/nvme/host/nvme.h | 9 +++ + drivers/nvme/host/pci.c | 4 ++ + drivers/nvme/host/rpmb.c | 119 +++++++++++++++++++++++++++++++++++++ + 6 files changed, 175 insertions(+), 10 deletions(-) + create mode 100644 drivers/nvme/host/rpmb.c + +diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig +index 2b36f052bfb9..ea59a1638113 100644 +--- a/drivers/nvme/host/Kconfig ++++ b/drivers/nvme/host/Kconfig +@@ -6,6 +6,7 @@ config BLK_DEV_NVME + tristate "NVM Express block device" + depends on PCI && BLOCK + select NVME_CORE ++ select RPMB + ---help--- + The NVM Express driver is for solid state drives directly + connected to the PCI or PCI Express bus. 
If you know you +diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile +index 8a4b671c5f0c..3b7f99da2c9a 100644 +--- a/drivers/nvme/host/Makefile ++++ b/drivers/nvme/host/Makefile +@@ -16,6 +16,7 @@ nvme-core-$(CONFIG_NVM) += lightnvm.o + nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o + + nvme-y += pci.o ++nvme-y += rpmb.o + + nvme-fabrics-y += fabrics.o + +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index fa7ba09dca77..9b7b637e7830 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -3,7 +3,6 @@ + * NVM Express device driver + * Copyright (c) 2011-2014, Intel Corporation. + */ +- + #include + #include + #include +@@ -1990,25 +1989,54 @@ static const struct pr_ops nvme_pr_ops = { + .pr_clear = nvme_pr_clear, + }; + +-#ifdef CONFIG_BLK_SED_OPAL +-int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, +- bool send) ++int nvme_sec_send(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, ++ void *buffer, size_t len) + { +- struct nvme_ctrl *ctrl = data; + struct nvme_command cmd; + ++ dev_dbg(ctrl->device, "%s target = %hhu SPSP = %hu SECP = %hhX len=%zd\n", ++ __func__, nssf, spsp, secp, len); ++ + memset(&cmd, 0, sizeof(cmd)); +- if (send) +- cmd.common.opcode = nvme_admin_security_send; +- else +- cmd.common.opcode = nvme_admin_security_recv; ++ cmd.common.opcode = nvme_admin_security_send; + cmd.common.nsid = 0; +- cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); ++ cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8 | nssf); + cmd.common.cdw11 = cpu_to_le32(len); + + return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, + ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false); + } ++EXPORT_SYMBOL_GPL(nvme_sec_send); ++ ++int nvme_sec_recv(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, ++ void *buffer, size_t len) ++{ ++ struct nvme_command cmd; ++ ++ dev_dbg(ctrl->device, "%s target = %hhu SPSP = %hu SECP = %hhX len=%zd\n", 
++ __func__, nssf, spsp, secp, len); ++ ++ memset(&cmd, 0, sizeof(cmd)); ++ cmd.common.opcode = nvme_admin_security_recv; ++ cmd.common.nsid = 0; ++ cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8 | nssf); ++ cmd.common.cdw11 = cpu_to_le32(len); ++ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, ++ ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false); ++} ++EXPORT_SYMBOL_GPL(nvme_sec_recv); ++ ++#ifdef CONFIG_BLK_SED_OPAL ++int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, ++ bool send) ++{ ++ struct nvme_ctrl *ctrl = data; ++ ++ if (send) ++ return nvme_sec_send(ctrl, 0, spsp, secp, buffer, len); ++ else ++ return nvme_sec_recv(ctrl, 0, spsp, secp, buffer, len); ++} + EXPORT_SYMBOL_GPL(nvme_sec_submit); + #endif /* CONFIG_BLK_SED_OPAL */ + +@@ -2868,7 +2896,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) + ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); + } + ++ ctrl->rpmbs = le32_to_cpu(id->rpmbs); ++ + ret = nvme_mpath_init(ctrl, id); ++ + kfree(id); + + if (ret < 0) +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h +index 22e8401352c2..56dfb1f6e79c 100644 +--- a/drivers/nvme/host/nvme.h ++++ b/drivers/nvme/host/nvme.h +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -205,6 +206,7 @@ struct nvme_ctrl { + struct list_head subsys_entry; + + struct opal_dev *opal_dev; ++ struct rpmb_dev *rdev; + + char name[12]; + u16 cntlid; +@@ -234,6 +236,7 @@ struct nvme_ctrl { + u32 oaes; + u32 aen_result; + u32 ctratt; ++ u32 rpmbs; + unsigned int shutdown_timeout; + unsigned int kato; + bool subsystem; +@@ -461,6 +464,12 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl); + void nvme_stop_ctrl(struct nvme_ctrl *ctrl); + void nvme_put_ctrl(struct nvme_ctrl *ctrl); + int nvme_init_identify(struct nvme_ctrl *ctrl); ++int nvme_sec_send(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, ++ void *buffer, size_t len); ++int nvme_sec_recv(struct nvme_ctrl *ctrl, u8 
nssf, u16 spsp, u8 secp, ++ void *buffer, size_t len); ++int nvme_init_rpmb(struct nvme_ctrl *ctrl); ++void nvme_exit_rpmb(struct nvme_ctrl *ctrl); + + void nvme_remove_namespaces(struct nvme_ctrl *ctrl); + +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 869f462e6b6e..c6160563b065 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -2594,6 +2594,10 @@ static void nvme_reset_work(struct work_struct *work) + if (result) + goto out; + ++ result = nvme_init_rpmb(&dev->ctrl); ++ if (result < 0) ++ goto out; ++ + if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { + if (!dev->ctrl.opal_dev) + dev->ctrl.opal_dev = +diff --git a/drivers/nvme/host/rpmb.c b/drivers/nvme/host/rpmb.c +new file mode 100644 +index 000000000000..b43e04287628 +--- /dev/null ++++ b/drivers/nvme/host/rpmb.c +@@ -0,0 +1,119 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2018-2019 Intel Corporation. ++ */ ++#include ++#include "nvme.h" ++#define NVME_SECP_RPMB 0xEA /* Security Protocol EAh is assigned ++ * for NVMe use (refer to ACS-4) ++ */ ++#define NVME_SPSP_RPMB 0x0001 /* RPMB Target */ ++static int nvme_rpmb_cmd_seq(struct device *dev, u8 target, ++ struct rpmb_cmd *cmds, u32 ncmds) ++{ ++ struct nvme_ctrl *ctrl; ++ struct rpmb_cmd *cmd; ++ u32 size; ++ int ret; ++ int i; ++ ++ ctrl = dev_get_drvdata(dev); ++ ++ for (ret = 0, i = 0; i < ncmds && !ret; i++) { ++ cmd = &cmds[i]; ++ size = rpmb_ioc_frames_len_nvme(cmd->nframes); ++ if (cmd->flags & RPMB_F_WRITE) ++ ret = nvme_sec_send(ctrl, target, ++ NVME_SPSP_RPMB, NVME_SECP_RPMB, ++ cmd->frames, size); ++ else ++ ret = nvme_sec_recv(ctrl, target, ++ NVME_SPSP_RPMB, NVME_SECP_RPMB, ++ cmd->frames, size); ++ } ++ ++ return ret; ++} ++ ++static int nvme_rpmb_get_capacity(struct device *dev, u8 target) ++{ ++ struct nvme_ctrl *ctrl; ++ ++ ctrl = dev_get_drvdata(dev); ++ ++ return ((ctrl->rpmbs >> 16) & 0xFF) + 1; ++} ++ ++static struct rpmb_ops nvme_rpmb_dev_ops = { ++ .cmd_seq = 
nvme_rpmb_cmd_seq, ++ .get_capacity = nvme_rpmb_get_capacity, ++ .type = RPMB_TYPE_NVME, ++}; ++ ++static void nvme_rpmb_set_cap(struct nvme_ctrl *ctrl, ++ struct rpmb_ops *ops) ++{ ++ ops->wr_cnt_max = ((ctrl->rpmbs >> 24) & 0xFF) + 1; ++ ops->rd_cnt_max = ops->wr_cnt_max; ++ ops->block_size = 2; /* 1 sector == 2 half sectors */ ++ ops->auth_method = (ctrl->rpmbs >> 3) & 0x3; ++} ++ ++static void nvme_rpmb_add(struct nvme_ctrl *ctrl) ++{ ++ struct rpmb_dev *rdev; ++ int ndevs = ctrl->rpmbs & 0x7; ++ int i; ++ ++ nvme_rpmb_set_cap(ctrl, &nvme_rpmb_dev_ops); ++ ++ /* Add RPMB partitions */ ++ for (i = 0; i < ndevs; i++) { ++ rdev = rpmb_dev_register(ctrl->device, i, &nvme_rpmb_dev_ops); ++ if (IS_ERR(rdev)) { ++ dev_warn(ctrl->device, "%s: cannot register to rpmb %ld\n", ++ dev_name(ctrl->device), PTR_ERR(rdev)); ++ } ++ dev_set_drvdata(&rdev->dev, ctrl); ++ } ++} ++ ++static void nvme_rpmb_remove(struct nvme_ctrl *ctrl) ++{ ++ int ndevs = ctrl->rpmbs & 0x7; ++ int i; ++ ++ /* FIXME: target */ ++ for (i = 0; i < ndevs; i++) ++ rpmb_dev_unregister_by_device(ctrl->device, i); ++} ++ ++int nvme_init_rpmb(struct nvme_ctrl *ctrl) ++{ ++ dev_err(ctrl->device, "RPMBS %X\n", ctrl->rpmbs); ++ ++ if ((ctrl->rpmbs & 0x7) == 0x0) { ++ dev_err(ctrl->device, "RPMBS No partitions\n"); ++ return 0; ++ } ++ ++ dev_err(ctrl->device, "RPMBS Number of partitions %d\n", ++ ctrl->rpmbs & 0x7); ++ dev_err(ctrl->device, "RPMBS Authentication Method: %d\n", ++ (ctrl->rpmbs >> 3) & 0x3); ++ dev_err(ctrl->device, "RPMBS Total Size: %d %dK", ++ (ctrl->rpmbs >> 16) & 0xFF, ++ (((ctrl->rpmbs >> 16) & 0xFF) + 1) * 128); ++ dev_err(ctrl->device, "RPMBS Access Size: %d %dB", ++ (ctrl->rpmbs >> 24) & 0xFF, ++ (((ctrl->rpmbs >> 24) & 0xFF) + 1) * 512); ++ ++ nvme_rpmb_add(ctrl); ++ ++ return 0; ++} ++ ++void nvme_exit_rpmb(struct nvme_ctrl *ctrl) ++{ ++ nvme_rpmb_remove(ctrl); ++} +-- +2.17.1 + diff --git a/patches/0018-platform-x86-SoCWatch-build-issue-with-5.2-ke.sep-socwatch 
b/patches/0018-platform-x86-SoCWatch-build-issue-with-5.2-ke.sep-socwatch new file mode 100644 index 0000000000..1c08fd9fb8 --- /dev/null +++ b/patches/0018-platform-x86-SoCWatch-build-issue-with-5.2-ke.sep-socwatch @@ -0,0 +1,33 @@ +From 60e52dadf169882239ac3b875ff1a77afc1c2aff Mon Sep 17 00:00:00 2001 +From: Faycal Benmlih +Date: Thu, 23 May 2019 06:49:32 -0500 +Subject: [PATCH 18/27] platform/x86: SoCWatch build issue with 5.2 kernel + +In kernel version 5.2, cpufreq_freqs structure now has a pointer +to a cpufreq_policy structure in the place of cpu. +cpu is grabbed from the policy. + +Signed-off-by: Faycal Benmlih +--- + drivers/platform/x86/socwatch/sw_trace_notifier_provider.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +index 2bba11072985..e482a628d3ab 100644 +--- a/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c ++++ b/drivers/platform/x86/socwatch/sw_trace_notifier_provider.c +@@ -1756,7 +1756,11 @@ static int sw_probe_cpufreq_notifier_i(struct notifier_block *block, + { + struct cpufreq_freqs *freqs = data; + static struct sw_trace_notifier_data *node; ++#if KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE + int cpu = freqs->cpu; ++#else ++ int cpu = freqs->policy->cpu; ++#endif /* KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE */ + + if (state == CPUFREQ_PRECHANGE) { + pw_pr_debug( +-- +2.17.1 + diff --git a/patches/0018-usb-typec-ucsi-New-error-codes.usb-typec b/patches/0018-usb-typec-ucsi-New-error-codes.usb-typec new file mode 100644 index 0000000000..5b0a3fb499 --- /dev/null +++ b/patches/0018-usb-typec-ucsi-New-error-codes.usb-typec @@ -0,0 +1,77 @@ +From 26190fb1419b938603b354a2c83b52eb070e5885 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Tue, 1 Oct 2019 09:18:57 +0300 +Subject: [PATCH 18/18] usb: typec: ucsi: New error codes + +Adding new error codes to the driver that were introduced in +UCSI 
specification v1.1. + +Signed-off-by: Heikki Krogerus +--- + drivers/usb/typec/ucsi/ucsi.c | 25 ++++++++++++++++++++----- + drivers/usb/typec/ucsi/ucsi.h | 6 ++++++ + 2 files changed, 26 insertions(+), 5 deletions(-) + +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index fbc0ae1851f9..0ec2a38f3f2a 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -86,18 +86,33 @@ static int ucsi_read_error(struct ucsi *ucsi) + case UCSI_ERROR_DEAD_BATTERY: + dev_warn(ucsi->dev, "Dead battery condition!\n"); + return -EPERM; +- /* The following mean a bug in this driver */ + case UCSI_ERROR_INVALID_CON_NUM: + case UCSI_ERROR_UNREGONIZED_CMD: + case UCSI_ERROR_INVALID_CMD_ARGUMENT: +- dev_err(ucsi->dev, "possible UCSI driver bug (0x%x)\n", error); ++ dev_err(ucsi->dev, "possible UCSI driver bug %u\n", error); + return -EINVAL; ++ case UCSI_ERROR_OVERCURRENT: ++ dev_warn(ucsi->dev, "Overcurrent condition\n"); ++ break; ++ case UCSI_ERROR_PARTNER_REJECTED_SWAP: ++ dev_warn(ucsi->dev, "Partner rejected swap\n"); ++ break; ++ case UCSI_ERROR_HARD_RESET: ++ dev_warn(ucsi->dev, "Hard reset occurred\n"); ++ break; ++ case UCSI_ERROR_PPM_POLICY_CONFLICT: ++ dev_warn(ucsi->dev, "PPM Policy conflict\n"); ++ break; ++ case UCSI_ERROR_SWAP_REJECTED: ++ dev_warn(ucsi->dev, "Swap rejected\n"); ++ break; ++ case UCSI_ERROR_UNDEFINED: + default: +- dev_err(ucsi->dev, "%s: error without status\n", __func__); +- return -EIO; ++ dev_err(ucsi->dev, "unknown error %u\n", error); ++ break; + } + +- return 0; ++ return -EIO; + } + + static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h +index 3affd5f11678..48ab35f549e8 100644 +--- a/drivers/usb/typec/ucsi/ucsi.h ++++ b/drivers/usb/typec/ucsi/ucsi.h +@@ -133,6 +133,12 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num); + #define UCSI_ERROR_CC_COMMUNICATION_ERR BIT(4) + #define UCSI_ERROR_DEAD_BATTERY BIT(5) + 
#define UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL BIT(6) ++#define UCSI_ERROR_OVERCURRENT BIT(7) ++#define UCSI_ERROR_UNDEFINED BIT(8) ++#define UCSI_ERROR_PARTNER_REJECTED_SWAP BIT(9) ++#define UCSI_ERROR_HARD_RESET BIT(10) ++#define UCSI_ERROR_PPM_POLICY_CONFLICT BIT(11) ++#define UCSI_ERROR_SWAP_REJECTED BIT(12) + + /* Data structure filled by PPM in response to GET_CAPABILITY command. */ + struct ucsi_capability { +-- +2.17.1 + diff --git a/patches/0019-ASoC-Intel-Skylake-Remove-skl_dsp_acquire_irq.audio b/patches/0019-ASoC-Intel-Skylake-Remove-skl_dsp_acquire_irq.audio new file mode 100644 index 0000000000..3e75655bca --- /dev/null +++ b/patches/0019-ASoC-Intel-Skylake-Remove-skl_dsp_acquire_irq.audio @@ -0,0 +1,55 @@ +From c1ba189b80c922dccdf53791191c397bcc6b4722 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Thu, 15 Aug 2019 17:42:22 +0200 +Subject: [PATCH 019/193] ASoC: Intel: Skylake: Remove skl_dsp_acquire_irq + +With Skylake following the unified sst_dsp init and free flow, there is +no need for custom _acquire_irq function. Framework takes care of this +with constructor for us. Remove redundant handler. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-sst-dsp.c | 14 -------------- + sound/soc/intel/skylake/skl-sst-dsp.h | 1 - + 2 files changed, 15 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c +index 9d8eb1af4798..773b4b562a07 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.c ++++ b/sound/soc/intel/skylake/skl-sst-dsp.c +@@ -418,20 +418,6 @@ int skl_dsp_sleep(struct sst_dsp *ctx) + } + EXPORT_SYMBOL_GPL(skl_dsp_sleep); + +-int skl_dsp_acquire_irq(struct sst_dsp *sst) +-{ +- int ret; +- +- /* Register the ISR */ +- ret = request_threaded_irq(sst->irq, sst->ops->irq_handler, +- sst->ops->thread_fn, IRQF_SHARED, "AudioDSP", sst); +- if (ret) +- dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n", +- sst->irq); +- +- return ret; +-} +- + void skl_dsp_free(struct sst_dsp *dsp) + { + struct skl_dev *skl = dsp->thread_context; +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index 840353cc35df..8aba81c8c13e 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -209,7 +209,6 @@ int skl_cldma_prepare(struct sst_dsp *ctx); + int skl_cldma_wait_interruptible(struct sst_dsp *ctx); + + void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state); +-int skl_dsp_acquire_irq(struct sst_dsp *sst); + bool is_skl_dsp_running(struct sst_dsp *ctx); + + unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx); +-- +2.17.1 + diff --git a/patches/0019-dmaengine-dw-Implement-DMA-crossbar-support.lpss b/patches/0019-dmaengine-dw-Implement-DMA-crossbar-support.lpss new file mode 100644 index 0000000000..81d15a9d45 --- /dev/null +++ b/patches/0019-dmaengine-dw-Implement-DMA-crossbar-support.lpss @@ -0,0 +1,203 @@ +From d8365ef4120411a4795aa5943494a67b179b622e Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 5 Aug 2019 20:33:15 +0300 +Subject: [PATCH 19/40] dmaengine: dw: Implement DMA 
crossbar support + +Some of DMA controllers, supported by Synopsys DesignWare DMA driver +may have an additional IP, so called DMA crossbar. This IP allows to use +less request lines of DMA controller than slave devices need altogether. + +Here is the implementation of driver to support DMA crossbar. + +NOTE! There is a proof-of-concept solely based on non-existing CSRT / DSDT +implementations of the PSE DMA. It also questionable if this approach is what +upstream will take. + +Signed-off-by: Andy Shevchenko +--- + drivers/dma/dw/Makefile | 2 +- + drivers/dma/dw/internal.h | 3 + + drivers/dma/dw/xbar.c | 145 ++++++++++++++++++++++++++++++++++++++ + 3 files changed, 149 insertions(+), 1 deletion(-) + create mode 100644 drivers/dma/dw/xbar.c + +diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile +index b6f06699e91a..b7ebe6edbdc1 100644 +--- a/drivers/dma/dw/Makefile ++++ b/drivers/dma/dw/Makefile +@@ -1,6 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o +-dw_dmac_core-objs := core.o dw.o idma32.o ++dw_dmac_core-objs := core.o dw.o idma32.o xbar.o + + obj-$(CONFIG_DW_DMAC) += dw_dmac.o + dw_dmac-y := platform.o +diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h +index 2e1c52eefdeb..2bf119386dc6 100644 +--- a/drivers/dma/dw/internal.h ++++ b/drivers/dma/dw/internal.h +@@ -23,6 +23,9 @@ int do_dw_dma_enable(struct dw_dma_chip *chip); + + extern bool dw_dma_filter(struct dma_chan *chan, void *param); + ++int idma32_xbar_probe(struct dw_dma_chip *chip); ++int idma32_xbar_remove(struct dw_dma_chip *chip); ++ + #ifdef CONFIG_ACPI + void dw_dma_acpi_controller_register(struct dw_dma *dw); + void dw_dma_acpi_controller_free(struct dw_dma *dw); +diff --git a/drivers/dma/dw/xbar.c b/drivers/dma/dw/xbar.c +new file mode 100644 +index 000000000000..b7f24059ea18 +--- /dev/null ++++ b/drivers/dma/dw/xbar.c +@@ -0,0 +1,145 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (C) 2019 Intel Corporation ++ 
++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "internal.h" ++ ++#define DMA_CTL_CH(x) (0x1000 + (x) * 4) ++#define DMA_SRC_ADDR_FILLIN(x) (0x1100 + (x) * 4) ++#define DMA_DST_ADDR_FILLIN(x) (0x1200 + (x) * 4) ++#define DMA_XBAR_SEL(x) (0x1300 + (x) * 4) ++ ++#define CTL_CH_TRANSFER_MODE_MASK GENMASK(1, 0) ++#define CTL_CH_TRANSFER_MODE_S2S 0 ++#define CTL_CH_TRANSFER_MODE_S2D 1 ++#define CTL_CH_TRANSFER_MODE_D2S 2 ++#define CTL_CH_TRANSFER_MODE_D2D 3 ++#define CTL_CH_RD_RS_MASK GENMASK(4, 3) ++#define CTL_CH_WR_RS_MASK GENMASK(6, 5) ++#define CTL_CH_RD_NON_SNOOP_BIT BIT(8) ++#define CTL_CH_WR_NON_SNOOP_BIT BIT(9) ++ ++#define XBAR_SEL_DEVID_MASK GENMASK(15, 0) ++#define XBAR_SEL_RX_TX_BIT BIT(16) ++#define XBAR_SEL_RX_TX_SHIFT 16 ++ ++static bool xbar_filter(struct dma_chan *chan, void *param) ++{ ++ struct acpi_dma_spec *dma_spec = param; ++ struct dw_dma_slave slave = { ++ .dma_dev = dma_spec->dev, ++ .src_id = dma_spec->slave_id, ++ .dst_id = dma_spec->slave_id, ++ }; ++ ++ return dw_dma_filter(chan, &slave); ++} ++ ++static void xbar_configure(struct acpi_dma_spec *dma_spec, struct dma_chan *chan) ++{ ++ struct dw_dma_chip_pdata *data = dev_get_drvdata(dma_spec->dev); ++ struct pci_dev *pdev = to_pci_dev(dma_spec->consumer); ++ phys_addr_t base = pci_resource_start(pdev, 0); ++ void __iomem *x = data->chip->regs; ++ size_t d = dma_spec->index; ++ int c = chan->chan_id; ++ u32 value; ++ ++ /* Configure upper part of the address */ ++ if (d) { ++ writel(upper_32_bits(base), x + DMA_SRC_ADDR_FILLIN(c)); ++ writel(0, x + DMA_DST_ADDR_FILLIN(c)); ++ } else { ++ writel(0, x + DMA_SRC_ADDR_FILLIN(c)); ++ writel(upper_32_bits(base), x + DMA_DST_ADDR_FILLIN(c)); ++ } ++ ++ /* Configure crossbar selection */ ++ value = readl(x + DMA_XBAR_SEL(c)); ++ value &= XBAR_SEL_DEVID_MASK | XBAR_SEL_RX_TX_BIT; ++ value |= pdev->devfn | (d << XBAR_SEL_RX_TX_SHIFT); ++ writel(value, x + DMA_XBAR_SEL(c)); ++ ++ /* Configure channel attributes */ ++ 
value = readl(x + DMA_CTL_CH(c)); ++ value &= CTL_CH_RD_NON_SNOOP_BIT | CTL_CH_WR_NON_SNOOP_BIT; ++ value &= CTL_CH_RD_RS_MASK | CTL_CH_WR_RS_MASK; ++ value &= CTL_CH_TRANSFER_MODE_MASK; ++ value |= d ? CTL_CH_RD_NON_SNOOP_BIT : CTL_CH_WR_NON_SNOOP_BIT; ++ value |= d ? CTL_CH_TRANSFER_MODE_S2D : CTL_CH_TRANSFER_MODE_D2S; ++ writel(value, x + DMA_CTL_CH(c)); ++} ++ ++static struct dma_chan *xbar_xlate(struct acpi_dma_spec *dma_spec, struct acpi_dma *adma) ++{ ++ struct acpi_dma_filter_info *info = adma->data; ++ struct dma_chan *chan; ++ ++ if (!info || !info->filter_fn) ++ return NULL; ++ ++ chan = dma_request_channel(info->dma_cap, info->filter_fn, dma_spec); ++ if (!chan) ++ return NULL; ++ ++ xbar_configure(dma_spec, chan); ++ return chan; ++} ++ ++static void xbar_controller_register(struct dw_dma *dw) ++{ ++ struct device *dev = dw->dma.dev; ++ struct acpi_dma_filter_info *info; ++ int ret; ++ ++ if (!has_acpi_companion(dev)) ++ return; ++ ++ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); ++ if (!info) ++ return; ++ ++ dma_cap_zero(info->dma_cap); ++ dma_cap_set(DMA_SLAVE, info->dma_cap); ++ info->filter_fn = xbar_filter; ++ ++ ret = acpi_dma_controller_register(dev, xbar_xlate, info); ++ if (ret) ++ dev_err(dev, "could not register acpi_dma_controller\n"); ++} ++ ++static void xbar_controller_free(struct dw_dma *dw) ++{ ++ struct device *dev = dw->dma.dev; ++ ++ if (!has_acpi_companion(dev)) ++ return; ++ ++ acpi_dma_controller_free(dev); ++} ++ ++int idma32_xbar_probe(struct dw_dma_chip *chip) ++{ ++ int ret; ++ ++ ret = idma32_dma_probe(chip); ++ if (ret) ++ return ret; ++ ++ xbar_controller_register(chip->dw); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(idma32_xbar_probe); ++ ++int idma32_xbar_remove(struct dw_dma_chip *chip) ++{ ++ xbar_controller_free(chip->dw); ++ return idma32_dma_remove(chip); ++} ++EXPORT_SYMBOL_GPL(idma32_xbar_remove); +-- +2.17.1 + diff --git a/patches/0019-drm-i915-Use-subslice-stride-to-set-subslices-for-a-gi.drm 
b/patches/0019-drm-i915-Use-subslice-stride-to-set-subslices-for-a-gi.drm new file mode 100644 index 0000000000..06c7440e2e --- /dev/null +++ b/patches/0019-drm-i915-Use-subslice-stride-to-set-subslices-for-a-gi.drm @@ -0,0 +1,65 @@ +From 11ae6325b2b3242c3c888f9ac10e0ebd456fc57b Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:03:03 -0700 +Subject: [PATCH 019/690] drm/i915: Use subslice stride to set subslices for a + given slice + +Add a subslice stride calculation when setting subslices. This +aligns more closely with the userspace expectation of the subslice +mask structure. + +v2: Use local variable for subslice_mask on HSW and + clean up a few other subslice_mask local variable + changes +v3: Add GEM_BUG_ON for ss_stride to prevent array overflow (Chris) + Split main set function and refactors in intel_device_info.c + into separate patches (Chris) +v4: Reduce ss_stride size check when setting subslices per slice + based on actual expected max stride (Chris) + Move that GEM_BUG_ON check for the ss_stride out to the patch + which adds the ss_stride +v5: Use memcpy instead of looping through each stride index + +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-8-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/gt/intel_sseu.c | 6 ++++-- + drivers/gpu/drm/i915/gt/intel_sseu.h | 2 +- + 2 files changed, 5 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c +index 3a5db0dbac72..1505042d7b5d 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.c ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.c +@@ -33,9 +33,11 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) + } + + void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, +- u8 ss_mask) ++ u32 ss_mask) + { +- sseu->subslice_mask[slice] = ss_mask; ++ int offset = slice * sseu->ss_stride; ++ ++ 
memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride); + } + + unsigned int +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h +index 7f600f50dedb..73a9064291a2 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.h ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h +@@ -79,7 +79,7 @@ unsigned int + intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); + + void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, +- u8 ss_mask); ++ u32 ss_mask); + + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, + const struct intel_sseu *req_sseu); +-- +2.17.1 + diff --git a/patches/0019-net-stmmac-Enable-SERDES-power-up-down-sequen.connectivity b/patches/0019-net-stmmac-Enable-SERDES-power-up-down-sequen.connectivity new file mode 100644 index 0000000000..9e5e39a9ff --- /dev/null +++ b/patches/0019-net-stmmac-Enable-SERDES-power-up-down-sequen.connectivity @@ -0,0 +1,577 @@ +From 88f2e40859777641eba131c9371d107b6e647221 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Wed, 24 Apr 2019 21:41:27 +0800 +Subject: [PATCH 019/108] net: stmmac: Enable SERDES power up/down sequence + +net: stmmac: enable Intel PHY IF power up/down sequence PHY IF +converts 8/10 bits data to SGMII signal. Below is an example of +HW configuration. + +<-----------------GBE Controller---------->|<--External PHY chip--> ++----------+ +----+ +---+ +----------+ +| EQoS | <-GMII->| DW | < ------ > |PHY| <-SGMII-> | External | +| MAC | |xPCS| |IF | | PHY | ++----------+ +----+ +---+ +----------+ + ^ ^ ^ ^ + | | | | + +---------------------MDIO-------------------------+ + +PHY IF configuration and status registers are accessible through +mdio address 0x15 which is defined as intel_adhoc_addr. During D0, +The driver will need to power up PHY IF by changing the power state +to P0. Likewise, for D3, the driver sets PHY IF power state to P3. 
+ +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/Makefile | 2 +- + drivers/net/ethernet/stmicro/stmmac/common.h | 1 + + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 1 + + drivers/net/ethernet/stmicro/stmmac/hwif.c | 43 ++++- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 12 ++ + .../ethernet/stmicro/stmmac/intel_serdes.c | 181 ++++++++++++++++++ + .../ethernet/stmicro/stmmac/intel_serdes.h | 23 +++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 8 + + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 3 + + include/linux/stmmac.h | 2 + + 10 files changed, 274 insertions(+), 2 deletions(-) + create mode 100644 drivers/net/ethernet/stmicro/stmmac/intel_serdes.c + create mode 100644 drivers/net/ethernet/stmicro/stmmac/intel_serdes.h + +diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile +index c59926d96bcc..3230d2673cb5 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/Makefile ++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile +@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ + mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ + dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ + stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \ +- $(stmmac-y) ++ intel_serdes.o $(stmmac-y) + + stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index 33c9f3aa10e4..2e4d69e5bb21 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -434,6 +434,7 @@ struct mii_regs { + + struct mac_device_info { + const struct stmmac_ops *mac; ++ const struct stmmac_serdes_ops *serdes; + const struct stmmac_desc_ops *desc; + const struct stmmac_dma_ops *dma; + const struct stmmac_mode_ops *mode; +diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index e8879a78bcce..56a0e858c9cd 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -17,6 +17,7 @@ + #include + #include "stmmac.h" + #include "stmmac_pcs.h" ++#include "intel_serdes.h" + #include "dwmac4.h" + #include "dwmac5.h" + +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c +index 11c7f92e99b4..b5dc33cb7054 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c +@@ -74,11 +74,13 @@ static const struct stmmac_hwif_entry { + bool gmac4; + bool xgmac; + bool mdio_intr_en; ++ bool has_serdes; + u32 min_id; + const struct stmmac_regs_off regs; + const void *desc; + const void *dma; + const void *mac; ++ const void *serdes; + const void *hwtimestamp; + const void *mode; + const void *tc; +@@ -92,6 +94,7 @@ static const struct stmmac_hwif_entry { + .gmac4 = false, + .xgmac = false, + .mdio_intr_en = false, ++ .has_serdes = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, +@@ -100,6 +103,7 @@ static const struct stmmac_hwif_entry { + .desc = NULL, + .dma = &dwmac100_dma_ops, + .mac = &dwmac100_ops, ++ .serdes = NULL, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = NULL, +@@ -111,6 +115,7 @@ static const struct stmmac_hwif_entry { + .gmac4 = false, + .xgmac = false, + .mdio_intr_en = false, ++ .has_serdes = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, +@@ -119,6 +124,7 @@ static const struct stmmac_hwif_entry { + .desc = NULL, + .dma = &dwmac1000_dma_ops, + .mac = &dwmac1000_ops, ++ .serdes = NULL, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = NULL, +@@ -130,6 +136,7 @@ static const struct stmmac_hwif_entry { + .gmac4 = true, + .xgmac = false, + .mdio_intr_en = false, ++ .has_serdes = false, + .min_id = 0, + .regs = { + .ptp_off = 
PTP_GMAC4_OFFSET, +@@ -137,6 +144,7 @@ static const struct stmmac_hwif_entry { + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, ++ .serdes = NULL, + .mac = &dwmac4_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, +@@ -149,6 +157,7 @@ static const struct stmmac_hwif_entry { + .gmac4 = true, + .xgmac = false, + .mdio_intr_en = false, ++ .has_serdes = false, + .min_id = DWMAC_CORE_4_00, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -157,6 +166,7 @@ static const struct stmmac_hwif_entry { + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, + .mac = &dwmac410_ops, ++ .serdes = NULL, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, +@@ -168,6 +178,7 @@ static const struct stmmac_hwif_entry { + .gmac4 = true, + .xgmac = false, + .mdio_intr_en = false, ++ .has_serdes = false, + .min_id = DWMAC_CORE_4_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -176,6 +187,7 @@ static const struct stmmac_hwif_entry { + .desc = &dwmac4_desc_ops, + .dma = &dwmac410_dma_ops, + .mac = &dwmac410_ops, ++ .serdes = NULL, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, +@@ -187,6 +199,7 @@ static const struct stmmac_hwif_entry { + .gmac4 = true, + .xgmac = false, + .mdio_intr_en = true, ++ .has_serdes = false, + .min_id = DWMAC_CORE_5_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -195,6 +208,7 @@ static const struct stmmac_hwif_entry { + .desc = &dwmac4_desc_ops, + .dma = &dwmac410_dma_ops, + .mac = &dwmac510_ops, ++ .serdes = NULL, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, +@@ -206,6 +220,7 @@ static const struct stmmac_hwif_entry { + .gmac4 = false, + .xgmac = true, + .mdio_intr_en = false, ++ .has_serdes = false, + .min_id = DWXGMAC_CORE_2_10, + .regs = { + .ptp_off = PTP_XGMAC_OFFSET, +@@ -214,13 +229,35 @@ static const struct stmmac_hwif_entry { + .desc = &dwxgmac210_desc_ops, + .dma = &dwxgmac210_dma_ops, + .mac = &dwxgmac210_ops, ++ 
.serdes = NULL, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = &dwmac510_tc_ops, + .mmc = &dwxgmac_mmc_ops, + .setup = dwxgmac2_setup, + .quirks = NULL, +- }, ++ }, { ++ .gmac = false, ++ .gmac4 = true, ++ .xgmac = false, ++ .mdio_intr_en = true, ++ .has_serdes = true, ++ .min_id = DWMAC_CORE_5_10, ++ .regs = { ++ .ptp_off = PTP_GMAC4_OFFSET, ++ .mmc_off = MMC_GMAC4_OFFSET, ++ }, ++ .desc = &dwmac4_desc_ops, ++ .dma = &dwmac410_dma_ops, ++ .mac = &dwmac510_ops, ++ .serdes = &intel_serdes_ops, ++ .hwtimestamp = &stmmac_ptp, ++ .mode = &dwmac4_ring_mode_ops, ++ .tc = &dwmac510_tc_ops, ++ .mmc = &dwmac_mmc_ops, ++ .setup = dwmac4_setup, ++ .quirks = NULL, ++ } + }; + + int stmmac_hwif_init(struct stmmac_priv *priv) +@@ -228,6 +265,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv) + bool needs_xgmac = priv->plat->has_xgmac; + bool needs_gmac4 = priv->plat->has_gmac4; + bool needs_gmac = priv->plat->has_gmac; ++ bool needs_serdes = priv->plat->has_serdes; + const struct stmmac_hwif_entry *entry; + struct mac_device_info *mac; + bool needs_setup = true; +@@ -272,6 +310,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv) + continue; + if (needs_xgmac ^ entry->xgmac) + continue; ++ if (needs_serdes ^ entry->has_serdes) ++ continue; + /* Use synopsys_id var because some setups can override this */ + if (priv->synopsys_id < entry->min_id) + continue; +@@ -280,6 +320,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv) + mac->desc = mac->desc ? : entry->desc; + mac->dma = mac->dma ? : entry->dma; + mac->mac = mac->mac ? : entry->mac; ++ mac->serdes = mac->serdes ? : entry->serdes; + mac->ptp = mac->ptp ? : entry->hwtimestamp; + mac->mode = mac->mode ? : entry->mode; + mac->tc = mac->tc ? 
: entry->tc; +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index ddb851d99618..9c41cb46b9aa 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -458,6 +458,17 @@ struct stmmac_ops { + #define stmmac_set_arp_offload(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_arp_offload, __args) + ++/* Helpers for serdes */ ++struct stmmac_serdes_ops { ++ int (*serdes_powerup)(struct net_device *ndev); ++ int (*serdes_powerdown)(struct net_device *ndev); ++}; ++ ++#define stmmac_serdes_powerup(__priv, __args...) \ ++ stmmac_do_callback(__priv, serdes, serdes_powerup, __args) ++#define stmmac_serdes_powerdown(__priv, __args...) \ ++ stmmac_do_callback(__priv, serdes, serdes_powerdown, __args) ++ + /* PTP and HW Timer helpers */ + struct stmmac_hwtimestamp { + void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); +@@ -555,6 +566,7 @@ struct stmmac_regs_off { + }; + + extern const struct stmmac_ops dwmac100_ops; ++extern const struct stmmac_serdes_ops intel_serdes_ops; + extern const struct stmmac_dma_ops dwmac100_dma_ops; + extern const struct stmmac_ops dwmac1000_ops; + extern const struct stmmac_dma_ops dwmac1000_dma_ops; +diff --git a/drivers/net/ethernet/stmicro/stmmac/intel_serdes.c b/drivers/net/ethernet/stmicro/stmmac/intel_serdes.c +new file mode 100644 +index 000000000000..f3c9b9892229 +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/intel_serdes.c +@@ -0,0 +1,181 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (c) 2019, Intel Corporation ++ * Intel Serdes ++ */ ++ ++#include ++#include ++#include ++#include "intel_serdes.h" ++#include "stmmac.h" ++ ++static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr, ++ int phyreg, u32 mask, u32 val) ++{ ++ unsigned int retries = 10; ++ int val_rd = 0; ++ ++ do { ++ val_rd = mdiobus_read(priv->mii, phyaddr, phyreg); ++ if ((val_rd & mask) == (val & mask)) ++ return 
0; ++ udelay(POLL_DELAY_US); ++ } while (--retries); ++ ++ return -ETIMEDOUT; ++} ++ ++static int intel_serdes_powerup(struct net_device *ndev) ++{ ++ struct stmmac_priv *priv = netdev_priv(ndev); ++ int serdes_phy_addr = 0; ++ u32 data = 0; ++ ++ if (!priv->plat->intel_adhoc_addr) ++ return 0; ++ ++ serdes_phy_addr = priv->plat->intel_adhoc_addr; ++ ++ /* assert clk_req */ ++ data = mdiobus_read(priv->mii, serdes_phy_addr, ++ SERDES_GCR0); ++ ++ data |= SERDES_PLL_CLK; ++ ++ mdiobus_write(priv->mii, serdes_phy_addr, ++ SERDES_GCR0, data); ++ ++ /* check for clk_ack assertion */ ++ data = serdes_status_poll(priv, serdes_phy_addr, ++ SERDES_GSR0, ++ SERDES_PLL_CLK, ++ SERDES_PLL_CLK); ++ ++ if (data) { ++ dev_err(priv->device, "Serdes PLL clk request timeout\n"); ++ return data; ++ } ++ ++ /* assert lane reset */ ++ data = mdiobus_read(priv->mii, serdes_phy_addr, ++ SERDES_GCR0); ++ ++ data |= SERDES_RST; ++ ++ mdiobus_write(priv->mii, serdes_phy_addr, ++ SERDES_GCR0, data); ++ ++ /* check for assert lane reset reflection */ ++ data = serdes_status_poll(priv, serdes_phy_addr, ++ SERDES_GSR0, ++ SERDES_RST, ++ SERDES_RST); ++ ++ if (data) { ++ dev_err(priv->device, "Serdes assert lane reset timeout\n"); ++ return data; ++ } ++ ++ /* move power state to P0 */ ++ data = mdiobus_read(priv->mii, serdes_phy_addr, ++ SERDES_GCR0); ++ ++ data &= ~SERDES_PWR_ST_MASK; ++ data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT; ++ ++ mdiobus_write(priv->mii, serdes_phy_addr, ++ SERDES_GCR0, data); ++ ++ /* Check for P0 state */ ++ data = serdes_status_poll(priv, serdes_phy_addr, ++ SERDES_GSR0, ++ SERDES_PWR_ST_MASK, ++ SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT); ++ ++ if (data) { ++ dev_err(priv->device, "Serdes power state P0 timeout.\n"); ++ return data; ++ } ++ ++ return 0; ++} ++ ++static int intel_serdes_powerdown(struct net_device *ndev) ++{ ++ struct stmmac_priv *priv = netdev_priv(ndev); ++ int serdes_phy_addr = 0; ++ u32 data = 0; ++ ++ serdes_phy_addr = 
priv->plat->intel_adhoc_addr; ++ ++ if (!priv->plat->intel_adhoc_addr) ++ return 0; ++ ++ /* move power state to P3 */ ++ data = mdiobus_read(priv->mii, serdes_phy_addr, ++ SERDES_GCR0); ++ ++ data &= ~SERDES_PWR_ST_MASK; ++ data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT; ++ ++ mdiobus_write(priv->mii, serdes_phy_addr, ++ SERDES_GCR0, data); ++ ++ /* Check for P3 state */ ++ data = serdes_status_poll(priv, serdes_phy_addr, ++ SERDES_GSR0, ++ SERDES_PWR_ST_MASK, ++ SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT); ++ ++ if (data) { ++ dev_err(priv->device, "Serdes power state P3 timeout\n"); ++ return data; ++ } ++ ++ /* de-assert clk_req */ ++ data = mdiobus_read(priv->mii, serdes_phy_addr, ++ SERDES_GCR0); ++ ++ data &= ~SERDES_PLL_CLK; ++ ++ mdiobus_write(priv->mii, serdes_phy_addr, ++ SERDES_GCR0, data); ++ ++ /* check for clk_ack de-assert */ ++ data = serdes_status_poll(priv, serdes_phy_addr, ++ SERDES_GSR0, ++ SERDES_PLL_CLK, ++ (u32)~SERDES_PLL_CLK); ++ ++ if (data) { ++ dev_err(priv->device, "Serdes PLL clk de-assert timeout\n"); ++ return data; ++ } ++ ++ /* de-assert lane reset */ ++ data = mdiobus_read(priv->mii, serdes_phy_addr, ++ SERDES_GCR0); ++ ++ data &= ~SERDES_RST; ++ ++ mdiobus_write(priv->mii, serdes_phy_addr, ++ SERDES_GCR0, data); ++ ++ /* check for de-assert lane reset reflection */ ++ data = serdes_status_poll(priv, serdes_phy_addr, ++ SERDES_GSR0, ++ SERDES_RST, ++ (u32)~SERDES_RST); ++ ++ if (data) { ++ dev_err(priv->device, "Serdes de-assert lane reset timeout\n"); ++ return data; ++ } ++ ++ return 0; ++} ++ ++const struct stmmac_serdes_ops intel_serdes_ops = { ++ .serdes_powerup = intel_serdes_powerup, ++ .serdes_powerdown = intel_serdes_powerdown, ++}; +diff --git a/drivers/net/ethernet/stmicro/stmmac/intel_serdes.h b/drivers/net/ethernet/stmicro/stmmac/intel_serdes.h +new file mode 100644 +index 000000000000..22b0b71b657b +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/intel_serdes.h +@@ -0,0 +1,23 @@ ++/* SPDX-License-Identifier: 
GPL-2.0 */ ++/* Copyright (c) 2019, Intel Corporation ++ * Intel Serdes ++ */ ++ ++#ifndef __INTEL_SERDES_H__ ++#define __INTEL_SERDES_H__ ++ ++#define POLL_DELAY_US 8 ++ ++/* SERDES Register */ ++#define SERDES_GSR0 0x5 /* Global Status Reg0 */ ++#define SERDES_GCR0 0xb /* Global Configuration Reg0 */ ++ ++/* SERDES defines */ ++#define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */ ++#define SERDES_RST BIT(2) /* Serdes Reset */ ++#define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/ ++#define SERDES_PWR_ST_SHIFT 4 ++#define SERDES_PWR_ST_P0 0x0 ++#define SERDES_PWR_ST_P3 0x3 ++ ++#endif /* __INTEL_SERDES_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 2c819c59d190..672a619e8348 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -44,6 +44,7 @@ + #include "dwmac1000.h" + #include "dwxgmac2.h" + #include "hwif.h" ++#include "intel_serdes.h" + + #define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) + #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) +@@ -2541,6 +2542,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) + u32 chan; + int ret; + ++ /* Power up Serdes */ ++ if (priv->plat->has_serdes) ++ stmmac_serdes_powerup(priv, dev); ++ + /* DMA initialization and SW reset */ + ret = stmmac_init_dma_engine(priv); + if (ret < 0) { +@@ -4715,6 +4720,9 @@ int stmmac_dvr_remove(struct device *dev) + #endif + stmmac_stop_all_dma(priv); + ++ if (priv->plat->has_serdes) ++ stmmac_serdes_powerdown(priv, ndev); ++ + stmmac_mac_set(priv, priv->ioaddr, false); + netif_carrier_off(ndev); + unregister_netdev(ndev); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index c5626c989a87..02466a1b657d 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -239,6 +239,9 @@ 
static int intel_mgbe_common_data(struct pci_dev *pdev, + if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) { + plat->setup_phy_conv = setup_intel_mgbe_phy_conv; + plat->remove_phy_conv = remove_intel_mgbe_phy_conv; ++ plat->has_serdes = 1; ++ /* intel specific adhoc (mdio) address for serdes & etc */ ++ plat->intel_adhoc_addr = 0x15; + } + + return 0; +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 0a99e79fd991..5f9d027dfe5a 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -131,6 +131,7 @@ struct stmmac_txq_cfg { + struct plat_stmmacenet_data { + int bus_id; + int phy_addr; ++ int intel_adhoc_addr; + int interface; + int phy_interface; + struct stmmac_mdio_bus_data *mdio_bus_data; +@@ -178,6 +179,7 @@ struct plat_stmmacenet_data { + struct reset_control *stmmac_rst; + struct stmmac_axi *axi; + int has_gmac4; ++ int has_serdes; + bool has_sun8i; + bool tso_en; + int rss_en; +-- +2.17.1 + diff --git a/patches/0019-platform-x86-Use-srctree-instead-of-src-to-ad.sep-socwatch b/patches/0019-platform-x86-Use-srctree-instead-of-src-to-ad.sep-socwatch new file mode 100644 index 0000000000..1e090ffb14 --- /dev/null +++ b/patches/0019-platform-x86-Use-srctree-instead-of-src-to-ad.sep-socwatch @@ -0,0 +1,109 @@ +From 562ac97573be956e8ad6ebf2ab5ad5497e9f7025 Mon Sep 17 00:00:00 2001 +From: Faycal Benmlih +Date: Wed, 29 May 2019 12:43:06 -0500 +Subject: [PATCH 19/27] platform/x86: Use $(srctree) instead of $(src) to adapt + to kbuild regression + +$(src) is no longer resolved when O=OUTPUT_DIR is specified +resulting in header files not being found. +The quick fix is to use $(srctree)/ instead. 
+ +Signed-off-by: Faycal Benmlih +--- + drivers/platform/x86/sepdk/pax/Makefile | 2 +- + drivers/platform/x86/sepdk/sep/Makefile | 4 ++-- + drivers/platform/x86/socperf/Makefile | 2 +- + drivers/platform/x86/socwatch/Makefile | 4 ++-- + drivers/platform/x86/socwatch/inc/sw_version.h | 2 +- + drivers/platform/x86/socwatchhv/Makefile | 6 +++--- + 6 files changed, 10 insertions(+), 10 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/pax/Makefile b/drivers/platform/x86/sepdk/pax/Makefile +index 267d70eeaab5..48825674a97e 100755 +--- a/drivers/platform/x86/sepdk/pax/Makefile ++++ b/drivers/platform/x86/sepdk/pax/Makefile +@@ -1,4 +1,4 @@ +-ccflags-y := -I$(src)/../include -I$(src)/../inc ++ccflags-y := -I$(srctree)/drivers/platform/x86/sepdk/include -I$(srctree)/drivers/platform/x86/sepdk/inc + + obj-$(CONFIG_SEP_PAX) += pax.o + +diff --git a/drivers/platform/x86/sepdk/sep/Makefile b/drivers/platform/x86/sepdk/sep/Makefile +index c616fc1f7ce8..819480207705 100755 +--- a/drivers/platform/x86/sepdk/sep/Makefile ++++ b/drivers/platform/x86/sepdk/sep/Makefile +@@ -1,10 +1,10 @@ +-ccflags-y := -I$(src)/../include -I$(src)/../inc -I$(src)/.. ++ccflags-y := -I$(srctree)/drivers/platform/x86/sepdk/include -I$(srctree)/drivers/platform/x86/sepdk/inc -I$(srctree)/drivers/platform/x86/sepdk + ccflags-y += -DSEP_CONFIG_MODULE_LAYOUT + # TODO: verify kaiser.h + #ccflags-y += -DKAISER_HEADER_PRESENT + ccflags-y += -DDRV_CPU_HOTPLUG -DDRV_USE_TASKLET_WORKAROUND -DENABLE_CPUS -DBUILD_CHIPSET -DBUILD_GFX + +-asflags-y := -I$(src)/.. 
++asflags-y := -I$(srctree)/drivers/platform/x86/sepdk + + ifdef CONFIG_SEP_PER_USER_MODE + ccflags-y += -DSECURE_SEP +diff --git a/drivers/platform/x86/socperf/Makefile b/drivers/platform/x86/socperf/Makefile +index a67e6a5c9e1f..8453075ca884 100644 +--- a/drivers/platform/x86/socperf/Makefile ++++ b/drivers/platform/x86/socperf/Makefile +@@ -1,4 +1,4 @@ +-ccflags-y := -I$(src)/include -I$(src)/inc ++ccflags-y := -I$(srctree)/drivers/platform/x86/socperf/include -I$(srctree)/drivers/platform/x86/socperf/inc + + obj-$(CONFIG_SOCPERF) += socperf3.o + +diff --git a/drivers/platform/x86/socwatch/Makefile b/drivers/platform/x86/socwatch/Makefile +index 073397d5aec0..63af91b57e80 100644 +--- a/drivers/platform/x86/socwatch/Makefile ++++ b/drivers/platform/x86/socwatch/Makefile +@@ -10,8 +10,8 @@ DRIVER_NAME=${DRIVER_BASE}${DRIVER_MAJOR}_${DRIVER_MINOR} + + DO_DRIVER_PROFILING=0 + +-ccflags-y += -Idrivers/platform/x86/socwatch/inc/ \ +- -DDO_DRIVER_PROFILING=$(DO_DRIVER_PROFILING) ++ccflags-y := -I$(srctree)/drivers/platform/x86/socwatch/inc ++ccflags-y += -DDO_DRIVER_PROFILING=$(DO_DRIVER_PROFILING) + + obj-$(CONFIG_INTEL_SOCWATCH) += $(DRIVER_NAME).o + +diff --git a/drivers/platform/x86/socwatch/inc/sw_version.h b/drivers/platform/x86/socwatch/inc/sw_version.h +index b6fe1eecdd0e..2b3330a57f93 100644 +--- a/drivers/platform/x86/socwatch/inc/sw_version.h ++++ b/drivers/platform/x86/socwatch/inc/sw_version.h +@@ -61,7 +61,7 @@ + */ + #define SW_DRIVER_VERSION_MAJOR 2 + #define SW_DRIVER_VERSION_MINOR 10 +-#define SW_DRIVER_VERSION_OTHER 0 ++#define SW_DRIVER_VERSION_OTHER 1 + + /* + * Every SOC Watch userspace component shares the same version number. +diff --git a/drivers/platform/x86/socwatchhv/Makefile b/drivers/platform/x86/socwatchhv/Makefile +index bd4b58a61f06..c5ad7f109aac 100644 +--- a/drivers/platform/x86/socwatchhv/Makefile ++++ b/drivers/platform/x86/socwatchhv/Makefile +@@ -1,4 +1,4 @@ +-# ++ + # Makefile for the socwatch hv driver. 
+ # + +@@ -10,8 +10,8 @@ DRIVER_NAME=${DRIVER_BASE}${DRIVER_MAJOR}_${DRIVER_MINOR} + + HYPERVISOR=2 # ACRN + +-ccflags-y += -Idrivers/ \ +- -Idrivers/platform/x86/socwatchhv/inc/ \ ++EXTRA_CFLAGS += -Idrivers/ \ ++ -I$(src)/inc/ \ + -DHYPERVISOR=$(HYPERVISOR) + + obj-$(CONFIG_INTEL_SOCWATCH_HV) += $(DRIVER_NAME).o +-- +2.17.1 + diff --git a/patches/0019-rpmb-VRPMB-FE-create-virtio-rpmb-frontend-driver.security b/patches/0019-rpmb-VRPMB-FE-create-virtio-rpmb-frontend-driver.security new file mode 100644 index 0000000000..09bcc20d0e --- /dev/null +++ b/patches/0019-rpmb-VRPMB-FE-create-virtio-rpmb-frontend-driver.security @@ -0,0 +1,377 @@ +From ff2b113f05899f9f161ef5a53f089e73cc0d656c Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Thu, 24 May 2018 14:57:02 +0300 +Subject: [PATCH 19/65] rpmb: VRPMB-FE create virtio rpmb frontend driver + +This patch implements virtio rpmb frontend driver. +The driver will work with RPMB VBS-U together to +provide one communication channel between UOS and SOS. + +V2: 1. Change license to dual BSD/GPL + 2. Fix coding style. + 3. Use pr_fmt macro instead of ERR, DBG, ... +V3: 1. Replace - with _ in file name. + 2. Plug to rpmb framework instead of using own misc device + 3. Use arrays of scatter lists instead of linearizing the data. +V4: 1. Allocate memory for control structures, it's not possible to DMA + from the stack. +V5: 1. Add mutex and use wait queue instead of completion. + 2. WIP code for getting capabilities +V6: 1. Fix calculation of the allocation size for seq cmd + 2. WIP code for getting capabilities + 3. Drop unused constant RPMB_MAX_FRAMES +V7: 1. 
Set auth algorithm + +Change-Id: I88a42f2e8f2ea1573aad9b5cafeae812c669a73e +Signed-off-by: Tomas Winkler +--- + drivers/char/rpmb/Kconfig | 10 ++ + drivers/char/rpmb/Makefile | 1 + + drivers/char/rpmb/virtio_rpmb.c | 306 ++++++++++++++++++++++++++++++++ + 3 files changed, 317 insertions(+) + create mode 100644 drivers/char/rpmb/virtio_rpmb.c + +diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig +index 2b91c8e24c92..1cfcd287a665 100644 +--- a/drivers/char/rpmb/Kconfig ++++ b/drivers/char/rpmb/Kconfig +@@ -32,3 +32,13 @@ config RPMB_SIM + suitable only for testing of the RPMB subsystem or RPMB applications + prior to RPMB key provisioning. + Most people should say N here. ++ ++config VIRTIO_RPMB ++ tristate "Virtio RPMB character device interface /dev/vrpmb" ++ default n ++ depends on VIRTIO ++ select RPMB ++ help ++ Say yes here if you want to access virtio RPMB from user space ++ via character device interface /dev/vrpmb. ++ This device interface is only for guest/frontend virtio driver. +diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile +index e4faa1c99373..7306e42f5be2 100644 +--- a/drivers/char/rpmb/Makefile ++++ b/drivers/char/rpmb/Makefile +@@ -5,5 +5,6 @@ obj-$(CONFIG_RPMB) += rpmb.o + rpmb-objs += core.o + rpmb-$(CONFIG_RPMB_INTF_DEV) += cdev.o + obj-$(CONFIG_RPMB_SIM) += rpmb_sim.o ++obj-$(CONFIG_VIRTIO_RPMB) += virtio_rpmb.o + + ccflags-y += -D__CHECK_ENDIAN__ +diff --git a/drivers/char/rpmb/virtio_rpmb.c b/drivers/char/rpmb/virtio_rpmb.c +new file mode 100644 +index 000000000000..dbfeeeccec0b +--- /dev/null ++++ b/drivers/char/rpmb/virtio_rpmb.c +@@ -0,0 +1,306 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Virtio RPMB Front End Driver ++ * ++ * Copyright (c) 2018-2019 Intel Corporation. 
++ */ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static const char id[] = "RPMB:VIRTIO"; ++#ifndef VIRTIO_ID_RPMB ++#define VIRTIO_ID_RPMB 0xFFFF ++#endif ++ ++#define RPMB_SEQ_CMD_MAX 3 /* support up to 3 cmds */ ++ ++struct virtio_rpmb_info { ++ struct virtqueue *vq; ++ struct mutex lock; /* info lock */ ++ wait_queue_head_t have_data; ++ struct rpmb_dev *rdev; ++}; ++ ++struct virtio_rpmb_ioc { ++ unsigned int ioc_cmd; ++ int result; ++ u8 target; ++ u8 reserved[3]; ++}; ++ ++static void virtio_rpmb_recv_done(struct virtqueue *vq) ++{ ++ struct virtio_rpmb_info *vi; ++ struct virtio_device *vdev = vq->vdev; ++ ++ vi = vq->vdev->priv; ++ if (!vi) { ++ dev_err(&vdev->dev, "Error: no found vi data.\n"); ++ return; ++ } ++ ++ wake_up(&vi->have_data); ++} ++ ++static int rpmb_virtio_cmd_seq(struct device *dev, u8 target, ++ struct rpmb_cmd *cmds, u32 ncmds) ++{ ++ struct virtio_device *vdev = dev_to_virtio(dev); ++ struct virtio_rpmb_info *vi = vdev->priv; ++ unsigned int i; ++ struct virtio_rpmb_ioc *vio_cmd; ++ struct rpmb_ioc_seq_cmd *seq_cmd; ++ size_t seq_cmd_sz; ++ struct scatterlist vio_ioc, vio_seq, frame[3]; ++ struct scatterlist *sgs[5]; ++ unsigned int num_out = 0, num_in = 0; ++ size_t sz; ++ int ret; ++ unsigned int len; ++ ++ if (ncmds > RPMB_SEQ_CMD_MAX) ++ return -EINVAL; ++ ++ mutex_lock(&vi->lock); ++ ++ vio_cmd = kzalloc(sizeof(*vio_cmd), GFP_KERNEL); ++ seq_cmd_sz = sizeof(*seq_cmd) + sizeof(struct rpmb_ioc_cmd) * ncmds; ++ seq_cmd = kzalloc(seq_cmd_sz, GFP_KERNEL); ++ if (!vio_cmd || !seq_cmd) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ vio_cmd->ioc_cmd = RPMB_IOC_SEQ_CMD; ++ vio_cmd->result = 0; ++ vio_cmd->target = target; ++ sg_init_one(&vio_ioc, vio_cmd, sizeof(*vio_cmd)); ++ sgs[num_out + num_in++] = &vio_ioc; ++ ++ seq_cmd->num_of_cmds = ncmds; ++ for (i = 0; i < ncmds; i++) { ++ seq_cmd->cmds[i].flags = cmds[i].flags; ++ 
seq_cmd->cmds[i].nframes = cmds[i].nframes; ++ seq_cmd->cmds[i].frames_ptr = i; ++ } ++ sg_init_one(&vio_seq, seq_cmd, seq_cmd_sz); ++ sgs[num_out + num_in++] = &vio_seq; ++ ++ for (i = 0; i < ncmds; i++) { ++ sz = sizeof(struct rpmb_frame_jdec) * (cmds[i].nframes ?: 1); ++ sg_init_one(&frame[i], cmds[i].frames, sz); ++ sgs[num_out + num_in++] = &frame[i]; ++ } ++ ++ virtqueue_add_sgs(vi->vq, sgs, num_out, num_in, vi, GFP_KERNEL); ++ virtqueue_kick(vi->vq); ++ ++ wait_event(vi->have_data, virtqueue_get_buf(vi->vq, &len)); ++ ++ ret = 0; ++ ++ if (vio_cmd->result != 0) { ++ dev_err(dev, "Error: command error = %d.\n", vio_cmd->result); ++ ret = -EIO; ++ } ++ ++out: ++ kfree(vio_cmd); ++ kfree(seq_cmd); ++ mutex_unlock(&vi->lock); ++ return ret; ++} ++ ++static int rpmb_virtio_cmd_cap(struct device *dev, u8 target) ++{ ++ struct virtio_device *vdev = dev_to_virtio(dev); ++ struct virtio_rpmb_info *vi = vdev->priv; ++ struct virtio_rpmb_ioc *vio_cmd; ++ struct rpmb_ioc_cap_cmd *cap_cmd; ++ struct scatterlist vio_ioc, cap_ioc; ++ struct scatterlist *sgs[2]; ++ unsigned int num_out = 0, num_in = 0; ++ unsigned int len; ++ int ret; ++ ++ mutex_lock(&vi->lock); ++ ++ vio_cmd = kzalloc(sizeof(*vio_cmd), GFP_KERNEL); ++ cap_cmd = kzalloc(sizeof(*cap_cmd), GFP_KERNEL); ++ if (!vio_cmd || !cap_cmd) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ vio_cmd->ioc_cmd = RPMB_IOC_CAP_CMD; ++ vio_cmd->result = 0; ++ vio_cmd->target = target; ++ sg_init_one(&vio_ioc, vio_cmd, sizeof(*vio_cmd)); ++ sgs[num_out + num_in++] = &vio_ioc; ++ ++ sg_init_one(&cap_ioc, cap_cmd, sizeof(*cap_cmd)); ++ sgs[num_out + num_in++] = &cap_ioc; ++ ++ virtqueue_add_sgs(vi->vq, sgs, num_out, num_in, vi, GFP_KERNEL); ++ virtqueue_kick(vi->vq); ++ ++ wait_event(vi->have_data, virtqueue_get_buf(vi->vq, &len)); ++ ++ ret = 0; ++ ++ if (vio_cmd->result != 0) { ++ dev_err(dev, "Error: command error = %d.\n", vio_cmd->result); ++ ret = -EIO; ++ } ++ ++out: ++ kfree(vio_cmd); ++ kfree(cap_cmd); ++ ++ 
mutex_unlock(&vi->lock); ++ return ret; ++} ++ ++static int rpmb_virtio_get_capacity(struct device *dev, u8 target) ++{ ++ return 0; ++} ++ ++static struct rpmb_ops rpmb_virtio_ops = { ++ .cmd_seq = rpmb_virtio_cmd_seq, ++ .get_capacity = rpmb_virtio_get_capacity, ++ .type = RPMB_TYPE_EMMC, ++ .auth_method = RPMB_HMAC_ALGO_SHA_256, ++}; ++ ++static int rpmb_virtio_dev_init(struct virtio_rpmb_info *vi) ++{ ++ int ret = 0; ++ struct device *dev = &vi->vq->vdev->dev; ++ ++ rpmb_virtio_ops.dev_id_len = strlen(id); ++ rpmb_virtio_ops.dev_id = id; ++ rpmb_virtio_ops.wr_cnt_max = 1; ++ rpmb_virtio_ops.rd_cnt_max = 1; ++ rpmb_virtio_ops.block_size = 1; ++ ++ vi->rdev = rpmb_dev_register(dev, 0, &rpmb_virtio_ops); ++ if (IS_ERR(vi->rdev)) { ++ ret = PTR_ERR(vi->rdev); ++ goto err; ++ } ++ ++ dev_set_drvdata(dev, vi); ++err: ++ return ret; ++} ++ ++static int virtio_rpmb_init(struct virtio_device *vdev) ++{ ++ int ret; ++ struct virtio_rpmb_info *vi; ++ ++ vi = kzalloc(sizeof(*vi), GFP_KERNEL); ++ if (!vi) ++ return -ENOMEM; ++ ++ init_waitqueue_head(&vi->have_data); ++ mutex_init(&vi->lock); ++ vdev->priv = vi; ++ ++ /* We expect a single virtqueue. */ ++ vi->vq = virtio_find_single_vq(vdev, virtio_rpmb_recv_done, "request"); ++ if (IS_ERR(vi->vq)) { ++ dev_err(&vdev->dev, "get single vq failed!\n"); ++ ret = PTR_ERR(vi->vq); ++ goto err; ++ } ++ ++ /* create vrpmb device. 
*/ ++ ret = rpmb_virtio_dev_init(vi); ++ if (ret) { ++ dev_err(&vdev->dev, "create vrpmb device failed.\n"); ++ goto err; ++ } ++ ++ dev_info(&vdev->dev, "init done!\n"); ++ ++ return 0; ++ ++err: ++ kfree(vi); ++ return ret; ++} ++ ++static void virtio_rpmb_remove(struct virtio_device *vdev) ++{ ++ struct virtio_rpmb_info *vi; ++ ++ vi = vdev->priv; ++ if (!vi) ++ return; ++ ++ if (wq_has_sleeper(&vi->have_data)) ++ wake_up(&vi->have_data); ++ ++ rpmb_dev_unregister(vi->rdev); ++ ++ if (vdev->config->reset) ++ vdev->config->reset(vdev); ++ ++ if (vdev->config->del_vqs) ++ vdev->config->del_vqs(vdev); ++ ++ kfree(vi); ++} ++ ++static int virtio_rpmb_probe(struct virtio_device *vdev) ++{ ++ return virtio_rpmb_init(vdev); ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int virtio_rpmb_freeze(struct virtio_device *vdev) ++{ ++ virtio_rpmb_remove(vdev); ++ return 0; ++} ++ ++static int virtio_rpmb_restore(struct virtio_device *vdev) ++{ ++ return virtio_rpmb_init(vdev); ++} ++#endif ++ ++static struct virtio_device_id id_table[] = { ++ { VIRTIO_ID_RPMB, VIRTIO_DEV_ANY_ID }, ++ { 0 }, ++}; ++ ++static struct virtio_driver virtio_rpmb_driver = { ++ .driver.name = KBUILD_MODNAME, ++ .driver.owner = THIS_MODULE, ++ .id_table = id_table, ++ .probe = virtio_rpmb_probe, ++ .remove = virtio_rpmb_remove, ++#ifdef CONFIG_PM_SLEEP ++ .freeze = virtio_rpmb_freeze, ++ .restore = virtio_rpmb_restore, ++#endif ++}; ++ ++module_virtio_driver(virtio_rpmb_driver); ++MODULE_DEVICE_TABLE(virtio, id_table); ++ ++MODULE_DESCRIPTION("Virtio rpmb frontend driver"); ++MODULE_AUTHOR("Intel Corporation"); ++MODULE_LICENSE("Dual BSD/GPL"); +-- +2.17.1 + diff --git a/patches/0019-trusty-implement-trusty-OS-timer-proxy-for-performa.trusty b/patches/0019-trusty-implement-trusty-OS-timer-proxy-for-performa.trusty new file mode 100644 index 0000000000..15fd2b62d5 --- /dev/null +++ b/patches/0019-trusty-implement-trusty-OS-timer-proxy-for-performa.trusty @@ -0,0 +1,179 @@ +From 
12d9988934fdcedefba5a387b46d1e178ebf6533 Mon Sep 17 00:00:00 2001 +From: "Zhu, Bing" +Date: Fri, 15 Jul 2016 13:24:42 +0800 +Subject: [PATCH 19/63] trusty: implement trusty OS timer proxy for performance + enhancement + +Previously VMX timer causes 14 times of vmexit/vmresume switches every +10ms and VMX timer stops when processor enters C3+ sleep state. With +linux proxiedtimer implementation, we can reduces vmexit/vmresume +switches down to 4. But a drawback is that Trusty OS has no timer +during the boot time (before Linux kernel bringup), because Trusty OS +also intends to be used to provide services for bootloader, like GVB +and FRP(factor reset protection). We plan to solve it in other ways, +e.g. taking control of lapic timer before Linux kernel boot. + +Change-Id: I4baa827ecca51fcca5315a1e973a7533553073a0 +Signed-off-by: Zhu, Bing +Signed-off-by: Feng, Wang +Signed-off-by: weideng +Tracked-On: OAM-39152 +--- + drivers/trusty/trusty-irq.c | 2 - + drivers/trusty/trusty.c | 87 +++++++++++++++++++++++++++++++++++++ + 2 files changed, 87 insertions(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index b325bff33774..2c2a792a3636 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -631,8 +631,6 @@ static int trusty_irq_probe(struct platform_device *pdev) + for (irq = 0; irq >= 0;) + irq = trusty_irq_init_one(is, irq, false); + +- irq_register_done(); +- + is->cpu_notifier.notifier_call = trusty_irq_cpu_notify; + ret = register_hotcpu_notifier(&is->cpu_notifier); + if (ret) { +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 12a90224eb27..8daf817634d8 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -26,11 +26,23 @@ + #include + + #define TRUSTY_VMCALL_SMC 0x74727500 ++#define TRUSTY_LKTIMER_INTERVAL 10 /* 10 ms */ ++#define TRUSTY_LKTIMER_VECTOR 0x31 /* INT_PIT */ ++ ++enum lktimer_mode { ++ ONESHOT_TIMER, ++ PERIODICAL_TIMER, ++}; + + struct 
trusty_state { ++ struct device *dev; + struct mutex smc_lock; + struct atomic_notifier_head notifier; + struct completion cpu_idle_completion; ++ struct timer_list timer; ++ struct work_struct timer_work; ++ enum lktimer_mode timer_mode; ++ unsigned long timer_interval; + char *version_str; + u32 api_version; + }; +@@ -40,6 +52,72 @@ struct trusty_smc_interface { + ulong args[5]; + }; + ++static void trusty_lktimer_work_func(struct work_struct *work) ++{ ++ int ret; ++ unsigned int vector; ++ struct trusty_state *s = ++ container_of(work, struct trusty_state, timer_work); ++ ++ dev_dbg(s->dev, "%s\n", __func__); ++ ++ /* need vector number only for the first time */ ++ vector = TRUSTY_LKTIMER_VECTOR; ++ ++ do { ++ ret = trusty_std_call32(s->dev, SMC_SC_NOP, vector, 0, 0); ++ vector = 0; ++ } while (ret == SM_ERR_NOP_INTERRUPTED); ++ ++ if (ret != SM_ERR_NOP_DONE) ++ dev_err(s->dev, "%s: SMC_SC_NOP failed %d", __func__, ret); ++ ++ dev_notice_once(s->dev, "LK OS proxy timer works\n"); ++} ++ ++static void trusty_lktimer_func(unsigned long data) ++{ ++ struct trusty_state *s = (struct trusty_state *)data; ++ ++ /* binding it physical CPU0 only because trusty OS runs on it */ ++ schedule_work_on(0, &s->timer_work); ++ ++ /* reactivate the timer again in periodic mode */ ++ if (s->timer_mode == PERIODICAL_TIMER) ++ mod_timer(&s->timer, ++ jiffies + msecs_to_jiffies(s->timer_interval)); ++} ++ ++static void trusty_init_lktimer(struct trusty_state *s) ++{ ++ INIT_WORK(&s->timer_work, trusty_lktimer_work_func); ++ setup_timer(&s->timer, trusty_lktimer_func, (unsigned long)s); ++} ++ ++/* note that this function is not thread-safe */ ++static void trusty_configure_lktimer(struct trusty_state *s, ++ enum lktimer_mode mode, unsigned long interval) ++{ ++ if (mode != ONESHOT_TIMER && mode != PERIODICAL_TIMER) { ++ pr_err("%s: invalid timer mode: %d\n", __func__, mode); ++ return; ++ } ++ ++ s->timer_mode = mode; ++ s->timer_interval = interval; ++ mod_timer(&s->timer, 
jiffies + msecs_to_jiffies(s->timer_interval)); ++} ++ ++/* ++ * this should be called when removing trusty dev and ++ * when LK/Trusty crashes, to disable proxy timer. ++ */ ++static void trusty_del_lktimer(struct trusty_state *s) ++{ ++ del_timer_sync(&s->timer); ++ flush_work(&s->timer_work); ++} ++ + static inline ulong smc(ulong r0, ulong r1, ulong r2, ulong r3) + { + __asm__ __volatile__( +@@ -246,6 +324,9 @@ s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + + WARN_ONCE(ret == SM_ERR_PANIC, "trusty crashed"); + ++ if (ret == SM_ERR_PANIC) ++ trusty_del_lktimer(s); ++ + if (smcnr == SMC_SC_NOP) + complete(&s->cpu_idle_completion); + else +@@ -384,6 +465,7 @@ static int trusty_probe(struct platform_device *pdev) + ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); + init_completion(&s->cpu_idle_completion); + platform_set_drvdata(pdev, s); ++ s->dev = &pdev->dev; + + trusty_init_version(s, &pdev->dev); + +@@ -391,6 +473,10 @@ static int trusty_probe(struct platform_device *pdev) + if (ret < 0) + goto err_api_version; + ++ trusty_init_lktimer(s); ++ trusty_configure_lktimer(s, ++ PERIODICAL_TIMER, TRUSTY_LKTIMER_INTERVAL); ++ + return 0; + + err_api_version: +@@ -417,6 +503,7 @@ static int trusty_remove(struct platform_device *pdev) + device_remove_file(&pdev->dev, &dev_attr_trusty_version); + kfree(s->version_str); + } ++ trusty_del_lktimer(s); + kfree(s); + return 0; + } +-- +2.17.1 + diff --git a/patches/0019-vhm-refine-vm-related-hypercall-ioctrl.acrn b/patches/0019-vhm-refine-vm-related-hypercall-ioctrl.acrn new file mode 100644 index 0000000000..12cfd7795f --- /dev/null +++ b/patches/0019-vhm-refine-vm-related-hypercall-ioctrl.acrn @@ -0,0 +1,168 @@ +From 676af3134024fa684deb897011e2f6fec7451918 Mon Sep 17 00:00:00 2001 +From: Yin Fengwei +Date: Fri, 31 Aug 2018 10:58:57 +0800 +Subject: [PATCH 019/150] vhm: refine vm related hypercall/ioctrl + +Change-Id: I028f59998733f3d066e2ead7768297570d97bf22 +Tracked-On:218445 +Signed-off-by: Yin 
Fengwei +Reviewed-on: +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/char/vhm/vhm_dev.c | 17 ++++------------- + drivers/vhm/vhm_hypercall.c | 9 ++------- + include/linux/vhm/acrn_common.h | 11 +++++++---- + include/linux/vhm/acrn_hv_defs.h | 5 ++--- + include/linux/vhm/vhm_hypercall.h | 2 +- + include/linux/vhm/vhm_ioctl_defs.h | 5 ++--- + 6 files changed, 18 insertions(+), 31 deletions(-) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 22cc7797f664..a3510b722dab 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -177,12 +177,12 @@ static long vhm_dev_ioctl(struct file *filep, + + vm->vmid = created_vm.vmid; + +- pr_info("vhm: VM %ld created\n", created_vm.vmid); ++ pr_info("vhm: VM %d created\n", created_vm.vmid); + break; + } + +- case IC_RESUME_VM: { +- ret = hcall_resume_vm(vm->vmid); ++ case IC_START_VM: { ++ ret = hcall_start_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to start VM %ld!\n", vm->vmid); + return -EFAULT; +@@ -209,15 +209,6 @@ static long vhm_dev_ioctl(struct file *filep, + break; + } + +- case IC_QUERY_VMSTATE: { +- ret = hcall_query_vm_state(vm->vmid); +- if (ret < 0) { +- pr_err("vhm: failed to query VM State%ld!\n", vm->vmid); +- return -EFAULT; +- } +- return ret; +- } +- + case IC_CREATE_VCPU: { + struct acrn_create_vcpu cv; + +@@ -228,7 +219,7 @@ static long vhm_dev_ioctl(struct file *filep, + ret = acrn_hypercall2(HC_CREATE_VCPU, vm->vmid, + virt_to_phys(&cv)); + if (ret < 0) { +- pr_err("vhm: failed to create vcpu %ld!\n", cv.vcpuid); ++ pr_err("vhm: failed to create vcpu %d!\n", cv.vcpu_id); + return -EFAULT; + } + +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index 94a95933d51e..b2738474afaf 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -57,9 +57,9 @@ inline long hcall_create_vm(unsigned long vminfo) + return acrn_hypercall2(HC_CREATE_VM, 0, vminfo); + } + +-inline long 
hcall_resume_vm(unsigned long vmid) ++inline long hcall_start_vm(unsigned long vmid) + { +- return acrn_hypercall1(HC_RESUME_VM, vmid); ++ return acrn_hypercall1(HC_START_VM, vmid); + } + + inline long hcall_pause_vm(unsigned long vmid) +@@ -72,11 +72,6 @@ inline long hcall_destroy_vm(unsigned long vmid) + return acrn_hypercall1(HC_DESTROY_VM, vmid); + } + +-inline long hcall_query_vm_state(unsigned long vmid) +-{ +- return acrn_hypercall1(HC_QUERY_VMSTATE, vmid); +-} +- + inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) + { + return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index ea6f77c017dc..ed9dd7fc6f82 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -147,13 +147,16 @@ struct vhm_request_buffer { + + /* Common API params */ + struct acrn_create_vm { +- unsigned long vmid; /* OUT: HV return vmid to VHM */ +- unsigned long vcpu_num; /* IN: VM vcpu number */ ++ int32_t vmid; /* OUT: return vmid to VHM. 
Keep it first field */ ++ uint32_t vcpu_num; /* IN: VM vcpu number */ ++ uint8_t GUID[16]; /* IN: GUID of this vm */ ++ uint8_t trusty_enabled;/* IN: whether trusty is enabled */ ++ uint8_t reserved[31]; /* Reserved for future use */ + } __attribute__((aligned(8))); + + struct acrn_create_vcpu { +- int vcpuid; /* IN: vcpu id */ +- int pcpuid; /* IN: pcpu id */ ++ uint32_t vcpu_id; /* IN: vcpu id */ ++ uint32_t pcpu_id; /* IN: pcpu id */ + } __attribute__((aligned(8))); + + struct acrn_set_ioreq_buffer { +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index fa32243a6407..eb1d4c974a3e 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -70,10 +70,9 @@ + #define HC_GET_API_VERSION _HC_ID(HC_ID, HC_ID_VM_BASE + 0x00) + #define HC_CREATE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x01) + #define HC_DESTROY_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x02) +-#define HC_RESUME_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03) ++#define HC_START_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03) + #define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04) +-#define HC_QUERY_VMSTATE _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05) +-#define HC_CREATE_VCPU _HC_ID(HC_ID, HC_ID_VM_BASE + 0x06) ++#define HC_CREATE_VCPU _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05) + + /* IRQ and Interrupts */ + #define HC_ID_IRQ_BASE 0x100UL +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index 5447e951bf4b..b40f8f898046 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -138,7 +138,7 @@ static inline long acrn_hypercall4(unsigned long hcall_id, unsigned long param1, + } + + inline long hcall_create_vm(unsigned long vminfo); +-inline long hcall_resume_vm(unsigned long vmid); ++inline long hcall_start_vm(unsigned long vmid); + inline long hcall_pause_vm(unsigned long vmid); + inline long hcall_destroy_vm(unsigned long vmid); + inline long hcall_query_vm_state(unsigned long vmid); +diff --git 
a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 79d91a858226..e157d6a86a66 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -60,10 +60,9 @@ + #define IC_GET_API_VERSION _IC_ID(IC_ID, IC_ID_VM_BASE + 0x00) + #define IC_CREATE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x01) + #define IC_DESTROY_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x02) +-#define IC_RESUME_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03) ++#define IC_START_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03) + #define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) +-#define IC_QUERY_VMSTATE _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) +-#define IC_CREATE_VCPU _IC_ID(IC_ID, IC_ID_VM_BASE + 0x06) ++#define IC_CREATE_VCPU _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) + + /* IRQ and Interrupts */ + #define IC_ID_IRQ_BASE 0x100UL +-- +2.17.1 + diff --git a/patches/0020-ASoC-Intel-Skylake-Use-dsp-loading-functions-directl.audio b/patches/0020-ASoC-Intel-Skylake-Use-dsp-loading-functions-directl.audio new file mode 100644 index 0000000000..378c0e19e9 --- /dev/null +++ b/patches/0020-ASoC-Intel-Skylake-Use-dsp-loading-functions-directl.audio @@ -0,0 +1,232 @@ +From 5173943d367a4742ca78ad53a118ceec97d4add4 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Sun, 11 Aug 2019 14:46:52 +0200 +Subject: [PATCH 020/193] ASoC: Intel: Skylake: Use dsp loading functions + directly + +None of skl_dsp_loader_ops are actually extended as any parameter that +could be "extended" is already part of given function's parameter list. +Rather than obfustace non-derived calls with ops and dereferences, make +use of said operation directly. Takes part in remal of +skl_dsp_loader_ops structure. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 18 +++++++++--------- + sound/soc/intel/skylake/cnl-sst.c | 10 +++++----- + sound/soc/intel/skylake/skl-messages.c | 10 +++++----- + sound/soc/intel/skylake/skl-sst-cldma.c | 10 +++++----- + sound/soc/intel/skylake/skl-sst-dsp.h | 9 +++++++++ + 5 files changed, 33 insertions(+), 24 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 65cbbe4fd57c..aa3e5017d749 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -60,7 +60,7 @@ bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count) + if (ret < 0) + goto load_library_failed; + +- stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, ++ stream_tag = skl_dsp_prepare(ctx->dev, 0x40, + stripped_fw.size, &dmab); + if (stream_tag <= 0) { + dev_err(ctx->dev, "Lib prepare DMA err: %x\n", +@@ -72,14 +72,14 @@ bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count) + dma_id = stream_tag - 1; + memcpy(dmab.area, stripped_fw.data, stripped_fw.size); + +- ctx->dsp_ops.trigger(ctx->dev, true, stream_tag); ++ skl_dsp_trigger(ctx->dev, true, stream_tag); + ret = skl_sst_ipc_load_library(&skl->ipc, dma_id, i, true); + if (ret < 0) + dev_err(ctx->dev, "IPC Load Lib for %s fail: %d\n", + linfo[i].name, ret); + +- ctx->dsp_ops.trigger(ctx->dev, false, stream_tag); +- ctx->dsp_ops.cleanup(ctx->dev, &dmab, stream_tag); ++ skl_dsp_trigger(ctx->dev, false, stream_tag); ++ skl_dsp_cleanup(ctx->dev, &dmab, stream_tag); + } + + return ret; +@@ -100,7 +100,7 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx, + { + int stream_tag, ret; + +- stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab); ++ stream_tag = skl_dsp_prepare(ctx->dev, 0x40, fwsize, &ctx->dmab); + if (stream_tag <= 0) { + dev_err(ctx->dev, "Failed to prepare DMA FW loading err: %x\n", + stream_tag); +@@ -162,7 +162,7 @@ static int 
sst_bxt_prepare_fw(struct sst_dsp *ctx, + return ret; + + base_fw_load_failed: +- ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag); ++ skl_dsp_cleanup(ctx->dev, &ctx->dmab, stream_tag); + skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1)); + skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK); + return ret; +@@ -172,12 +172,12 @@ static int sst_transfer_fw_host_dma(struct sst_dsp *ctx) + { + int ret; + +- ctx->dsp_ops.trigger(ctx->dev, true, ctx->dsp_ops.stream_tag); ++ skl_dsp_trigger(ctx->dev, true, ctx->dsp_ops.stream_tag); + ret = sst_dsp_register_poll(ctx, BXT_ADSP_FW_STATUS, SKL_FW_STS_MASK, + BXT_ROM_INIT, BXT_BASEFW_TIMEOUT, "Firmware boot"); + +- ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag); +- ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag); ++ skl_dsp_trigger(ctx->dev, false, ctx->dsp_ops.stream_tag); ++ skl_dsp_cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag); + + return ret; + } +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index c89ae72b7ef4..13ec2705ddbc 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -48,7 +48,7 @@ static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize) + + int ret, stream_tag; + +- stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab); ++ stream_tag = skl_dsp_prepare(ctx->dev, 0x40, fwsize, &ctx->dmab); + if (stream_tag <= 0) { + dev_err(ctx->dev, "dma prepare failed: 0%#x\n", stream_tag); + return stream_tag; +@@ -84,7 +84,7 @@ static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize) + return 0; + + base_fw_load_failed: +- ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag); ++ skl_dsp_cleanup(ctx->dev, &ctx->dmab, stream_tag); + cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK); + + return ret; +@@ -94,13 +94,13 @@ static int sst_transfer_fw_host_dma(struct sst_dsp *ctx) + { + int ret; + +- ctx->dsp_ops.trigger(ctx->dev, true, 
ctx->dsp_ops.stream_tag); ++ skl_dsp_trigger(ctx->dev, true, ctx->dsp_ops.stream_tag); + ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK, + CNL_FW_INIT, CNL_BASEFW_TIMEOUT, + "firmware boot"); + +- ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag); +- ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag); ++ skl_dsp_trigger(ctx->dev, false, ctx->dsp_ops.stream_tag); ++ skl_dsp_cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag); + + return ret; + } +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index c24d6d14f889..527f464a6cdd 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -22,13 +22,13 @@ + #include "../common/sst-dsp-priv.h" + #include "skl-topology.h" + +-static int skl_alloc_dma_buf(struct device *dev, ++int skl_alloc_dma_buf(struct device *dev, + struct snd_dma_buffer *dmab, size_t size) + { + return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, dmab); + } + +-static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab) ++int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab) + { + snd_dma_free_pages(dmab); + return 0; +@@ -66,7 +66,7 @@ static int skl_dsp_setup_spib(struct device *dev, unsigned int size, + return 0; + } + +-static int skl_dsp_prepare(struct device *dev, unsigned int format, ++int skl_dsp_prepare(struct device *dev, unsigned int format, + unsigned int size, struct snd_dma_buffer *dmab) + { + struct hdac_bus *bus = dev_get_drvdata(dev); +@@ -98,7 +98,7 @@ static int skl_dsp_prepare(struct device *dev, unsigned int format, + return stream->stream_tag; + } + +-static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag) ++int skl_dsp_trigger(struct device *dev, bool start, int stream_tag) + { + struct hdac_bus *bus = dev_get_drvdata(dev); + struct hdac_stream *stream; +@@ -116,7 +116,7 @@ static int skl_dsp_trigger(struct device *dev, bool 
start, int stream_tag) + return 0; + } + +-static int skl_dsp_cleanup(struct device *dev, ++int skl_dsp_cleanup(struct device *dev, + struct snd_dma_buffer *dmab, int stream_tag) + { + struct hdac_bus *bus = dev_get_drvdata(dev); +diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c +index 5a2c35f58fda..ca2e18666582 100644 +--- a/sound/soc/intel/skylake/skl-sst-cldma.c ++++ b/sound/soc/intel/skylake/skl-sst-cldma.c +@@ -152,8 +152,8 @@ static void skl_cldma_cleanup(struct sst_dsp *ctx) + skl_cldma_cleanup_spb(ctx); + skl_cldma_stream_clear(ctx); + +- ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data); +- ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl); ++ skl_free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data); ++ skl_free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl); + } + + int skl_cldma_wait_interruptible(struct sst_dsp *ctx) +@@ -337,18 +337,18 @@ int skl_cldma_prepare(struct sst_dsp *ctx) + ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop; + + /* Allocate buffer*/ +- ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev, ++ ret = skl_alloc_dma_buf(ctx->dev, + &ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize); + if (ret < 0) { + dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret); + return ret; + } + /* Setup Code loader BDL */ +- ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev, ++ ret = skl_alloc_dma_buf(ctx->dev, + &ctx->cl_dev.dmab_bdl, PAGE_SIZE); + if (ret < 0) { + dev_err(ctx->dev, "Alloc buffer for blde failed: %x\n", ret); +- ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data); ++ skl_free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data); + return ret; + } + bdl = (__le32 *)ctx->cl_dev.dmab_bdl.area; +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index 8aba81c8c13e..1523ac7115b7 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -203,6 +203,15 @@ struct skl_module_table { + struct list_head list; + }; + ++int 
skl_alloc_dma_buf(struct device *dev, ++ struct snd_dma_buffer *dmab, size_t size); ++int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab); ++int skl_dsp_prepare(struct device *dev, unsigned int format, ++ unsigned int size, struct snd_dma_buffer *dmab); ++int skl_dsp_trigger(struct device *dev, bool start, int stream_tag); ++int skl_dsp_cleanup(struct device *dev, struct snd_dma_buffer *dmab, ++ int stream_tag); ++ + void skl_cldma_process_intr(struct sst_dsp *ctx); + void skl_cldma_int_disable(struct sst_dsp *ctx); + int skl_cldma_prepare(struct sst_dsp *ctx); +-- +2.17.1 + diff --git a/patches/0020-Replace-CPU_STARTING-CPU_DYING-with-CPU_UP_PREPARE-.trusty b/patches/0020-Replace-CPU_STARTING-CPU_DYING-with-CPU_UP_PREPARE-.trusty new file mode 100644 index 0000000000..29e6021c8d --- /dev/null +++ b/patches/0020-Replace-CPU_STARTING-CPU_DYING-with-CPU_UP_PREPARE-.trusty @@ -0,0 +1,49 @@ +From 5bea6a55ab729479a28d88dc20863cb03ebd5e70 Mon Sep 17 00:00:00 2001 +From: "Deng, Wei A" +Date: Wed, 16 Nov 2016 16:31:43 +0800 +Subject: [PATCH 20/63] Replace CPU_STARTING/CPU_DYING with + CPU_UP_PREPARE/CPU_DEAD + +CPU_STARTING and CPU_DYING notifier are removed from kernel 4.9. +Add this patch to replace them with CPU_UP_PREPARE/CPU_DEAD. 
+ +Change-Id: I1f48e7a8598dc684e70c8e4bc678723cbb1a0353 +Signed-off-by: Deng, Wei A +--- + drivers/trusty/trusty-irq.c | 4 ++-- + drivers/trusty/trusty-virtio.c | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index 2c2a792a3636..aeb0918dc572 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -280,10 +280,10 @@ static int trusty_irq_cpu_notify(struct notifier_block *nb, + dev_dbg(is->dev, "%s: 0x%lx\n", __func__, action); + + switch (action & ~CPU_TASKS_FROZEN) { +- case CPU_STARTING: ++ case CPU_UP_PREPARE: + trusty_irq_cpu_up(is); + break; +- case CPU_DYING: ++ case CPU_DEAD: + trusty_irq_cpu_down(is); + break; + } +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index f00c4ece03bf..a48f4f9884a8 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -320,7 +320,7 @@ static struct virtqueue *_find_vq(struct virtio_device *vdev, + /* da field is only 32 bit wide. 
Use previously unused 'reserved' field + * to store top 32 bits of 64-bit address + */ +- tvr->vr_descr->reserved = (u32)(pa >> 32); ++ tvr->vr_descr->pa = (u32)(pa >> 32); + + dev_info(&vdev->dev, "vring%d: va(pa) %p(%llx) qsz %d notifyid %d\n", + id, tvr->vaddr, (u64)tvr->paddr, tvr->elem_num, tvr->notifyid); +-- +2.17.1 + diff --git a/patches/0020-SEP-fix-for-undeclared-variable-in-lwpmudrv.c.sep-socwatch b/patches/0020-SEP-fix-for-undeclared-variable-in-lwpmudrv.c.sep-socwatch new file mode 100644 index 0000000000..d95692166f --- /dev/null +++ b/patches/0020-SEP-fix-for-undeclared-variable-in-lwpmudrv.c.sep-socwatch @@ -0,0 +1,53 @@ +From ad1af89286b2508cef3641173eda1a703b12f22a Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Mon, 3 Jun 2019 13:03:32 -0700 +Subject: [PATCH 20/27] SEP fix for undeclared variable in lwpmudrv.c + +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +index ab827edf3e7c..8ae975b9a833 100755 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -3228,7 +3228,7 @@ static OS_STATUS lwpmudrv_Read_Counters_And_Switch_Group(IOCTL_ARGS arg) + // step 2 + // if per_cpu_tsc is not defined, read cpu0's tsc and save in var cpu_tsc[0] + // if per_cpu_tsc is defined, read all cpu's tsc and save in var cpu_tsc by lwpmudrv_Fill_TSC_Info +-#if !defined(CONFIG_PREEMPT_COUNT) ++#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); +@@ -3297,7 +3297,7 @@ static OS_STATUS lwpmudrv_Read_Counters_And_Switch_Group(IOCTL_ARGS arg) + + // step 9 + // if per_cpu_tsc is defined, read all cpu's tsc and save in cpu_tsc for next run +-#if 
!defined(CONFIG_PREEMPT_COUNT) ++#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); +@@ -3384,7 +3384,7 @@ static OS_STATUS lwpmudrv_Read_And_Reset_Counters(IOCTL_ARGS arg) + // step 2 + // if per_cpu_tsc is not defined, read cpu0's tsc into var cpu_tsc[0] + // if per_cpu_tsc is defined, read all cpu's tsc into var cpu_tsc by lwpmudrv_Fill_TSC_Info +-#if !defined(CONFIG_PREEMPT_COUNT) ++#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); +@@ -3450,7 +3450,7 @@ static OS_STATUS lwpmudrv_Read_And_Reset_Counters(IOCTL_ARGS arg) + + // step 8 + // if per_cpu_tsc is defined, read all cpu's tsc and save in cpu_tsc for next run +-#if !defined(CONFIG_PREEMPT_COUNT) ++#if !defined(CONFIG_PREEMPT_COUNT) && !defined(DRV_SEP_ACRN_ON) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); +-- +2.17.1 + diff --git a/patches/0020-char-rpmb-Document-Replay-Protected-Memory-Block-.security b/patches/0020-char-rpmb-Document-Replay-Protected-Memory-Block-.security new file mode 100644 index 0000000000..4e63f93118 --- /dev/null +++ b/patches/0020-char-rpmb-Document-Replay-Protected-Memory-Block-.security @@ -0,0 +1,281 @@ +From 0a1447bbd99f61a715617edae537c429ab146b9d Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Tue, 19 Jul 2016 00:08:05 +0300 +Subject: [PATCH 20/65] char: rpmb: Document Replay Protected Memory Block + (RPMB) subsystem + +Add rpmb documentatin in sphinx format. + +V7: new in the series +V8: Rebase for v4.10 fix conf.py +V9: 1. Rebase for v4.17 + 2. Add SPDX intentifiers. + 3. Move under driver-api + 4. Drop req_cmd() +V10: Update docs. 
+V11: index.rst update + +Change-Id: I4ec3481a8cf443ea6f5fb88a11b616d815163e8c +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +--- + Documentation/conf.py | 2 + + Documentation/driver-api/index.rst | 1 + + Documentation/driver-api/rpmb/index.rst | 18 +++ + .../driver-api/rpmb/introduction.rst | 125 ++++++++++++++++++ + Documentation/driver-api/rpmb/rpmb-tool.rst | 19 +++ + .../driver-api/rpmb/simulation-device.rst | 21 +++ + MAINTAINERS | 1 + + 7 files changed, 187 insertions(+) + create mode 100644 Documentation/driver-api/rpmb/index.rst + create mode 100644 Documentation/driver-api/rpmb/introduction.rst + create mode 100644 Documentation/driver-api/rpmb/rpmb-tool.rst + create mode 100644 Documentation/driver-api/rpmb/simulation-device.rst + +diff --git a/Documentation/conf.py b/Documentation/conf.py +index a8fe845832bc..9fbdbb8f5f2c 100644 +--- a/Documentation/conf.py ++++ b/Documentation/conf.py +@@ -421,6 +421,8 @@ latex_documents = [ + 'The kernel development community', 'manual'), + ('userspace-api/index', 'userspace-api.tex', 'The Linux kernel user-space API guide', + 'The kernel development community', 'manual'), ++ ('rpmb/index', 'rpmb.tex', 'Linux RPMB Subsystem Documentation', ++ 'The kernel development community', 'manual'), + ] + + # Add all other index files from Documentation/ subdirectories +diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst +index 38e638abe3eb..7882b5fc9478 100644 +--- a/Documentation/driver-api/index.rst ++++ b/Documentation/driver-api/index.rst +@@ -103,6 +103,7 @@ available subsections can be seen below. + xilinx/index + xillybus + zorro ++ rpmb/index + + .. only:: subproject and html + +diff --git a/Documentation/driver-api/rpmb/index.rst b/Documentation/driver-api/rpmb/index.rst +new file mode 100644 +index 000000000000..15ebd67dde0f +--- /dev/null ++++ b/Documentation/driver-api/rpmb/index.rst +@@ -0,0 +1,18 @@ ++.. 
SPDX-License-Identifier: GPL-2.0 ++ ++============================================== ++Replay Protected Memory Block (RPMB) subsystem ++============================================== ++ ++.. toctree:: ++ ++ introduction ++ simulation-device.rst ++ rpmb-tool.rst ++ ++.. only:: subproject ++ ++ Indices ++ ======= ++ ++ * :ref:`genindex` +diff --git a/Documentation/driver-api/rpmb/introduction.rst b/Documentation/driver-api/rpmb/introduction.rst +new file mode 100644 +index 000000000000..fbd64c57c235 +--- /dev/null ++++ b/Documentation/driver-api/rpmb/introduction.rst +@@ -0,0 +1,125 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++============= ++Introduction: ++============= ++ ++Some storage technologies such is EMMC, UFS, and NVMe support RPMB ++hardware partition with common protocol and frame layout. ++The RPMB partition `cannot` be accessed via standard block layer, ++but by a set of specific commands: ++ ++WRITE, READ, GET_WRITE_COUNTER, and PROGRAM_KEY. ++ ++The commands and the data are embedded within :c:type:`rpmb_frame `. ++ ++An RPMB partition provides authenticated and replay protected access, ++hence it is suitable as a secure storage. ++ ++In-kernel API ++------------- ++The RPMB layer aims to provide in-kernel API for Trusted Execution ++Environment (TEE) devices that are capable to securely compute the block ++frame signature. In case a TEE device wish to store a replay protected ++data, it creates an RPMB frame with requested data and computes HMAC of ++the frame, then it requests the storage device via RPMB layer to store ++the data. ++ ++The layer provides APIs, for :c:func:`rpmb_cmd_seq()` for issuing sequence ++of raw RPMB protocol frames, which is close to the functionality provided ++by emmc multi ioctl interface. ++ ++.. 
c:function:: int rpmb_cmd_seq(struct rpmb_dev *rdev, struct rpmb_cmd *cmds, u32 ncmds); ++ ++In addition the layer provides API for :c:func:`rpmb_get_capacity()` that returns ++the capacity of the rbmp device in units of 128K ++ ++.. c:function:: int rpmb_get_capacity(struct rpmb_dev *rdev) ++ ++ ++A TEE driver can claim the RPMB interface, for example, via ++:c:func:`class_interface_register`: ++ ++.. code-block:: c ++ ++ struct class_interface tee_rpmb_intf = { ++ .class = &rpmb_class; ++ .add_dev = rpmb_add_device; ++ .remove_dev = rpmb_remove_device; ++ } ++ class_interface_register(&tee_rpmb_intf); ++ ++ ++RPMB device registration ++---------------------------- ++ ++A storage device registers its RPMB hardware (eMMC or NVMe) partition ++or RPMB W-LUN (UFS) with the RPMB layer :c:func:`rpmb_dev_register` ++providing an implementation for :c:func:`rpmb_seq_cmd()` handler. ++The interface enables sending sequence of RPMB standard frames. ++ ++.. code-block:: c ++ ++ struct rpmb_ops mmc_rpmb_dev_ops = { ++ .cmd_seq = mmc_blk_rpmb_cmd_seq, ++ .type = RPMB_TYPE_EMMC, ++ ... ++ } ++ rpmb_dev_register(disk_to_dev(part_md->disk), &mmc_rpmb_dev_ops); ++ ++ ++User space API ++-------------- ++ ++A parallel user space API is provided via /dev/rpmbX character ++device with two IOCTL commands. ++- First ``RPMB_IOC_VER_CMD``, return driver protocol version, ++- second ``RPMB_IOC_CAP_CMD`` return capability structure, ++- last ``RPMB_IOC_SEQ_CMD`` where the whole RPMB sequence, and ++ including ``RESULT_READ`` is supplied by the caller. ++https://android.googlesource.com/trusty/app/storage/ ++ ++.. 
code-block:: c ++ ++ struct rpmb_ioc_req_cmd ireq; ++ int ret; ++ ++ ireq.req_type = RPMB_WRITE_DATA; ++ rpmb_ioc_cmd_set(ireq.icmd, RPMB_F_WRITE, frames_in, cnt_in); ++ rpmb_ioc_cmd_set(ireq.ocmd, 0, frames_out, cnt_out); ++ ++ ret = ioctl(fd, RPMB_IOC_REQ_CMD, &ireq); ++ ++There are some differences in RPMB API usage over NVMe, eMMC and UFS cases, ++such as RPMB frame structure and size, big/little endian fields etc ++ ++UFS and eMMC use the JDEC RPMB Data Frame described in JESD220B standard. ++Each frame includes 256B of data that is being encrypted along ++with other fields. If several data frames are sent as part of one ++request or response then the input message to MAC is the concatenation ++of bytes [228:511] of each data frame in the order in which the data ++frames are sent. The MAC is added only to the last data frame. ++All the fields in the JDEC frame have big endian bit order. ++ ++ ++NVMe RPMB Data Frame is described in NVM Express standard. ++Each frame includes data of 512B * block_count size. ++The capabilities of the device (such as capacity, rd/wr_cnt_max) are taken ++from the Replay Protected Memory Block Support (RPMBS) of the Identify ++Control Data Structure of NVMe. ++All the fields in the NVMe frame have little endian bit order. ++ ++The only Authentication Method that is currently supported for all ++device types is HMAC SHA-256. ++ ++ ++API ++--- ++.. kernel-doc:: include/linux/rpmb.h ++ ++.. kernel-doc:: drivers/char/rpmb/core.c ++ ++.. kernel-doc:: include/uapi/linux/rpmb.h ++ ++.. kernel-doc:: drivers/char/rpmb/cdev.c ++ +diff --git a/Documentation/driver-api/rpmb/rpmb-tool.rst b/Documentation/driver-api/rpmb/rpmb-tool.rst +new file mode 100644 +index 000000000000..b97b70710029 +--- /dev/null ++++ b/Documentation/driver-api/rpmb/rpmb-tool.rst +@@ -0,0 +1,19 @@ ++.. 
SPDX-License-Identifier: GPL-2.0 ++========== ++RPMB Tool ++========== ++ ++There is a sample rpmb tool under tools/rpmb/ directory that exercises ++the RPMB devices via RPMB character devices interface (/dev/rpmbX) ++ ++.. code-block:: none ++ ++ rpmb [-v] [-r|-s] ++ ++ rpmb get-info ++ rpmb program-key ++ rpmb write-counter [KEY_FILE] ++ rpmb write-blocks
++ rpmb read-blocks
[KEY_FILE] ++ ++ rpmb -v/--verbose: runs in verbose mode +diff --git a/Documentation/driver-api/rpmb/simulation-device.rst b/Documentation/driver-api/rpmb/simulation-device.rst +new file mode 100644 +index 000000000000..f6b8a83f11c1 +--- /dev/null ++++ b/Documentation/driver-api/rpmb/simulation-device.rst +@@ -0,0 +1,21 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++====================== ++RPMB Simulation Device ++====================== ++ ++RPMB partition simulation device is a virtual device that ++provides simulation of the RPMB protocol and uses kernel memory ++as storage. ++ ++This driver cannot promise any real security, it is suitable for testing ++of the RPMB subsystem it self and mostly it was found useful for testing of ++RPMB applications prior to RPMB key provisioning/programming as ++The RPMB key programming can be performed only once in the life time ++of the storage device. ++ ++Implementation: ++--------------- ++ ++.. kernel-doc:: drivers/char/rpmb/rpmb_sim.c ++ +diff --git a/MAINTAINERS b/MAINTAINERS +index 3863d92b1fe9..a5fd8483214d 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -13984,6 +13984,7 @@ F: drivers/char/rpmb/* + F: include/uapi/linux/rpmb.h + F: include/linux/rpmb.h + F: Documentation/ABI/testing/sysfs-class-rpmb ++F: Documentation/driver-api/rpmb.rst + F: tools/rpmb/ + + RTL2830 MEDIA DRIVER +-- +2.17.1 + diff --git a/patches/0020-dmaengine-dw-Switch-Intel-Elkhart-Lake-to-use-DMA-cro.lpss b/patches/0020-dmaengine-dw-Switch-Intel-Elkhart-Lake-to-use-DMA-cro.lpss new file mode 100644 index 0000000000..e4a799e091 --- /dev/null +++ b/patches/0020-dmaengine-dw-Switch-Intel-Elkhart-Lake-to-use-DMA-cro.lpss @@ -0,0 +1,71 @@ +From 62996baa7d89af8902a41b28b6466ff74d5ed45b Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Tue, 20 Aug 2019 16:02:41 +0300 +Subject: [PATCH 20/40] dmaengine: dw: Switch Intel Elkhart Lake to use DMA + crossbar +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + 
+It's known that Intel® PSE DMA controller supports DMA crossbar +on Intel Elkhart Lake. Switch Intel Elkhart Lake to use the DMA crossbar. + +Signed-off-by: Andy Shevchenko +--- + drivers/dma/dw/internal.h | 6 ++++++ + drivers/dma/dw/pci.c | 6 +++--- + drivers/dma/dw/platform.c | 6 +++--- + 3 files changed, 12 insertions(+), 6 deletions(-) + +diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h +index 2bf119386dc6..1a98a850219c 100644 +--- a/drivers/dma/dw/internal.h ++++ b/drivers/dma/dw/internal.h +@@ -77,4 +77,10 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = { + .remove = idma32_dma_remove, + }; + ++static __maybe_unused const struct dw_dma_chip_pdata idma32_xbar_pdata = { ++ .pdata = &idma32_pdata, ++ .probe = idma32_xbar_probe, ++ .remove = idma32_xbar_remove, ++}; ++ + #endif /* _DMA_DW_INTERNAL_H */ +diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c +index cf6e8ec4c0ff..051a4df82263 100644 +--- a/drivers/dma/dw/pci.c ++++ b/drivers/dma/dw/pci.c +@@ -116,9 +116,9 @@ static const struct pci_device_id dw_pci_id_table[] = { + { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_dma_chip_pdata }, + + /* Elkhart Lake iDMA 32-bit (PSE DMA) */ +- { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_chip_pdata }, +- { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_chip_pdata }, +- { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_chip_pdata }, ++ { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_xbar_pdata }, ++ { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_xbar_pdata }, ++ { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_xbar_pdata }, + + /* Haswell */ + { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_dma_chip_pdata }, +diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c +index 0585d749d935..bffc79a620ae 100644 +--- a/drivers/dma/dw/platform.c ++++ b/drivers/dma/dw/platform.c +@@ -149,9 +149,9 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = { + { "808622C0", 
(kernel_ulong_t)&dw_dma_chip_pdata }, + + /* Elkhart Lake iDMA 32-bit (PSE DMA) */ +- { "80864BB4", (kernel_ulong_t)&idma32_chip_pdata }, +- { "80864BB5", (kernel_ulong_t)&idma32_chip_pdata }, +- { "80864BB6", (kernel_ulong_t)&idma32_chip_pdata }, ++ { "80864BB4", (kernel_ulong_t)&idma32_xbar_pdata }, ++ { "80864BB5", (kernel_ulong_t)&idma32_xbar_pdata }, ++ { "80864BB6", (kernel_ulong_t)&idma32_xbar_pdata }, + + { } + }; +-- +2.17.1 + diff --git a/patches/0020-drm-i915-Add-function-to-determine-if-a-slice-has-a-su.drm b/patches/0020-drm-i915-Add-function-to-determine-if-a-slice-has-a-su.drm new file mode 100644 index 0000000000..c5ef30f3bf --- /dev/null +++ b/patches/0020-drm-i915-Add-function-to-determine-if-a-slice-has-a-su.drm @@ -0,0 +1,90 @@ +From b4aa582b07fd021ff1d101119df9108a033abfac Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:03:04 -0700 +Subject: [PATCH 020/690] drm/i915: Add function to determine if a slice has a + subslice + +Add a new function to determine whether a particular slice +has a given subslice. 
+ +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-9-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/gt/intel_sseu.h | 16 ++++++++++++++++ + drivers/gpu/drm/i915/intel_device_info.c | 9 ++++----- + 2 files changed, 20 insertions(+), 5 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h +index 73a9064291a2..7703d75f2da3 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.h ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h +@@ -10,6 +10,8 @@ + #include + #include + ++#include "i915_gem.h" ++ + struct drm_i915_private; + + #define GEN_MAX_SLICES (6) /* CNL upper bound */ +@@ -69,6 +71,20 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu) + return value; + } + ++static inline bool ++intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice, ++ int subslice) ++{ ++ u8 mask; ++ int ss_idx = subslice / BITS_PER_BYTE; ++ ++ GEM_BUG_ON(ss_idx >= sseu->ss_stride); ++ ++ mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx]; ++ ++ return mask & BIT(subslice % BITS_PER_BYTE); ++} ++ + void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + u8 max_subslices, u8 max_eus_per_subslice); + +diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c +index 1a45728ac712..c20f74ee5f22 100644 +--- a/drivers/gpu/drm/i915/intel_device_info.c ++++ b/drivers/gpu/drm/i915/intel_device_info.c +@@ -210,10 +210,9 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) + intel_sseu_set_subslices(sseu, s, (ss_en >> ss_idx) & + ss_en_mask); + +- for (ss = 0; ss < sseu->max_subslices; ss++) { +- if (sseu->subslice_mask[s] & BIT(ss)) ++ for (ss = 0; ss < sseu->max_subslices; ss++) ++ if (intel_sseu_has_subslice(sseu, s, ss)) + sseu_set_eus(sseu, s, ss, eu_en); +- } + } + } + sseu->eu_per_subslice = hweight8(eu_en); +@@ -395,7 +394,7 @@ static 
void gen9_sseu_info_init(struct drm_i915_private *dev_priv) + int eu_per_ss; + u8 eu_disabled_mask; + +- if (!(sseu->subslice_mask[s] & BIT(ss))) ++ if (!intel_sseu_has_subslice(sseu, s, ss)) + /* skip disabled subslice */ + continue; + +@@ -501,7 +500,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) + u8 eu_disabled_mask; + u32 n_disabled; + +- if (!(sseu->subslice_mask[s] & BIT(ss))) ++ if (!intel_sseu_has_subslice(sseu, s, ss)) + /* skip disabled subslice */ + continue; + +-- +2.17.1 + diff --git a/patches/0020-hypercall-refine-HC-ID-and-parameter-number.acrn b/patches/0020-hypercall-refine-HC-ID-and-parameter-number.acrn new file mode 100644 index 0000000000..19b66d224d --- /dev/null +++ b/patches/0020-hypercall-refine-HC-ID-and-parameter-number.acrn @@ -0,0 +1,148 @@ +From 3464063af4a3c1278f2873d5c8a0753cbdfd401c Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:57 +0800 +Subject: [PATCH 020/150] hypercall: refine HC ID and parameter number + +Change-Id: Ie5b73055add4c69b4dbf5cae1bb8bf941997ee6b +Tracked-On: 218445 +Signed-off-by: Jason Chen CJ +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/vhm/vhm_hypercall.c | 7 +++++- + include/linux/vhm/acrn_common.h | 10 ++++----- + include/linux/vhm/acrn_hv_defs.h | 36 ++++++++++++++++--------------- + include/linux/vhm/vhm_hypercall.h | 1 + + 4 files changed, 31 insertions(+), 23 deletions(-) + +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index b2738474afaf..741b8bd837cc 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -52,9 +52,14 @@ + #include + #include + ++inline long hcall_get_api_version(unsigned long api_version) ++{ ++ return acrn_hypercall1(HC_GET_API_VERSION, api_version); ++} ++ + inline long hcall_create_vm(unsigned long vminfo) + { +- return acrn_hypercall2(HC_CREATE_VM, 0, vminfo); ++ return acrn_hypercall1(HC_CREATE_VM, vminfo); + } 
+ + inline long hcall_start_vm(unsigned long vmid) +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index ed9dd7fc6f82..29af96a7ee95 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -147,11 +147,11 @@ struct vhm_request_buffer { + + /* Common API params */ + struct acrn_create_vm { +- int32_t vmid; /* OUT: return vmid to VHM. Keep it first field */ +- uint32_t vcpu_num; /* IN: VM vcpu number */ +- uint8_t GUID[16]; /* IN: GUID of this vm */ +- uint8_t trusty_enabled;/* IN: whether trusty is enabled */ +- uint8_t reserved[31]; /* Reserved for future use */ ++ int32_t vmid; /* OUT: return vmid to VHM. Keep it first field */ ++ uint32_t vcpu_num; /* IN: VM vcpu number */ ++ uint8_t GUID[16]; /* IN: GUID of this vm */ ++ uint8_t trusty_enabled;/* IN: whether trusty is enabled */ ++ uint8_t reserved[31]; /* Reserved for future use */ + } __attribute__((aligned(8))); + + struct acrn_create_vcpu { +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index eb1d4c974a3e..bb57fb4f5cdd 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -63,49 +63,51 @@ + + #define _HC_ID(x, y) (((x)<<24)|(y)) + +-#define HC_ID 0x7FUL ++#define HC_ID 0x80UL ++ ++/* general */ ++#define HC_ID_GEN_BASE 0x0UL ++#define HC_GET_API_VERSION _HC_ID(HC_ID, HC_ID_GEN_BASE + 0x00) + + /* VM management */ +-#define HC_ID_VM_BASE 0x0UL +-#define HC_GET_API_VERSION _HC_ID(HC_ID, HC_ID_VM_BASE + 0x00) +-#define HC_CREATE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x01) +-#define HC_DESTROY_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x02) +-#define HC_START_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03) +-#define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04) +-#define HC_CREATE_VCPU _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05) ++#define HC_ID_VM_BASE 0x10UL ++#define HC_CREATE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x00) ++#define HC_DESTROY_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x01) ++#define HC_START_VM 
_HC_ID(HC_ID, HC_ID_VM_BASE + 0x02) ++#define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03) ++#define HC_CREATE_VCPU _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04) + + /* IRQ and Interrupts */ +-#define HC_ID_IRQ_BASE 0x100UL ++#define HC_ID_IRQ_BASE 0x20UL + #define HC_ASSERT_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x00) + #define HC_DEASSERT_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x01) + #define HC_PULSE_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x02) + #define HC_INJECT_MSI _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x03) + + /* DM ioreq management */ +-#define HC_ID_IOREQ_BASE 0x200UL ++#define HC_ID_IOREQ_BASE 0x30UL + #define HC_SET_IOREQ_BUFFER _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00) + #define HC_NOTIFY_REQUEST_FINISH _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x01) + +- + /* Guest memory management */ +-#define HC_ID_MEM_BASE 0x300UL ++#define HC_ID_MEM_BASE 0x40UL + #define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00) + #define HC_VM_GPA2HPA _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01) + + /* PCI assignment*/ +-#define HC_ID_PCI_BASE 0x400UL ++#define HC_ID_PCI_BASE 0x50UL + #define HC_ASSIGN_PTDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x00) + #define HC_DEASSIGN_PTDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x01) + #define HC_VM_PCI_MSIX_REMAP _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x02) + #define HC_SET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x03) + #define HC_RESET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x04) + +-/* TRACE */ +-#define HC_ID_TRACE_BASE 0x600UL +-#define HC_ACRN_SBUF_SETUP _HC_ID(HC_ID, HC_ID_TRACE_BASE + 0x00) ++/* DEBUG */ ++#define HC_ID_DBG_BASE 0x60UL ++#define HC_SBUF_SETUP _HC_ID(HC_ID, HC_ID_DBG_BASE + 0x00) + + #define ACRN_DOM0_VMID (0UL) +-#define ACRN_INVALID_VMID (-1UL) ++#define ACRN_INVALID_VMID (-1) + #define ACRN_INVALID_HPA (-1UL) + + /* Generic memory attributes */ +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index b40f8f898046..f4a5793f3ef7 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ 
b/include/linux/vhm/vhm_hypercall.h +@@ -137,6 +137,7 @@ static inline long acrn_hypercall4(unsigned long hcall_id, unsigned long param1, + return result; + } + ++inline long hcall_get_api_version(unsigned long api_version); + inline long hcall_create_vm(unsigned long vminfo); + inline long hcall_start_vm(unsigned long vmid); + inline long hcall_pause_vm(unsigned long vmid); +-- +2.17.1 + diff --git a/patches/0020-net-stmmac-introduce-DMA-interrupt-status-mas.connectivity b/patches/0020-net-stmmac-introduce-DMA-interrupt-status-mas.connectivity new file mode 100644 index 0000000000..1a5d3affe9 --- /dev/null +++ b/patches/0020-net-stmmac-introduce-DMA-interrupt-status-mas.connectivity @@ -0,0 +1,302 @@ +From 263c984e091fe868155f7d33e1d88ae16c66d808 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Fri, 26 Jul 2019 03:23:36 +0800 +Subject: [PATCH 020/108] net: stmmac: introduce DMA interrupt status masking + per traffic direction + +In preparation to make stmmac support multi-vector MSI, we introduce the +interrupt status masking according to RX, TX or RXTX. Default to use RXTX +inside stmmac_dma_interrupt(), so there is no run-time logic difference +now. 
+ +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 6 +++++ + .../net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 24 ++++++++++++++++++- + .../net/ethernet/stmicro/stmmac/dwmac4_dma.h | 21 +++++++++++++++- + .../net/ethernet/stmicro/stmmac/dwmac4_lib.c | 7 +++++- + .../net/ethernet/stmicro/stmmac/dwmac_dma.h | 22 ++++++++++++++++- + .../net/ethernet/stmicro/stmmac/dwmac_lib.c | 8 ++++++- + .../net/ethernet/stmicro/stmmac/dwxgmac2.h | 6 +++++ + .../ethernet/stmicro/stmmac/dwxgmac2_dma.c | 8 ++++++- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 2 +- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 7 +++--- + 10 files changed, 101 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index 2e4d69e5bb21..b0cb5e9f0592 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -289,6 +289,12 @@ enum dma_irq_status { + handle_tx = 0x8, + }; + ++enum dma_irq_dir { ++ DMA_DIR_RX = 0x1, ++ DMA_DIR_TX = 0x2, ++ DMA_DIR_RXTX = 0x3, ++}; ++ + /* EEE and LPI defines */ + #define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) + #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +index ddcc191febdb..a3ae17db3655 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +@@ -237,6 +237,22 @@ static const struct emac_variant emac_variant_h6 = { + #define EMAC_RX_EARLY_INT BIT(13) + #define EMAC_RGMII_STA_INT BIT(16) + ++#define EMAC_INT_MSK_COMMON EMAC_RGMII_STA_INT ++#define EMAC_INT_MSK_TX (EMAC_TX_INT | \ ++ EMAC_TX_DMA_STOP_INT | \ ++ EMAC_TX_BUF_UA_INT | \ ++ EMAC_TX_TIMEOUT_INT | \ ++ EMAC_TX_UNDERFLOW_INT | \ ++ EMAC_TX_EARLY_INT |\ ++ EMAC_INT_MSK_COMMON) ++#define EMAC_INT_MSK_RX (EMAC_RX_INT | \ ++ EMAC_RX_BUF_UA_INT | \ ++ 
EMAC_RX_DMA_STOP_INT | \ ++ EMAC_RX_TIMEOUT_INT | \ ++ EMAC_RX_OVERFLOW_INT | \ ++ EMAC_RX_EARLY_INT | \ ++ EMAC_INT_MSK_COMMON) ++ + #define MAC_ADDR_TYPE_DST BIT(31) + + /* H3 specific bits for EPHY */ +@@ -394,13 +410,19 @@ static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) + } + + static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr, +- struct stmmac_extra_stats *x, u32 chan) ++ struct stmmac_extra_stats *x, u32 chan, ++ u32 dir) + { + u32 v; + int ret = 0; + + v = readl(ioaddr + EMAC_INT_STA); + ++ if (dir == DMA_DIR_RX) ++ v &= EMAC_INT_MSK_RX; ++ else if (dir == DMA_DIR_TX) ++ v &= EMAC_INT_MSK_TX; ++ + if (v & EMAC_TX_INT) { + ret |= handle_tx; + x->tx_normal_irq_n++; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +index b66da0237d2a..153bac8bdb23 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +@@ -138,6 +138,25 @@ + #define DMA_CHAN_STATUS_TPS BIT(1) + #define DMA_CHAN_STATUS_TI BIT(0) + ++#define DMA_CHAN_STATUS_MSK_COMMON (DMA_CHAN_STATUS_NIS | \ ++ DMA_CHAN_STATUS_AIS | \ ++ DMA_CHAN_STATUS_CDE | \ ++ DMA_CHAN_STATUS_FBE) ++ ++#define DMA_CHAN_STATUS_MSK_RX (DMA_CHAN_STATUS_REB | \ ++ DMA_CHAN_STATUS_ERI | \ ++ DMA_CHAN_STATUS_RWT | \ ++ DMA_CHAN_STATUS_RPS | \ ++ DMA_CHAN_STATUS_RBU | \ ++ DMA_CHAN_STATUS_RI | \ ++ DMA_CHAN_STATUS_MSK_COMMON) ++ ++#define DMA_CHAN_STATUS_MSK_TX (DMA_CHAN_STATUS_ETI | \ ++ DMA_CHAN_STATUS_TBU | \ ++ DMA_CHAN_STATUS_TPS | \ ++ DMA_CHAN_STATUS_TI | \ ++ DMA_CHAN_STATUS_MSK_COMMON) ++ + /* Interrupt enable bits per channel */ + #define DMA_CHAN_INTR_ENA_NIE BIT(16) + #define DMA_CHAN_INTR_ENA_AIE BIT(15) +@@ -190,7 +209,7 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan); + void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan); + void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan); + int dwmac4_dma_interrupt(void __iomem *ioaddr, +- struct 
stmmac_extra_stats *x, u32 chan); ++ struct stmmac_extra_stats *x, u32 chan, u32 dir); + void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); + void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); + void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +index f2a29a90e085..211931bc814f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +@@ -115,12 +115,17 @@ void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan) + } + + int dwmac4_dma_interrupt(void __iomem *ioaddr, +- struct stmmac_extra_stats *x, u32 chan) ++ struct stmmac_extra_stats *x, u32 chan, u32 dir) + { + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); + u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + int ret = 0; + ++ if (dir == DMA_DIR_RX) ++ intr_status &= DMA_CHAN_STATUS_MSK_RX; ++ else if (dir == DMA_DIR_TX) ++ intr_status &= DMA_CHAN_STATUS_MSK_TX; ++ + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { + if (unlikely(intr_status & DMA_CHAN_STATUS_RBU)) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +index 292b880f3f9f..d8aea5277313 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +@@ -126,6 +126,26 @@ + #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ + #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ + ++#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \ ++ DMA_STATUS_AIS | \ ++ DMA_STATUS_FBI) ++ ++#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \ ++ DMA_STATUS_RWT | \ ++ DMA_STATUS_RPS | \ ++ DMA_STATUS_RU | \ ++ DMA_STATUS_RI | \ ++ DMA_STATUS_OVF | \ ++ DMA_STATUS_MSK_COMMON) ++ ++#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \ ++ DMA_STATUS_UNF | \ ++ DMA_STATUS_TJT 
| \ ++ DMA_STATUS_TU | \ ++ DMA_STATUS_TPS | \ ++ DMA_STATUS_TI | \ ++ DMA_STATUS_MSK_COMMON) ++ + #define NUM_DWMAC100_DMA_REGS 9 + #define NUM_DWMAC1000_DMA_REGS 23 + +@@ -137,7 +157,7 @@ void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan); + void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan); + void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan); + int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, +- u32 chan); ++ u32 chan, u32 dir); + int dwmac_dma_reset(void __iomem *ioaddr); + + #endif /* __DWMAC_DMA_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +index 1bc25aa86dbd..3b9fcaa20bc5 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +@@ -146,7 +146,7 @@ static void show_rx_process_state(unsigned int status) + #endif + + int dwmac_dma_interrupt(void __iomem *ioaddr, +- struct stmmac_extra_stats *x, u32 chan) ++ struct stmmac_extra_stats *x, u32 chan, u32 dir) + { + int ret = 0; + /* read the status register (CSR5) */ +@@ -158,6 +158,12 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, + show_tx_process_state(intr_status); + show_rx_process_state(intr_status); + #endif ++ ++ if (dir == DMA_DIR_RX) ++ intr_status &= DMA_STATUS_MSK_RX; ++ else if (dir == DMA_DIR_TX) ++ intr_status &= DMA_STATUS_MSK_TX; ++ + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_STATUS_AIS)) { + if (unlikely(intr_status & DMA_STATUS_UNF)) { +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +index 99037386080a..fbb559c53219 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +@@ -374,6 +374,12 @@ + #define XGMAC_TI BIT(0) + #define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4) + ++#define XGMAC_DMA_STATUS_MSK_COMMON (XGMAC_NIS | XGMAC_AIS | XGMAC_FBE) ++#define XGMAC_DMA_STATUS_MSK_RX 
(XGMAC_RBU | XGMAC_RI | \ ++ XGMAC_DMA_STATUS_MSK_COMMON) ++#define XGMAC_DMA_STATUS_MSK_TX (XGMAC_TBU | XGMAC_TPS | XGMAC_TI | \ ++ XGMAC_DMA_STATUS_MSK_COMMON) ++ + /* Descriptors */ + #define XGMAC_TDES2_IVT GENMASK(31, 16) + #define XGMAC_TDES2_IVT_SHIFT 16 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +index 965cbe3e6f51..7433409136a2 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +@@ -314,12 +314,18 @@ static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan) + } + + static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, +- struct stmmac_extra_stats *x, u32 chan) ++ struct stmmac_extra_stats *x, u32 chan, ++ u32 dir) + { + u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); + u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); + int ret = 0; + ++ if (dir == DMA_DIR_RX) ++ intr_status &= XGMAC_DMA_STATUS_MSK_RX; ++ else if (dir == DMA_DIR_TX) ++ intr_status &= XGMAC_DMA_STATUS_MSK_TX; ++ + /* ABNORMAL interrupts */ + if (unlikely(intr_status & XGMAC_AIS)) { + if (unlikely(intr_status & XGMAC_RBU)) { +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 9c41cb46b9aa..591c227cd50e 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -194,7 +194,7 @@ struct stmmac_dma_ops { + void (*start_rx)(void __iomem *ioaddr, u32 chan); + void (*stop_rx)(void __iomem *ioaddr, u32 chan); + int (*dma_interrupt) (void __iomem *ioaddr, +- struct stmmac_extra_stats *x, u32 chan); ++ struct stmmac_extra_stats *x, u32 chan, u32 dir); + /* If supported then get the optional core features */ + void (*get_hw_feature)(void __iomem *ioaddr, + struct dma_features *dma_cap); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 
672a619e8348..b2f7e1c33c34 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2064,10 +2064,10 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) + return false; + } + +-static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) ++static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) + { + int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, +- &priv->xstats, chan); ++ &priv->xstats, chan, dir); + struct stmmac_channel *ch = &priv->channel[chan]; + + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { +@@ -2105,7 +2105,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) + channels_to_check = ARRAY_SIZE(status); + + for (chan = 0; chan < channels_to_check; chan++) +- status[chan] = stmmac_napi_check(priv, chan); ++ status[chan] = stmmac_napi_check(priv, chan, ++ DMA_DIR_RXTX); + + for (chan = 0; chan < tx_channel_count; chan++) { + if (unlikely(status[chan] & tx_hard_error_bump_tc)) { +-- +2.17.1 + diff --git a/patches/0021-ASoC-Intel-Skylake-Make-dsp_ops-stream_tag-obsolete.audio b/patches/0021-ASoC-Intel-Skylake-Make-dsp_ops-stream_tag-obsolete.audio new file mode 100644 index 0000000000..1b33a557c6 --- /dev/null +++ b/patches/0021-ASoC-Intel-Skylake-Make-dsp_ops-stream_tag-obsolete.audio @@ -0,0 +1,133 @@ +From 285780316590ad3008dfe2cd1e3c29cad3451958 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Sun, 11 Aug 2019 15:31:16 +0200 +Subject: [PATCH 021/193] ASoC: Intel: Skylake: Make dsp_ops::stream_tag + obsolete + +stream_tag field is the last obstacle in removing skl_dsp_loader_ops +structure. It is required only during fw load procedure and never lives +outside of that process - prepare_fw functions take place directly +before actual FW file transfer and once that's done, these are +redundant. 
Update _prepare_fw routines to return stream_tag on success +and use said return immediately in _transfer_fw_host_dma. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 14 ++++++-------- + sound/soc/intel/skylake/cnl-sst.c | 13 ++++++------- + 2 files changed, 12 insertions(+), 15 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index aa3e5017d749..3b9fe6cf5687 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -107,7 +107,6 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx, + return stream_tag; + } + +- ctx->dsp_ops.stream_tag = stream_tag; + memcpy(ctx->dmab.area, fwdata, fwsize); + + /* Step 1: Power up core 0 and core1 */ +@@ -159,7 +158,7 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx, + goto base_fw_load_failed; + } + +- return ret; ++ return stream_tag; + + base_fw_load_failed: + skl_dsp_cleanup(ctx->dev, &ctx->dmab, stream_tag); +@@ -168,16 +167,16 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx, + return ret; + } + +-static int sst_transfer_fw_host_dma(struct sst_dsp *ctx) ++static int sst_transfer_fw_host_dma(struct sst_dsp *ctx, int stream_tag) + { + int ret; + +- skl_dsp_trigger(ctx->dev, true, ctx->dsp_ops.stream_tag); ++ skl_dsp_trigger(ctx->dev, true, stream_tag); + ret = sst_dsp_register_poll(ctx, BXT_ADSP_FW_STATUS, SKL_FW_STS_MASK, + BXT_ROM_INIT, BXT_BASEFW_TIMEOUT, "Firmware boot"); + +- skl_dsp_trigger(ctx->dev, false, ctx->dsp_ops.stream_tag); +- skl_dsp_cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag); ++ skl_dsp_trigger(ctx->dev, false, stream_tag); ++ skl_dsp_cleanup(ctx->dev, &ctx->dmab, stream_tag); + + return ret; + } +@@ -207,7 +206,6 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx) + stripped_fw.size = ctx->fw->size; + skl_dsp_strip_extended_manifest(&stripped_fw); + +- + for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) { + ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size); + if 
(ret == 0) +@@ -223,7 +221,7 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx) + goto sst_load_base_firmware_failed; + } + +- ret = sst_transfer_fw_host_dma(ctx); ++ ret = sst_transfer_fw_host_dma(ctx, ret); + if (ret < 0) { + dev_err(ctx->dev, "Transfer firmware failed %d\n", ret); + dev_info(ctx->dev, "Error code=0x%x: FW status=0x%x\n", +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 13ec2705ddbc..2bf61b3fdc8c 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -54,7 +54,6 @@ static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize) + return stream_tag; + } + +- ctx->dsp_ops.stream_tag = stream_tag; + memcpy(ctx->dmab.area, fwdata, fwsize); + + /* purge FW request */ +@@ -81,7 +80,7 @@ static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize) + goto base_fw_load_failed; + } + +- return 0; ++ return stream_tag; + + base_fw_load_failed: + skl_dsp_cleanup(ctx->dev, &ctx->dmab, stream_tag); +@@ -90,17 +89,17 @@ static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize) + return ret; + } + +-static int sst_transfer_fw_host_dma(struct sst_dsp *ctx) ++static int sst_transfer_fw_host_dma(struct sst_dsp *ctx, int stream_tag) + { + int ret; + +- skl_dsp_trigger(ctx->dev, true, ctx->dsp_ops.stream_tag); ++ skl_dsp_trigger(ctx->dev, true, stream_tag); + ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK, + CNL_FW_INIT, CNL_BASEFW_TIMEOUT, + "firmware boot"); + +- skl_dsp_trigger(ctx->dev, false, ctx->dsp_ops.stream_tag); +- skl_dsp_cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag); ++ skl_dsp_trigger(ctx->dev, false, stream_tag); ++ skl_dsp_cleanup(ctx->dev, &ctx->dmab, stream_tag); + + return ret; + } +@@ -136,7 +135,7 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx) + goto cnl_load_base_firmware_failed; + } + +- ret = sst_transfer_fw_host_dma(ctx); ++ ret = sst_transfer_fw_host_dma(ctx, 
ret); + if (ret < 0) { + dev_err(ctx->dev, "transfer firmware failed: %d\n", ret); + cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK); +-- +2.17.1 + diff --git a/patches/0021-block-export-block_class-to-be-used-by-class-inte.security b/patches/0021-block-export-block_class-to-be-used-by-class-inte.security new file mode 100644 index 0000000000..46cee18371 --- /dev/null +++ b/patches/0021-block-export-block_class-to-be-used-by-class-inte.security @@ -0,0 +1,30 @@ +From 5fc36aa4af67583bc02cde7be9e0898f3a791dcd Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Tue, 29 Mar 2016 16:14:01 +0300 +Subject: [PATCH 21/65] block: export block_class to be used by class + interfaces + +Enable access to block devices via class_interface outside +of the block subsystem. + +Change-Id: I6115a9b4655e47ec42e47c9720da8784139557bd +Signed-off-by: Tomas Winkler +--- + block/genhd.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/block/genhd.c b/block/genhd.c +index 26b31fcae217..50c3b6f44b63 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -1336,6 +1336,7 @@ static void disk_release(struct device *dev) + struct class block_class = { + .name = "block", + }; ++EXPORT_SYMBOL_GPL(block_class); + + static char *block_devnode(struct device *dev, umode_t *mode, + kuid_t *uid, kgid_t *gid) +-- +2.17.1 + diff --git a/patches/0021-drm-i915-Refactor-instdone-loops-on-new-subslice-funct.drm b/patches/0021-drm-i915-Refactor-instdone-loops-on-new-subslice-funct.drm new file mode 100644 index 0000000000..ea83523648 --- /dev/null +++ b/patches/0021-drm-i915-Refactor-instdone-loops-on-new-subslice-funct.drm @@ -0,0 +1,158 @@ +From 6d9f5cf3fed120a86c2bc10762c77c60d8014843 Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:03:05 -0700 +Subject: [PATCH 021/690] drm/i915: Refactor instdone loops on new subslice + functions + +Refactor instdone loops to use the new intel_sseu_has_subslice +function. 
+ +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-10-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/gt/intel_engine_cs.c | 3 +- + drivers/gpu/drm/i915/gt/intel_engine_types.h | 30 +++++++++----------- + drivers/gpu/drm/i915/gt/intel_hangcheck.c | 3 +- + drivers/gpu/drm/i915/i915_debugfs.c | 5 ++-- + drivers/gpu/drm/i915/i915_gpu_error.c | 5 ++-- + 5 files changed, 24 insertions(+), 22 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c +index 4ce8626b140e..e4945ae23e63 100644 +--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c ++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c +@@ -948,6 +948,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, + struct intel_instdone *instdone) + { + struct drm_i915_private *i915 = engine->i915; ++ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + struct intel_uncore *uncore = engine->uncore; + u32 mmio_base = engine->mmio_base; + int slice; +@@ -965,7 +966,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, + + instdone->slice_common = + intel_uncore_read(uncore, GEN7_SC_INSTDONE); +- for_each_instdone_slice_subslice(i915, slice, subslice) { ++ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) { + instdone->sampler[slice][subslice] = + read_subslice_reg(engine, slice, subslice, + GEN7_SAMPLER_INSTDONE); +diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h +index a82cea95c2f2..15e02cb58a67 100644 +--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h ++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h +@@ -576,20 +576,18 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine) + return engine->flags & I915_ENGINE_IS_VIRTUAL; + } + +-#define instdone_slice_mask(dev_priv__) \ +- (IS_GEN(dev_priv__, 7) ? 
\ +- 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask) +- +-#define instdone_subslice_mask(dev_priv__) \ +- (IS_GEN(dev_priv__, 7) ? \ +- 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0]) +- +-#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ +- for ((slice__) = 0, (subslice__) = 0; \ +- (slice__) < I915_MAX_SLICES; \ +- (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \ +- (slice__) += ((subslice__) == 0)) \ +- for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \ +- (BIT(subslice__) & instdone_subslice_mask(dev_priv__))) +- ++#define instdone_has_slice(dev_priv___, sseu___, slice___) \ ++ ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___)) ++ ++#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \ ++ (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \ ++ intel_sseu_has_subslice(sseu__, 0, subslice__)) ++ ++#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \ ++ for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \ ++ (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \ ++ (slice_) += ((subslice_) == 0)) \ ++ for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \ ++ (instdone_has_subslice(dev_priv_, sseu_, slice_, \ ++ subslice_))) + #endif /* __INTEL_ENGINE_TYPES_H__ */ +diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c +index 05d042cdefe2..40f62f780be5 100644 +--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c ++++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c +@@ -53,6 +53,7 @@ static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) + static bool subunits_stuck(struct intel_engine_cs *engine) + { + struct drm_i915_private *dev_priv = engine->i915; ++ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + struct intel_instdone instdone; + struct intel_instdone *accu_instdone = &engine->hangcheck.instdone; + bool stuck; +@@ -71,7 +72,7 
@@ static bool subunits_stuck(struct intel_engine_cs *engine) + stuck &= instdone_unchanged(instdone.slice_common, + &accu_instdone->slice_common); + +- for_each_instdone_slice_subslice(dev_priv, slice, subslice) { ++ for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) { + stuck &= instdone_unchanged(instdone.sampler[slice][subslice], + &accu_instdone->sampler[slice][subslice]); + stuck &= instdone_unchanged(instdone.row[slice][subslice], +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index 28713c9c98ea..44797f8b7c50 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -996,6 +996,7 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, + struct seq_file *m, + struct intel_instdone *instdone) + { ++ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + int slice; + int subslice; + +@@ -1011,11 +1012,11 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, + if (INTEL_GEN(dev_priv) <= 6) + return; + +- for_each_instdone_slice_subslice(dev_priv, slice, subslice) ++ for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) + seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n", + slice, subslice, instdone->sampler[slice][subslice]); + +- for_each_instdone_slice_subslice(dev_priv, slice, subslice) ++ for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) + seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n", + slice, subslice, instdone->row[slice][subslice]); + } +diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c +index e284bd76fa86..4aff342b8944 100644 +--- a/drivers/gpu/drm/i915/i915_gpu_error.c ++++ b/drivers/gpu/drm/i915/i915_gpu_error.c +@@ -421,6 +421,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) + static void error_print_instdone(struct drm_i915_error_state_buf *m, + const struct drm_i915_error_engine *ee) + { ++ const struct 
sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu; + int slice; + int subslice; + +@@ -436,12 +437,12 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, + if (INTEL_GEN(m->i915) <= 6) + return; + +- for_each_instdone_slice_subslice(m->i915, slice, subslice) ++ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) + err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", + slice, subslice, + ee->instdone.sampler[slice][subslice]); + +- for_each_instdone_slice_subslice(m->i915, slice, subslice) ++ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) + err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", + slice, subslice, + ee->instdone.row[slice][subslice]); +-- +2.17.1 + diff --git a/patches/0021-ioctl-cleanup-ioctl-structure.acrn b/patches/0021-ioctl-cleanup-ioctl-structure.acrn new file mode 100644 index 0000000000..129521f6db --- /dev/null +++ b/patches/0021-ioctl-cleanup-ioctl-structure.acrn @@ -0,0 +1,82 @@ +From 5db3b04c25cce51af1ee3dbfb00c4b54579bc498 Mon Sep 17 00:00:00 2001 +From: Edwin Zhai +Date: Fri, 31 Aug 2018 10:58:57 +0800 +Subject: [PATCH 021/150] ioctl: cleanup ioctl structure + +vm_memseg/vm_memmap: remove unused fileds and define field size + +Change-Id: I9cb01cc6ea8eb97989e0b4b4ff6c55fa9b9822c8 +Tracked-On: 218445 +Signed-off-by: Edwin Zhai +Reviewed-on: +Reviewed-by: Chi, Mingqiang +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + include/linux/vhm/vhm_ioctl_defs.h | 30 +++++++++++++----------------- + 1 file changed, 13 insertions(+), 17 deletions(-) + +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index e157d6a86a66..60bfb299e040 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -60,7 +60,7 @@ + #define IC_GET_API_VERSION _IC_ID(IC_ID, IC_ID_VM_BASE + 0x00) + #define IC_CREATE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x01) + #define IC_DESTROY_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x02) +-#define IC_START_VM _IC_ID(IC_ID, 
IC_ID_VM_BASE + 0x03) ++#define IC_START_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03) + #define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) + #define IC_CREATE_VCPU _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) + +@@ -95,35 +95,31 @@ + + #define SPECNAMELEN 63 + +-enum { +- VM_SYSMEM, +- VM_BOOTROM, +- VM_FRAMEBUFFER, +- VM_MMIO, +-}; ++#define VM_SYSMEM 0 ++#define VM_MMIO 1 + + struct vm_memseg { +- int segid; +- size_t len; ++ uint32_t segid; ++ uint32_t reserved; ++ uint64_t len; ++ uint64_t gpa; + char name[SPECNAMELEN + 1]; +- unsigned long gpa; + }; + + struct vm_memmap { +- int segid; /* memory segment */ ++ uint32_t segid; /* memory segment */ ++ uint32_t reserved; + union { + struct { + uint64_t gpa; +- uint64_t segoff; /* offset into memory segment */ +- size_t len; /* mmap length */ +- int prot; /* RWX */ +- int flags; ++ uint64_t len; /* mmap length */ ++ uint32_t prot; /* RWX */ + } mem; + struct { + uint64_t gpa; + uint64_t hpa; +- size_t len; +- int prot; ++ uint64_t len; ++ uint32_t prot; + } mmio; + }; + }; +-- +2.17.1 + diff --git a/patches/0021-net-stmmac-make-stmmac_interrupt-function-mor.connectivity b/patches/0021-net-stmmac-make-stmmac_interrupt-function-mor.connectivity new file mode 100644 index 0000000000..2c04be73e2 --- /dev/null +++ b/patches/0021-net-stmmac-make-stmmac_interrupt-function-mor.connectivity @@ -0,0 +1,110 @@ +From 341d41e501b0fa2f7d18c0f81385a91e5156260e Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Fri, 26 Jul 2019 07:08:30 +0800 +Subject: [PATCH 021/108] net: stmmac: make stmmac_interrupt() function more + friendly to MSI + +Refactor stmmac_interrupt() by introducing stmmac_common_interrupt() +so that we prepare the ISR operation to be friendly to MSI later. 
+ +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 64 +++++++++++-------- + 1 file changed, 36 insertions(+), 28 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index b2f7e1c33c34..a49b5ed3051b 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -3851,21 +3851,8 @@ static int stmmac_set_features(struct net_device *netdev, + return 0; + } + +-/** +- * stmmac_interrupt - main ISR +- * @irq: interrupt number. +- * @dev_id: to pass the net device pointer. +- * Description: this is the main driver interrupt service routine. +- * It can call: +- * o DMA service routine (to manage incoming frame reception and transmission +- * status) +- * o Core interrupts to manage: remote wake-up, management counter, LPI +- * interrupts. +- */ +-static irqreturn_t stmmac_interrupt(int irq, void *dev_id) ++static void stmmac_common_interrupt(struct stmmac_priv *priv) + { +- struct net_device *dev = (struct net_device *)dev_id; +- struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queues_count; +@@ -3878,18 +3865,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) + if (priv->irq_wake) + pm_wakeup_event(priv->device, 0); + +- if (unlikely(!dev)) { +- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); +- return IRQ_NONE; +- } +- +- /* Check if adapter is up */ +- if (test_bit(STMMAC_DOWN, &priv->state)) +- return IRQ_HANDLED; +- /* Check if a fatal error happened */ +- if (stmmac_safety_feat_interrupt(priv)) +- return IRQ_HANDLED; +- + /* To handle GMAC own interrupts */ + if ((priv->plat->has_gmac) || xmac) { + int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); +@@ -3920,11 +3895,44 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) + /* PCS link status */ + 
if (priv->hw->pcs) { + if (priv->xstats.pcs_link) +- netif_carrier_on(dev); ++ netif_carrier_on(priv->dev); + else +- netif_carrier_off(dev); ++ netif_carrier_off(priv->dev); + } + } ++} ++ ++/** ++ * stmmac_interrupt - main ISR ++ * @irq: interrupt number. ++ * @dev_id: to pass the net device pointer. ++ * Description: this is the main driver interrupt service routine. ++ * It can call: ++ * o DMA service routine (to manage incoming frame reception and transmission ++ * status) ++ * o Core interrupts to manage: remote wake-up, management counter, LPI ++ * interrupts. ++ */ ++static irqreturn_t stmmac_interrupt(int irq, void *dev_id) ++{ ++ struct net_device *dev = (struct net_device *)dev_id; ++ struct stmmac_priv *priv = netdev_priv(dev); ++ ++ if (unlikely(!dev)) { ++ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); ++ return IRQ_NONE; ++ } ++ ++ /* Check if adapter is up */ ++ if (test_bit(STMMAC_DOWN, &priv->state)) ++ return IRQ_HANDLED; ++ ++ /* Check if a fatal error happened */ ++ if (stmmac_safety_feat_interrupt(priv)) ++ return IRQ_HANDLED; ++ ++ /* To handle Common interrupts */ ++ stmmac_common_interrupt(priv); + + /* To handle DMA interrupts */ + stmmac_dma_interrupt(priv); +-- +2.17.1 + diff --git a/patches/0021-platform-x86-Use-srctree-instead-of-src.sep-socwatch b/patches/0021-platform-x86-Use-srctree-instead-of-src.sep-socwatch new file mode 100644 index 0000000000..7594668668 --- /dev/null +++ b/patches/0021-platform-x86-Use-srctree-instead-of-src.sep-socwatch @@ -0,0 +1,32 @@ +From d3808fa3f347cdebc37ea28e7941f3fa08978622 Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Tue, 4 Jun 2019 11:05:07 -0700 +Subject: [PATCH 21/27] platform/x86: Use $(srctree) instead of $(src) + +$(src) is no longer resolved when O=OUTPUT_DIR is specified +resulting in header files not being found. 
+Applying fix for socwatchhv driver + +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/socwatchhv/Makefile | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/platform/x86/socwatchhv/Makefile b/drivers/platform/x86/socwatchhv/Makefile +index c5ad7f109aac..7006c6a01a43 100644 +--- a/drivers/platform/x86/socwatchhv/Makefile ++++ b/drivers/platform/x86/socwatchhv/Makefile +@@ -10,8 +10,8 @@ DRIVER_NAME=${DRIVER_BASE}${DRIVER_MAJOR}_${DRIVER_MINOR} + + HYPERVISOR=2 # ACRN + +-EXTRA_CFLAGS += -Idrivers/ \ +- -I$(src)/inc/ \ ++EXTRA_CFLAGS += -I$(srctree)/drivers/ \ ++ -I$(srctree)/drivers/platform/x86/socwatchhv/inc/ \ + -DHYPERVISOR=$(HYPERVISOR) + + obj-$(CONFIG_INTEL_SOCWATCH_HV) += $(DRIVER_NAME).o +-- +2.17.1 + diff --git a/patches/0021-trusty-fix-incompatible-pointer-types.trusty b/patches/0021-trusty-fix-incompatible-pointer-types.trusty new file mode 100644 index 0000000000..d43d8ec5d2 --- /dev/null +++ b/patches/0021-trusty-fix-incompatible-pointer-types.trusty @@ -0,0 +1,33 @@ +From b2fdb7c35b0d0067ee50dfaf12f0e8d20e2f672e Mon Sep 17 00:00:00 2001 +From: Dwane Pottratz +Date: Thu, 17 Nov 2016 12:53:31 -0800 +Subject: [PATCH 21/63] trusty: fix incompatible-pointer-types + +incompatible-pointer-types found in function trusty_virtio_find_vps + +drivers/trusty/trusty-virtio.c:380:14: error: initialization from +incompatible pointer type [-Werror=incompatible-pointer-types] + .find_vqs = trusty_virtio_find_vqs, + +Change-Id: Idfd949f9ca20b46537db135621bfe17ad1178d36 +Signed-off-by: Dwane Pottratz +--- + drivers/trusty/trusty-virtio.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index a48f4f9884a8..eb4c0d31e249 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -347,7 +347,7 @@ static struct virtqueue *_find_vq(struct virtio_device *vdev, + static int trusty_virtio_find_vqs(struct virtio_device 
*vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], +- const char *names[]) ++ const char * const names[]) + { + uint i; + int ret; +-- +2.17.1 + diff --git a/patches/0021-tty-8250_lpss-check-for-NULL-function-ptr.lpss b/patches/0021-tty-8250_lpss-check-for-NULL-function-ptr.lpss new file mode 100644 index 0000000000..eeb84c777e --- /dev/null +++ b/patches/0021-tty-8250_lpss-check-for-NULL-function-ptr.lpss @@ -0,0 +1,35 @@ +From 985ed7a3e02eb6fc76db79f5eff0ab1310224db1 Mon Sep 17 00:00:00 2001 +From: Raymond Tan +Date: Fri, 6 Sep 2019 01:54:28 +0800 +Subject: [PATCH 21/40] tty: 8250_lpss: check for NULL function ptr + +Check for setup() func availability before calling it, similar handling with exit() + +Signed-off-by: Raymond Tan +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_lpss.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c +index 5f72ef3ea574..b160ec44893e 100644 +--- a/drivers/tty/serial/8250/8250_lpss.c ++++ b/drivers/tty/serial/8250/8250_lpss.c +@@ -315,9 +315,11 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) + if (!uart.port.membase) + return -ENOMEM; + +- ret = lpss->board->setup(lpss, &uart.port); +- if (ret) +- return ret; ++ if (lpss->board->setup) { ++ ret = lpss->board->setup(lpss, &uart.port); ++ if (ret) ++ return ret; ++ } + + dw8250_setup_port(&uart.port); + +-- +2.17.1 + diff --git a/patches/0022-ASoC-Intel-Skylake-Remove-skl_dsp_loader_ops.audio b/patches/0022-ASoC-Intel-Skylake-Remove-skl_dsp_loader_ops.audio new file mode 100644 index 0000000000..8f331540a3 --- /dev/null +++ b/patches/0022-ASoC-Intel-Skylake-Remove-skl_dsp_loader_ops.audio @@ -0,0 +1,295 @@ +From 9bcbef55fabb1fb7268a7dc3275adb1d9f07dd21 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Sun, 11 Aug 2019 15:39:59 +0200 +Subject: [PATCH 022/193] ASoC: Intel: Skylake: Remove 
skl_dsp_loader_ops + +With all fields deprecated, yet another struct can be removed. Let's do +so and don't forget about any functions or fields tied to its existence. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/common/sst-dsp-priv.h | 1 - + sound/soc/intel/skylake/bxt-sst.c | 5 ++-- + sound/soc/intel/skylake/cnl-sst-dsp.h | 3 +- + sound/soc/intel/skylake/cnl-sst.c | 5 ++-- + sound/soc/intel/skylake/skl-messages.c | 40 +------------------------ + sound/soc/intel/skylake/skl-sst-dsp.h | 24 ++------------- + sound/soc/intel/skylake/skl-sst-utils.c | 3 +- + sound/soc/intel/skylake/skl-sst.c | 5 ++-- + sound/soc/intel/skylake/skl.h | 2 -- + 9 files changed, 12 insertions(+), 76 deletions(-) + +diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h +index 8a7009937d59..0fe9bebcfb38 100644 +--- a/sound/soc/intel/common/sst-dsp-priv.h ++++ b/sound/soc/intel/common/sst-dsp-priv.h +@@ -306,7 +306,6 @@ struct sst_dsp { + const char *fw_name; + + /* To allocate CL dma buffers */ +- struct skl_dsp_loader_ops dsp_ops; + struct skl_dsp_fw_ops fw_ops; + int sst_state; + struct skl_cl_dev cl_dev; +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 3b9fe6cf5687..250cf4612713 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -546,14 +546,13 @@ static struct sst_pdata skl_dev = { + }; + + int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dsp_loader_ops dsp_ops, +- struct skl_dev **dsp) ++ const char *fw_name, struct skl_dev **dsp) + { + struct skl_dev *skl; + struct sst_dsp *sst; + int ret; + +- ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev); ++ ret = skl_sst_ctx_init(dev, irq, fw_name, dsp, &skl_dev); + if (ret < 0) { + dev_err(dev, "%s: no device\n", __func__); + return ret; +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.h b/sound/soc/intel/skylake/cnl-sst-dsp.h +index 
30b586acc858..70da4f312f53 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.h ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.h +@@ -94,7 +94,6 @@ void cnl_ipc_op_int_disable(struct sst_dsp *ctx); + bool cnl_ipc_int_status(struct sst_dsp *ctx); + + int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dsp_loader_ops dsp_ops, +- struct skl_dev **dsp); ++ const char *fw_name, struct skl_dev **dsp); + + #endif /*__CNL_SST_DSP_H__*/ +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 2bf61b3fdc8c..0718018cad8d 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -422,14 +422,13 @@ static struct sst_pdata cnl_dev = { + }; + + int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dsp_loader_ops dsp_ops, +- struct skl_dev **dsp) ++ const char *fw_name, struct skl_dev **dsp) + { + struct skl_dev *cnl; + struct sst_dsp *sst; + int ret; + +- ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &cnl_dev); ++ ret = skl_sst_ctx_init(dev, irq, fw_name, dsp, &cnl_dev); + if (ret < 0) { + dev_err(dev, "%s: no device\n", __func__); + return ret; +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index 527f464a6cdd..c52d0d11767d 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -140,72 +140,37 @@ int skl_dsp_cleanup(struct device *dev, + return 0; + } + +-static struct skl_dsp_loader_ops skl_get_loader_ops(void) +-{ +- struct skl_dsp_loader_ops loader_ops; +- +- memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops)); +- +- loader_ops.alloc_dma_buf = skl_alloc_dma_buf; +- loader_ops.free_dma_buf = skl_free_dma_buf; +- +- return loader_ops; +-}; +- +-static struct skl_dsp_loader_ops bxt_get_loader_ops(void) +-{ +- struct skl_dsp_loader_ops loader_ops; +- +- memset(&loader_ops, 0, sizeof(loader_ops)); 
+- +- loader_ops.alloc_dma_buf = skl_alloc_dma_buf; +- loader_ops.free_dma_buf = skl_free_dma_buf; +- loader_ops.prepare = skl_dsp_prepare; +- loader_ops.trigger = skl_dsp_trigger; +- loader_ops.cleanup = skl_dsp_cleanup; +- +- return loader_ops; +-}; +- + static const struct skl_dsp_ops dsp_ops[] = { + { + .id = 0x9d70, +- .loader_ops = skl_get_loader_ops, + .init = skl_sst_dsp_init, + }, + { + .id = 0x9d71, +- .loader_ops = skl_get_loader_ops, + .init = skl_sst_dsp_init, + }, + { + .id = 0x5a98, +- .loader_ops = bxt_get_loader_ops, + .init = bxt_sst_dsp_init, + }, + { + .id = 0x3198, +- .loader_ops = bxt_get_loader_ops, + .init = bxt_sst_dsp_init, + }, + { + .id = 0x9dc8, +- .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, + }, + { + .id = 0xa348, +- .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, + }, + { + .id = 0x02c8, +- .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, + }, + { + .id = 0x06c8, +- .loader_ops = bxt_get_loader_ops, + .init = cnl_sst_dsp_init, + }, + }; +@@ -226,7 +191,6 @@ int skl_init_dsp(struct skl_dev *skl) + { + void __iomem *mmio_base; + struct hdac_bus *bus = skl_to_bus(skl); +- struct skl_dsp_loader_ops loader_ops; + int irq = bus->irq; + const struct skl_dsp_ops *ops; + int ret; +@@ -248,10 +212,8 @@ int skl_init_dsp(struct skl_dev *skl) + goto unmap_mmio; + } + +- loader_ops = ops->loader_ops(); + ret = ops->init(bus->dev, mmio_base, irq, +- skl->fw_name, loader_ops, +- &skl); ++ skl->fw_name, &skl); + + if (ret < 0) + goto unmap_mmio; +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index 1523ac7115b7..eaf87dddbb17 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -150,22 +150,6 @@ struct skl_dsp_fw_ops { + + }; + +-struct skl_dsp_loader_ops { +- int stream_tag; +- +- int (*alloc_dma_buf)(struct device *dev, +- struct snd_dma_buffer *dmab, size_t size); +- int (*free_dma_buf)(struct device *dev, +- 
struct snd_dma_buffer *dmab); +- int (*prepare)(struct device *dev, unsigned int format, +- unsigned int byte_size, +- struct snd_dma_buffer *bufp); +- int (*trigger)(struct device *dev, bool start, int stream_tag); +- +- int (*cleanup)(struct device *dev, struct snd_dma_buffer *dmab, +- int stream_tag); +-}; +- + struct adsp_module_config { + u32 par[4]; /* module parameters */ + u32 is_bytes; /* actual size of instance .bss (bytes) */ +@@ -240,11 +224,9 @@ int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id); + + int skl_dsp_boot(struct sst_dsp *ctx); + int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dsp_loader_ops dsp_ops, +- struct skl_dev **dsp); ++ const char *fw_name, struct skl_dev **dsp); + int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dsp_loader_ops dsp_ops, +- struct skl_dev **dsp); ++ const char *fw_name, struct skl_dev **dsp); + int bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, + int lib_count); + +@@ -263,7 +245,7 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw); + void skl_dsp_set_astate_cfg(struct skl_dev *skl, u32 cnt, void *data); + + int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, +- struct skl_dsp_loader_ops dsp_ops, struct skl_dev **dsp, ++ struct skl_dev **dsp, + struct sst_pdata *pdata); + int skl_prepare_lib_load(struct skl_dev *skl, struct skl_lib_info *linfo, + struct firmware *stripped_fw, +diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index 25114257b5b8..67ff31102d6e 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -395,7 +395,7 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw) + } + + int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, +- struct skl_dsp_loader_ops dsp_ops, struct skl_dev **dsp, ++ struct skl_dev 
**dsp, + struct sst_pdata *pdata) + { + struct skl_dev *skl = *dsp; +@@ -415,7 +415,6 @@ int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, + + sst = skl->dsp; + sst->fw_name = fw_name; +- sst->dsp_ops = dsp_ops; + init_waitqueue_head(&skl->mod_load_wait); + INIT_LIST_HEAD(&sst->module_list); + +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 8545ef58a1ec..7faf627e2f56 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -517,14 +517,13 @@ static struct sst_pdata skl_dev = { + }; + + int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dsp_loader_ops dsp_ops, +- struct skl_dev **dsp) ++ const char *fw_name, struct skl_dev **dsp) + { + struct skl_dev *skl; + struct sst_dsp *sst; + int ret; + +- ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev); ++ ret = skl_sst_ctx_init(dev, irq, fw_name, dsp, &skl_dev); + if (ret < 0) { + dev_err(dev, "%s: no device\n", __func__); + return ret; +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index 142bcdf89cbd..45e13240a989 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -154,10 +154,8 @@ struct skl_machine_pdata { + + struct skl_dsp_ops { + int id; +- struct skl_dsp_loader_ops (*loader_ops)(void); + int (*init)(struct device *dev, void __iomem *mmio_base, + int irq, const char *fw_name, +- struct skl_dsp_loader_ops loader_ops, + struct skl_dev **skl_sst); + }; + +-- +2.17.1 + diff --git a/patches/0022-Get-profiling-status-info.sep-socwatch b/patches/0022-Get-profiling-status-info.sep-socwatch new file mode 100644 index 0000000000..2f70e8ebdc --- /dev/null +++ b/patches/0022-Get-profiling-status-info.sep-socwatch @@ -0,0 +1,149 @@ +From dfff98b90e04abaa8332769d3188021258ba1139 Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Wed, 30 Jan 2019 13:58:55 -0800 +Subject: [PATCH 22/27] Get 
profiling status info + +In remote or acrn based collections, it is necessary to know if all the samples generated is copied to the host. +So, added an IOCTL to collect important statistical data like samples logged, dropped etc + +Tracked-On: PKT-1717 +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/sepdk/inc/lwpmudrv.h | 6 ++ + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 79 ++++++++++++++++++++++- + 2 files changed, 84 insertions(+), 1 deletion(-) + mode change 100755 => 100644 drivers/platform/x86/sepdk/sep/lwpmudrv.c + +diff --git a/drivers/platform/x86/sepdk/inc/lwpmudrv.h b/drivers/platform/x86/sepdk/inc/lwpmudrv.h +index ae8a3aee26a1..d682ab632127 100644 +--- a/drivers/platform/x86/sepdk/inc/lwpmudrv.h ++++ b/drivers/platform/x86/sepdk/inc/lwpmudrv.h +@@ -545,6 +545,12 @@ struct profiling_pcpuid { + uint32_t ecx; + uint32_t edx; + }; ++ ++struct profiling_status { ++ uint32_t samples_logged; ++ uint32_t samples_dropped; ++}; ++ + #endif + + #endif +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +old mode 100755 +new mode 100644 +index 8ae975b9a833..742a1ea87cb5 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -4235,6 +4235,7 @@ static OS_STATUS lwpmudrv_Prepare_Stop(void) + #if !defined(DRV_SEP_ACRN_ON) + CONTROL_Invoke_Parallel(lwpmudrv_Pause_Op, NULL); + #else ++ + control = (struct profiling_control *)CONTROL_Allocate_Memory( + sizeof(struct profiling_control)); + if (control == NULL) { +@@ -5953,6 +5954,77 @@ static OS_STATUS lwpmudrv_Control_Driver_Log(IOCTL_ARGS args) + return OS_SUCCESS; + } + ++/* ------------------------------------------------------------------------- */ ++/*! 
++ * @fn U64 lwpmudrv_Get_Sample_Drop_Info ++ * ++ * @brief Get the information of dropped samples ++ * ++ * @param arg Pointer to the IOCTL structure ++ * ++ * @return status ++ * ++ * Special Notes: ++ * ++ */ ++static OS_STATUS lwpmudrv_Get_Sample_Drop_Info(IOCTL_ARGS args) ++{ ++ U32 size; ++ static SAMPLE_DROP_INFO_NODE req_sample_drop_info; ++#if defined(DRV_SEP_ACRN_ON) ++ U32 i; ++ struct profiling_status *stats = NULL; ++#endif ++ size = 0; ++ if (args->buf_drv_to_usr == NULL) { ++ return OS_INVALID; ++ } ++ if (args->len_drv_to_usr != sizeof(SAMPLE_DROP_INFO_NODE)) { ++ return OS_INVALID; ++ } ++ ++ memset((char *)&req_sample_drop_info, 0, sizeof(SAMPLE_DROP_INFO_NODE)); ++#if defined(DRV_SEP_ACRN_ON) ++ stats = (struct profiling_status *)CONTROL_Allocate_Memory( ++ GLOBAL_STATE_num_cpus(driver_state)*sizeof(struct profiling_status)); ++ ++ if (stats == NULL) { ++ SEP_PRINT_ERROR("lwpmudrv_Start: Unable to allocate memory\n"); ++ return OS_NO_MEM; ++ } ++ memset(stats, 0, GLOBAL_STATE_num_cpus(driver_state)* ++ sizeof(struct profiling_status)); ++ ++ acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_STATUS, ++ virt_to_phys(stats)); ++ ++ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state) ++ && size < MAX_SAMPLE_DROP_NODES; i++) { ++ if (stats[i].samples_logged || stats[i].samples_dropped) { ++ SAMPLE_DROP_INFO_drop_info( ++ &req_sample_drop_info, size).os_id = OS_ID_ACORN; ++ SAMPLE_DROP_INFO_drop_info( ++ &req_sample_drop_info, size).cpu_id = i; ++ SAMPLE_DROP_INFO_drop_info( ++ &req_sample_drop_info, size).sampled = stats[i].samples_logged; ++ SAMPLE_DROP_INFO_drop_info( ++ &req_sample_drop_info, size).dropped = stats[i].samples_dropped; ++ size++; ++ } ++ } ++ ++ stats = CONTROL_Free_Memory(stats); ++#endif ++ SAMPLE_DROP_INFO_size(&req_sample_drop_info) = size; ++ ++ if (copy_to_user((void __user *)args->buf_drv_to_usr, ++ &req_sample_drop_info, args->len_drv_to_usr)) { ++ return OS_FAULT; ++ } ++ ++ return OS_SUCCESS; ++} ++ + /* 
------------------------------------------------------------------------- */ + /*! + * @fn U64 lwpmudrv_Get_Drv_Setup_Info +@@ -6684,7 +6756,7 @@ static IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_INODE struct file *filp, + break; + + case DRV_OPERATION_SET_OSID: +- SEP_DRV_LOG_TRACE("LWPMUDRV_IOCTL_SET_OSID\n"); ++ SEP_DRV_LOG_TRACE("DRV_OPERATION_IOCTL_SET_OSID\n"); + status = lwpmudrv_Set_OSID(&local_args); + break; + +@@ -6797,6 +6869,11 @@ static IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_INODE struct file *filp, + status = lwpmudrv_Flush(); + break; + ++ case DRV_OPERATION_GET_SAMPLE_DROP_INFO: ++ SEP_PRINT_DEBUG("DRV_OPERATION_IOCTL_GET_SAMPLE_DROP_INFO\n"); ++ status = lwpmudrv_Get_Sample_Drop_Info(&local_args); ++ break; ++ + case DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER: + SEP_DRV_LOG_TRACE( + "DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER."); +-- +2.17.1 + diff --git a/patches/0022-Shared_buf-add-shared-buffer.acrn b/patches/0022-Shared_buf-add-shared-buffer.acrn new file mode 100644 index 0000000000..9a6a804c27 --- /dev/null +++ b/patches/0022-Shared_buf-add-shared-buffer.acrn @@ -0,0 +1,385 @@ +From 38a836c862b10a09bf1c137ede1c84a43538c195 Mon Sep 17 00:00:00 2001 +From: "Li, Fei1" +Date: Fri, 31 Aug 2018 10:58:57 +0800 +Subject: [PATCH 022/150] Shared_buf: add shared buffer + +Added a ring buffer shared between ACRN hypervisor and service OS. 
+ +Change-Id: Ib82f50d842592099629e0f764e0576306252c51b +Tracked-On: Tracked-On: https://rtc.intel.com/ccm0001001/resource/itemName/com.ibm.team.workitem.WorkItem/216912 +Signed-off-by: Li, Fei1 +--- + drivers/Kconfig | 1 + + drivers/Makefile | 1 + + drivers/acrn/Kconfig | 5 ++ + drivers/acrn/Makefile | 1 + + drivers/acrn/sbuf.c | 188 ++++++++++++++++++++++++++++++++++++++++++ + drivers/acrn/sbuf.h | 119 ++++++++++++++++++++++++++ + 6 files changed, 315 insertions(+) + create mode 100644 drivers/acrn/Kconfig + create mode 100644 drivers/acrn/Makefile + create mode 100644 drivers/acrn/sbuf.c + create mode 100644 drivers/acrn/sbuf.h + +diff --git a/drivers/Kconfig b/drivers/Kconfig +index 06c326db1799..fe126bf244d9 100644 +--- a/drivers/Kconfig ++++ b/drivers/Kconfig +@@ -230,4 +230,5 @@ source "drivers/counter/Kconfig" + + source "drivers/vbs/Kconfig" + ++source "drivers/acrn/Kconfig" + endmenu +diff --git a/drivers/Makefile b/drivers/Makefile +index dee6b447c1b5..a62dc9b8f4ca 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -183,6 +183,7 @@ obj-$(CONFIG_FSI) += fsi/ + obj-$(CONFIG_TEE) += tee/ + obj-$(CONFIG_MULTIPLEXER) += mux/ + obj-$(CONFIG_ACRN_GUEST) += vhm/ ++obj-$(CONFIG_ACRN_GUEST) += acrn/ + obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/ + obj-$(CONFIG_SIOX) += siox/ + obj-$(CONFIG_GNSS) += gnss/ +diff --git a/drivers/acrn/Kconfig b/drivers/acrn/Kconfig +new file mode 100644 +index 000000000000..f25f0ae77727 +--- /dev/null ++++ b/drivers/acrn/Kconfig +@@ -0,0 +1,5 @@ ++config ACRN_SHARED_BUFFER ++ bool "Intel ACRN SHARED BUFFER" ++ ---help--- ++ Ring buffer shared between ACRN Hypervisor and its SOS. ++ Help ACRN performance profiling. 
+diff --git a/drivers/acrn/Makefile b/drivers/acrn/Makefile +new file mode 100644 +index 000000000000..bc475f8116e3 +--- /dev/null ++++ b/drivers/acrn/Makefile +@@ -0,0 +1 @@ ++obj-$(CONFIG_ACRN_SHARED_BUFFER) += sbuf.o +diff --git a/drivers/acrn/sbuf.c b/drivers/acrn/sbuf.c +new file mode 100644 +index 000000000000..dcf203222c5b +--- /dev/null ++++ b/drivers/acrn/sbuf.c +@@ -0,0 +1,188 @@ ++/* ++ * shared buffer ++ * ++ * This file is provided under a dual BSD/GPLv2 license.  When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: Li Fei ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * Li Fei ++ * ++ */ ++ ++#define pr_fmt(fmt) "SBuf: " fmt ++ ++#include ++#include ++#include "sbuf.h" ++ ++static inline bool sbuf_is_empty(shared_buf_t *sbuf) ++{ ++ return (sbuf->head == sbuf->tail); ++} ++ ++static inline uint32_t sbuf_next_ptr(uint32_t pos, ++ uint32_t span, uint32_t scope) ++{ ++ pos += span; ++ pos = (pos >= scope) ? 
(pos - scope) : pos; ++ return pos; ++} ++ ++static inline uint32_t sbuf_calculate_allocate_size(uint32_t ele_num, ++ uint32_t ele_size) ++{ ++ uint64_t sbuf_allocate_size; ++ ++ sbuf_allocate_size = ele_num * ele_size; ++ sbuf_allocate_size += SBUF_HEAD_SIZE; ++ if (sbuf_allocate_size > SBUF_MAX_SIZE) { ++ pr_err("num=0x%x, size=0x%x exceed 0x%llx!\n", ++ ele_num, ele_size, SBUF_MAX_SIZE); ++ return 0; ++ } ++ ++ /* align to PAGE_SIZE */ ++ return (sbuf_allocate_size + PAGE_SIZE - 1) & PAGE_MASK; ++} ++ ++shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size) ++{ ++ shared_buf_t *sbuf; ++ struct page *page; ++ uint32_t sbuf_allocate_size; ++ ++ if (!ele_num || !ele_size) { ++ pr_err("invalid parameter %s!\n", __func__); ++ return NULL; ++ } ++ ++ sbuf_allocate_size = sbuf_calculate_allocate_size(ele_num, ele_size); ++ if (!sbuf_allocate_size) ++ return NULL; ++ ++ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, ++ get_order(sbuf_allocate_size)); ++ if (page == NULL) { ++ pr_err("failed to alloc pages!\n"); ++ return NULL; ++ } ++ ++ sbuf = phys_to_virt(page_to_phys(page)); ++ sbuf->ele_num = ele_num; ++ sbuf->ele_size = ele_size; ++ sbuf->size = ele_num * ele_size; ++ sbuf->magic = SBUF_MAGIC; ++ pr_info("ele_num=0x%x, ele_size=0x%x allocated!\n", ++ ele_num, ele_size); ++ return sbuf; ++} ++EXPORT_SYMBOL(sbuf_allocate); ++ ++void sbuf_free(shared_buf_t *sbuf) ++{ ++ uint32_t sbuf_allocate_size; ++ ++ if ((sbuf == NULL) || sbuf->magic != SBUF_MAGIC) { ++ pr_err("invalid parameter %s\n", __func__); ++ return; ++ } ++ ++ sbuf_allocate_size = sbuf_calculate_allocate_size(sbuf->ele_num, ++ sbuf->ele_size); ++ if (!sbuf_allocate_size) ++ return; ++ ++ sbuf->magic = 0; ++ __free_pages((struct page *)virt_to_page(sbuf), ++ get_order(sbuf_allocate_size)); ++} ++EXPORT_SYMBOL(sbuf_free); ++ ++int sbuf_get(shared_buf_t *sbuf, uint8_t *data) ++{ ++ const void *from; ++ ++ if ((sbuf == NULL) || (data == NULL)) ++ return -EINVAL; ++ ++ if (sbuf_is_empty(sbuf)) { ++ /* no 
data available */ ++ return 0; ++ } ++ ++ from = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->head; ++ ++ memcpy(data, from, sbuf->ele_size); ++ ++ sbuf->head = sbuf_next_ptr(sbuf->head, sbuf->ele_size, sbuf->size); ++ ++ return sbuf->ele_size; ++} ++EXPORT_SYMBOL(sbuf_get); ++ ++shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, ++ uint64_t paddr) ++{ ++ shared_buf_t *sbuf; ++ ++ if (!ele_num || !ele_size || !paddr) ++ return NULL; ++ ++ sbuf = (shared_buf_t *)phys_to_virt(paddr); ++ BUG_ON(!virt_addr_valid(sbuf)); ++ ++ if ((sbuf->magic == SBUF_MAGIC) && ++ (sbuf->ele_num == ele_num) && ++ (sbuf->ele_size == ele_size)) { ++ pr_info("construct sbuf at 0x%llx.\n", paddr); ++ /* return sbuf for dump */ ++ return sbuf; ++ } ++ ++ return NULL; ++} ++EXPORT_SYMBOL(sbuf_construct); +diff --git a/drivers/acrn/sbuf.h b/drivers/acrn/sbuf.h +new file mode 100644 +index 000000000000..7f3694920232 +--- /dev/null ++++ b/drivers/acrn/sbuf.h +@@ -0,0 +1,119 @@ ++/* ++ * shared buffer ++ * ++ * This file is provided under a dual BSD/GPLv2 license.  When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: Li Fei ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * ++ * Li Fei ++ * ++ */ ++ ++#ifndef SHARED_BUF_H ++#define SHARED_BUF_H ++ ++#include ++ ++ ++#define SBUF_MAGIC 0x5aa57aa71aa13aa3 ++#define SBUF_MAX_SIZE (1ULL << 22) ++#define SBUF_HEAD_SIZE 64 ++ ++/* sbuf flags */ ++#define OVERRUN_CNT_EN (1ULL << 0) /* whether overrun counting is enabled */ ++#define OVERWRITE_EN (1ULL << 1) /* whether overwrite is enabled */ ++ ++/** ++ * (sbuf) head + buf (store (ele_num - 1) elements at most) ++ * buffer empty: tail == head ++ * buffer full: (tail + ele_size) % size == head ++ * ++ * Base of memory for elements ++ * | ++ * | ++ * --------------------------------------------------------------------------------------- ++ * | shared_buf_t | raw data (ele_size)| raw date (ele_size) | ... | raw data (ele_size) | ++ * --------------------------------------------------------------------------------------- ++ * | ++ * | ++ * shared_buf_t *buf ++ */ ++ ++/* Make sure sizeof(shared_buf_t) == SBUF_HEAD_SIZE */ ++typedef struct shared_buf { ++ uint64_t magic; ++ uint32_t ele_num; /* number of elements */ ++ uint32_t ele_size; /* sizeof of elements */ ++ uint32_t head; /* offset from base, to read */ ++ uint32_t tail; /* offset from base, to write */ ++ uint64_t flags; ++ uint32_t overrun_cnt; /* count of overrun */ ++ uint32_t size; /* ele_num * ele_size */ ++ uint32_t padding[6]; ++} ____cacheline_aligned shared_buf_t; ++ ++static inline void sbuf_clear_flags(shared_buf_t *sbuf, uint64_t flags) ++{ ++ sbuf->flags &= ~flags; ++} ++ ++static inline void sbuf_set_flags(shared_buf_t *sbuf, uint64_t flags) ++{ ++ sbuf->flags = flags; ++} ++ ++static inline void sbuf_add_flags(shared_buf_t *sbuf, uint64_t flags) ++{ ++ sbuf->flags |= flags; ++} ++ ++shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size); ++void sbuf_free(shared_buf_t *sbuf); ++int sbuf_get(shared_buf_t *sbuf, uint8_t *data); ++shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, uint64_t gpa); ++ ++#endif /* SHARED_BUF_H */ +-- +2.17.1 + diff 
--git a/patches/0022-drm-i915-Add-new-function-to-copy-subslices-for-a-slic.drm b/patches/0022-drm-i915-Add-new-function-to-copy-subslices-for-a-slic.drm new file mode 100644 index 0000000000..21a530b2d1 --- /dev/null +++ b/patches/0022-drm-i915-Add-new-function-to-copy-subslices-for-a-slic.drm @@ -0,0 +1,71 @@ +From 881d80eaa81f3e24b1c57b6022c3b3824e3779ff Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:03:06 -0700 +Subject: [PATCH 022/690] drm/i915: Add new function to copy subslices for a + slice + +Add a new function to copy subslices for a specified slice +between intel_sseu structures for the purpose of determining +power-gate status. Note that currently ss_stride has a max +of 1. + +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-11-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/i915_debugfs.c | 17 ++++++++++++++--- + 1 file changed, 14 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index 44797f8b7c50..6e8b40299939 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -3726,6 +3726,15 @@ i915_cache_sharing_set(void *data, u64 val) + return 0; + } + ++static void ++intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice, ++ u8 *to_mask) ++{ ++ int offset = slice * sseu->ss_stride; ++ ++ memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride); ++} ++ + DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, + i915_cache_sharing_get, i915_cache_sharing_set, + "%llu\n"); +@@ -3799,7 +3808,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, + continue; + + sseu->slice_mask |= BIT(s); +- sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; ++ intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask); + + for (ss = 0; ss < info->sseu.max_subslices; ss++) 
{ + unsigned int eu_cnt; +@@ -3850,7 +3859,8 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, + sseu->slice_mask |= BIT(s); + + if (IS_GEN9_BC(dev_priv)) +- sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; ++ intel_sseu_copy_subslices(&info->sseu, s, ++ sseu->subslice_mask); + + for (ss = 0; ss < info->sseu.max_subslices; ss++) { + unsigned int eu_cnt; +@@ -3886,7 +3896,8 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, + if (sseu->slice_mask) { + sseu->eu_per_subslice = info->sseu.eu_per_subslice; + for (s = 0; s < fls(sseu->slice_mask); s++) +- sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; ++ intel_sseu_copy_subslices(&info->sseu, s, ++ sseu->subslice_mask); + sseu->eu_total = sseu->eu_per_subslice * + intel_sseu_subslice_total(sseu); + +-- +2.17.1 + diff --git a/patches/0022-mei-spd-storage-proxy-driver.security b/patches/0022-mei-spd-storage-proxy-driver.security new file mode 100644 index 0000000000..b8d6b177b0 --- /dev/null +++ b/patches/0022-mei-spd-storage-proxy-driver.security @@ -0,0 +1,1422 @@ +From 19722a7943124acab75d444caf735419693def10 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Tue, 23 Dec 2014 17:18:37 +0200 +Subject: [PATCH 22/65] mei: spd: storage proxy driver + +Host storage proxy driver enables ME FW to store its data on a storage +devices such as eMMC or UFS via host interface on a dedicated partition. +This patch implements storage over an eMMC GPP partition and UFS LUN. +A GPP partition is required for pre manufacturing operation since RPMB +partition key can be written only once. + +V9: 1. Add SPDX Identifiers +V10: 1. 
Fix Kdoc + +Change-Id: I524c237d240d93accc9ce071e92744191f23ce87 +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/Kconfig | 1 + + drivers/misc/mei/Makefile | 1 + + drivers/misc/mei/spd/Kconfig | 13 + + drivers/misc/mei/spd/Makefile | 12 + + drivers/misc/mei/spd/cmd.c | 483 +++++++++++++++++++++++++++++++++ + drivers/misc/mei/spd/cmd.h | 218 +++++++++++++++ + drivers/misc/mei/spd/debugfs.c | 79 ++++++ + drivers/misc/mei/spd/gpp.c | 299 ++++++++++++++++++++ + drivers/misc/mei/spd/main.c | 118 ++++++++ + drivers/misc/mei/spd/spd.h | 93 +++++++ + 10 files changed, 1317 insertions(+) + create mode 100644 drivers/misc/mei/spd/Kconfig + create mode 100644 drivers/misc/mei/spd/Makefile + create mode 100644 drivers/misc/mei/spd/cmd.c + create mode 100644 drivers/misc/mei/spd/cmd.h + create mode 100644 drivers/misc/mei/spd/debugfs.c + create mode 100644 drivers/misc/mei/spd/gpp.c + create mode 100644 drivers/misc/mei/spd/main.c + create mode 100644 drivers/misc/mei/spd/spd.h + +diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig +index 9d7b3719bfa0..241ab7acd742 100644 +--- a/drivers/misc/mei/Kconfig ++++ b/drivers/misc/mei/Kconfig +@@ -47,3 +47,4 @@ config INTEL_MEI_TXE + Intel Bay Trail + + source "drivers/misc/mei/hdcp/Kconfig" ++source "drivers/misc/mei/spd/Kconfig" +diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile +index f1c76f7ee804..f4721e8116e7 100644 +--- a/drivers/misc/mei/Makefile ++++ b/drivers/misc/mei/Makefile +@@ -26,3 +26,4 @@ mei-$(CONFIG_EVENT_TRACING) += mei-trace.o + CFLAGS_mei-trace.o = -I$(src) + + obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/ ++obj-$(CONFIG_INTEL_MEI_SPD) += spd/ +diff --git a/drivers/misc/mei/spd/Kconfig b/drivers/misc/mei/spd/Kconfig +new file mode 100644 +index 000000000000..17aa461033c8 +--- /dev/null ++++ b/drivers/misc/mei/spd/Kconfig +@@ -0,0 +1,13 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Storage proxy device configuration ++# ++config INTEL_MEI_SPD ++ tristate 
"Intel MEI Host Storage Proxy Driver" ++ depends on INTEL_MEI && BLOCK ++ help ++ A driver for the host storage proxy ME client ++ The driver enables ME FW to store data on a storage devices ++ that are accessible only from the host. ++ ++ To compile this driver as a module, choose M here. +diff --git a/drivers/misc/mei/spd/Makefile b/drivers/misc/mei/spd/Makefile +new file mode 100644 +index 000000000000..353d284eaf01 +--- /dev/null ++++ b/drivers/misc/mei/spd/Makefile +@@ -0,0 +1,12 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Makefile for the Storage Proxy device driver. ++# ++ ++obj-$(CONFIG_INTEL_MEI_SPD) += mei_spd.o ++mei_spd-objs := main.o ++mei_spd-objs += cmd.o ++mei_spd-objs += gpp.o ++mei_spd-$(CONFIG_DEBUG_FS) += debugfs.o ++ ++ccflags-y += -D__CHECK_ENDIAN__ +diff --git a/drivers/misc/mei/spd/cmd.c b/drivers/misc/mei/spd/cmd.c +new file mode 100644 +index 000000000000..931d99f99c7e +--- /dev/null ++++ b/drivers/misc/mei/spd/cmd.c +@@ -0,0 +1,483 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2015 - 2019 Intel Corporation. 
++ */ ++#include ++#include ++#include ++ ++#include "cmd.h" ++#include "spd.h" ++ ++#define spd_cmd_size(_cmd) \ ++ (sizeof(struct spd_cmd_hdr) + \ ++ sizeof(struct spd_cmd_##_cmd)) ++#define to_spd_hdr(_buf) (struct spd_cmd_hdr *)(_buf) ++#define to_spd_cmd(_cmd, _buf) \ ++ (struct spd_cmd_##_cmd *)((_buf) + sizeof(struct spd_cmd_hdr)) ++ ++const char *spd_cmd_str(enum spd_cmd_type cmd) ++{ ++#define __SPD_CMD(_cmd) SPD_##_cmd##_CMD ++#define SPD_CMD(cmd) case __SPD_CMD(cmd): return #cmd ++ switch (cmd) { ++ SPD_CMD(NONE); ++ SPD_CMD(START_STOP); ++ SPD_CMD(RPMB_WRITE); ++ SPD_CMD(RPMB_READ); ++ SPD_CMD(RPMB_GET_COUNTER); ++ SPD_CMD(GPP_WRITE); ++ SPD_CMD(GPP_READ); ++ SPD_CMD(TRIM); ++ SPD_CMD(INIT); ++ SPD_CMD(STORAGE_STATUS); ++ SPD_CMD(MAX); ++ default: ++ return "unknown"; ++ } ++#undef SPD_CMD ++#undef __SPD_CMD ++} ++ ++const char *mei_spd_dev_str(enum spd_storage_type type) ++{ ++#define SPD_TYPE(type) case SPD_TYPE_##type: return #type ++ switch (type) { ++ SPD_TYPE(UNDEF); ++ SPD_TYPE(EMMC); ++ SPD_TYPE(UFS); ++ default: ++ return "unknown"; ++ } ++#undef SPD_TYPE ++} ++ ++const char *mei_spd_state_str(enum mei_spd_state state) ++{ ++#define SPD_STATE(state) case MEI_SPD_STATE_##state: return #state ++ switch (state) { ++ SPD_STATE(INIT); ++ SPD_STATE(INIT_WAIT); ++ SPD_STATE(INIT_DONE); ++ SPD_STATE(RUNNING); ++ SPD_STATE(STOPPING); ++ default: ++ return "unknown"; ++ } ++#undef SPD_STATE ++} ++ ++/** ++ * mei_spd_init_req() - send init request ++ * @spd: spd device ++ * Return: ++ * * 0 on success ++ * * -EPROTO if called in wrong state ++ * * < 0 on write error ++ */ ++int mei_spd_cmd_init_req(struct mei_spd *spd) ++{ ++ const int req_len = sizeof(struct spd_cmd_hdr); ++ struct spd_cmd_hdr *hdr; ++ u32 cmd_type = SPD_INIT_CMD; ++ ssize_t ret; ++ ++ spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", ++ cmd_type, spd_cmd_str(cmd_type), ++ spd->state, mei_spd_state_str(spd->state)); ++ ++ if (spd->state != MEI_SPD_STATE_INIT) ++ return -EPROTO; ++ ++ 
memset(spd->buf, 0, req_len); ++ hdr = to_spd_hdr(spd->buf); ++ ++ hdr->command_type = cmd_type; ++ hdr->is_response = 0; ++ hdr->len = req_len; ++ ++ spd->state = MEI_SPD_STATE_INIT_WAIT; ++ ret = mei_cldev_send(spd->cldev, spd->buf, req_len); ++ if (ret != req_len) { ++ spd_err(spd, "start send failed ret = %zd\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++/** ++ * mei_spd_cmd_init_rsp() - handle init response message ++ * @spd: spd device ++ * @cmd: received spd command ++ * @cmd_sz: received command size ++ * Return: ++ * * 0 on success ++ * * < 0 otherwise ++ */ ++static int mei_spd_cmd_init_rsp(struct mei_spd *spd, struct spd_cmd *cmd, ++ ssize_t cmd_sz) ++{ ++ int type; ++ int gpp_id; ++ int i; ++ ++ if (cmd_sz < spd_cmd_size(init_resp)) { ++ spd_err(spd, "Wrong init response size\n"); ++ return -EINVAL; ++ } ++ ++ if (spd->state != MEI_SPD_STATE_INIT_WAIT) ++ return -EPROTO; ++ ++ type = cmd->init_rsp.type; ++ gpp_id = cmd->init_rsp.gpp_partition_id; ++ ++ switch (type) { ++ case SPD_TYPE_EMMC: ++ if (gpp_id < 1 || gpp_id > 4) { ++ spd_err(spd, "%s unsupported gpp id %d\n", ++ mei_spd_dev_str(type), gpp_id); ++ return -EINVAL; ++ } ++ break; ++ ++ case SPD_TYPE_UFS: ++ if (gpp_id < 1 || gpp_id > 6) { ++ spd_err(spd, "%s unsupported gpp id %d\n", ++ mei_spd_dev_str(type), gpp_id); ++ return -EINVAL; ++ } ++ break; ++ ++ default: ++ spd_err(spd, "unsupported storage type %d\n", ++ cmd->init_rsp.type); ++ return -EINVAL; ++ } ++ ++ spd->dev_type = type; ++ spd->gpp_partition_id = gpp_id; ++ ++ if (cmd->init_rsp.serial_no_sz != 0) { ++ if (cmd->init_rsp.serial_no_sz != ++ cmd_sz - spd_cmd_size(init_resp)) { ++ spd_err(spd, "wrong serial no size %u?=%zu\n", ++ cmd->init_rsp.serial_no_sz, ++ cmd_sz - spd_cmd_size(init_resp)); ++ return -EMSGSIZE; ++ } ++ ++ if (cmd->init_rsp.serial_no_sz > 256) { ++ spd_err(spd, "serial no is too large %u\n", ++ cmd->init_rsp.serial_no_sz); ++ return -EMSGSIZE; ++ } ++ ++ spd->dev_id = kzalloc(cmd->init_rsp.serial_no_sz, 
GFP_KERNEL); ++ if (!spd->dev_id) ++ return -ENOMEM; ++ ++ spd->dev_id_sz = cmd->init_rsp.serial_no_sz; ++ if (type == SPD_TYPE_EMMC) { ++ /* FW have this in be32 format */ ++ __be32 *sno = (__be32 *)cmd->init_rsp.serial_no; ++ u32 *dev_id = (u32 *)spd->dev_id; ++ ++ for (i = 0; i < spd->dev_id_sz / sizeof(u32); i++) ++ dev_id[i] = be32_to_cpu(sno[i]); ++ } else { ++ memcpy(spd->dev_id, &cmd->init_rsp.serial_no, ++ cmd->init_rsp.serial_no_sz); ++ } ++ } ++ ++ spd->state = MEI_SPD_STATE_INIT_DONE; ++ ++ return 0; ++} ++ ++/** ++ * mei_spd_cmd_storage_status_req() - send storage status message ++ * @spd: spd device ++ * Return: ++ * * 0 on success ++ * * -EPROTO if called in wrong state ++ * * < 0 on write error ++ */ ++int mei_spd_cmd_storage_status_req(struct mei_spd *spd) ++{ ++ struct spd_cmd_hdr *hdr; ++ struct spd_cmd_storage_status_req *req; ++ const int req_len = spd_cmd_size(storage_status_req); ++ u32 cmd_type = SPD_STORAGE_STATUS_CMD; ++ ssize_t ret; ++ ++ spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", ++ cmd_type, spd_cmd_str(cmd_type), ++ spd->state, mei_spd_state_str(spd->state)); ++ ++ if (spd->state < MEI_SPD_STATE_INIT_DONE) ++ return -EPROTO; ++ ++ memset(spd->buf, 0, req_len); ++ hdr = to_spd_hdr(spd->buf); ++ ++ hdr->command_type = cmd_type; ++ hdr->is_response = 0; ++ hdr->len = req_len; ++ ++ req = to_spd_cmd(storage_status_req, spd->buf); ++ req->gpp_on = mei_spd_gpp_is_open(spd); ++ req->rpmb_on = 0; ++ ++ ret = mei_cldev_send(spd->cldev, spd->buf, req_len); ++ if (ret != req_len) { ++ spd_err(spd, "send storage status failed ret = %zd\n", ret); ++ return ret; ++ } ++ ++ if (req->gpp_on || req->rpmb_on) ++ spd->state = MEI_SPD_STATE_RUNNING; ++ else ++ spd->state = MEI_SPD_STATE_INIT_DONE; ++ ++ spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", ++ cmd_type, spd_cmd_str(cmd_type), ++ spd->state, mei_spd_state_str(spd->state)); ++ ++ return 0; ++} ++ ++static int mei_spd_cmd_gpp_write(struct mei_spd *spd, struct spd_cmd *cmd, ++ ssize_t out_buf_sz) 
++{ ++ size_t len = SPD_GPP_WRITE_DATA_LEN(*cmd); ++ int ret; ++ ++ if (out_buf_sz < spd_cmd_size(gpp_write_req)) { ++ spd_err(spd, "Wrong request size\n"); ++ return SPD_STATUS_INVALID_COMMAND; ++ } ++ ++ ret = mei_spd_gpp_write(spd, cmd->gpp_write_req.offset, ++ cmd->gpp_write_req.data, len); ++ if (ret) { ++ spd_err(spd, "Failed to write to gpp ret = %d\n", ret); ++ return SPD_STATUS_GENERAL_FAILURE; ++ } ++ ++ spd_dbg(spd, "wrote %zd bytes of data\n", len); ++ ++ cmd->header.len = spd_cmd_size(gpp_write_rsp); ++ ++ return SPD_STATUS_SUCCESS; ++} ++ ++static int mei_spd_cmd_gpp_read(struct mei_spd *spd, struct spd_cmd *cmd, ++ ssize_t out_buf_sz) ++{ ++ size_t len; ++ int ret; ++ ++ if (out_buf_sz < spd_cmd_size(gpp_read_req)) { ++ spd_err(spd, "Wrong request size\n"); ++ return SPD_STATUS_INVALID_COMMAND; ++ } ++ ++ len = cmd->gpp_read_req.size_to_read; ++ if (len > SPD_CLIENT_GPP_DATA_MAX_SIZE) { ++ spd_err(spd, "Block is to large to read\n"); ++ return SPD_STATUS_INVALID_COMMAND; ++ } ++ ++ ret = mei_spd_gpp_read(spd, cmd->gpp_read_req.offset, ++ cmd->gpp_read_resp.data, len); ++ ++ if (ret) { ++ spd_err(spd, "Failed to read from gpp ret = %d\n", ret); ++ return SPD_STATUS_GENERAL_FAILURE; ++ } ++ ++ spd_dbg(spd, "read %zd bytes of data\n", len); ++ ++ cmd->header.len = spd_cmd_size(gpp_read_rsp) + len; ++ ++ return SPD_STATUS_SUCCESS; ++} ++ ++static int mei_spd_cmd_response(struct mei_spd *spd, ssize_t out_buf_sz) ++{ ++ struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; ++ u32 spd_cmd; ++ int ret; ++ ++ spd_cmd = cmd->header.command_type; ++ ++ spd_dbg(spd, "rsp [%d] %s : state [%d] %s\n", ++ spd_cmd, spd_cmd_str(spd_cmd), ++ spd->state, mei_spd_state_str(spd->state)); ++ ++ switch (spd_cmd) { ++ case SPD_INIT_CMD: ++ ret = mei_spd_cmd_init_rsp(spd, cmd, out_buf_sz); ++ if (ret) ++ break; ++ mutex_unlock(&spd->lock); ++ mei_spd_gpp_init(spd); ++ mutex_lock(&spd->lock); ++ break; ++ default: ++ ret = -EINVAL; ++ spd_err(spd, "Wrong response command %d\n", 
spd_cmd); ++ break; ++ } ++ ++ return ret; ++} ++ ++/** ++ * mei_spd_cmd_request() - dispatch command requests from the SPD device ++ * @spd: spd device ++ * @out_buf_sz: buffer size ++ * Return: ++ * * 0 on success ++ * * < 0 otherwise ++ */ ++static int mei_spd_cmd_request(struct mei_spd *spd, ssize_t out_buf_sz) ++{ ++ struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; ++ ssize_t written; ++ u32 spd_cmd; ++ int ret; ++ ++ spd_cmd = cmd->header.command_type; ++ ++ spd_dbg(spd, "req [%d] %s : state [%d] %s\n", ++ spd_cmd, spd_cmd_str(spd_cmd), ++ spd->state, mei_spd_state_str(spd->state)); ++ ++ if (spd->state < MEI_SPD_STATE_RUNNING) { ++ spd_err(spd, "Wrong state %d\n", spd->state); ++ ret = SPD_STATUS_INVALID_COMMAND; ++ goto reply; ++ } ++ ++ switch (spd_cmd) { ++ case SPD_RPMB_WRITE_CMD: ++ case SPD_RPMB_READ_CMD: ++ case SPD_RPMB_GET_COUNTER_CMD: ++ spd_err(spd, "Command %d is not supported\n", spd_cmd); ++ ret = SPD_STATUS_NOT_SUPPORTED; ++ break; ++ case SPD_GPP_WRITE_CMD: ++ ret = mei_spd_cmd_gpp_write(spd, cmd, out_buf_sz); ++ break; ++ case SPD_GPP_READ_CMD: ++ ret = mei_spd_cmd_gpp_read(spd, cmd, out_buf_sz); ++ break; ++ case SPD_TRIM_CMD: ++ spd_err(spd, "Command %d is not supported\n", spd_cmd); ++ ret = SPD_STATUS_NOT_SUPPORTED; ++ break; ++ default: ++ spd_err(spd, "Wrong request command %d\n", spd_cmd); ++ ret = SPD_STATUS_INVALID_COMMAND; ++ break; ++ } ++reply: ++ cmd->header.is_response = 1; ++ cmd->header.status = ret; ++ if (ret != SPD_STATUS_SUCCESS) ++ cmd->header.len = sizeof(struct spd_cmd_hdr); ++ ++ written = mei_cldev_send(spd->cldev, spd->buf, cmd->header.len); ++ if (written != cmd->header.len) { ++ ret = SPD_STATUS_GENERAL_FAILURE; ++ spd_err(spd, "Failed to send reply written = %zd\n", written); ++ } ++ ++ /* FIXME: translate ret to errno */ ++ if (ret) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++ssize_t mei_spd_cmd(struct mei_spd *spd) ++{ ++ struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; ++ ssize_t out_buf_sz; ++ int ret; ++ 
++ out_buf_sz = mei_cldev_recv(spd->cldev, spd->buf, spd->buf_sz); ++ if (out_buf_sz < 0) { ++ spd_err(spd, "failure in receive ret = %zd\n", out_buf_sz); ++ return out_buf_sz; ++ } ++ ++ if (out_buf_sz == 0) { ++ spd_err(spd, "received empty msg\n"); ++ return 0; ++ } ++ ++ /* check that we've received at least sizeof(header) */ ++ if (out_buf_sz < sizeof(struct spd_cmd_hdr)) { ++ spd_err(spd, "Request is too short\n"); ++ return -EFAULT; ++ } ++ ++ if (cmd->header.is_response) ++ ret = mei_spd_cmd_response(spd, out_buf_sz); ++ else ++ ret = mei_spd_cmd_request(spd, out_buf_sz); ++ ++ return ret; ++} ++ ++static void mei_spd_status_send_work(struct work_struct *work) ++{ ++ struct mei_spd *spd = ++ container_of(work, struct mei_spd, status_send_w); ++ ++ mutex_lock(&spd->lock); ++ mei_spd_cmd_storage_status_req(spd); ++ mutex_unlock(&spd->lock); ++} ++ ++void mei_spd_free(struct mei_spd *spd) ++{ ++ if (!spd) ++ return; ++ ++ cancel_work_sync(&spd->status_send_w); ++ ++ kfree(spd->buf); ++ kfree(spd); ++} ++ ++struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev) ++{ ++ struct mei_spd *spd; ++ u8 *buf; ++ ++ spd = kzalloc(sizeof(*spd), GFP_KERNEL); ++ if (!spd) ++ return NULL; ++ ++ spd->buf_sz = sizeof(struct spd_cmd) + SPD_CLIENT_GPP_DATA_MAX_SIZE; ++ buf = kmalloc(spd->buf_sz, GFP_KERNEL); ++ if (!buf) ++ goto free; ++ ++ spd->cldev = cldev; ++ spd->buf = buf; ++ spd->state = MEI_SPD_STATE_INIT; ++ mutex_init(&spd->lock); ++ INIT_WORK(&spd->status_send_w, mei_spd_status_send_work); ++ ++ return spd; ++free: ++ kfree(spd); ++ return NULL; ++} +diff --git a/drivers/misc/mei/spd/cmd.h b/drivers/misc/mei/spd/cmd.h +new file mode 100644 +index 000000000000..a7f99ec7f8f4 +--- /dev/null ++++ b/drivers/misc/mei/spd/cmd.h +@@ -0,0 +1,218 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Intel Corp. 
++ */ ++#ifndef _SPD_CMD_H ++#define _SPD_CMD_H ++ ++#include ++ ++/** ++ * enum spd_cmd_type - available commands ++ * @SPD_NONE_CMD : Lower command sentinel. ++ * @SPD_START_STOP_CMD : start stop command (deprecated). [Host -> TEE] ++ * @SPD_RPMB_WRITE_CMD : RPMB write request. [TEE -> Host] ++ * @SPD_RPMB_READ_CMD : RPMB read request. [TEE -> Host] ++ * @SPD_RPMB_GET_COUNTER_CMD: get counter request [TEE -> Host] ++ * @SPD_GPP_WRITE_CMD : GPP write request. [TEE -> Host] ++ * @SPD_GPP_READ_CMD : GPP read request. [TEE -> Host] ++ * @SPD_TRIM_CMD : TRIM command [TEE -> Host] ++ * @SPD_INIT_CMD : initial handshake between host and fw. [Host -> TEE] ++ * @SPD_STORAGE_STATUS_CMD : the backing storage status. [Host -> TEE] ++ * @SPD_MAX_CMD: Upper command sentinel. ++ */ ++enum spd_cmd_type { ++ SPD_NONE_CMD = 0, ++ SPD_START_STOP_CMD, ++ SPD_RPMB_WRITE_CMD, ++ SPD_RPMB_READ_CMD, ++ SPD_RPMB_GET_COUNTER_CMD, ++ SPD_GPP_WRITE_CMD, ++ SPD_GPP_READ_CMD, ++ SPD_TRIM_CMD, ++ SPD_INIT_CMD, ++ SPD_STORAGE_STATUS_CMD, ++ SPD_MAX_CMD, ++}; ++ ++enum spd_status { ++ SPD_STATUS_SUCCESS = 0, ++ SPD_STATUS_GENERAL_FAILURE = 1, ++ SPD_STATUS_NOT_READY = 2, ++ SPD_STATUS_NOT_SUPPORTED = 3, ++ SPD_STATUS_INVALID_COMMAND = 4, ++}; ++ ++/** ++ * enum spd_storage_type - storage device type ++ * @SPD_TYPE_UNDEF: lower enum sentinel ++ * @SPD_TYPE_EMMC: emmc device ++ * @SPD_TYPE_UFS: ufs device ++ * @SPD_TYPE_MAX: upper enum sentinel ++ */ ++enum spd_storage_type { ++ SPD_TYPE_UNDEF = 0, ++ SPD_TYPE_EMMC = 1, ++ SPD_TYPE_UFS = 2, ++ SPD_TYPE_MAX ++}; ++ ++/** ++ * struct spd_cmd_hdr - Host storage Command Header ++ * @command_type: SPD_TYPES ++ * @is_response: 1 == Response, 0 == Request ++ * @len: command length ++ * @status: command status ++ * @reserved: reserved ++ */ ++struct spd_cmd_hdr { ++ u32 command_type : 7; ++ u32 is_response : 1; ++ u32 len : 13; ++ u32 status : 8; ++ u32 reserved : 3; ++} __packed; ++ ++/** ++ * RPMB Frame Size as defined by the JDEC spec ++ */ ++#define 
SPD_CLIENT_RPMB_DATA_MAX_SIZE (512) ++ ++/** ++ * struct spd_cmd_init_resp ++ * commandType == HOST_STORAGE_INIT_CMD ++ * @gpp_partition_id: gpp_partition: ++ * UFS: LUN Number (0-7) ++ * EMMC: 1-4. ++ * 0xff: GPP not supported ++ * @type: storage hw type ++ * SPD_TYPE_EMMC ++ * SPD_TYPE_UFS ++ * @serial_no_sz: serial_no size ++ * @serial_no: device serial number ++ */ ++struct spd_cmd_init_resp { ++ u32 gpp_partition_id; ++ u32 type; ++ u32 serial_no_sz; ++ u8 serial_no[0]; ++}; ++ ++/** ++ * struct spd_cmd_storage_status_req ++ * commandType == SPD_STORAGE_STATUS_CMD ++ * @gpp_on: availability of the gpp backing storage ++ * 0 - GP partition is accessible ++ * 1 - GP partition is not accessible ++ * @rpmb_on: availability of the backing storage ++ * 0 - RPMB partition is accessible ++ * 1 - RPBM partition is not accessible ++ */ ++struct spd_cmd_storage_status_req { ++ u32 gpp_on; ++ u32 rpmb_on; ++} __packed; ++ ++/** ++ * struct spd_cmd_rpmb_write ++ * command_type == SPD_RPMB_WRITE_CMD ++ * @rpmb_frame: RPMB frame are constant size (512) ++ */ ++struct spd_cmd_rpmb_write { ++ u8 rpmb_frame[0]; ++} __packed; ++ ++/** ++ * struct spd_cmd_rpmb_read ++ * command_type == SPD_RPMB_READ_CMD ++ * @rpmb_frame: RPMB frame are constant size (512) ++ */ ++struct spd_cmd_rpmb_read { ++ u8 rpmb_frame[0]; ++} __packed; ++ ++/** ++ * struct spd_cmd_rpmb_get_counter ++ * command_type == SPD_RPMB_GET_COUNTER_CMD ++ * @rpmb_frame: frame containing frame counter ++ */ ++struct spd_cmd_rpmb_get_counter { ++ u8 rpmb_frame[0]; ++} __packed; ++ ++/** ++ * struct spd_cmd_gpp_write_req ++ * command_type == SPD_GPP_WRITE_CMD ++ * @offset: frame offset in partition ++ * @data: 4K page ++ */ ++struct spd_cmd_gpp_write_req { ++ u32 offset; ++ u8 data[0]; ++} __packed; ++ ++/** ++ * struct spd_cmd_gpp_write_rsp ++ * command_type == SPD_GPP_WRITE_CMD ++ * @reserved: reserved ++ */ ++struct spd_cmd_gpp_write_rsp { ++ u32 reserved[2]; ++} __packed; ++ ++/** ++ * struct spd_cmd_gpp_read_req ++ 
* command_type == SPD_GPP_READ_CMD ++ * @offset: offset of a frame on GPP partition ++ * @size_to_read: data length to read (must be ) ++ */ ++struct spd_cmd_gpp_read_req { ++ u32 offset; ++ u32 size_to_read; ++} __packed; ++ ++/** ++ * struct spd_cmd_gpp_read_rsp ++ * command_type == SPD_GPP_READ_CMD ++ * @reserved: reserved ++ * @data: data ++ */ ++struct spd_cmd_gpp_read_rsp { ++ u32 reserved; ++ u8 data[0]; ++} __packed; ++ ++#define SPD_GPP_READ_DATA_LEN(cmd) ((cmd).header.len - \ ++ (sizeof(struct spd_cmd_hdr) + \ ++ sizeof(struct spd_cmd_gpp_read_rsp))) ++ ++#define SPD_GPP_WRITE_DATA_LEN(cmd) ((cmd).header.len - \ ++ (sizeof(struct spd_cmd_hdr) + \ ++ sizeof(struct spd_cmd_gpp_write_req))) ++ ++struct spd_cmd { ++ struct spd_cmd_hdr header; ++ ++ union { ++ struct spd_cmd_rpmb_write rpmb_write; ++ struct spd_cmd_rpmb_read rpmb_read; ++ struct spd_cmd_rpmb_get_counter rpmb_get_counter; ++ ++ struct spd_cmd_gpp_write_req gpp_write_req; ++ struct spd_cmd_gpp_write_rsp gpp_write_rsp; ++ ++ struct spd_cmd_gpp_read_req gpp_read_req; ++ struct spd_cmd_gpp_read_rsp gpp_read_resp; ++ ++ struct spd_cmd_init_resp init_rsp; ++ struct spd_cmd_storage_status_req status_req; ++ }; ++} __packed; ++ ++/* GPP Max data 4K */ ++#define SPD_CLIENT_GPP_DATA_MAX_SIZE (4096) ++ ++const char *spd_cmd_str(enum spd_cmd_type cmd); ++const char *mei_spd_dev_str(enum spd_storage_type type); ++ ++#endif /* _SPD_CMD_H */ +diff --git a/drivers/misc/mei/spd/debugfs.c b/drivers/misc/mei/spd/debugfs.c +new file mode 100644 +index 000000000000..6e1c789cd0f2 +--- /dev/null ++++ b/drivers/misc/mei/spd/debugfs.c +@@ -0,0 +1,79 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2015 - 2019 Intel Corporation. 
++ */ ++#include ++#include ++#include ++#include ++ ++#include "cmd.h" ++#include "spd.h" ++ ++static ssize_t mei_spd_dbgfs_read_info(struct file *fp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ struct mei_spd *spd = fp->private_data; ++ size_t bufsz = 4095; ++ char *buf; ++ int pos = 0; ++ int ret; ++ ++ buf = kzalloc(bufsz, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ pos += scnprintf(buf + pos, bufsz - pos, "DEV STATE: [%d] %s\n", ++ spd->state, mei_spd_state_str(spd->state)); ++ pos += scnprintf(buf + pos, bufsz - pos, "DEV TYPE : [%d] %s\n", ++ spd->dev_type, mei_spd_dev_str(spd->dev_type)); ++ pos += scnprintf(buf + pos, bufsz - pos, " ID SIZE : %d\n", ++ spd->dev_id_sz); ++ pos += scnprintf(buf + pos, bufsz - pos, " ID : '%s'\n", "N/A"); ++ pos += scnprintf(buf + pos, bufsz - pos, "GPP\n"); ++ pos += scnprintf(buf + pos, bufsz - pos, " id : %d\n", ++ spd->gpp_partition_id); ++ pos += scnprintf(buf + pos, bufsz - pos, " opened : %1d\n", ++ mei_spd_gpp_is_open(spd)); ++ ++ ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); ++ kfree(buf); ++ return ret; ++} ++ ++static const struct file_operations mei_spd_dbgfs_fops_info = { ++ .open = simple_open, ++ .read = mei_spd_dbgfs_read_info, ++ .llseek = generic_file_llseek, ++}; ++ ++void mei_spd_dbgfs_deregister(struct mei_spd *spd) ++{ ++ if (!spd->dbgfs_dir) ++ return; ++ debugfs_remove_recursive(spd->dbgfs_dir); ++ spd->dbgfs_dir = NULL; ++} ++ ++int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name) ++{ ++ struct dentry *dir, *f; ++ ++ dir = debugfs_create_dir(name, NULL); ++ if (!dir) ++ return -ENOMEM; ++ ++ spd->dbgfs_dir = dir; ++ ++ f = debugfs_create_file("info", 0400, dir, ++ spd, &mei_spd_dbgfs_fops_info); ++ if (!f) { ++ spd_err(spd, "info: registration failed\n"); ++ goto err; ++ } ++ ++ return 0; ++err: ++ mei_spd_dbgfs_deregister(spd); ++ return -ENODEV; ++} +diff --git a/drivers/misc/mei/spd/gpp.c b/drivers/misc/mei/spd/gpp.c +new file mode 100644 +index 
000000000000..cb296ccf086f +--- /dev/null ++++ b/drivers/misc/mei/spd/gpp.c +@@ -0,0 +1,299 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2015 - 2019 Intel Corporation. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cmd.h" ++#include "spd.h" ++ ++static struct page *page_read(struct address_space *mapping, int index) ++{ ++ return read_mapping_page(mapping, index, NULL); ++} ++ ++static int mei_spd_bd_read(struct mei_spd *spd, loff_t from, size_t len, ++ size_t *retlen, u_char *buf) ++{ ++ struct page *page; ++ int index = from >> PAGE_SHIFT; ++ int offset = from & (PAGE_SIZE - 1); ++ int cpylen; ++ ++ while (len) { ++ if ((offset + len) > PAGE_SIZE) ++ cpylen = PAGE_SIZE - offset; ++ else ++ cpylen = len; ++ len = len - cpylen; ++ ++ page = page_read(spd->gpp->bd_inode->i_mapping, index); ++ if (IS_ERR(page)) ++ return PTR_ERR(page); ++ ++ memcpy(buf, page_address(page) + offset, cpylen); ++ put_page(page); ++ ++ if (retlen) ++ *retlen += cpylen; ++ buf += cpylen; ++ offset = 0; ++ index++; ++ } ++ return 0; ++} ++ ++static int _mei_spd_bd_write(struct block_device *dev, const u_char *buf, ++ loff_t to, size_t len, size_t *retlen) ++{ ++ struct page *page; ++ struct address_space *mapping = dev->bd_inode->i_mapping; ++ int index = to >> PAGE_SHIFT; /* page index */ ++ int offset = to & ~PAGE_MASK; /* page offset */ ++ int cpylen; ++ ++ while (len) { ++ if ((offset + len) > PAGE_SIZE) ++ cpylen = PAGE_SIZE - offset; ++ else ++ cpylen = len; ++ len = len - cpylen; ++ ++ page = page_read(mapping, index); ++ if (IS_ERR(page)) ++ return PTR_ERR(page); ++ ++ if (memcmp(page_address(page) + offset, buf, cpylen)) { ++ lock_page(page); ++ memcpy(page_address(page) + offset, buf, cpylen); ++ set_page_dirty(page); ++ unlock_page(page); ++ balance_dirty_pages_ratelimited(mapping); ++ } ++ put_page(page); ++ ++ if (retlen) ++ *retlen += cpylen; ++ ++ buf += cpylen; ++ offset = 0; ++ index++; ++ } ++ return 0; ++} ++ 
++static int mei_spd_bd_write(struct mei_spd *spd, loff_t to, size_t len, ++ size_t *retlen, const u_char *buf) ++{ ++ int ret; ++ ++ ret = _mei_spd_bd_write(spd->gpp, buf, to, len, retlen); ++ if (ret > 0) ++ ret = 0; ++ ++ sync_blockdev(spd->gpp); ++ ++ return ret; ++} ++ ++static void mei_spd_bd_sync(struct mei_spd *spd) ++{ ++ sync_blockdev(spd->gpp); ++} ++ ++#define GPP_FMODE (FMODE_WRITE | FMODE_READ | FMODE_EXCL) ++ ++bool mei_spd_gpp_is_open(struct mei_spd *spd) ++{ ++ struct request_queue *q; ++ ++ if (!spd->gpp) ++ return false; ++ ++ q = spd->gpp->bd_queue; ++ if (q && !blk_queue_stopped(q)) ++ return true; ++ ++ return false; ++} ++ ++static int mei_spd_gpp_open(struct mei_spd *spd, struct device *dev) ++{ ++ int ret; ++ ++ if (spd->gpp) ++ return 0; ++ ++ spd->gpp = blkdev_get_by_dev(dev->devt, GPP_FMODE, spd); ++ if (IS_ERR(spd->gpp)) { ++ ret = PTR_ERR(spd->gpp); ++ spd->gpp = NULL; ++ spd_dbg(spd, "Can't get GPP block device %s ret = %d\n", ++ dev_name(dev), ret); ++ return ret; ++ } ++ ++ spd_dbg(spd, "gpp partition created\n"); ++ return 0; ++} ++ ++static int mei_spd_gpp_close(struct mei_spd *spd) ++{ ++ if (!spd->gpp) ++ return 0; ++ ++ mei_spd_bd_sync(spd); ++ blkdev_put(spd->gpp, GPP_FMODE); ++ spd->gpp = NULL; ++ ++ spd_dbg(spd, "gpp partition removed\n"); ++ return 0; ++} ++ ++#define UFSHCD "ufshcd" ++static bool mei_spd_lun_ufs_match(struct mei_spd *spd, struct device *dev) ++{ ++ struct gendisk *disk = dev_to_disk(dev); ++ struct scsi_device *sdev; ++ ++ switch (disk->major) { ++ case SCSI_DISK0_MAJOR: ++ case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: ++ case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: ++ break; ++ default: ++ return false; ++ } ++ ++ sdev = to_scsi_device(dev->parent); ++ ++ if (!sdev->host || ++ strncmp(sdev->host->hostt->name, UFSHCD, strlen(UFSHCD))) ++ return false; ++ ++ return sdev->lun == spd->gpp_partition_id; ++} ++ ++static bool mei_spd_gpp_mmc_match(struct mei_spd *spd, struct device *dev) ++{ ++ struct gendisk *disk = dev_to_disk(dev); ++ int idx, part_id; ++ ++ if (disk->major != MMC_BLOCK_MAJOR) ++ return false; ++ ++ if (sscanf(disk->disk_name, "mmcblk%dgp%d", &idx, &part_id) != 2) ++ return false; ++ ++ return part_id == spd->gpp_partition_id - 1; ++} ++ ++static bool mei_spd_gpp_match(struct mei_spd *spd, struct device *dev) ++{ ++ /* we are only interested in physical partitions */ ++ if (strncmp(dev->type->name, "disk", sizeof("disk"))) ++ return false; ++ ++ if (spd->dev_type == SPD_TYPE_EMMC) ++ return mei_spd_gpp_mmc_match(spd, dev); ++ else if (spd->dev_type == SPD_TYPE_UFS) ++ return mei_spd_lun_ufs_match(spd, dev); ++ else ++ return false; ++} ++ ++static int gpp_add_device(struct device *dev, struct class_interface *intf) ++{ ++ struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); ++ ++ if (!mei_spd_gpp_match(spd, dev)) ++ return 0; ++ ++ mutex_lock(&spd->lock); ++ if (mei_spd_gpp_open(spd, dev)) { ++ mutex_unlock(&spd->lock); ++ return 0; ++ } ++ ++ schedule_work(&spd->status_send_w); ++ mutex_unlock(&spd->lock); ++ ++ return 0; ++} ++ ++static void gpp_remove_device(struct device *dev, struct class_interface *intf) ++{ ++ struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); ++ ++ if (!mei_spd_gpp_match(spd, dev)) ++ return; ++ ++ mutex_lock(&spd->lock); ++ if (mei_spd_gpp_close(spd)) { ++ mutex_unlock(&spd->lock); ++ return; ++ } ++ ++ if (spd->state != MEI_SPD_STATE_STOPPING) ++ schedule_work(&spd->status_send_w); ++ mutex_unlock(&spd->lock); ++} ++ ++int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size) ++{ ++ int ret; ++ ++ 
spd_dbg(spd, "GPP read offset = %zx, size = %zx\n", off, size); ++ ++ if (!mei_spd_gpp_is_open(spd)) ++ return -ENODEV; ++ ++ ret = mei_spd_bd_read(spd, off, size, NULL, data); ++ if (ret) ++ spd_err(spd, "GPP read failed ret = %d\n", ret); ++ ++ return ret; ++} ++ ++int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size) ++{ ++ int ret; ++ ++ spd_dbg(spd, "GPP write offset = %zx, size = %zx\n", off, size); ++ ++ if (!mei_spd_gpp_is_open(spd)) ++ return -ENODEV; ++ ++ ret = mei_spd_bd_write(spd, off, size, NULL, data); ++ if (ret) ++ spd_err(spd, "GPP write failed ret = %d\n", ret); ++ ++ return ret; ++} ++ ++void mei_spd_gpp_prepare(struct mei_spd *spd) ++{ ++ spd->gpp_interface.add_dev = gpp_add_device; ++ spd->gpp_interface.remove_dev = gpp_remove_device; ++ spd->gpp_interface.class = &block_class; ++} ++ ++int mei_spd_gpp_init(struct mei_spd *spd) ++{ ++ int ret; ++ ++ ret = class_interface_register(&spd->gpp_interface); ++ if (ret) ++ spd_err(spd, "Can't register interface\n"); ++ return ret; ++} ++ ++void mei_spd_gpp_exit(struct mei_spd *spd) ++{ ++ class_interface_unregister(&spd->gpp_interface); ++} +diff --git a/drivers/misc/mei/spd/main.c b/drivers/misc/mei/spd/main.c +new file mode 100644 +index 000000000000..ff7ee23df494 +--- /dev/null ++++ b/drivers/misc/mei/spd/main.c +@@ -0,0 +1,118 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2015 - 2019 Intel Corporation. 
++ */ ++#include ++ ++#include "spd.h" ++ ++static void mei_spd_rx_cb(struct mei_cl_device *cldev) ++{ ++ struct mei_spd *spd = mei_cldev_get_drvdata(cldev); ++ ++ mutex_lock(&spd->lock); ++ mei_spd_cmd(spd); ++ mutex_unlock(&spd->lock); ++} ++ ++static int mei_spd_probe(struct mei_cl_device *cldev, ++ const struct mei_cl_device_id *id) ++{ ++ struct mei_spd *spd; ++ u8 ver = mei_cldev_ver(cldev); ++ int ret; ++ ++ dev_dbg(&cldev->dev, "probing mei spd ver = %d\n", ver); ++ ++ if (ver < 2) { ++ dev_warn(&cldev->dev, "unuspported protocol version %d\n", ver); ++ return -ENODEV; ++ } ++ ++ spd = mei_spd_alloc(cldev); ++ if (!spd) ++ return -ENOMEM; ++ ++ mei_cldev_set_drvdata(cldev, spd); ++ ++ ret = mei_spd_dbgfs_register(spd, "spd"); ++ if (ret) ++ goto free; ++ ++ ret = mei_cldev_enable(cldev); ++ if (ret < 0) { ++ dev_err(&cldev->dev, "Could not enable device ret = %d\n", ret); ++ goto free; ++ } ++ ++ ret = mei_cldev_register_rx_cb(cldev, mei_spd_rx_cb); ++ if (ret) { ++ dev_err(&cldev->dev, "Error register event %d\n", ret); ++ goto disable; ++ } ++ ++ spd_dbg(spd, "protocol version %d\n", ver); ++ mei_spd_gpp_prepare(spd); ++ mutex_lock(&spd->lock); ++ ret = mei_spd_cmd_init_req(spd); ++ mutex_unlock(&spd->lock); ++ if (ret) { ++ dev_err(&cldev->dev, "Could not start ret = %d\n", ret); ++ goto disable; ++ } ++ ++ return 0; ++ ++disable: ++ mei_cldev_disable(cldev); ++ ++free: ++ mei_spd_dbgfs_deregister(spd); ++ mei_cldev_set_drvdata(cldev, NULL); ++ mei_spd_free(spd); ++ return ret; ++} ++ ++static int mei_spd_remove(struct mei_cl_device *cldev) ++{ ++ struct mei_spd *spd = mei_cldev_get_drvdata(cldev); ++ ++ if (spd->state == MEI_SPD_STATE_RUNNING) { ++ spd->state = MEI_SPD_STATE_STOPPING; ++ mei_spd_gpp_exit(spd); ++ mutex_lock(&spd->lock); ++ mei_spd_cmd_storage_status_req(spd); ++ mutex_unlock(&spd->lock); ++ } ++ ++ mei_cldev_disable(cldev); ++ mei_spd_dbgfs_deregister(spd); ++ mei_cldev_set_drvdata(cldev, NULL); ++ mei_spd_free(spd); ++ ++ return 0; ++} 
++ ++#define MEI_SPD_UUID UUID_LE(0x2a39291f, 0x5551, 0x482f, \ ++ 0x99, 0xcb, 0x9e, 0x22, 0x74, 0x97, 0x8c, 0xa8) ++ ++static struct mei_cl_device_id mei_spd_tbl[] = { ++ { .uuid = MEI_SPD_UUID, .version = MEI_CL_VERSION_ANY}, ++ /* required last entry */ ++ { } ++}; ++MODULE_DEVICE_TABLE(mei, mei_spd_tbl); ++ ++static struct mei_cl_driver mei_spd_driver = { ++ .id_table = mei_spd_tbl, ++ .name = "mei_spd", ++ ++ .probe = mei_spd_probe, ++ .remove = mei_spd_remove, ++}; ++ ++module_mei_cl_driver(mei_spd_driver); ++ ++MODULE_AUTHOR("Intel Corporation"); ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Storage Proxy driver based on mei bus"); +diff --git a/drivers/misc/mei/spd/spd.h b/drivers/misc/mei/spd/spd.h +new file mode 100644 +index 000000000000..c6d4a3359b3c +--- /dev/null ++++ b/drivers/misc/mei/spd/spd.h +@@ -0,0 +1,93 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Intel Corp. ++ */ ++#ifndef _MEI_SPD_H ++#define _MEI_SPD_H ++ ++#include ++#include ++ ++enum mei_spd_state { ++ MEI_SPD_STATE_INIT, ++ MEI_SPD_STATE_INIT_WAIT, ++ MEI_SPD_STATE_INIT_DONE, ++ MEI_SPD_STATE_RUNNING, ++ MEI_SPD_STATE_STOPPING, ++}; ++ ++/** ++ * struct mei_spd - spd device struct ++ * ++ * @cldev: client bus device ++ * @gpp: GPP partition block device ++ * @gpp_partition_id: GPP partition id (1-6) ++ * @gpp_interface: gpp class interface for discovery ++ * @dev_type: storage device type ++ * @dev_id_sz: device id size ++ * @dev_id: device id string ++ * @lock: mutex to sync request processing ++ * @state: driver state ++ * @status_send_w: workitem for sending status to the FW ++ * @buf_sz: receive/transmit buffer allocated size ++ * @buf: receive/transmit buffer ++ * @dbgfs_dir: debugfs directory entry ++ */ ++struct mei_spd { ++ struct mei_cl_device *cldev; ++ struct block_device *gpp; ++ u32 gpp_partition_id; ++ struct class_interface gpp_interface; ++ u32 dev_type; ++ u32 dev_id_sz; ++ u8 *dev_id; ++ struct mutex lock; /* mutex to sync request 
processing */ ++ enum mei_spd_state state; ++ struct work_struct status_send_w; ++ size_t buf_sz; ++ u8 *buf; ++ ++#if IS_ENABLED(CONFIG_DEBUG_FS) ++ struct dentry *dbgfs_dir; ++#endif /* CONFIG_DEBUG_FS */ ++}; ++ ++struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev); ++void mei_spd_free(struct mei_spd *spd); ++ ++int mei_spd_cmd_init_req(struct mei_spd *spd); ++int mei_spd_cmd_storage_status_req(struct mei_spd *spd); ++ssize_t mei_spd_cmd(struct mei_spd *spd); ++ ++void mei_spd_gpp_prepare(struct mei_spd *spd); ++bool mei_spd_gpp_is_open(struct mei_spd *spd); ++int mei_spd_gpp_init(struct mei_spd *spd); ++void mei_spd_gpp_exit(struct mei_spd *spd); ++int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size); ++int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size); ++ ++#if IS_ENABLED(CONFIG_DEBUG_FS) ++int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name); ++void mei_spd_dbgfs_deregister(struct mei_spd *spd); ++#else ++static inline int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name) ++{ ++ return 0; ++} ++ ++static inline void mei_spd_dbgfs_deregister(struct mei_spd *spd) ++{ ++} ++ ++#endif /* CONFIG_DEBUG_FS */ ++ ++const char *mei_spd_state_str(enum mei_spd_state state); ++ ++#define spd_err(spd, fmt, ...) \ ++ dev_err(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) ++#define spd_warn(spd, fmt, ...) \ ++ dev_warn(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) ++#define spd_dbg(spd, fmt, ...) 
\ ++ dev_dbg(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) ++ ++#endif /* _MEI_SPD_H */ +-- +2.17.1 + diff --git a/patches/0022-net-stmmac-introduce-MSI-Interrupt-routines-f.connectivity b/patches/0022-net-stmmac-introduce-MSI-Interrupt-routines-f.connectivity new file mode 100644 index 0000000000..fc08d4928f --- /dev/null +++ b/patches/0022-net-stmmac-introduce-MSI-Interrupt-routines-f.connectivity @@ -0,0 +1,575 @@ +From 59604429c3534a01d20d2d13e3c1e523cf73ff3d Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Fri, 26 Jul 2019 10:30:21 +0800 +Subject: [PATCH 022/108] net: stmmac: introduce MSI Interrupt routines for + mac, safety, RX & TX + +Now we introduce MSI interrupt service routines and hook these routines +up if stmmac_open() sees valid irq line being requested:- + +stmmac_mac_interrupt() :- MAC (dev->irq), WOL (wol_irq), LPI (lpi_irq) +stmmac_safety_interrupt() :- Safety Feat Correctible Error (sfty_ce_irq) + & Uncorrectible Error (sfty_ue_irq) +stmmac_msi_intr_rx() :- For all RX MSI irq (rx_irq) +stmmac_msi_intr_tx() :- For all TX MSI irq (tx_irq) + +Each of IRQs will have its unique name so that we can differentiate +them easily under /proc/interrupts. 
+ +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 12 + + drivers/net/ethernet/stmicro/stmmac/stmmac.h | 16 + + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 418 ++++++++++++++++-- + include/linux/stmmac.h | 1 + + 4 files changed, 402 insertions(+), 45 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index b0cb5e9f0592..a65a5bf38359 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -295,6 +295,18 @@ enum dma_irq_dir { + DMA_DIR_RXTX = 0x3, + }; + ++enum request_irq_err { ++ REQ_IRQ_ERR_ALL, ++ REQ_IRQ_ERR_TX, ++ REQ_IRQ_ERR_RX, ++ REQ_IRQ_ERR_SFTY_UE, ++ REQ_IRQ_ERR_SFTY_CE, ++ REQ_IRQ_ERR_LPI, ++ REQ_IRQ_ERR_WOL, ++ REQ_IRQ_ERR_MAC, ++ REQ_IRQ_ERR_NO, ++}; ++ + /* EEE and LPI defines */ + #define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) + #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +index 47fc750585a8..7b089089d204 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -30,6 +30,10 @@ struct stmmac_resources { + int lpi_irq; + int irq; + int phy_conv_irq; ++ int sfty_ce_irq; ++ int sfty_ue_irq; ++ int rx_irq[MTL_MAX_RX_QUEUES]; ++ int tx_irq[MTL_MAX_TX_QUEUES]; + }; + + struct stmmac_tx_info { +@@ -215,6 +219,18 @@ struct stmmac_priv { + void __iomem *ptpaddr; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + int phy_conv_irq; ++ int sfty_ce_irq; ++ int sfty_ue_irq; ++ int rx_irq[MTL_MAX_RX_QUEUES]; ++ int tx_irq[MTL_MAX_TX_QUEUES]; ++ /*irq name */ ++ char int_name_mac[IFNAMSIZ + 9]; ++ char int_name_wol[IFNAMSIZ + 9]; ++ char int_name_lpi[IFNAMSIZ + 9]; ++ char int_name_sfty_ce[IFNAMSIZ + 9]; ++ char int_name_sfty_ue[IFNAMSIZ + 9]; ++ char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 9]; ++ char 
int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 9]; + + #ifdef CONFIG_DEBUG_FS + struct dentry *dbgfs_dir; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index a49b5ed3051b..2fccc9af4073 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -104,6 +104,11 @@ module_param(chain_mode, int, 0444); + MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); + + static irqreturn_t stmmac_interrupt(int irq, void *dev_id); ++/* For MSI interrupts handling */ ++static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); ++static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); ++static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); ++static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); + + #ifdef CONFIG_DEBUG_FS + static void stmmac_init_fs(struct net_device *dev); +@@ -2654,6 +2659,238 @@ static void stmmac_hw_teardown(struct net_device *dev) + clk_disable_unprepare(priv->plat->clk_ptp_ref); + } + ++static void stmmac_free_irq(struct net_device *dev, ++ enum request_irq_err irq_err, int irq_idx) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ int j; ++ ++ switch (irq_err) { ++ case REQ_IRQ_ERR_ALL: ++ irq_idx = priv->plat->tx_queues_to_use; ++ /* fall through */ ++ case REQ_IRQ_ERR_TX: ++ for (j = irq_idx - 1; j >= 0; j--) { ++ if (priv->tx_irq[j] > 0) ++ free_irq(priv->tx_irq[j], &priv->tx_queue[j]); ++ } ++ irq_idx = priv->plat->rx_queues_to_use; ++ /* fall through */ ++ case REQ_IRQ_ERR_RX: ++ for (j = irq_idx - 1; j >= 0; j--) { ++ if (priv->rx_irq[j] > 0) ++ free_irq(priv->rx_irq[j], &priv->rx_queue[j]); ++ } ++ ++ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) ++ free_irq(priv->sfty_ue_irq, dev); ++ /* fall through */ ++ case REQ_IRQ_ERR_SFTY_UE: ++ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) ++ free_irq(priv->sfty_ce_irq, dev); ++ /* fall through */ ++ case 
REQ_IRQ_ERR_SFTY_CE: ++ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) ++ free_irq(priv->lpi_irq, dev); ++ /* fall through */ ++ case REQ_IRQ_ERR_LPI: ++ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) ++ free_irq(priv->wol_irq, dev); ++ /* fall through */ ++ case REQ_IRQ_ERR_WOL: ++ free_irq(dev->irq, dev); ++ /* fall through */ ++ case REQ_IRQ_ERR_MAC: ++ case REQ_IRQ_ERR_NO: ++ /* If MAC IRQ request error, no more IRQ to free */ ++ break; ++ } ++} ++ ++static int stmmac_request_irq(struct net_device *dev) ++{ ++ enum request_irq_err irq_err = REQ_IRQ_ERR_NO; ++ struct stmmac_priv *priv = netdev_priv(dev); ++ int irq_idx = 0; ++ int ret; ++ int i; ++ ++ /* Request the IRQ lines */ ++ if (priv->plat->multi_msi_en) { ++ char *int_name; ++ ++ /* For common interrupt */ ++ int_name = priv->int_name_mac; ++ sprintf(int_name, "%s:%s", dev->name, "mac"); ++ ret = request_irq(dev->irq, stmmac_mac_interrupt, ++ 0, int_name, dev); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: alloc mac MSI %d (error: %d)\n", ++ __func__, dev->irq, ret); ++ irq_err = REQ_IRQ_ERR_MAC; ++ goto irq_error; ++ } ++ ++ /* Request the Wake IRQ in case of another line ++ * is used for WoL ++ */ ++ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { ++ int_name = priv->int_name_wol; ++ sprintf(int_name, "%s:%s", dev->name, "wol"); ++ ret = request_irq(priv->wol_irq, ++ stmmac_mac_interrupt, ++ 0, int_name, dev); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: alloc wol MSI %d (error: %d)\n", ++ __func__, priv->wol_irq, ret); ++ irq_err = REQ_IRQ_ERR_WOL; ++ goto irq_error; ++ } ++ } ++ ++ /* Request the LPI IRQ in case of another line ++ * is used for LPI ++ */ ++ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { ++ int_name = priv->int_name_lpi; ++ sprintf(int_name, "%s:%s", dev->name, "lpi"); ++ ret = request_irq(priv->lpi_irq, ++ stmmac_mac_interrupt, ++ 0, int_name, dev); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: alloc lpi MSI %d 
(error: %d)\n", ++ __func__, priv->lpi_irq, ret); ++ irq_err = REQ_IRQ_ERR_LPI; ++ goto irq_error; ++ } ++ } ++ ++ /* Request the Safety Feature Correctible Error line in ++ * case of another line is used ++ */ ++ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { ++ int_name = priv->int_name_sfty_ce; ++ sprintf(int_name, "%s:%s", dev->name, "safety-ce"); ++ ret = request_irq(priv->sfty_ce_irq, ++ stmmac_safety_interrupt, ++ 0, int_name, dev); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: alloc sfty ce MSI %d (error: %d)\n", ++ __func__, priv->sfty_ce_irq, ret); ++ irq_err = REQ_IRQ_ERR_SFTY_CE; ++ goto irq_error; ++ } ++ } ++ ++ /* Request the Safety Feature Uncorrectible Error line in ++ * case of another line is used ++ */ ++ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { ++ int_name = priv->int_name_sfty_ue; ++ sprintf(int_name, "%s:%s", dev->name, "safety-ue"); ++ ret = request_irq(priv->sfty_ue_irq, ++ stmmac_safety_interrupt, ++ 0, int_name, dev); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: alloc sfty ue MSI %d (error: %d)\n", ++ __func__, priv->sfty_ue_irq, ret); ++ irq_err = REQ_IRQ_ERR_SFTY_UE; ++ goto irq_error; ++ } ++ } ++ ++ /* Request Rx MSI irq */ ++ for (i = 0; i < priv->plat->rx_queues_to_use; i++) { ++ if (priv->rx_irq[i] == 0) ++ continue; ++ ++ int_name = priv->int_name_rx_irq[i]; ++ sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); ++ ret = request_irq(priv->rx_irq[i], ++ stmmac_msi_intr_rx, ++ 0, int_name, &priv->rx_queue[i]); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: alloc rx-%d MSI %d (error: %d)\n", ++ __func__, i, priv->rx_irq[i], ret); ++ irq_err = REQ_IRQ_ERR_RX; ++ irq_idx = i; ++ goto irq_error; ++ } ++ } ++ ++ /* Request Tx MSI irq */ ++ for (i = 0; i < priv->plat->tx_queues_to_use; i++) { ++ if (priv->tx_irq[i] == 0) ++ continue; ++ ++ int_name = priv->int_name_tx_irq[i]; ++ sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); ++ ret = 
request_irq(priv->tx_irq[i], ++ stmmac_msi_intr_tx, ++ 0, int_name, &priv->tx_queue[i]); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: alloc tx-%d MSI %d (error: %d)\n", ++ __func__, i, priv->tx_irq[i], ret); ++ irq_err = REQ_IRQ_ERR_TX; ++ irq_idx = i; ++ goto irq_error; ++ } ++ } ++ } else { ++ ret = request_irq(dev->irq, stmmac_interrupt, ++ IRQF_SHARED, dev->name, dev); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: ERROR: allocating the IRQ %d (error: %d)\n", ++ __func__, dev->irq, ret); ++ irq_err = REQ_IRQ_ERR_MAC; ++ goto irq_error; ++ } ++ ++ /* Request the Wake IRQ in case of another line ++ * is used for WoL ++ */ ++ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { ++ ret = request_irq(priv->wol_irq, stmmac_interrupt, ++ IRQF_SHARED, dev->name, dev); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: ERROR: allocating the WoL IRQ %d (%d)\n", ++ __func__, priv->wol_irq, ret); ++ irq_err = REQ_IRQ_ERR_WOL; ++ goto irq_error; ++ } ++ } ++ ++ /* Request the IRQ lines */ ++ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { ++ ret = request_irq(priv->lpi_irq, stmmac_interrupt, ++ IRQF_SHARED, dev->name, dev); ++ if (unlikely(ret < 0)) { ++ netdev_err(priv->dev, ++ "%s: ERROR: allocating the LPI IRQ %d (%d)\n", ++ __func__, priv->lpi_irq, ret); ++ irq_err = REQ_IRQ_ERR_LPI; ++ goto irq_error; ++ } ++ } ++ } ++ ++ netdev_info(priv->dev, "PASS: requesting IRQs\n"); ++ return ret; ++ ++irq_error: ++ stmmac_free_irq(dev, irq_err, irq_idx); ++ return ret; ++} ++ + /** + * stmmac_open - open entry point of the driver + * @dev : pointer to the device structure. 
+@@ -2712,39 +2949,9 @@ static int stmmac_open(struct net_device *dev) + + phylink_start(priv->phylink); + +- /* Request the IRQ lines */ +- ret = request_irq(dev->irq, stmmac_interrupt, +- IRQF_SHARED, dev->name, dev); +- if (unlikely(ret < 0)) { +- netdev_err(priv->dev, +- "%s: ERROR: allocating the IRQ %d (error: %d)\n", +- __func__, dev->irq, ret); ++ ret = stmmac_request_irq(dev); ++ if (ret) + goto irq_error; +- } +- +- /* Request the Wake IRQ in case of another line is used for WoL */ +- if (priv->wol_irq != dev->irq) { +- ret = request_irq(priv->wol_irq, stmmac_interrupt, +- IRQF_SHARED, dev->name, dev); +- if (unlikely(ret < 0)) { +- netdev_err(priv->dev, +- "%s: ERROR: allocating the WoL IRQ %d (%d)\n", +- __func__, priv->wol_irq, ret); +- goto wolirq_error; +- } +- } +- +- /* Request the IRQ lines */ +- if (priv->lpi_irq > 0) { +- ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, +- dev->name, dev); +- if (unlikely(ret < 0)) { +- netdev_err(priv->dev, +- "%s: ERROR: allocating the LPI IRQ %d (%d)\n", +- __func__, priv->lpi_irq, ret); +- goto lpiirq_error; +- } +- } + + /* Start phy converter after MDIO bus IRQ handling is up */ + if (priv->plat->setup_phy_conv) { +@@ -2765,11 +2972,9 @@ static int stmmac_open(struct net_device *dev) + return 0; + + phy_conv_error: +-lpiirq_error: +- if (priv->wol_irq != dev->irq) +- free_irq(priv->wol_irq, dev); +-wolirq_error: +- free_irq(dev->irq, dev); ++ /* Free the IRQ lines */ ++ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); ++ + irq_error: + phylink_stop(priv->phylink); + +@@ -2811,11 +3016,7 @@ static int stmmac_release(struct net_device *dev) + del_timer_sync(&priv->tx_queue[chan].txtimer); + + /* Free the IRQ lines */ +- free_irq(dev->irq, dev); +- if (priv->wol_irq != dev->irq) +- free_irq(priv->wol_irq, dev); +- if (priv->lpi_irq > 0) +- free_irq(priv->lpi_irq, dev); ++ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); + + /* Start phy converter after MDIO bus IRQ handling is up */ + if 
(priv->plat->remove_phy_conv) { +@@ -3940,15 +4141,136 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) + return IRQ_HANDLED; + } + ++static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) ++{ ++ struct net_device *dev = (struct net_device *)dev_id; ++ struct stmmac_priv *priv = netdev_priv(dev); ++ ++ if (unlikely(!dev)) { ++ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); ++ return IRQ_NONE; ++ } ++ ++ /* Check if adapter is up */ ++ if (test_bit(STMMAC_DOWN, &priv->state)) ++ return IRQ_HANDLED; ++ ++ /* To handle Common interrupts */ ++ stmmac_common_interrupt(priv); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) ++{ ++ struct net_device *dev = (struct net_device *)dev_id; ++ struct stmmac_priv *priv = netdev_priv(dev); ++ ++ if (unlikely(!dev)) { ++ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); ++ return IRQ_NONE; ++ } ++ ++ /* Check if adapter is up */ ++ if (test_bit(STMMAC_DOWN, &priv->state)) ++ return IRQ_HANDLED; ++ ++ /* Check if a fatal error happened */ ++ stmmac_safety_feat_interrupt(priv); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) ++{ ++ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; ++ int chan = tx_q->queue_index; ++ struct stmmac_priv *priv; ++ int status; ++ ++ priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); ++ ++ if (unlikely(!data)) { ++ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); ++ return IRQ_NONE; ++ } ++ ++ /* Check if adapter is up */ ++ if (test_bit(STMMAC_DOWN, &priv->state)) ++ return IRQ_HANDLED; ++ ++ status = stmmac_napi_check(priv, chan, DMA_DIR_TX); ++ ++ if (unlikely(status & tx_hard_error_bump_tc)) { ++ /* Try to bump up the dma threshold on this failure */ ++ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && ++ tc <= 256) { ++ tc += 64; ++ if (priv->plat->force_thresh_dma_mode) ++ stmmac_set_dma_operation_mode(priv, ++ 
tc, ++ tc, ++ chan); ++ else ++ stmmac_set_dma_operation_mode(priv, ++ tc, ++ SF_DMA_MODE, ++ chan); ++ priv->xstats.threshold = tc; ++ } ++ } else if (unlikely(status == tx_hard_error)) { ++ stmmac_tx_err(priv, chan); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) ++{ ++ struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; ++ int chan = rx_q->queue_index; ++ struct stmmac_priv *priv; ++ ++ priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); ++ ++ if (unlikely(!data)) { ++ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); ++ return IRQ_NONE; ++ } ++ ++ /* Check if adapter is up */ ++ if (test_bit(STMMAC_DOWN, &priv->state)) ++ return IRQ_HANDLED; ++ ++ stmmac_napi_check(priv, chan, DMA_DIR_RX); ++ ++ return IRQ_HANDLED; ++} ++ + #ifdef CONFIG_NET_POLL_CONTROLLER + /* Polling receive - used by NETCONSOLE and other diagnostic tools + * to allow network I/O with interrupts disabled. + */ + static void stmmac_poll_controller(struct net_device *dev) + { +- disable_irq(dev->irq); +- stmmac_interrupt(dev->irq, dev); +- enable_irq(dev->irq); ++ struct stmmac_priv *priv = netdev_priv(dev); ++ int i; ++ ++ /* If adapter is down, do nothing */ ++ if (test_bit(STMMAC_DOWN, &priv->state)) ++ return; ++ ++ if (priv->plat->multi_msi_en) { ++ for (i = 0; i < priv->plat->rx_queues_to_use; i++) ++ stmmac_msi_intr_rx(0, &priv->rx_queue[i]); ++ ++ for (i = 0; i < priv->plat->tx_queues_to_use; i++) ++ stmmac_msi_intr_tx(0, &priv->tx_queue[i]); ++ } else { ++ disable_irq(dev->irq); ++ stmmac_interrupt(dev->irq, dev); ++ enable_irq(dev->irq); ++ } + } + #endif + +@@ -4493,6 +4815,12 @@ int stmmac_dvr_probe(struct device *device, + priv->wol_irq = res->wol_irq; + priv->lpi_irq = res->lpi_irq; + priv->phy_conv_irq = res->phy_conv_irq; ++ priv->sfty_ce_irq = res->sfty_ce_irq; ++ priv->sfty_ue_irq = res->sfty_ue_irq; ++ for (i = 0; i < MTL_MAX_RX_QUEUES; i++) ++ priv->rx_irq[i] = res->rx_irq[i]; ++ for (i = 
0; i < MTL_MAX_TX_QUEUES; i++) ++ priv->tx_irq[i] = res->tx_irq[i]; + + if (!IS_ERR_OR_NULL(res->mac)) + memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 5f9d027dfe5a..eedbf48e763e 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -186,5 +186,6 @@ struct plat_stmmacenet_data { + int mac_port_sel_speed; + bool en_tx_lpi_clockgating; + int has_xgmac; ++ bool multi_msi_en; + }; + #endif +-- +2.17.1 + diff --git a/patches/0022-serial-8250_dwlib-Basic-RS485-support.lpss b/patches/0022-serial-8250_dwlib-Basic-RS485-support.lpss new file mode 100644 index 0000000000..dcb20fc95d --- /dev/null +++ b/patches/0022-serial-8250_dwlib-Basic-RS485-support.lpss @@ -0,0 +1,116 @@ +From aa50a85d5866d3c880b8fb5a131e9b6e41cb0903 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Fri, 6 Sep 2019 13:42:44 +0300 +Subject: [PATCH 22/40] serial: 8250_dwlib: Basic RS485 support + +The Synopsys DesignWare UART has a build-in support for the +RS485 protocol from IP version 4.0 onward. This commit +enabled basic support for it. + +There are three RS485 modes defined for the DesignWare UART: +Full Duplex mode, Software-Controlled Half Duplex Mode and +Hardware-Controlled Half Duplex Mode. First the driver will +support only the hardware controlled half duplex mode. + +In hardware controlled half duplex mode both transmitting +and receiving are supported by enabling the driver +(transmitter) and disabling the receiver automatically when +data is being sent out, and otherwise keeping the receiver +enabled and the driver disabled. 
+ +Signed-off-by: Heikki Krogerus +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_dwlib.c | 54 ++++++++++++++++++++++++++++ + 1 file changed, 54 insertions(+) + +diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c +index 6d6a78eead3e..52fc246cbaed 100644 +--- a/drivers/tty/serial/8250/8250_dwlib.c ++++ b/drivers/tty/serial/8250/8250_dwlib.c +@@ -5,16 +5,26 @@ + #include + #include + #include ++#include + #include + #include + + #include "8250_dwlib.h" + + /* Offsets for the DesignWare specific registers */ ++#define DW_UART_TCR 0xac /* Transceiver Control Register (RS485) */ ++#define DW_UART_DE_EN 0xb0 /* Driver Output Enable Register */ ++#define DW_UART_RE_EN 0xb4 /* Receiver Output Enable Register */ + #define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */ + #define DW_UART_CPR 0xf4 /* Component Parameter Register */ + #define DW_UART_UCV 0xf8 /* UART Component Version */ + ++/* Trasceiver Control Register bits */ ++#define DW_UART_TCR_RS485_EN BIT(0) ++#define DW_UART_TCR_RE_POL BIT(1) ++#define DW_UART_TCR_DE_POL BIT(2) ++#define DW_UART_TCR_XFER_MODE(_mode_) ((_mode_) << 3) ++ + /* Component Parameter Register bits */ + #define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0) + #define DW_UART_CPR_AFCE_MODE (1 << 4) +@@ -77,11 +87,55 @@ static void dw8250_set_divisor(struct uart_port *p, unsigned int baud, + serial8250_do_set_divisor(p, baud, quot, quot_frac); + } + ++static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485) ++{ ++ u32 tcr; ++ ++ /* Clearing unsupported flags. */ ++ rs485->flags &= SER_RS485_ENABLED; ++ ++ tcr = dw8250_readl_ext(p, DW_UART_TCR); ++ ++ /* REVISIT: Only supporting Hardware Controlled Half Duplex mode. 
*/ ++ if (rs485->flags & SER_RS485_ENABLED) { ++ tcr |= DW_UART_TCR_RS485_EN | DW_UART_TCR_XFER_MODE(2); ++ dw8250_writel_ext(p, DW_UART_DE_EN, 1); ++ dw8250_writel_ext(p, DW_UART_RE_EN, 1); ++ } else { ++ tcr &= ~(DW_UART_TCR_RS485_EN | DW_UART_TCR_XFER_MODE(3)); ++ dw8250_writel_ext(p, DW_UART_DE_EN, 0); ++ dw8250_writel_ext(p, DW_UART_RE_EN, 0); ++ } ++ ++ if (device_property_read_bool(p->dev, "snps,de-active-high")) ++ tcr |= DW_UART_TCR_DE_POL; ++ if (device_property_read_bool(p->dev, "snps,re-active-high")) ++ tcr |= DW_UART_TCR_RE_POL; ++ ++ dw8250_writel_ext(p, DW_UART_TCR, tcr); ++ ++ /* ++ * XXX: Though we could interpret the "RTS" timings as Driver Enable ++ * (DE) assertion/de-assertion timings, initially not supporting that. ++ * Ideally we should have timing values for the Driver instead of the ++ * RTS signal. ++ */ ++ rs485->delay_rts_before_send = 0; ++ rs485->delay_rts_after_send = 0; ++ ++ p->rs485 = *rs485; ++ ++ return 0; ++} ++ + void dw8250_setup_port(struct uart_port *p) + { + struct uart_8250_port *up = up_to_u8250p(p); + u32 reg; + ++ if (device_property_read_bool(p->dev, "snps,rs485-interface-en")) ++ p->rs485_config = dw8250_rs485_config; ++ + /* + * If the Component Version Register returns zero, we know that + * ADDITIONAL_FEATURES are not enabled. No need to go any further. +-- +2.17.1 + diff --git a/patches/0022-trusty-move-async-works-off-system-workqueue.trusty b/patches/0022-trusty-move-async-works-off-system-workqueue.trusty new file mode 100644 index 0000000000..931f53aba1 --- /dev/null +++ b/patches/0022-trusty-move-async-works-off-system-workqueue.trusty @@ -0,0 +1,146 @@ +From fbd9a018dd98f17c67a49cdcb5e456a601ab884d Mon Sep 17 00:00:00 2001 +From: Michael Ryleev +Date: Mon, 12 Dec 2016 14:18:25 +0000 +Subject: [PATCH 22/63] trusty: move async works off system workqueue + +Trusty async works might be very CPU intensive, move +all Trusty works to separate workqueues. 
+ +Change-Id: I78a906bc0963beea9b20ad8d8599a31b34546376 +Signed-off-by: Michael Ryleev +Signed-off-by: weideng +Reviewed-by: mark gross +--- + drivers/trusty/trusty-irq.c | 12 +++++++++++- + drivers/trusty/trusty-virtio.c | 29 +++++++++++++++++++++++++++-- + 2 files changed, 38 insertions(+), 3 deletions(-) + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index aeb0918dc572..5a74d75ce820 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -57,6 +57,7 @@ struct trusty_irq_state { + struct trusty_irq_irqset __percpu *percpu_irqs; + struct notifier_block trusty_call_notifier; + struct notifier_block cpu_notifier; ++ struct workqueue_struct *wq; + }; + + #define TRUSTY_VMCALL_PENDING_INTR 0x74727505 +@@ -239,7 +240,7 @@ irqreturn_t trusty_irq_handler(int irq, void *data) + } + spin_unlock(&is->normal_irqs_lock); + +- schedule_work_on(raw_smp_processor_id(), &trusty_irq_work->work); ++ queue_work_on(raw_smp_processor_id(), is->wq, &trusty_irq_work->work); + + dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq); + +@@ -588,6 +589,12 @@ static int trusty_irq_probe(struct platform_device *pdev) + goto err_alloc_is; + } + ++ is->wq = alloc_workqueue("trusty-irq-wq", WQ_CPU_INTENSIVE, 0); ++ if (!is->wq) { ++ ret = -ENOMEM; ++ goto err_alloc_wq; ++ } ++ + is->dev = &pdev->dev; + is->trusty_dev = is->dev->parent; + is->irq_work = alloc_percpu(struct trusty_irq_work); +@@ -668,6 +675,8 @@ static int trusty_irq_probe(struct platform_device *pdev) + } + free_percpu(is->irq_work); + err_alloc_irq_work: ++ destroy_workqueue(is->wq); ++err_alloc_wq: + kfree(is); + err_alloc_is: + return ret; +@@ -704,6 +713,7 @@ static int trusty_irq_remove(struct platform_device *pdev) + flush_work(&trusty_irq_work->work); + } + free_percpu(is->irq_work); ++ destroy_workqueue(is->wq); + kfree(is); + + return 0; +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index eb4c0d31e249..eaeb020e98f4 100644 +--- 
a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -46,6 +46,8 @@ struct trusty_ctx { + struct notifier_block call_notifier; + struct list_head vdev_list; + struct mutex mlock; /* protects vdev_list */ ++ struct workqueue_struct *kick_wq; ++ struct workqueue_struct *check_wq; + }; + + struct trusty_vring { +@@ -97,7 +99,7 @@ static int trusty_call_notify(struct notifier_block *nb, + return NOTIFY_DONE; + + tctx = container_of(nb, struct trusty_ctx, call_notifier); +- schedule_work(&tctx->check_vqs); ++ queue_work(tctx->check_wq, &tctx->check_vqs); + + return NOTIFY_OK; + } +@@ -143,7 +145,7 @@ static bool trusty_virtio_notify(struct virtqueue *vq) + struct trusty_ctx *tctx = tvdev->tctx; + + atomic_set(&tvr->needs_kick, 1); +- schedule_work(&tctx->kick_vqs); ++ queue_work(tctx->kick_wq, &tctx->kick_vqs); + + return true; + } +@@ -641,6 +643,21 @@ static int trusty_virtio_probe(struct platform_device *pdev) + INIT_WORK(&tctx->kick_vqs, kick_vqs); + platform_set_drvdata(pdev, tctx); + ++ tctx->check_wq = alloc_workqueue("trusty-check-wq", WQ_UNBOUND, 0); ++ if (!tctx->check_wq) { ++ ret = -ENODEV; ++ dev_err(&pdev->dev, "Failed create trusty-check-wq\n"); ++ goto err_create_check_wq; ++ } ++ ++ tctx->kick_wq = alloc_workqueue("trusty-kick-wq", ++ WQ_UNBOUND | WQ_CPU_INTENSIVE, 0); ++ if (!tctx->kick_wq) { ++ ret = -ENODEV; ++ dev_err(&pdev->dev, "Failed create trusty-kick-wq\n"); ++ goto err_create_kick_wq; ++ } ++ + ret = trusty_virtio_add_devices(tctx); + if (ret) { + dev_err(&pdev->dev, "Failed to add virtio devices\n"); +@@ -651,6 +668,10 @@ static int trusty_virtio_probe(struct platform_device *pdev) + return 0; + + err_add_devices: ++ destroy_workqueue(tctx->kick_wq); ++err_create_kick_wq: ++ destroy_workqueue(tctx->check_wq); ++err_create_check_wq: + kfree(tctx); + return ret; + } +@@ -670,6 +691,10 @@ static int trusty_virtio_remove(struct platform_device *pdev) + trusty_virtio_remove_devices(tctx); + cancel_work_sync(&tctx->kick_vqs); 
+ ++ /* destroy workqueues */ ++ destroy_workqueue(tctx->kick_wq); ++ destroy_workqueue(tctx->check_wq); ++ + /* notify remote that shared area goes away */ + trusty_virtio_stop(tctx, tctx->shared_va, tctx->shared_sz); + +-- +2.17.1 + diff --git a/patches/0023-ASoC-Intel-Skylake-Remove-window0-sst_addr-fields.audio b/patches/0023-ASoC-Intel-Skylake-Remove-window0-sst_addr-fields.audio new file mode 100644 index 0000000000..b8c3c31e54 --- /dev/null +++ b/patches/0023-ASoC-Intel-Skylake-Remove-window0-sst_addr-fields.audio @@ -0,0 +1,100 @@ +From e72e876078d8ab8907366ddb6e8c7ea377317f5c Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Thu, 15 Aug 2019 11:36:51 +0200 +Subject: [PATCH 023/193] ASoC: Intel: Skylake: Remove window0 sst_addr fields + +w0_stat_sz and w0_up_sz are Skylake-specific fields and should not be +part of common sst framework. The latter is also completely unused. +Remove both while declaring global FW register-area size, shared for all +SKL+ platforms. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/common/sst-dsp-priv.h | 2 -- + sound/soc/intel/skylake/bxt-sst.c | 2 -- + sound/soc/intel/skylake/cnl-sst.c | 2 -- + sound/soc/intel/skylake/skl-debug.c | 2 +- + sound/soc/intel/skylake/skl-sst-dsp.h | 1 + + sound/soc/intel/skylake/skl-sst.c | 2 -- + 6 files changed, 2 insertions(+), 9 deletions(-) + +diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h +index 0fe9bebcfb38..53dcd87bab44 100644 +--- a/sound/soc/intel/common/sst-dsp-priv.h ++++ b/sound/soc/intel/common/sst-dsp-priv.h +@@ -71,8 +71,6 @@ struct sst_addr { + u32 dsp_dram_offset; + u32 sram0_base; + u32 sram1_base; +- u32 w0_stat_sz; +- u32 w0_up_sz; + void __iomem *lpe; + void __iomem *shim; + void __iomem *pci_cfg; +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 250cf4612713..e6d2f548802a 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -565,8 +565,6 
@@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + sst->addr.shim = mmio_base; + sst->addr.sram0_base = BXT_ADSP_SRAM0_BASE; + sst->addr.sram1_base = BXT_ADSP_SRAM1_BASE; +- sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ; +- sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ; + + sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ), + SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ); +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 0718018cad8d..b1df8bc3da27 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -441,8 +441,6 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + sst->addr.shim = mmio_base; + sst->addr.sram0_base = CNL_ADSP_SRAM0_BASE; + sst->addr.sram1_base = CNL_ADSP_SRAM1_BASE; +- sst->addr.w0_stat_sz = CNL_ADSP_W0_STAT_SZ; +- sst->addr.w0_up_sz = CNL_ADSP_W0_UP_SZ; + + sst_dsp_mailbox_init(sst, (CNL_ADSP_SRAM0_BASE + CNL_ADSP_W0_STAT_SZ), + CNL_ADSP_W0_UP_SZ, CNL_ADSP_SRAM1_BASE, +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index 3466675f2678..c9c6d40f7d1d 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -173,7 +173,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf, + { + struct skl_debug *d = file->private_data; + struct sst_dsp *sst = d->skl->dsp; +- size_t w0_stat_sz = sst->addr.w0_stat_sz; ++ size_t w0_stat_sz = SKL_FW_REGS_SIZE; + void __iomem *in_base = sst->mailbox.in_base; + void __iomem *fw_reg_addr; + unsigned int offset; +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index eaf87dddbb17..9d1cb1a64411 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -63,6 +63,7 @@ struct skl_dev; + + #define SKL_ADSP_W1_SZ 0x1000 + ++#define SKL_FW_REGS_SIZE PAGE_SIZE + #define SKL_FW_STS_MASK 0xf + 
+ #define SKL_FW_INIT 0x1 +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 7faf627e2f56..26a4688ff938 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -535,8 +535,6 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + sst->addr.shim = mmio_base; + sst->addr.sram0_base = SKL_ADSP_SRAM0_BASE; + sst->addr.sram1_base = SKL_ADSP_SRAM1_BASE; +- sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ; +- sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ; + + sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ), + SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ); +-- +2.17.1 + diff --git a/patches/0023-SEP-Bug-fix-to-prevent-Vtune-crash-with-event.sep-socwatch b/patches/0023-SEP-Bug-fix-to-prevent-Vtune-crash-with-event.sep-socwatch new file mode 100644 index 0000000000..2e2c70150b --- /dev/null +++ b/patches/0023-SEP-Bug-fix-to-prevent-Vtune-crash-with-event.sep-socwatch @@ -0,0 +1,58 @@ +From 0aae3c150a89e68a5d5cdb582eb95baa4adc329a Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Fri, 15 Feb 2019 14:00:28 -0800 +Subject: [PATCH 23/27] SEP Bug fix to prevent Vtune crash with event + multiplexing + +SEP doesn't support Multiplexing of events for ACRN. +Vtune/SEP crashes when MUX events are provided, +Fixed the issue by exiting gracefully instead of crashing. 
+ +Tracked-on: PKT-1745 +Signed-off-by: Lim, Min Yeol +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/sepdk/include/lwpmudrv_struct.h | 5 ++++- + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 3 +++ + 2 files changed, 7 insertions(+), 1 deletion(-) + +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +index 629750152fdb..6afb2ac8839d 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h +@@ -1475,7 +1475,8 @@ struct DRV_SETUP_INFO_NODE_S { + U64 matrix_inaccessible : 1; + U64 page_table_isolation : 2; + U64 pebs_ignored_by_pti : 1; +- U64 reserved1 : 47; ++ U64 core_event_mux_unavailable : 1; ++ U64 reserved1 : 46; + } s1; + } u1; + U64 reserved2; +@@ -1495,6 +1496,8 @@ struct DRV_SETUP_INFO_NODE_S { + ((info)->u1.s1.page_table_isolation) + #define DRV_SETUP_INFO_pebs_ignored_by_pti(info) \ + ((info)->u1.s1.pebs_ignored_by_pti) ++#define DRV_SETUP_INFO_core_event_mux_unavailable(info) \ ++ ((info)->u1.s1.core_event_mux_unavailable) + + #define DRV_SETUP_INFO_PTI_DISABLED 0 + #define DRV_SETUP_INFO_PTI_KPTI 1 +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +index 742a1ea87cb5..8a1bab3a5453 100644 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -6182,6 +6182,9 @@ static OS_STATUS lwpmudrv_Get_Drv_Setup_Info(IOCTL_ARGS args) + DRV_SETUP_INFO_PTI_KPTI; + } + #endif ++#if defined(DRV_SEP_ACRN_ON) ++ DRV_SETUP_INFO_core_event_mux_unavailable(&req_drv_setup_info) = 1; ++#endif + + SEP_DRV_LOG_TRACE("DRV_SETUP_INFO nmi_mode %d.", + DRV_SETUP_INFO_nmi_mode(&req_drv_setup_info)); +-- +2.17.1 + diff --git a/patches/0023-Shared_buf-added-hypercall-for-shared_buf-setup.acrn b/patches/0023-Shared_buf-added-hypercall-for-shared_buf-setup.acrn new file mode 100644 index 0000000000..53bc710eb9 --- /dev/null +++ 
b/patches/0023-Shared_buf-added-hypercall-for-shared_buf-setup.acrn @@ -0,0 +1,111 @@ +From 2f718381fd4cb4823f82710d0c6a8ffa572826c1 Mon Sep 17 00:00:00 2001 +From: "Li, Fei1" +Date: Fri, 31 Aug 2018 10:58:57 +0800 +Subject: [PATCH 023/150] Shared_buf: added hypercall for shared_buf setup + +Change-Id: I24ad2f767c7d633ad41d787c7d1a052b0fb75fb4 +Tracked-On: https://rtc.intel.com/ccm0001001/resource/itemName/com.ibm.team.workitem.WorkItem/216912 +Signed-off-by: Li, Fei1 +--- + drivers/acrn/sbuf.c | 21 +++++++++++++++++++++ + drivers/acrn/sbuf.h | 1 + + drivers/vhm/vhm_hypercall.c | 5 +++++ + include/linux/vhm/acrn_hv_defs.h | 2 +- + include/linux/vhm/vhm_hypercall.h | 1 + + 5 files changed, 29 insertions(+), 1 deletion(-) + +diff --git a/drivers/acrn/sbuf.c b/drivers/acrn/sbuf.c +index dcf203222c5b..8849ce28a06c 100644 +--- a/drivers/acrn/sbuf.c ++++ b/drivers/acrn/sbuf.c +@@ -57,6 +57,8 @@ + + #include + #include ++#include ++#include + #include "sbuf.h" + + static inline bool sbuf_is_empty(shared_buf_t *sbuf) +@@ -164,6 +166,25 @@ int sbuf_get(shared_buf_t *sbuf, uint8_t *data) + } + EXPORT_SYMBOL(sbuf_get); + ++int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf) ++{ ++ struct sbuf_setup_param ssp; ++ ++ ssp.pcpu_id = pcpu_id; ++ ssp.sbuf_id = sbuf_id; ++ ++ if (!sbuf) { ++ ssp.gpa = 0; ++ } else { ++ BUG_ON(!virt_addr_valid(sbuf)); ++ ssp.gpa = virt_to_phys(sbuf); ++ } ++ pr_info("setup phys add = 0x%llx\n", ssp.gpa); ++ ++ return hcall_setup_sbuf(virt_to_phys(&ssp)); ++} ++EXPORT_SYMBOL(sbuf_share_setup); ++ + shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) + { +diff --git a/drivers/acrn/sbuf.h b/drivers/acrn/sbuf.h +index 7f3694920232..73608c35046c 100644 +--- a/drivers/acrn/sbuf.h ++++ b/drivers/acrn/sbuf.h +@@ -114,6 +114,7 @@ static inline void sbuf_add_flags(shared_buf_t *sbuf, uint64_t flags) + shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size); + void sbuf_free(shared_buf_t *sbuf); 
+ int sbuf_get(shared_buf_t *sbuf, uint8_t *data); ++int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf); + shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, uint64_t gpa); + + #endif /* SHARED_BUF_H */ +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index 741b8bd837cc..d0da22f2a88b 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -77,6 +77,11 @@ inline long hcall_destroy_vm(unsigned long vmid) + return acrn_hypercall1(HC_DESTROY_VM, vmid); + } + ++inline long hcall_setup_sbuf(unsigned long sbuf_head) ++{ ++ return acrn_hypercall1(HC_SETUP_SBUF, sbuf_head); ++} ++ + inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) + { + return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index bb57fb4f5cdd..688d69b6f5b0 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -104,7 +104,7 @@ + + /* DEBUG */ + #define HC_ID_DBG_BASE 0x60UL +-#define HC_SBUF_SETUP _HC_ID(HC_ID, HC_ID_DBG_BASE + 0x00) ++#define HC_SETUP_SBUF _HC_ID(HC_ID, HC_ID_DBG_BASE + 0x00) + + #define ACRN_DOM0_VMID (0UL) + #define ACRN_INVALID_VMID (-1) +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index f4a5793f3ef7..e56a16c5518f 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -143,6 +143,7 @@ inline long hcall_start_vm(unsigned long vmid); + inline long hcall_pause_vm(unsigned long vmid); + inline long hcall_destroy_vm(unsigned long vmid); + inline long hcall_query_vm_state(unsigned long vmid); ++inline long hcall_setup_sbuf(unsigned long sbuf_head); + inline long hcall_set_memmap(unsigned long vmid, + unsigned long memmap); + inline long hcall_set_ioreq_buffer(unsigned long vmid, +-- +2.17.1 + diff --git a/patches/0023-drm-i915-Expand-subslice-mask.drm 
b/patches/0023-drm-i915-Expand-subslice-mask.drm new file mode 100644 index 0000000000..084e7047db --- /dev/null +++ b/patches/0023-drm-i915-Expand-subslice-mask.drm @@ -0,0 +1,156 @@ +From 38d61eff509907d7eedc36025acbc211eb1b4076 Mon Sep 17 00:00:00 2001 +From: Stuart Summers +Date: Fri, 23 Aug 2019 09:03:07 -0700 +Subject: [PATCH 023/690] drm/i915: Expand subslice mask + +Currently, the subslice_mask runtime parameter is stored as an +array of subslices per slice. Expand the subslice mask array to +better match what is presented to userspace through the +I915_QUERY_TOPOLOGY_INFO ioctl. The index into this array is +then calculated: + slice * subslice stride + subslice index / 8 + +v2: Fix 32-bit build +v3: Use new helper function in SSEU workaround warning message +v4: Use GEM_BUG_ON to force developers to use valid SSEU configurations + per platform (Chris) + +Signed-off-by: Stuart Summers +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-12-stuart.summers@intel.com +--- + drivers/gpu/drm/i915/gt/intel_sseu.c | 16 +++++++++++++++- + drivers/gpu/drm/i915/gt/intel_sseu.h | 4 +++- + drivers/gpu/drm/i915/gt/intel_workarounds.c | 5 ++--- + drivers/gpu/drm/i915/i915_debugfs.c | 5 ++++- + drivers/gpu/drm/i915/intel_device_info.c | 8 ++++---- + 5 files changed, 28 insertions(+), 10 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c +index 1505042d7b5d..74f793423231 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.c ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.c +@@ -32,6 +32,20 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) + return total; + } + ++u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice) ++{ ++ int i, offset = slice * sseu->ss_stride; ++ u32 mask = 0; ++ ++ GEM_BUG_ON(slice >= sseu->max_slices); ++ ++ for (i = 0; i < sseu->ss_stride; i++) ++ mask |= (u32)sseu->subslice_mask[offset + i] << ++ i 
* BITS_PER_BYTE; ++ ++ return mask; ++} ++ + void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u32 ss_mask) + { +@@ -43,7 +57,7 @@ void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + unsigned int + intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) + { +- return hweight8(sseu->subslice_mask[slice]); ++ return hweight32(intel_sseu_get_subslices(sseu, slice)); + } + + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, +diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h +index 7703d75f2da3..4070f6ff1db6 100644 +--- a/drivers/gpu/drm/i915/gt/intel_sseu.h ++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h +@@ -23,7 +23,7 @@ struct drm_i915_private; + + struct sseu_dev_info { + u8 slice_mask; +- u8 subslice_mask[GEN_MAX_SLICES]; ++ u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; + u16 eu_total; + u8 eu_per_subslice; + u8 min_eu_in_pool; +@@ -94,6 +94,8 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu); + unsigned int + intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); + ++u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice); ++ + void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u32 ss_mask); + +diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c +index 5f6ec2fd29a0..d1b68c868ecd 100644 +--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c ++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c +@@ -796,11 +796,10 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) + } + + slice = fls(sseu->slice_mask) - 1; +- GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); +- subslice = fls(l3_en & sseu->subslice_mask[slice]); ++ subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice)); + if (!subslice) { + DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n", +- sseu->subslice_mask[slice], l3_en); 
++ intel_sseu_get_subslices(sseu, slice), l3_en); + subslice = fls(l3_en); + WARN_ON(!subslice); + } +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index 6e8b40299939..8c1d70425424 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -3864,13 +3864,16 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, + + for (ss = 0; ss < info->sseu.max_subslices; ss++) { + unsigned int eu_cnt; ++ u8 ss_idx = s * info->sseu.ss_stride + ++ ss / BITS_PER_BYTE; + + if (IS_GEN9_LP(dev_priv)) { + if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) + /* skip disabled subslice */ + continue; + +- sseu->subslice_mask[s] |= BIT(ss); ++ sseu->subslice_mask[ss_idx] |= ++ BIT(ss % BITS_PER_BYTE); + } + + eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & +diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c +index c20f74ee5f22..d9b5baaef5d0 100644 +--- a/drivers/gpu/drm/i915/intel_device_info.c ++++ b/drivers/gpu/drm/i915/intel_device_info.c +@@ -93,9 +93,9 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) + hweight8(sseu->slice_mask), sseu->slice_mask); + drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu)); + for (s = 0; s < sseu->max_slices; s++) { +- drm_printf(p, "slice%d: %u subslices, mask=%04x\n", ++ drm_printf(p, "slice%d: %u subslices, mask=%08x\n", + s, intel_sseu_subslices_per_slice(sseu, s), +- sseu->subslice_mask[s]); ++ intel_sseu_get_subslices(sseu, s)); + } + drm_printf(p, "EU total: %u\n", sseu->eu_total); + drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); +@@ -159,9 +159,9 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, + } + + for (s = 0; s < sseu->max_slices; s++) { +- drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n", ++ drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n", + s, intel_sseu_subslices_per_slice(sseu, s), +- sseu->subslice_mask[s]); 
++ intel_sseu_get_subslices(sseu, s)); + + for (ss = 0; ss < sseu->max_subslices; ss++) { + u16 enabled_eus = sseu_get_eus(sseu, s, ss); +-- +2.17.1 + diff --git a/patches/0023-mei-spd-connect-to-the-rpmb-subsystem.security b/patches/0023-mei-spd-connect-to-the-rpmb-subsystem.security new file mode 100644 index 0000000000..ea496b59a9 --- /dev/null +++ b/patches/0023-mei-spd-connect-to-the-rpmb-subsystem.security @@ -0,0 +1,421 @@ +From 2dddee999f8a01b137402c559d7ed0555ff2f6b2 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Mon, 9 Feb 2015 17:13:20 +0200 +Subject: [PATCH 23/65] mei: spd: connect to the rpmb subsystem + +Connect SPD to RPMB subsystem and implement RPMB storage commands. + +V9: add SPDX identifiers. + +Change-Id: I21c9f4526ae5906779b03a488c289c037a18d6e2 +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/spd/Kconfig | 2 +- + drivers/misc/mei/spd/Makefile | 1 + + drivers/misc/mei/spd/cmd.c | 67 +++++++++++- + drivers/misc/mei/spd/main.c | 2 + + drivers/misc/mei/spd/rpmb.c | 194 ++++++++++++++++++++++++++++++++++ + drivers/misc/mei/spd/spd.h | 11 ++ + 6 files changed, 273 insertions(+), 4 deletions(-) + create mode 100644 drivers/misc/mei/spd/rpmb.c + +diff --git a/drivers/misc/mei/spd/Kconfig b/drivers/misc/mei/spd/Kconfig +index 17aa461033c8..e38d8bb53ff3 100644 +--- a/drivers/misc/mei/spd/Kconfig ++++ b/drivers/misc/mei/spd/Kconfig +@@ -4,7 +4,7 @@ + # + config INTEL_MEI_SPD + tristate "Intel MEI Host Storage Proxy Driver" +- depends on INTEL_MEI && BLOCK ++ depends on INTEL_MEI && BLOCK && RPMB + help + A driver for the host storage proxy ME client + The driver enables ME FW to store data on a storage devices +diff --git a/drivers/misc/mei/spd/Makefile b/drivers/misc/mei/spd/Makefile +index 353d284eaf01..1d156b6b9e2e 100644 +--- a/drivers/misc/mei/spd/Makefile ++++ b/drivers/misc/mei/spd/Makefile +@@ -7,6 +7,7 @@ obj-$(CONFIG_INTEL_MEI_SPD) += mei_spd.o + mei_spd-objs := main.o + mei_spd-objs += cmd.o + 
mei_spd-objs += gpp.o ++mei_spd-objs += rpmb.o + mei_spd-$(CONFIG_DEBUG_FS) += debugfs.o + + ccflags-y += -D__CHECK_ENDIAN__ +diff --git a/drivers/misc/mei/spd/cmd.c b/drivers/misc/mei/spd/cmd.c +index 931d99f99c7e..ea26204f42c0 100644 +--- a/drivers/misc/mei/spd/cmd.c ++++ b/drivers/misc/mei/spd/cmd.c +@@ -12,6 +12,9 @@ + #define spd_cmd_size(_cmd) \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_##_cmd)) ++#define spd_cmd_rpmb_size(_cmd) \ ++ (spd_cmd_size(_cmd) + SPD_CLIENT_RPMB_DATA_MAX_SIZE) ++ + #define to_spd_hdr(_buf) (struct spd_cmd_hdr *)(_buf) + #define to_spd_cmd(_cmd, _buf) \ + (struct spd_cmd_##_cmd *)((_buf) + sizeof(struct spd_cmd_hdr)) +@@ -229,7 +232,7 @@ int mei_spd_cmd_storage_status_req(struct mei_spd *spd) + + req = to_spd_cmd(storage_status_req, spd->buf); + req->gpp_on = mei_spd_gpp_is_open(spd); +- req->rpmb_on = 0; ++ req->rpmb_on = mei_spd_rpmb_is_open(spd); + + ret = mei_cldev_send(spd->cldev, spd->buf, req_len); + if (ret != req_len) { +@@ -306,6 +309,60 @@ static int mei_spd_cmd_gpp_read(struct mei_spd *spd, struct spd_cmd *cmd, + return SPD_STATUS_SUCCESS; + } + ++static int mei_spd_cmd_rpmb_read(struct mei_spd *spd, ++ struct spd_cmd *cmd, ++ ssize_t out_buf_sz) ++{ ++ u8 *frame = cmd->rpmb_read.rpmb_frame; ++ ++ if (out_buf_sz != spd_cmd_rpmb_size(rpmb_read)) { ++ spd_err(spd, "Wrong request size\n"); ++ return SPD_STATUS_INVALID_COMMAND; ++ } ++ ++ if (mei_spd_rpmb_cmd_req(spd, RPMB_READ_DATA, frame)) ++ return SPD_STATUS_GENERAL_FAILURE; ++ ++ spd_dbg(spd, "read RPMB frame performed\n"); ++ return SPD_STATUS_SUCCESS; ++} ++ ++static int mei_spd_cmd_rpmb_write(struct mei_spd *spd, ++ struct spd_cmd *cmd, ++ ssize_t out_buf_sz) ++{ ++ u8 *frame = cmd->rpmb_write.rpmb_frame; ++ ++ if (out_buf_sz != spd_cmd_rpmb_size(rpmb_write)) { ++ spd_err(spd, "Wrong request size\n"); ++ return SPD_STATUS_INVALID_COMMAND; ++ } ++ ++ if (mei_spd_rpmb_cmd_req(spd, RPMB_WRITE_DATA, frame)) ++ return SPD_STATUS_GENERAL_FAILURE; ++ ++ 
spd_dbg(spd, "write RPMB frame performed\n"); ++ return SPD_STATUS_SUCCESS; ++} ++ ++static int mei_spd_cmd_rpmb_get_counter(struct mei_spd *spd, ++ struct spd_cmd *cmd, ++ ssize_t out_buf_sz) ++{ ++ u8 *frame = cmd->rpmb_get_counter.rpmb_frame; ++ ++ if (out_buf_sz != spd_cmd_rpmb_size(rpmb_get_counter)) { ++ spd_err(spd, "Wrong request size\n"); ++ return SPD_STATUS_INVALID_COMMAND; ++ } ++ ++ if (mei_spd_rpmb_cmd_req(spd, RPMB_WRITE_DATA, frame)) ++ return SPD_STATUS_GENERAL_FAILURE; ++ ++ spd_dbg(spd, "get RPMB counter performed\n"); ++ return SPD_STATUS_SUCCESS; ++} ++ + static int mei_spd_cmd_response(struct mei_spd *spd, ssize_t out_buf_sz) + { + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; +@@ -324,6 +381,7 @@ static int mei_spd_cmd_response(struct mei_spd *spd, ssize_t out_buf_sz) + if (ret) + break; + mutex_unlock(&spd->lock); ++ mei_spd_rpmb_init(spd); + mei_spd_gpp_init(spd); + mutex_lock(&spd->lock); + break; +@@ -365,10 +423,13 @@ static int mei_spd_cmd_request(struct mei_spd *spd, ssize_t out_buf_sz) + + switch (spd_cmd) { + case SPD_RPMB_WRITE_CMD: ++ ret = mei_spd_cmd_rpmb_write(spd, cmd, out_buf_sz); ++ break; + case SPD_RPMB_READ_CMD: ++ ret = mei_spd_cmd_rpmb_read(spd, cmd, out_buf_sz); ++ break; + case SPD_RPMB_GET_COUNTER_CMD: +- spd_err(spd, "Command %d is not supported\n", spd_cmd); +- ret = SPD_STATUS_NOT_SUPPORTED; ++ ret = mei_spd_cmd_rpmb_get_counter(spd, cmd, out_buf_sz); + break; + case SPD_GPP_WRITE_CMD: + ret = mei_spd_cmd_gpp_write(spd, cmd, out_buf_sz); +diff --git a/drivers/misc/mei/spd/main.c b/drivers/misc/mei/spd/main.c +index ff7ee23df494..6b379171a136 100644 +--- a/drivers/misc/mei/spd/main.c ++++ b/drivers/misc/mei/spd/main.c +@@ -53,6 +53,7 @@ static int mei_spd_probe(struct mei_cl_device *cldev, + + spd_dbg(spd, "protocol version %d\n", ver); + mei_spd_gpp_prepare(spd); ++ mei_spd_rpmb_prepare(spd); + mutex_lock(&spd->lock); + ret = mei_spd_cmd_init_req(spd); + mutex_unlock(&spd->lock); +@@ -80,6 +81,7 @@ static int 
mei_spd_remove(struct mei_cl_device *cldev) + if (spd->state == MEI_SPD_STATE_RUNNING) { + spd->state = MEI_SPD_STATE_STOPPING; + mei_spd_gpp_exit(spd); ++ mei_spd_rpmb_exit(spd); + mutex_lock(&spd->lock); + mei_spd_cmd_storage_status_req(spd); + mutex_unlock(&spd->lock); +diff --git a/drivers/misc/mei/spd/rpmb.c b/drivers/misc/mei/spd/rpmb.c +new file mode 100644 +index 000000000000..ae5d5ac517ad +--- /dev/null ++++ b/drivers/misc/mei/spd/rpmb.c +@@ -0,0 +1,194 @@ ++// SPDX-License-Identifier: OR GPL-2.0 ++/* ++ * Copyright (c) 2015 - 2019, Intel Corporation. ++ */ ++ ++#include "cmd.h" ++#include "spd.h" ++#include ++ ++static int mei_spd_rpmb_start(struct mei_spd *spd, struct rpmb_dev *rdev) ++{ ++ if (spd->rdev == rdev) ++ return 0; ++ ++ if (spd->rdev) { ++ spd_warn(spd, "rpmb device already registered\n"); ++ return -EEXIST; ++ } ++ ++ spd->rdev = rpmb_dev_get(rdev); ++ spd_dbg(spd, "rpmb partition created\n"); ++ return 0; ++} ++ ++static int mei_spd_rpmb_stop(struct mei_spd *spd, struct rpmb_dev *rdev) ++{ ++ if (!spd->rdev) { ++ spd_dbg(spd, "Already stopped\n"); ++ return -EPROTO; ++ } ++ ++ if (rdev && spd->rdev != rdev) { ++ spd_dbg(spd, "Wrong RPMB on stop\n"); ++ return -EINVAL; ++ } ++ ++ rpmb_dev_put(spd->rdev); ++ spd->rdev = NULL; ++ ++ spd_dbg(spd, "rpmb partition removed\n"); ++ return 0; ++} ++ ++static int mei_spd_rpmb_match(struct mei_spd *spd, struct rpmb_dev *rdev) ++{ ++ if (spd->dev_id_sz && rdev->ops->dev_id) { ++ if (rdev->ops->dev_id_len != spd->dev_id_sz || ++ memcmp(rdev->ops->dev_id, spd->dev_id, ++ rdev->ops->dev_id_len)) { ++ spd_dbg(spd, "ignore request for another rpmb\n"); ++ /* return 0; FW sends garbage now, ignore it */ ++ } ++ } ++ ++ switch (rdev->ops->type) { ++ case RPMB_TYPE_EMMC: ++ if (spd->dev_type != SPD_TYPE_EMMC) ++ return 0; ++ break; ++ case RPMB_TYPE_UFS: ++ if (spd->dev_type != SPD_TYPE_UFS) ++ return 0; ++ break; ++ default: ++ return 0; ++ } ++ ++ return 1; ++} ++ ++static int rpmb_add_device(struct device 
*dev, struct class_interface *intf) ++{ ++ struct mei_spd *spd = ++ container_of(intf, struct mei_spd, rpmb_interface); ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ ++ if (!mei_spd_rpmb_match(spd, rdev)) ++ return 0; ++ ++ mutex_lock(&spd->lock); ++ if (mei_spd_rpmb_start(spd, rdev)) { ++ mutex_unlock(&spd->lock); ++ return 0; ++ } ++ ++ schedule_work(&spd->status_send_w); ++ mutex_unlock(&spd->lock); ++ ++ return 0; ++} ++ ++static void rpmb_remove_device(struct device *dev, struct class_interface *intf) ++{ ++ struct mei_spd *spd = ++ container_of(intf, struct mei_spd, rpmb_interface); ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ ++ if (!mei_spd_rpmb_match(spd, rdev)) ++ return; ++ ++ mutex_lock(&spd->lock); ++ if (mei_spd_rpmb_stop(spd, rdev)) { ++ mutex_unlock(&spd->lock); ++ return; ++ } ++ ++ if (spd->state != MEI_SPD_STATE_STOPPING) ++ schedule_work(&spd->status_send_w); ++ mutex_unlock(&spd->lock); ++} ++ ++void mei_spd_rpmb_prepare(struct mei_spd *spd) ++{ ++ spd->rpmb_interface.add_dev = rpmb_add_device; ++ spd->rpmb_interface.remove_dev = rpmb_remove_device; ++ spd->rpmb_interface.class = &rpmb_class; ++} ++ ++/** ++ * mei_spd_rpmb_init() - init RPMB connection ++ * @spd: device ++ * Locking: spd->lock should not be held ++ * Returns: 0 if initialized successfully, <0 otherwise ++ */ ++int mei_spd_rpmb_init(struct mei_spd *spd) ++{ ++ int ret; ++ ++ ret = class_interface_register(&spd->rpmb_interface); ++ if (ret) ++ spd_err(spd, "Can't register interface\n"); ++ return ret; ++} ++ ++/** ++ * mei_spd_rpmb_exit() - clean RPMB connection ++ * @spd: device ++ * Locking: spd->lock should not be held ++ */ ++void mei_spd_rpmb_exit(struct mei_spd *spd) ++{ ++ class_interface_unregister(&spd->rpmb_interface); ++} ++ ++int mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req, void *buf) ++{ ++ struct rpmb_cmd cmd[3]; ++ struct rpmb_frame_jdec *frame_res = NULL; ++ u32 flags; ++ unsigned int i; ++ int ret; ++ ++ if (!spd->rdev) { ++ spd_err(spd, "RPMB not 
ready\n"); ++ return -ENODEV; ++ } ++ ++ i = 0; ++ flags = RPMB_F_WRITE; ++ if (req == RPMB_WRITE_DATA || req == RPMB_PROGRAM_KEY) ++ flags |= RPMB_F_REL_WRITE; ++ cmd[i].flags = flags; ++ cmd[i].nframes = 1; ++ cmd[i].frames = buf; ++ i++; ++ ++ if (req == RPMB_WRITE_DATA || req == RPMB_PROGRAM_KEY) { ++ frame_res = kzalloc(sizeof(*frame_res), GFP_KERNEL); ++ if (!frame_res) ++ return -ENOMEM; ++ frame_res->req_resp = cpu_to_be16(RPMB_RESULT_READ); ++ cmd[i].flags = RPMB_F_WRITE; ++ cmd[i].nframes = 1; ++ cmd[i].frames = frame_res; ++ i++; ++ } ++ ++ cmd[i].flags = 0; ++ cmd[i].nframes = 1; ++ cmd[i].frames = buf; ++ i++; ++ ++ ret = rpmb_cmd_seq(spd->rdev, cmd, i); ++ if (ret) ++ spd_err(spd, "RPMB req failed ret = %d\n", ret); ++ ++ kfree(frame_res); ++ return ret; ++} ++ ++bool mei_spd_rpmb_is_open(struct mei_spd *spd) ++{ ++ return !!spd->rdev; ++} +diff --git a/drivers/misc/mei/spd/spd.h b/drivers/misc/mei/spd/spd.h +index c6d4a3359b3c..4700a27b2f28 100644 +--- a/drivers/misc/mei/spd/spd.h ++++ b/drivers/misc/mei/spd/spd.h +@@ -7,6 +7,7 @@ + + #include + #include ++#include + + enum mei_spd_state { + MEI_SPD_STATE_INIT, +@@ -26,6 +27,8 @@ enum mei_spd_state { + * @dev_type: storage device type + * @dev_id_sz: device id size + * @dev_id: device id string ++ * @rdev: RPMB device ++ * @rpmb_interface: gpp class interface for discovery + * @lock: mutex to sync request processing + * @state: driver state + * @status_send_w: workitem for sending status to the FW +@@ -41,6 +44,8 @@ struct mei_spd { + u32 dev_type; + u32 dev_id_sz; + u8 *dev_id; ++ struct rpmb_dev *rdev; ++ struct class_interface rpmb_interface; + struct mutex lock; /* mutex to sync request processing */ + enum mei_spd_state state; + struct work_struct status_send_w; +@@ -66,6 +71,12 @@ void mei_spd_gpp_exit(struct mei_spd *spd); + int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size); + int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size); + ++void 
mei_spd_rpmb_prepare(struct mei_spd *spd); ++bool mei_spd_rpmb_is_open(struct mei_spd *spd); ++int mei_spd_rpmb_init(struct mei_spd *spd); ++void mei_spd_rpmb_exit(struct mei_spd *spd); ++int mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req_type, void *buf); ++ + #if IS_ENABLED(CONFIG_DEBUG_FS) + int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name); + void mei_spd_dbgfs_deregister(struct mei_spd *spd); +-- +2.17.1 + diff --git a/patches/0023-net-stmmac-add-support-for-msi-pci-for-stmmac.connectivity b/patches/0023-net-stmmac-add-support-for-msi-pci-for-stmmac.connectivity new file mode 100644 index 0000000000..3bb47c3e36 --- /dev/null +++ b/patches/0023-net-stmmac-add-support-for-msi-pci-for-stmmac.connectivity @@ -0,0 +1,207 @@ +From b0e813745158f21f50ecb61cb59e02b37560410c Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Sat, 27 Jul 2019 06:23:43 +0800 +Subject: [PATCH 023/108] net: stmmac: add support for msi pci for stmmac_pci + +Introduce the capability to 1st try to setup multi-vector MSI. If fail, +try to setup single IRQ. To ensure MSI vector to IRQ is correctly mapped, +introduce a list of msi_xxx_vec entries in plat_stmmacenet_data so that +SoC specific platform data can define its own MSI vector to irq mapping. + +As an example, the mapping for EHL & TGL is implemented. + +The initial version of this patch was co-authored-by Wong, Vincent Por Yin. 
+ +Signed-off-by: Wong, Vincent Por Yin +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 3 + + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 118 +++++++++++++++++- + include/linux/stmmac.h | 8 ++ + 3 files changed, 124 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index a65a5bf38359..a2ad992be4e2 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -240,6 +240,9 @@ struct stmmac_safety_stats { + #define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ + #define DEFAULT_DMA_PBL 8 + ++/* MSI defines */ ++#define STMMAC_MSI_VEC_MAX 32 ++ + /* PCS status and mask defines */ + #define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */ + #define PCS_LINK_IRQ BIT(1) /* PCS Link */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 02466a1b657d..c34514bfbf2d 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -244,6 +244,15 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + plat->intel_adhoc_addr = 0x15; + } + ++ /* Setup MSI vector offset specific to Intel mGbE controller */ ++ plat->msi_phy_conv_vec = 30; ++ plat->msi_mac_vec = 29; ++ plat->msi_lpi_vec = 28; ++ plat->msi_sfty_ce_vec = 27; ++ plat->msi_sfty_ue_vec = 26; ++ plat->msi_rx_base_vec = 0; ++ plat->msi_tx_base_vec = 1; ++ + return 0; + } + +@@ -553,6 +562,85 @@ static const struct stmmac_pci_info snps_gmac5_pci_info = { + .setup = snps_gmac5_default_data, + }; + ++static int stmmac_config_single_msi(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat, ++ struct stmmac_resources *res) ++{ ++ int ret; ++ ++ ret = pci_alloc_irq_vectors(pdev, 1, 1, ++ PCI_IRQ_LEGACY | PCI_IRQ_MSI); ++ if (ret < 0) { ++ dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n", ++ 
__func__); ++ return ret; ++ } ++ ++ res->irq = pci_irq_vector(pdev, 0); ++ res->wol_irq = res->irq; ++ res->wol_irq = res->irq; ++ res->phy_conv_irq = res->irq; ++ plat->multi_msi_en = 0; ++ dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n", ++ __func__); ++ ++ return 0; ++} ++ ++static int stmmac_config_multi_msi(struct pci_dev *pdev, ++ struct plat_stmmacenet_data *plat, ++ struct stmmac_resources *res) ++{ ++ int ret; ++ int i; ++ ++ ret = pci_alloc_irq_vectors(pdev, 1, STMMAC_MSI_VEC_MAX, ++ PCI_IRQ_MSI); ++ if (ret < 0) { ++ dev_info(&pdev->dev, "%s: multi MSI enablement failed\n", ++ __func__); ++ return ret; ++ } ++ ++ if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX || ++ plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) { ++ dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n", ++ __func__); ++ return -1; ++ } ++ ++ /* For RX MSI */ ++ for (i = 0; i < plat->rx_queues_to_use; i++) { ++ res->rx_irq[i] = pci_irq_vector(pdev, ++ plat->msi_rx_base_vec + i * 2); ++ } ++ ++ /* For TX MSI */ ++ for (i = 0; i < plat->tx_queues_to_use; i++) { ++ res->tx_irq[i] = pci_irq_vector(pdev, ++ plat->msi_tx_base_vec + i * 2); ++ } ++ ++ if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX) ++ res->irq = pci_irq_vector(pdev, plat->msi_mac_vec); ++ if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX) ++ res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec); ++ if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX) ++ res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec); ++ if (plat->msi_phy_conv_vec < STMMAC_MSI_VEC_MAX) ++ res->phy_conv_irq = pci_irq_vector(pdev, ++ plat->msi_phy_conv_vec); ++ if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX) ++ res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec); ++ if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX) ++ res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec); ++ ++ plat->multi_msi_en = 1; ++ dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__); ++ ++ return 0; ++} ++ + /** + * stmmac_pci_probe + * +@@ 
-609,18 +697,38 @@ static int stmmac_pci_probe(struct pci_dev *pdev, + + pci_set_master(pdev); + ++ /* Initialize all MSI vectors to invalid so that it can be set ++ * according to platform data settings below. ++ * Note: MSI vector takes value from 0 upto 31 (STMMAC_MSI_VEC_MAX) ++ */ ++ plat->msi_mac_vec = STMMAC_MSI_VEC_MAX; ++ plat->msi_wol_vec = STMMAC_MSI_VEC_MAX; ++ plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX; ++ plat->msi_phy_conv_vec = STMMAC_MSI_VEC_MAX; ++ plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX; ++ plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX; ++ plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX; ++ plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX; ++ + ret = info->setup(pdev, plat); + if (ret) + return ret; + +- pci_enable_msi(pdev); +- + memset(&res, 0, sizeof(res)); + res.addr = pcim_iomap_table(pdev)[i]; +- res.wol_irq = pdev->irq; +- res.irq = pdev->irq; +- res.phy_conv_irq = res.irq; + ++ ret = stmmac_config_multi_msi(pdev, plat, &res); ++ if (!ret) ++ goto msi_done; ++ ++ ret = stmmac_config_single_msi(pdev, plat, &res); ++ if (!ret) { ++ dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n", ++ __func__); ++ return ret; ++ } ++ ++msi_done: + return stmmac_dvr_probe(&pdev->dev, plat, &res); + } + +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index eedbf48e763e..735116a0af02 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -187,5 +187,13 @@ struct plat_stmmacenet_data { + bool en_tx_lpi_clockgating; + int has_xgmac; + bool multi_msi_en; ++ int msi_mac_vec; ++ int msi_wol_vec; ++ int msi_lpi_vec; ++ int msi_phy_conv_vec; ++ int msi_sfty_ce_vec; ++ int msi_sfty_ue_vec; ++ int msi_rx_base_vec; ++ int msi_tx_base_vec; + }; + #endif +-- +2.17.1 + diff --git a/patches/0023-serial-Preliminary-support-for-9-bit-transfer-mode-wi.lpss b/patches/0023-serial-Preliminary-support-for-9-bit-transfer-mode-wi.lpss new file mode 100644 index 0000000000..ff50451325 --- /dev/null +++ 
b/patches/0023-serial-Preliminary-support-for-9-bit-transfer-mode-wi.lpss @@ -0,0 +1,53 @@ +From db5c9167e205bc6520b5c756e1919ab001ec3bf9 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Fri, 6 Sep 2019 13:42:45 +0300 +Subject: [PATCH 23/40] serial: Preliminary support for 9-bit transfer mode + with RS-485 + +Interim. This approach ties 9-bit transfer support to only +RS-485. That is definitely not ideal, so this can't be the +final solution. We should consider a dedicated IOCTL for +9-bit transfers only. Use this only as a "Proof-of-concept". + +This adds a flag to the struct serial_rs485 that can be used +for enabling 9-bit transfer mode (9-bit framing), a second +flag that the user can tell the driver the next transmit +address, and a third flag for receive address. The address is +expected to be in the first "padding" member of the data +structure. + +When the user supplies the address transmit, it is expected +to be send out with the 9th bit set to 1 immediately. After +that ioctl, the following transfers will therefore go to the +device with the adders. + +After the user has supplied the receive address, the driver +is expected to handle the matching of the address, and +forwarding only data with that address to the tty interface. 
+ +Signed-off-by: Heikki Krogerus +Signed-off-by: Andy Shevchenko +--- + include/uapi/linux/serial.h | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h +index 93eb3c496ff1..8370e97d8e6e 100644 +--- a/include/uapi/linux/serial.h ++++ b/include/uapi/linux/serial.h +@@ -126,6 +126,12 @@ struct serial_rs485 { + #define SER_RS485_TERMINATE_BUS (1 << 5) /* Enable bus + termination + (if supported) */ ++#define SER_RS485_9BIT_ENABLED (1 << 6) /* Enable 9-Bit transfer ++ mode */ ++#define SER_RS485_9BIT_TX_ADDR (1 << 7) /* TX address available ++ in padding */ ++#define SER_RS485_9BIT_RX_ADDR (1 << 8) /* RX address available ++ in padding */ + __u32 delay_rts_before_send; /* Delay before send (milliseconds) */ + __u32 delay_rts_after_send; /* Delay after send (milliseconds) */ + __u32 padding[5]; /* Memory is cheap, new structs +-- +2.17.1 + diff --git a/patches/0023-trusty-print-out-Built-in-kernel-directly.trusty b/patches/0023-trusty-print-out-Built-in-kernel-directly.trusty new file mode 100644 index 0000000000..749092eb18 --- /dev/null +++ b/patches/0023-trusty-print-out-Built-in-kernel-directly.trusty @@ -0,0 +1,32 @@ +From 5cb370382491acb497d3f01cb18cab101e986d72 Mon Sep 17 00:00:00 2001 +From: "Yan, Shaoou" +Date: Thu, 8 Dec 2016 04:58:55 +0000 +Subject: [PATCH 23/63] trusty: print out "Built: " in kernel directly. 
+ +do this instead of get them from trusty which can save 28 +times vmexit/vmresume switch, so we can reduce some boot time + +Change-Id: I196d506f606a77c1abe9a87d4d48dc18e40ca6bc +Tracked-On: OAM-40750 +Signed-off-by: Feng, Wang +Reviewed-by: Ilkka Koskinen +--- + drivers/trusty/trusty.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 8daf817634d8..cfef965402c4 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -401,7 +401,7 @@ static void trusty_init_version(struct trusty_state *s, struct device *dev) + } + s->version_str[i] = '\0'; + +- dev_info(dev, "trusty version: %s\n", s->version_str); ++ dev_info(dev, "trusty version: Built: %s\n", s->version_str); + + ret = device_create_file(dev, &dev_attr_trusty_version); + if (ret) +-- +2.17.1 + diff --git a/patches/0024-ACRNTrace-add-acrn-trace-module.acrn b/patches/0024-ACRNTrace-add-acrn-trace-module.acrn new file mode 100644 index 0000000000..35437a6080 --- /dev/null +++ b/patches/0024-ACRNTrace-add-acrn-trace-module.acrn @@ -0,0 +1,345 @@ +From adaa57b39f39bbec55cfc1049f2238a2e360598d Mon Sep 17 00:00:00 2001 +From: "Li, Fei1" +Date: Fri, 31 Aug 2018 10:58:57 +0800 +Subject: [PATCH 024/150] ACRNTrace: add acrn trace module + +Change-Id: I9bf3a0a13e411e15063eb50905875e86e5731d1b +Tracked-On: https://rtc.intel.com/ccm0001001/resource/itemName/com.ibm.team.workitem.WorkItem/216912 +Signed-off-by: Li, Fei1 +--- + drivers/acrn/Kconfig | 8 + + drivers/acrn/Makefile | 1 + + drivers/acrn/acrn_trace.c | 297 ++++++++++++++++++++++++++++++++++++++ + 3 files changed, 306 insertions(+) + create mode 100644 drivers/acrn/acrn_trace.c + +diff --git a/drivers/acrn/Kconfig b/drivers/acrn/Kconfig +index f25f0ae77727..08b24a168167 100644 +--- a/drivers/acrn/Kconfig ++++ b/drivers/acrn/Kconfig +@@ -3,3 +3,11 @@ config ACRN_SHARED_BUFFER + ---help--- + Ring buffer shared between ACRN Hypervisor and its SOS. 
+ Help ACRN performance profiling. ++ ++config ACRN_TRACE ++ tristate "Intel ACRN Hypervisor Trace support" ++ select ACRN_SHARED_BUFFER ++ ---help--- ++ This is the Trace driver for the Intel ACRN hypervisor. ++ You can say y to build it into the kernel, or m to build ++ it as a module. +diff --git a/drivers/acrn/Makefile b/drivers/acrn/Makefile +index bc475f8116e3..5430f4fa06fd 100644 +--- a/drivers/acrn/Makefile ++++ b/drivers/acrn/Makefile +@@ -1 +1,2 @@ + obj-$(CONFIG_ACRN_SHARED_BUFFER) += sbuf.o ++obj-$(CONFIG_ACRN_TRACE) += acrn_trace.o +\ No newline at end of file +diff --git a/drivers/acrn/acrn_trace.c b/drivers/acrn/acrn_trace.c +new file mode 100644 +index 000000000000..31470a3de6ac +--- /dev/null ++++ b/drivers/acrn/acrn_trace.c +@@ -0,0 +1,297 @@ ++/* ++* ++* ACRN Trace module ++* ++* This file is provided under a dual BSD/GPLv2 license.  When using or ++* redistributing this file, you may do so under either license. ++* ++* GPL LICENSE SUMMARY ++* ++* Copyright (c) 2017 Intel Corporation. All rights reserved. ++* ++* This program is free software; you can redistribute it and/or modify ++* it under the terms of version 2 of the GNU General Public License as ++* published by the Free Software Foundation. ++* ++* This program is distributed in the hope that it will be useful, but ++* WITHOUT ANY WARRANTY; without even the implied warranty of ++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU ++* General Public License for more details. ++* ++* Contact Information: Yan, Like ++* ++* BSD LICENSE ++* ++* Copyright (c) 2017 Intel Corporation. All rights reserved. ++* ++* Redistribution and use in source and binary forms, with or without ++* modification, are permitted provided that the following conditions ++* are met: ++* ++*   * Redistributions of source code must retain the above copyright ++*     notice, this list of conditions and the following disclaimer. 
++*   * Redistributions in binary form must reproduce the above copyright ++*     notice, this list of conditions and the following disclaimer in ++*     the documentation and/or other materials provided with the ++*     distribution. ++*   * Neither the name of Intel Corporation nor the names of its ++*     contributors may be used to endorse or promote products derived ++*     from this software without specific prior written permission. ++* ++* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++* ++* Like Yan ++* ++*/ ++ ++#define pr_fmt(fmt) "ACRNTrace: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "sbuf.h" ++ ++ ++#define TRACE_SBUF_SIZE (4 * 1024 * 1024) ++#define TRACE_ELEMENT_SIZE 32 /* byte */ ++#define TRACE_ELEMENT_NUM ((TRACE_SBUF_SIZE - SBUF_HEAD_SIZE) / \ ++ TRACE_ELEMENT_SIZE) ++ ++#define foreach_cpu(cpu, cpu_num) \ ++ for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) ++ ++#define MAX_NR_CPUS 4 ++/* actual physical cpu number, initialized by module init */ ++static int pcpu_num; ++ ++static int nr_cpus = MAX_NR_CPUS; ++module_param(nr_cpus, int, S_IRUSR | S_IWUSR); ++ ++static atomic_t open_cnt[MAX_NR_CPUS]; ++static shared_buf_t *sbuf_per_cpu[MAX_NR_CPUS]; ++ ++static inline int get_id_from_devname(struct file *filep) ++{ ++ uint32_t cpuid; ++ int err; ++ char id_str[16]; ++ struct miscdevice *dev = filep->private_data; ++ ++ strncpy(id_str, (void *)dev->name + sizeof("acrn_trace_") - 1, 16); ++ id_str[15] = '\0'; ++ err = kstrtoul(&id_str[0], 10, (unsigned long *)&cpuid); ++ ++ if (err) ++ return err; ++ ++ if (cpuid >= pcpu_num) { ++ pr_err("%s, failed to get cpuid, cpuid %d\n", ++ __func__, cpuid); ++ return -1; ++ } ++ ++ return cpuid; ++} ++ ++/************************************************************************ ++ * ++ * file_operations functions ++ * ++ ***********************************************************************/ ++static int acrn_trace_open(struct inode *inode, struct file *filep) ++{ ++ int cpuid = get_id_from_devname(filep); ++ ++ pr_debug("%s, cpu %d\n", __func__, cpuid); ++ if (cpuid < 0) ++ return -ENXIO; ++ ++ /* More than one reader at the same time could get data messed up */ ++ if (atomic_read(&open_cnt[cpuid])) ++ return -EBUSY; ++ ++ atomic_inc(&open_cnt[cpuid]); ++ ++ return 0; ++} ++ ++static int acrn_trace_release(struct inode *inode, struct file *filep) ++{ ++ int cpuid = get_id_from_devname(filep); ++ ++ pr_debug("%s, cpu %d\n", __func__, cpuid); ++ if (cpuid < 0) 
++ return -ENXIO; ++ ++ atomic_dec(&open_cnt[cpuid]); ++ ++ return 0; ++} ++ ++static int acrn_trace_mmap(struct file *filep, struct vm_area_struct *vma) ++{ ++ int cpuid = get_id_from_devname(filep); ++ phys_addr_t paddr; ++ ++ pr_debug("%s, cpu %d\n", __func__, cpuid); ++ if (cpuid < 0) ++ return -ENXIO; ++ ++ BUG_ON(!virt_addr_valid(sbuf_per_cpu[cpuid])); ++ paddr = virt_to_phys(sbuf_per_cpu[cpuid]); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ paddr >> PAGE_SHIFT, ++ vma->vm_end - vma->vm_start, ++ vma->vm_page_prot)) { ++ pr_err("Failed to mmap sbuf for cpu%d\n", cpuid); ++ return -EAGAIN; ++ } ++ ++ return 0; ++} ++ ++static const struct file_operations acrn_trace_fops = { ++ .owner = THIS_MODULE, ++ .open = acrn_trace_open, ++ .release = acrn_trace_release, ++ .mmap = acrn_trace_mmap, ++}; ++ ++static struct miscdevice acrn_trace_dev0 = { ++ .name = "acrn_trace_0", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_trace_fops, ++}; ++ ++static struct miscdevice acrn_trace_dev1 = { ++ .name = "acrn_trace_1", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_trace_fops, ++}; ++ ++static struct miscdevice acrn_trace_dev2 = { ++ .name = "acrn_trace_2", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_trace_fops, ++}; ++ ++static struct miscdevice acrn_trace_dev3 = { ++ .name = "acrn_trace_3", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_trace_fops, ++}; ++ ++static struct miscdevice *acrn_trace_devs[4] = { ++ &acrn_trace_dev0, ++ &acrn_trace_dev1, ++ &acrn_trace_dev2, ++ &acrn_trace_dev3, ++}; ++ ++/* ++ * acrn_trace_init() ++ */ ++static int __init acrn_trace_init(void) ++{ ++ int ret = 0; ++ int i, cpu; ++ ++ /* TBD: we could get the native cpu number by hypercall later */ ++ pr_info("%s, cpu_num %d\n", __func__, nr_cpus); ++ if (nr_cpus > MAX_NR_CPUS) { ++ pr_err("nr_cpus %d exceed MAX_NR_CPUS %d !\n", ++ nr_cpus, MAX_NR_CPUS); ++ return -EINVAL; ++ } ++ pcpu_num = nr_cpus; ++ ++ foreach_cpu(cpu, pcpu_num) { ++ /* allocate shared_buf */ ++ 
sbuf_per_cpu[cpu] = sbuf_allocate(TRACE_ELEMENT_NUM, ++ TRACE_ELEMENT_SIZE); ++ if (!sbuf_per_cpu[cpu]) { ++ pr_err("Failed alloc SBuf, cpuid %d\n", cpu); ++ ret = -ENOMEM; ++ goto out_free; ++ } ++ } ++ ++ foreach_cpu(cpu, pcpu_num) { ++ ret = sbuf_share_setup(cpu, 0, sbuf_per_cpu[cpu]); ++ if (ret < 0) { ++ pr_err("Failed to setup SBuf, cpuid %d\n", cpu); ++ goto out_sbuf; ++ } ++ } ++ ++ foreach_cpu(cpu, pcpu_num) { ++ ret = misc_register(acrn_trace_devs[cpu]); ++ if (ret < 0) { ++ pr_err("Failed to register acrn_trace_%d, errno %d\n", ++ cpu, ret); ++ goto out_dereg; ++ } ++ } ++ ++ return ret; ++ ++out_dereg: ++ for (i = --cpu; i >= 0; i--) ++ misc_deregister(acrn_trace_devs[i]); ++ cpu = pcpu_num; ++ ++out_sbuf: ++ for (i = --cpu; i >= 0; i--) ++ sbuf_share_setup(i, 0, NULL); ++ cpu = pcpu_num; ++ ++out_free: ++ for (i = --cpu; i >= 0; i--) ++ sbuf_free(sbuf_per_cpu[i]); ++ ++ return ret; ++} ++ ++/* ++ * acrn_trace_exit() ++ */ ++static void __exit acrn_trace_exit(void) ++{ ++ int cpu; ++ ++ pr_info("%s, cpu_num %d\n", __func__, pcpu_num); ++ ++ foreach_cpu(cpu, pcpu_num) { ++ /* deregister devices */ ++ misc_deregister(acrn_trace_devs[cpu]); ++ ++ /* set sbuf pointer to NULL in HV */ ++ sbuf_share_setup(cpu, 0, NULL); ++ ++ /* free sbuf, sbuf_per_cpu[cpu] should be set NULL */ ++ sbuf_free(sbuf_per_cpu[cpu]); ++ } ++} ++ ++module_init(acrn_trace_init); ++module_exit(acrn_trace_exit); ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Intel Corp., http://www.intel.com"); ++MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Trace"); ++MODULE_VERSION("0.1"); +-- +2.17.1 + diff --git a/patches/0024-ASoC-Intel-Skylake-Remove-redundant-W0-and-W1-macros.audio b/patches/0024-ASoC-Intel-Skylake-Remove-redundant-W0-and-W1-macros.audio new file mode 100644 index 0000000000..b7b19d53f4 --- /dev/null +++ b/patches/0024-ASoC-Intel-Skylake-Remove-redundant-W0-and-W1-macros.audio @@ -0,0 +1,163 @@ +From 5adcd4ad8da14954c700474df7a495a503752e42 Mon Sep 17 00:00:00 
2001 +From: Cezary Rojewski +Date: Thu, 15 Aug 2019 13:00:04 +0200 +Subject: [PATCH 024/193] ASoC: Intel: Skylake: Remove redundant W0 and W1 + macros + +The existing upling, downling and FW register size macros are +duplicates. Remove these and replace by global mailbox size macro - FW +register-area size is represented by SKL_FW_REGS_SIZE added in +precedding change. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 5 +++-- + sound/soc/intel/skylake/cnl-sst-dsp.h | 6 ------ + sound/soc/intel/skylake/cnl-sst.c | 10 +++++----- + sound/soc/intel/skylake/skl-sst-dsp.h | 7 +------ + sound/soc/intel/skylake/skl-sst-ipc.c | 12 ++++++------ + sound/soc/intel/skylake/skl-sst.c | 5 +++-- + 6 files changed, 18 insertions(+), 27 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index e6d2f548802a..af20a3e76560 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -566,8 +566,9 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + sst->addr.sram0_base = BXT_ADSP_SRAM0_BASE; + sst->addr.sram1_base = BXT_ADSP_SRAM1_BASE; + +- sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ), +- SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ); ++ sst_dsp_mailbox_init(sst, ++ (BXT_ADSP_SRAM0_BASE + SKL_FW_REGS_SIZE), SKL_MAILBOX_SIZE, ++ BXT_ADSP_SRAM1_BASE, SKL_MAILBOX_SIZE); + + ret = skl_ipc_init(dev, skl); + if (ret) { +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.h b/sound/soc/intel/skylake/cnl-sst-dsp.h +index 70da4f312f53..a465cc42b7e8 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.h ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.h +@@ -52,12 +52,6 @@ struct sst_generic_ipc; + + #define CNL_ADSP_MMIO_LEN 0x10000 + +-#define CNL_ADSP_W0_STAT_SZ 0x1000 +- +-#define CNL_ADSP_W0_UP_SZ 0x1000 +- +-#define CNL_ADSP_W1_SZ 0x1000 +- + #define CNL_FW_STS_MASK 0xf + + #define CNL_ADSPIC_IPC 0x1 +diff --git 
a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index b1df8bc3da27..68900b5daf7d 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -389,8 +389,8 @@ static int cnl_ipc_init(struct device *dev, struct skl_dev *cnl) + ipc->dsp = cnl->dsp; + ipc->dev = dev; + +- ipc->tx_data_max_size = CNL_ADSP_W1_SZ; +- ipc->rx_data_max_size = CNL_ADSP_W0_UP_SZ; ++ ipc->tx_data_max_size = SKL_MAILBOX_SIZE; ++ ipc->rx_data_max_size = SKL_MAILBOX_SIZE; + + err = sst_ipc_init(ipc); + if (err) +@@ -442,9 +442,9 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + sst->addr.sram0_base = CNL_ADSP_SRAM0_BASE; + sst->addr.sram1_base = CNL_ADSP_SRAM1_BASE; + +- sst_dsp_mailbox_init(sst, (CNL_ADSP_SRAM0_BASE + CNL_ADSP_W0_STAT_SZ), +- CNL_ADSP_W0_UP_SZ, CNL_ADSP_SRAM1_BASE, +- CNL_ADSP_W1_SZ); ++ sst_dsp_mailbox_init(sst, ++ (CNL_ADSP_SRAM0_BASE + SKL_FW_REGS_SIZE), SKL_MAILBOX_SIZE, ++ CNL_ADSP_SRAM1_BASE, SKL_MAILBOX_SIZE); + + ret = cnl_ipc_init(dev, cnl); + if (ret) { +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index 9d1cb1a64411..a2122577e8c3 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -57,12 +57,7 @@ struct skl_dev; + + #define SKL_ADSP_MMIO_LEN 0x10000 + +-#define SKL_ADSP_W0_STAT_SZ 0x1000 +- +-#define SKL_ADSP_W0_UP_SZ 0x1000 +- +-#define SKL_ADSP_W1_SZ 0x1000 +- ++#define SKL_MAILBOX_SIZE PAGE_SIZE + #define SKL_FW_REGS_SIZE PAGE_SIZE + #define SKL_FW_STS_MASK 0xf + +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c +index 2700f882103d..72d7284d2fff 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.c ++++ b/sound/soc/intel/skylake/skl-sst-ipc.c +@@ -606,8 +606,8 @@ int skl_ipc_init(struct device *dev, struct skl_dev *skl) + ipc->dsp = skl->dsp; + ipc->dev = dev; + +- ipc->tx_data_max_size = SKL_ADSP_W1_SZ; +- ipc->rx_data_max_size = 
SKL_ADSP_W0_UP_SZ; ++ ipc->tx_data_max_size = SKL_MAILBOX_SIZE; ++ ipc->rx_data_max_size = SKL_MAILBOX_SIZE; + + err = sst_ipc_init(ipc); + if (err) +@@ -922,8 +922,8 @@ int skl_ipc_set_large_config(struct sst_generic_ipc *ipc, + sz_remaining = msg->param_data_size; + data_offset = 0; + while (sz_remaining != 0) { +- tx_size = sz_remaining > SKL_ADSP_W1_SZ +- ? SKL_ADSP_W1_SZ : sz_remaining; ++ tx_size = sz_remaining > SKL_MAILBOX_SIZE ++ ? SKL_MAILBOX_SIZE : sz_remaining; + if (tx_size == sz_remaining) + header.extension |= IPC_FINAL_BLOCK(1); + +@@ -965,7 +965,7 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc, + unsigned int *buf; + int ret; + +- reply.data = kzalloc(SKL_ADSP_W1_SZ, GFP_KERNEL); ++ reply.data = kzalloc(SKL_MAILBOX_SIZE, GFP_KERNEL); + if (!reply.data) + return -ENOMEM; + +@@ -983,7 +983,7 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc, + request.header = *(u64 *)&header; + request.data = *payload; + request.size = *bytes; +- reply.size = SKL_ADSP_W1_SZ; ++ reply.size = SKL_MAILBOX_SIZE; + + ret = sst_ipc_tx_message_wait(ipc, request, &reply); + if (ret < 0) +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 26a4688ff938..195ca12b351a 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -536,8 +536,9 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + sst->addr.sram0_base = SKL_ADSP_SRAM0_BASE; + sst->addr.sram1_base = SKL_ADSP_SRAM1_BASE; + +- sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ), +- SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ); ++ sst_dsp_mailbox_init(sst, ++ (SKL_ADSP_SRAM0_BASE + SKL_FW_REGS_SIZE), SKL_MAILBOX_SIZE, ++ SKL_ADSP_SRAM1_BASE, SKL_MAILBOX_SIZE); + + ret = skl_ipc_init(dev, skl); + if (ret) { +-- +2.17.1 + diff --git a/patches/0024-SEP-Bug-Fix-for-VMM-symbol-resolution.sep-socwatch b/patches/0024-SEP-Bug-Fix-for-VMM-symbol-resolution.sep-socwatch new file 
mode 100644 index 0000000000..2eca2075e1 --- /dev/null +++ b/patches/0024-SEP-Bug-Fix-for-VMM-symbol-resolution.sep-socwatch @@ -0,0 +1,71 @@ +From 8e7545aaa5a5b74c468578cc064a9dd30d253ab9 Mon Sep 17 00:00:00 2001 +From: Manisha +Date: Tue, 19 Feb 2019 20:26:50 +0000 +Subject: [PATCH 24/27] SEP Bug Fix for VMM symbol resolution + +VMM symbol is not loading correctly in Vtune. +This patch fix symbol loading issue and +changed the macro name from OS_ID_ACORN to OS_ID_ACRN + +Tracked-on: PKT-1745 +Signed-off-by: Lim, Min Yeol +Signed-off-by: Manisha +--- + drivers/platform/x86/sepdk/include/lwpmudrv_defines.h | 2 +- + drivers/platform/x86/sepdk/sep/linuxos.c | 6 ++++-- + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 2 +- + 3 files changed, 6 insertions(+), 4 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h +index 8346ea72d587..0120aeea9631 100644 +--- a/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h ++++ b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h +@@ -511,7 +511,7 @@ extern "C" { + #define OS_ID_MODEM 1 + #define OS_ID_ANDROID 2 + #define OS_ID_SECVM 3 +-#define OS_ID_ACORN 0xFFFF ++#define OS_ID_ACRN 0xFFFF + + #define PERF_HW_VER4 (5) + #if defined(__cplusplus) +diff --git a/drivers/platform/x86/sepdk/sep/linuxos.c b/drivers/platform/x86/sepdk/sep/linuxos.c +index 1f877e6e4bc8..3ef35ac66b49 100755 +--- a/drivers/platform/x86/sepdk/sep/linuxos.c ++++ b/drivers/platform/x86/sepdk/sep/linuxos.c +@@ -161,7 +161,9 @@ static S32 linuxos_Load_Image_Notify_Routine(char *name, U64 base, U32 size, + MODULE_RECORD_parent_pid(mra) = parent_pid; + MODULE_RECORD_osid(mra) = osid; + MODULE_RECORD_pid_rec_index(mra) = pid; +- ++ if (osid == OS_ID_ACRN) { ++ MODULE_RECORD_unknown_load_address(mra) = 1; ++ } + if (kernel_modules) { + MODULE_RECORD_tsc(mra) = 0; + MR_unloadTscSet(mra, (U64)(0xffffffffffffffffLL)); +@@ -328,7 +330,7 @@ static S32 linuxos_Map_Kernel_Modules(void) 
+ linuxos_Load_Image_Notify_Routine( + "VMM", 0x0, (U32)0xffffffffffffffffLL, 0, 0, 0, + LOPTS_1ST_MODREC | LOPTS_GLOBAL_MODULE | LOPTS_EXE, exec_mode, +- -1, MR_SEG_NUM, 1, OS_ID_ACORN); ++ -1, MR_SEG_NUM, 1, OS_ID_ACRN); + #endif + + for (modules = (struct list_head *)(THIS_MODULE->list.prev); +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +index 8a1bab3a5453..87b82a20864b 100644 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -6002,7 +6002,7 @@ static OS_STATUS lwpmudrv_Get_Sample_Drop_Info(IOCTL_ARGS args) + && size < MAX_SAMPLE_DROP_NODES; i++) { + if (stats[i].samples_logged || stats[i].samples_dropped) { + SAMPLE_DROP_INFO_drop_info( +- &req_sample_drop_info, size).os_id = OS_ID_ACORN; ++ &req_sample_drop_info, size).os_id = OS_ID_ACRN; + SAMPLE_DROP_INFO_drop_info( + &req_sample_drop_info, size).cpu_id = i; + SAMPLE_DROP_INFO_drop_info( +-- +2.17.1 + diff --git a/patches/0024-drm-i915-Use-enum-pipe-instead-of-crtc-index-to-track-.drm b/patches/0024-drm-i915-Use-enum-pipe-instead-of-crtc-index-to-track-.drm new file mode 100644 index 0000000000..ee85d14a89 --- /dev/null +++ b/patches/0024-drm-i915-Use-enum-pipe-instead-of-crtc-index-to-track-.drm @@ -0,0 +1,271 @@ +From 6c0b5cfac1dbc089de354e21d23cf5d59b438e0a Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= +Date: Wed, 21 Aug 2019 20:30:29 +0300 +Subject: [PATCH 024/690] drm/i915: Use enum pipe instead of crtc index to + track active pipes +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +We may need to eliminate the crtc->index == pipe assumptions from +the code to support arbitrary pipes being fused off. Start that by +switching some bitmasks over to using pipe instead of the crtc index. 
+ +Signed-off-by: Ville Syrjälä +Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-1-ville.syrjala@linux.intel.com +Reviewed-by: Jani Nikula +--- + drivers/gpu/drm/i915/display/intel_cdclk.c | 12 +++++------ + drivers/gpu/drm/i915/display/intel_display.c | 20 +++++++++---------- + .../drm/i915/display/intel_display_types.h | 4 ++-- + drivers/gpu/drm/i915/i915_drv.h | 2 +- + drivers/gpu/drm/i915/intel_pm.c | 20 +++++++++---------- + 5 files changed, 29 insertions(+), 29 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c +index d0bc42e5039c..939088c7d814 100644 +--- a/drivers/gpu/drm/i915/display/intel_cdclk.c ++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c +@@ -2369,7 +2369,7 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state) + state->cdclk.logical.voltage_level = + vlv_calc_voltage_level(dev_priv, cdclk); + +- if (!state->active_crtcs) { ++ if (!state->active_pipes) { + cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); + + state->cdclk.actual.cdclk = cdclk; +@@ -2400,7 +2400,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state) + state->cdclk.logical.voltage_level = + bdw_calc_voltage_level(cdclk); + +- if (!state->active_crtcs) { ++ if (!state->active_pipes) { + cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk); + + state->cdclk.actual.cdclk = cdclk; +@@ -2470,7 +2470,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) + state->cdclk.logical.voltage_level = + skl_calc_voltage_level(cdclk); + +- if (!state->active_crtcs) { ++ if (!state->active_pipes) { + cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco); + + state->cdclk.actual.vco = vco; +@@ -2506,7 +2506,7 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) + state->cdclk.logical.voltage_level = + bxt_calc_voltage_level(cdclk); + +- if (!state->active_crtcs) { ++ if (!state->active_pipes) { + if (IS_GEMINILAKE(dev_priv)) 
{ + cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk); + vco = glk_de_pll_vco(dev_priv, cdclk); +@@ -2544,7 +2544,7 @@ static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state) + max(cnl_calc_voltage_level(cdclk), + cnl_compute_min_voltage_level(state)); + +- if (!state->active_crtcs) { ++ if (!state->active_pipes) { + cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk); + vco = cnl_cdclk_pll_vco(dev_priv, cdclk); + +@@ -2578,7 +2578,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) + max(icl_calc_voltage_level(dev_priv, cdclk), + cnl_compute_min_voltage_level(state)); + +- if (!state->active_crtcs) { ++ if (!state->active_pipes) { + cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref); + vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); + +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c +index aa54bb22796d..6bbf04bbb2db 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.c ++++ b/drivers/gpu/drm/i915/display/intel_display.c +@@ -7093,7 +7093,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, + intel_display_power_put_unchecked(dev_priv, domain); + intel_crtc->enabled_power_domains = 0; + +- dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); ++ dev_priv->active_pipes &= ~BIT(intel_crtc->pipe); + dev_priv->min_cdclk[intel_crtc->pipe] = 0; + dev_priv->min_voltage_level[intel_crtc->pipe] = 0; + +@@ -13469,7 +13469,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state) + state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; + + state->modeset = true; +- state->active_crtcs = dev_priv->active_crtcs; ++ state->active_pipes = dev_priv->active_pipes; + state->cdclk.logical = dev_priv->cdclk.logical; + state->cdclk.actual = dev_priv->cdclk.actual; + state->cdclk.pipe = INVALID_PIPE; +@@ -13477,12 +13477,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state) + for_each_oldnew_intel_crtc_in_state(state, crtc, 
old_crtc_state, + new_crtc_state, i) { + if (new_crtc_state->base.active) +- state->active_crtcs |= 1 << i; ++ state->active_pipes |= BIT(crtc->pipe); + else +- state->active_crtcs &= ~(1 << i); ++ state->active_pipes &= ~BIT(crtc->pipe); + + if (old_crtc_state->base.active != new_crtc_state->base.active) +- state->active_pipe_changes |= drm_crtc_mask(&crtc->base); ++ state->active_pipe_changes |= BIT(crtc->pipe); + } + + /* +@@ -13511,11 +13511,11 @@ static int intel_modeset_checks(struct intel_atomic_state *state) + return ret; + } + +- if (is_power_of_2(state->active_crtcs)) { ++ if (is_power_of_2(state->active_pipes)) { + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; + +- pipe = ilog2(state->active_crtcs); ++ pipe = ilog2(state->active_pipes); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + if (crtc_state && needs_modeset(crtc_state)) +@@ -14208,7 +14208,7 @@ static int intel_atomic_commit(struct drm_device *dev, + sizeof(state->min_cdclk)); + memcpy(dev_priv->min_voltage_level, state->min_voltage_level, + sizeof(state->min_voltage_level)); +- dev_priv->active_crtcs = state->active_crtcs; ++ dev_priv->active_pipes = state->active_pipes; + dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; + + intel_cdclk_swap_state(state); +@@ -16657,7 +16657,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) + struct drm_connector_list_iter conn_iter; + int i; + +- dev_priv->active_crtcs = 0; ++ dev_priv->active_pipes = 0; + + for_each_intel_crtc(dev, crtc) { + struct intel_crtc_state *crtc_state = +@@ -16674,7 +16674,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) + crtc->active = crtc_state->base.active; + + if (crtc_state->base.active) +- dev_priv->active_crtcs |= 1 << crtc->pipe; ++ dev_priv->active_pipes |= BIT(crtc->pipe); + + DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", + crtc->base.base.id, crtc->base.name, +diff --git 
a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h +index 449abaea619f..12523456143f 100644 +--- a/drivers/gpu/drm/i915/display/intel_display_types.h ++++ b/drivers/gpu/drm/i915/display/intel_display_types.h +@@ -481,9 +481,9 @@ struct intel_atomic_state { + * but the converse is not necessarily true; simply changing a mode may + * not flip the final active status of any CRTC's + */ +- unsigned int active_pipe_changes; ++ u8 active_pipe_changes; + +- unsigned int active_crtcs; ++ u8 active_pipes; + /* minimum acceptable cdclk for each pipe */ + int min_cdclk[I915_MAX_PIPES]; + /* minimum acceptable voltage level for each pipe */ +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index d9fadc38fcfa..82b919e51896 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -1469,7 +1469,7 @@ struct drm_i915_private { + */ + struct mutex dpll_lock; + +- unsigned int active_crtcs; ++ u8 active_pipes; + /* minimum acceptable cdclk for each pipe */ + int min_cdclk[I915_MAX_PIPES]; + /* minimum acceptable voltage level for each pipe */ +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index d3ea193cd093..09f29a337313 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -3761,18 +3761,18 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) + /* + * If there are no active CRTCs, no additional checks need be performed + */ +- if (hweight32(state->active_crtcs) == 0) ++ if (hweight32(state->active_pipes) == 0) + return true; + + /* + * SKL+ workaround: bspec recommends we disable SAGV when we have + * more then one pipe enabled + */ +- if (hweight32(state->active_crtcs) > 1) ++ if (hweight32(state->active_pipes) > 1) + return false; + + /* Since we're now guaranteed to only have one active CRTC... 
*/ +- pipe = ffs(state->active_crtcs) - 1; ++ pipe = ffs(state->active_pipes) - 1; + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc_state = to_intel_crtc_state(crtc->base.state); + +@@ -3867,14 +3867,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, + if (WARN_ON(!state) || !crtc_state->base.active) { + alloc->start = 0; + alloc->end = 0; +- *num_active = hweight32(dev_priv->active_crtcs); ++ *num_active = hweight32(dev_priv->active_pipes); + return; + } + + if (intel_state->active_pipe_changes) +- *num_active = hweight32(intel_state->active_crtcs); ++ *num_active = hweight32(intel_state->active_pipes); + else +- *num_active = hweight32(dev_priv->active_crtcs); ++ *num_active = hweight32(dev_priv->active_pipes); + + ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate, + *num_active, ddb); +@@ -5464,7 +5464,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) + * If this transaction isn't actually touching any CRTC's, don't + * bother with watermark calculation. Note that if we pass this + * test, we're guaranteed to hold at least one CRTC state mutex, +- * which means we can safely use values like dev_priv->active_crtcs ++ * which means we can safely use values like dev_priv->active_pipes + * since any racing commits that want to update them would need to + * hold _all_ CRTC state mutexes. + */ +@@ -5489,13 +5489,13 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) + state->active_pipe_changes = ~0; + + /* +- * We usually only initialize state->active_crtcs if we ++ * We usually only initialize state->active_pipes if we + * we're doing a modeset; make sure this field is always + * initialized during the sanitization process that happens + * on the first commit too. 
+ */ + if (!state->modeset) +- state->active_crtcs = dev_priv->active_crtcs; ++ state->active_pipes = dev_priv->active_pipes; + } + + /* +@@ -5811,7 +5811,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) + hw->dirty_pipes |= drm_crtc_mask(&crtc->base); + } + +- if (dev_priv->active_crtcs) { ++ if (dev_priv->active_pipes) { + /* Fully recompute DDB on first atomic commit */ + dev_priv->wm.distrust_bios_wm = true; + } +-- +2.17.1 + diff --git a/patches/0024-mei-spd-add-support-for-spd-protocol-version-3.security b/patches/0024-mei-spd-add-support-for-spd-protocol-version-3.security new file mode 100644 index 0000000000..a7916bdb61 --- /dev/null +++ b/patches/0024-mei-spd-add-support-for-spd-protocol-version-3.security @@ -0,0 +1,236 @@ +From f779104b34411fdde9534b5b1ab10b2902febe4b Mon Sep 17 00:00:00 2001 +From: Vitaly Lubart +Date: Thu, 29 Nov 2018 16:15:00 +0200 +Subject: [PATCH 24/65] mei: spd: add support for spd protocol version 3 + +Change-Id: I6c5abe97923ba45deb18df12ba69f66e59f04014 +Signed-off-by: Vitaly Lubart +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/spd/cmd.c | 43 ++++++++++++++++++++++++++++++------- + drivers/misc/mei/spd/cmd.h | 33 +++++++++++++++++++++++++++- + drivers/misc/mei/spd/main.c | 2 +- + drivers/misc/mei/spd/rpmb.c | 3 ++- + drivers/misc/mei/spd/spd.h | 6 ++++-- + 5 files changed, 74 insertions(+), 13 deletions(-) + +diff --git a/drivers/misc/mei/spd/cmd.c b/drivers/misc/mei/spd/cmd.c +index ea26204f42c0..91521c05e47a 100644 +--- a/drivers/misc/mei/spd/cmd.c ++++ b/drivers/misc/mei/spd/cmd.c +@@ -34,6 +34,10 @@ const char *spd_cmd_str(enum spd_cmd_type cmd) + SPD_CMD(TRIM); + SPD_CMD(INIT); + SPD_CMD(STORAGE_STATUS); ++ SPD_CMD(ALLOCATE_BUFFER); ++ SPD_CMD(WRITE_FROM_BUFFER); ++ SPD_CMD(READ_FROM_BUFFER); ++ SPD_CMD(MANAGE_CRITICAL_SECTION); + SPD_CMD(MAX); + default: + return "unknown"; +@@ -121,11 +125,12 @@ int mei_spd_cmd_init_req(struct mei_spd *spd) + static int mei_spd_cmd_init_rsp(struct mei_spd *spd, 
struct spd_cmd *cmd, + ssize_t cmd_sz) + { +- int type; +- int gpp_id; +- int i; ++ unsigned int type; ++ unsigned int gpp_id; ++ unsigned int rpmb_id; ++ unsigned int i; + +- if (cmd_sz < spd_cmd_size(init_resp)) { ++ if (cmd_sz < (ssize_t)spd_cmd_size(init_resp)) { + spd_err(spd, "Wrong init response size\n"); + return -EINVAL; + } +@@ -134,23 +139,44 @@ static int mei_spd_cmd_init_rsp(struct mei_spd *spd, struct spd_cmd *cmd, + return -EPROTO; + + type = cmd->init_rsp.type; +- gpp_id = cmd->init_rsp.gpp_partition_id; ++ gpp_id = cmd->init_rsp.gpp_partition_id; ++ rpmb_id = cmd->init_rsp.rpmb_partition_id; ++ ++ spd_dbg(spd, "cmd init rsp : type [%d] gpp_id [%d] rpmb_id [%d]\n", ++ type, gpp_id, rpmb_id); + + switch (type) { + case SPD_TYPE_EMMC: +- if (gpp_id < 1 || gpp_id > 4) { ++ if (gpp_id > 4) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } ++ ++ /* Only one RPMB partition exists for EMMC */ ++ rpmb_id = 0; + break; + + case SPD_TYPE_UFS: +- if (gpp_id < 1 || gpp_id > 6) { ++ if (gpp_id > 7) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } ++ ++ /* For UFS version 2.0 and 2.1 the RPMB od must be 0 */ ++ /* because there is only one RPMB partition. */ ++ /* For UFS version 3.0 there can be up to 4 RPMBs and */ ++ /* the RPMB id is later being used in CDB format of */ ++ /* Security Protocol IN/OUT Commands ( Security */ ++ /* Protocol Specific field. 
*/ ++ /* See the UFS Version 3.0 spec for details */ ++ if (rpmb_id > 3) { ++ spd_err(spd, "%s unsupported rpmb id %d\n", ++ mei_spd_dev_str(type), rpmb_id); ++ return -EINVAL; ++ } ++ + break; + + default: +@@ -160,7 +186,8 @@ static int mei_spd_cmd_init_rsp(struct mei_spd *spd, struct spd_cmd *cmd, + } + + spd->dev_type = type; +- spd->gpp_partition_id = gpp_id; ++ spd->gpp_partition_id = gpp_id; ++ spd->rpmb_partition_id = rpmb_id; + + if (cmd->init_rsp.serial_no_sz != 0) { + if (cmd->init_rsp.serial_no_sz != +diff --git a/drivers/misc/mei/spd/cmd.h b/drivers/misc/mei/spd/cmd.h +index a7f99ec7f8f4..1ecf02bf289c 100644 +--- a/drivers/misc/mei/spd/cmd.h ++++ b/drivers/misc/mei/spd/cmd.h +@@ -18,7 +18,11 @@ + * @SPD_GPP_READ_CMD : GPP read request. [TEE -> Host] + * @SPD_TRIM_CMD : TRIM command [TEE -> Host] + * @SPD_INIT_CMD : initial handshake between host and fw. [Host -> TEE] +- * @SPD_STORAGE_STATUS_CMD : the backing storage status. [Host -> TEE] ++ * @SPD_STORAGE_STATUS_CMD : the backing storage status. [Host -> TEE] ++ * @SPD_ALLOCATE_BUFFER_CMD : not used by the SW. [TEE -> Host] ++ * @SPD_WRITE_FROM_BUFFER_CMD : not used by the SW. [TEE -> Host] ++ * @SPD_READ_FROM_BUFFER_CMD : not used by the SW. [TEE -> Host] ++ * @SPD_MANAGE_CRITICAL_SECTION_CMD : not used by the SW [TEE -> Host] + * @SPD_MAX_CMD: Upper command sentinel. + */ + enum spd_cmd_type { +@@ -32,6 +36,10 @@ enum spd_cmd_type { + SPD_TRIM_CMD, + SPD_INIT_CMD, + SPD_STORAGE_STATUS_CMD, ++ SPD_ALLOCATE_BUFFER_CMD, ++ SPD_WRITE_FROM_BUFFER_CMD, ++ SPD_READ_FROM_BUFFER_CMD, ++ SPD_MANAGE_CRITICAL_SECTION_CMD, + SPD_MAX_CMD, + }; + +@@ -85,6 +93,11 @@ struct spd_cmd_hdr { + * UFS: LUN Number (0-7) + * EMMC: 1-4. 
+ * 0xff: GPP not supported ++ * @rpmb_partition_id: rpmb_partition: ++ * UFS: W-LUN Number (0-3) ++ * EMMC: Not defined ++ * 0xff: RPMB not supported ++ * @reserved: reserved + * @type: storage hw type + * SPD_TYPE_EMMC + * SPD_TYPE_UFS +@@ -93,6 +106,8 @@ struct spd_cmd_hdr { + */ + struct spd_cmd_init_resp { + u32 gpp_partition_id; ++ u32 rpmb_partition_id; ++ u32 reserved[2]; + u32 type; + u32 serial_no_sz; + u8 serial_no[0]; +@@ -107,10 +122,26 @@ struct spd_cmd_init_resp { + * @rpmb_on: availability of the backing storage + * 0 - RPMB partition is accessible + * 1 - RPBM partition is not accessible ++ * @boot_on: availability of the boot partition ++ * 0 - boot partition is accessible ++ * 1 - boot partition is not accessible ++ * @reserved: reserved ++ * @critical_section: support of critical section message ++ * 0 - critical section off ++ * 1 - critical section on ++ * @buffer_dma_support: buffer dma support capability ++ * 0 - buffer dma support disabled ++ * 1 - buffer dma support enabled ++ * @reserved_capabilities: reserved + */ + struct spd_cmd_storage_status_req { + u32 gpp_on; + u32 rpmb_on; ++ u32 boot_on; ++ u32 reserved[2]; ++ u32 critical_section; ++ u32 buffer_dma_support : 1; ++ u32 reserved_capabilities : 31; + } __packed; + + /** +diff --git a/drivers/misc/mei/spd/main.c b/drivers/misc/mei/spd/main.c +index 6b379171a136..ea3e5b88db58 100644 +--- a/drivers/misc/mei/spd/main.c ++++ b/drivers/misc/mei/spd/main.c +@@ -99,7 +99,7 @@ static int mei_spd_remove(struct mei_cl_device *cldev) + 0x99, 0xcb, 0x9e, 0x22, 0x74, 0x97, 0x8c, 0xa8) + + static struct mei_cl_device_id mei_spd_tbl[] = { +- { .uuid = MEI_SPD_UUID, .version = MEI_CL_VERSION_ANY}, ++ { .uuid = MEI_SPD_UUID, .version = 0x3}, + /* required last entry */ + { } + }; +diff --git a/drivers/misc/mei/spd/rpmb.c b/drivers/misc/mei/spd/rpmb.c +index ae5d5ac517ad..aa966e9b144f 100644 +--- a/drivers/misc/mei/spd/rpmb.c ++++ b/drivers/misc/mei/spd/rpmb.c +@@ -18,7 +18,8 @@ static int 
mei_spd_rpmb_start(struct mei_spd *spd, struct rpmb_dev *rdev) + } + + spd->rdev = rpmb_dev_get(rdev); +- spd_dbg(spd, "rpmb partition created\n"); ++ spd->rdev->target = spd->rpmb_partition_id; ++ spd_dbg(spd, "rpmb partition created, target %d\n", spd->rdev->target); + return 0; + } + +diff --git a/drivers/misc/mei/spd/spd.h b/drivers/misc/mei/spd/spd.h +index 4700a27b2f28..00a053c4db39 100644 +--- a/drivers/misc/mei/spd/spd.h ++++ b/drivers/misc/mei/spd/spd.h +@@ -22,7 +22,8 @@ enum mei_spd_state { + * + * @cldev: client bus device + * @gpp: GPP partition block device +- * @gpp_partition_id: GPP partition id (1-6) ++ * @gpp_partition_id: GPP partition id (0-7) ++ * @rpmb_partition_id: RPMB partition id (0-3) + * @gpp_interface: gpp class interface for discovery + * @dev_type: storage device type + * @dev_id_sz: device id size +@@ -39,7 +40,8 @@ enum mei_spd_state { + struct mei_spd { + struct mei_cl_device *cldev; + struct block_device *gpp; +- u32 gpp_partition_id; ++ u8 gpp_partition_id; ++ u8 rpmb_partition_id; + struct class_interface gpp_interface; + u32 dev_type; + u32 dev_id_sz; +-- +2.17.1 + diff --git a/patches/0024-net-stmmac-free-pci-irqs-during-driver-unload.connectivity b/patches/0024-net-stmmac-free-pci-irqs-during-driver-unload.connectivity new file mode 100644 index 0000000000..83d8a212c4 --- /dev/null +++ b/patches/0024-net-stmmac-free-pci-irqs-during-driver-unload.connectivity @@ -0,0 +1,27 @@ +From b03b5c1a7242e0a10f2883404c2070495cc502b2 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Thu, 1 Aug 2019 13:44:53 +0800 +Subject: [PATCH 024/108] net: stmmac: free pci irqs during driver unloading + +We need to free PCI MSIs after the associated ISRs have been released. 
+ +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index c34514bfbf2d..b7cc71ca53c4 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -757,6 +757,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev) + break; + } + ++ pci_free_irq_vectors(pdev); + pci_disable_device(pdev); + } + +-- +2.17.1 + diff --git a/patches/0024-serial-8250_dwlib-Support-for-9-bit-transfer-mode.lpss b/patches/0024-serial-8250_dwlib-Support-for-9-bit-transfer-mode.lpss new file mode 100644 index 0000000000..2191c3832f --- /dev/null +++ b/patches/0024-serial-8250_dwlib-Support-for-9-bit-transfer-mode.lpss @@ -0,0 +1,82 @@ +From b1f0c4dc2c3a5cc980fdaebf91adadf988952861 Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Fri, 6 Sep 2019 13:42:46 +0300 +Subject: [PATCH 24/40] serial: 8250_dwlib: Support for 9-bit transfer mode + +Interim. As the ABI is not final, this patch is +"proof-of-concept" only. + +This enables 9-bit transfer mode if the user requests it +with the new (temporary) flags in struct serial_rs485. 
+ +Signed-off-by: Heikki Krogerus +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_dwlib.c | 27 ++++++++++++++++++++++++++- + 1 file changed, 26 insertions(+), 1 deletion(-) + +diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c +index 52fc246cbaed..22800852a1b1 100644 +--- a/drivers/tty/serial/8250/8250_dwlib.c ++++ b/drivers/tty/serial/8250/8250_dwlib.c +@@ -16,6 +16,9 @@ + #define DW_UART_DE_EN 0xb0 /* Driver Output Enable Register */ + #define DW_UART_RE_EN 0xb4 /* Receiver Output Enable Register */ + #define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */ ++#define DW_UART_RAR 0xc4 /* Receive Address Register */ ++#define DW_UART_TAR 0xc8 /* Transmit Address Register */ ++#define DW_UART_LCR_EXT 0xcc /* Line Extended Control Register */ + #define DW_UART_CPR 0xf4 /* Component Parameter Register */ + #define DW_UART_UCV 0xf8 /* UART Component Version */ + +@@ -25,6 +28,12 @@ + #define DW_UART_TCR_DE_POL BIT(2) + #define DW_UART_TCR_XFER_MODE(_mode_) ((_mode_) << 3) + ++/* Line Extended Control Register bits */ ++#define DW_UART_LCR_EXT_DLS_E BIT(0) ++#define DW_UART_LCR_EXT_ADDR_MATCH BIT(1) ++#define DW_UART_LCR_EXT_SEND_ADDR BIT(2) ++#define DW_UART_LCR_EXT_TRANSMIT_MODE BIT(3) ++ + /* Component Parameter Register bits */ + #define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0) + #define DW_UART_CPR_AFCE_MODE (1 << 4) +@@ -89,10 +98,12 @@ static void dw8250_set_divisor(struct uart_port *p, unsigned int baud, + + static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485) + { ++ u32 lcr = 0; + u32 tcr; + + /* Clearing unsupported flags. 
*/ +- rs485->flags &= SER_RS485_ENABLED; ++ rs485->flags &= SER_RS485_ENABLED | SER_RS485_9BIT_ENABLED | ++ SER_RS485_9BIT_TX_ADDR | SER_RS485_9BIT_RX_ADDR; + + tcr = dw8250_readl_ext(p, DW_UART_TCR); + +@@ -123,6 +134,20 @@ static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485) + rs485->delay_rts_before_send = 0; + rs485->delay_rts_after_send = 0; + ++ /* XXX: Proof of concept for 9-bit transfer mode. */ ++ if (rs485->flags & SER_RS485_9BIT_ENABLED) { ++ lcr = DW_UART_LCR_EXT_DLS_E; ++ if (SER_RS485_9BIT_TX_ADDR) { ++ dw8250_writel_ext(p, DW_UART_TAR, rs485->padding[0]); ++ lcr |= DW_UART_LCR_EXT_SEND_ADDR; ++ } else if (SER_RS485_9BIT_RX_ADDR) { ++ dw8250_writel_ext(p, DW_UART_RAR, rs485->padding[0]); ++ lcr |= DW_UART_LCR_EXT_ADDR_MATCH; ++ } ++ } ++ ++ dw8250_writel_ext(p, DW_UART_LCR_EXT, lcr); ++ + p->rs485 = *rs485; + + return 0; +-- +2.17.1 + diff --git a/patches/0024-trusty-Popup-warning-when-LK-timer-interrupt-is-not.trusty b/patches/0024-trusty-Popup-warning-when-LK-timer-interrupt-is-not.trusty new file mode 100644 index 0000000000..be4824e020 --- /dev/null +++ b/patches/0024-trusty-Popup-warning-when-LK-timer-interrupt-is-not.trusty @@ -0,0 +1,61 @@ +From 475230c1f75144482e01ba7f03838668b821de38 Mon Sep 17 00:00:00 2001 +From: "Yan, Shaoou" +Date: Thu, 8 Dec 2016 05:14:48 +0000 +Subject: [PATCH 24/63] trusty: Popup warning when LK timer interrupt is not as + expected + +LK timer interrupt vector 0x31 should map to irq 1, if not LK timer +interrupt is not work as expected + +Change-Id: I4936bf3dd1d9a21e6913d8d3c4353568eb67c2b2 +Tracked-On: OAM-40751 +Signed-off-by: Feng, Wang +Reviewed-by: Ilkka Koskinen +--- + drivers/trusty/trusty-irq.c | 14 +++++++++++--- + 1 file changed, 11 insertions(+), 3 deletions(-) + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index 5a74d75ce820..6c510a65e784 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -29,6 +29,9 @@ + #include + 
#include + ++#define IRQ_VECTOR_OFFSET 0x30 ++#define IRQ_FOR_LK_TIMER 1 ++ + struct trusty_irq { + struct trusty_irq_state *is; + struct hlist_node node; +@@ -223,7 +226,9 @@ irqreturn_t trusty_irq_handler(int irq, void *data) + __func__, irq, trusty_irq->irq, smp_processor_id(), + trusty_irq->enable); + +- set_pending_intr_to_lk(irq+0x30); ++ WARN_ON(irq != IRQ_FOR_LK_TIMER); ++ ++ set_pending_intr_to_lk(irq+IRQ_VECTOR_OFFSET); + + if (trusty_irq->percpu) { + disable_percpu_irq(irq); +@@ -528,10 +533,13 @@ static int trusty_irq_init_one(struct trusty_irq_state *is, + if (irq < 0) + return irq; + dev_info(is->dev, "irq from lk = %d\n", irq); ++ ++ WARN_ON(irq-IRQ_VECTOR_OFFSET != IRQ_FOR_LK_TIMER); ++ + if (per_cpu) +- ret = trusty_irq_init_per_cpu_irq(is, irq-0x30); ++ ret = trusty_irq_init_per_cpu_irq(is, irq-IRQ_VECTOR_OFFSET); + else +- ret = trusty_irq_init_normal_irq(is, irq-0x30); ++ ret = trusty_irq_init_normal_irq(is, irq-IRQ_VECTOR_OFFSET); + + if (ret) { + dev_warn(is->dev, +-- +2.17.1 + diff --git a/patches/0025-ASoC-Intel-Skylake-Remove-redundant-SRAM-fields.audio b/patches/0025-ASoC-Intel-Skylake-Remove-redundant-SRAM-fields.audio new file mode 100644 index 0000000000..9b177f124e --- /dev/null +++ b/patches/0025-ASoC-Intel-Skylake-Remove-redundant-SRAM-fields.audio @@ -0,0 +1,72 @@ +From bbfa57800e252c360adb6a4169ea9f7569ca891d Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Thu, 15 Aug 2019 13:47:57 +0200 +Subject: [PATCH 025/193] ASoC: Intel: Skylake: Remove redundant SRAM fields + +sram0_base and sram1_base are Skylake-specific fields and should not be +part of common sst framework. Moreover, these are completely unused, so +remove them. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/common/sst-dsp-priv.h | 2 -- + sound/soc/intel/skylake/bxt-sst.c | 2 -- + sound/soc/intel/skylake/cnl-sst.c | 2 -- + sound/soc/intel/skylake/skl-sst.c | 2 -- + 4 files changed, 8 deletions(-) + +diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h +index 53dcd87bab44..a4628a89d47d 100644 +--- a/sound/soc/intel/common/sst-dsp-priv.h ++++ b/sound/soc/intel/common/sst-dsp-priv.h +@@ -69,8 +69,6 @@ struct sst_addr { + u32 dram_offset; + u32 dsp_iram_offset; + u32 dsp_dram_offset; +- u32 sram0_base; +- u32 sram1_base; + void __iomem *lpe; + void __iomem *shim; + void __iomem *pci_cfg; +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index af20a3e76560..af2d18333afc 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -563,8 +563,6 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + sst->fw_ops = bxt_fw_ops; + sst->addr.lpe = mmio_base; + sst->addr.shim = mmio_base; +- sst->addr.sram0_base = BXT_ADSP_SRAM0_BASE; +- sst->addr.sram1_base = BXT_ADSP_SRAM1_BASE; + + sst_dsp_mailbox_init(sst, + (BXT_ADSP_SRAM0_BASE + SKL_FW_REGS_SIZE), SKL_MAILBOX_SIZE, +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 68900b5daf7d..a206bc140279 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -439,8 +439,6 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + sst->fw_ops = cnl_fw_ops; + sst->addr.lpe = mmio_base; + sst->addr.shim = mmio_base; +- sst->addr.sram0_base = CNL_ADSP_SRAM0_BASE; +- sst->addr.sram1_base = CNL_ADSP_SRAM1_BASE; + + sst_dsp_mailbox_init(sst, + (CNL_ADSP_SRAM0_BASE + SKL_FW_REGS_SIZE), SKL_MAILBOX_SIZE, +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 195ca12b351a..30d7f5169550 100644 +--- a/sound/soc/intel/skylake/skl-sst.c 
++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -533,8 +533,6 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + sst = skl->dsp; + sst->addr.lpe = mmio_base; + sst->addr.shim = mmio_base; +- sst->addr.sram0_base = SKL_ADSP_SRAM0_BASE; +- sst->addr.sram1_base = SKL_ADSP_SRAM1_BASE; + + sst_dsp_mailbox_init(sst, + (SKL_ADSP_SRAM0_BASE + SKL_FW_REGS_SIZE), SKL_MAILBOX_SIZE, +-- +2.17.1 + diff --git a/patches/0025-SEP-Error-handling-based-on-acrn-hypercall-re.sep-socwatch b/patches/0025-SEP-Error-handling-based-on-acrn-hypercall-re.sep-socwatch new file mode 100644 index 0000000000..ce4f3559f1 --- /dev/null +++ b/patches/0025-SEP-Error-handling-based-on-acrn-hypercall-re.sep-socwatch @@ -0,0 +1,252 @@ +From 7c202797f87729dcf00a3fb32c9eec69e014639b Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Tue, 19 Feb 2019 15:17:53 -0800 +Subject: [PATCH 25/27] SEP Error handling based on acrn hypercall return code + +SEP hypercalls to acrn hypervisor return, +0 for success and -EINVAL for all failure cases. 
+So, if acrn_hypercall fails print error message and handle it appropriately + +Tracked-on: PKT-1745 +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/sepdk/sep/lwpmudrv.c | 87 ++++++++++++++++++----- + drivers/platform/x86/sepdk/sep/perfver4.c | 20 ++++-- + drivers/platform/x86/sepdk/sep/utility.c | 9 ++- + 3 files changed, 89 insertions(+), 27 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +index 87b82a20864b..f3efb9b0eab2 100644 +--- a/drivers/platform/x86/sepdk/sep/lwpmudrv.c ++++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c +@@ -2677,8 +2677,13 @@ static VOID lwpmudrv_Read_MSR(PVOID param) + + BUG_ON(!virt_addr_valid(msr_list)); + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, +- virt_to_phys(msr_list)); ++ if (acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, ++ virt_to_phys(msr_list)) != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR( ++ "[ACRN][HC:MSR_OPS][%s]: returned with error", ++ __func__); ++ goto cleanup; ++ } + + for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); + cpu_idx++) { +@@ -2686,6 +2691,7 @@ static VOID lwpmudrv_Read_MSR(PVOID param) + MSR_DATA_value(this_node) = msr_list[cpu_idx].entries[0].value; + } + ++cleanup: + msr_list = CONTROL_Free_Memory(msr_list); + #endif + +@@ -2845,9 +2851,12 @@ static VOID lwpmudrv_Write_MSR(PVOID param) + + BUG_ON(!virt_addr_valid(msr_list)); + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, +- virt_to_phys(msr_list)); +- ++ if (acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, ++ virt_to_phys(msr_list)) != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR( ++ "[ACRN][HC:MSR_OPS][%s]: returned with error", ++ __func__); ++ } + msr_list = CONTROL_Free_Memory(msr_list); + #endif + +@@ -4100,9 +4109,16 @@ static OS_STATUS lwpmudrv_Start(void) + BUG_ON(!virt_addr_valid(control)); + control->collector_id = COLLECTOR_SEP; + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_CONTROL_SWITCH, ++ status = 
acrn_hypercall2(HC_PROFILING_OPS, ++ PROFILING_GET_CONTROL_SWITCH, + virt_to_phys(control)); +- ++ if (status != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "[ACRN][HC:GET_CONTROL_SWITCH][%s]: Failed to get control switch info", ++ __func__); ++ control = CONTROL_Free_Memory(control); ++ return status; ++ } + SEP_DRV_LOG_TRACE("ACRN profiling collection running 0x%llx\n", + control->switches); + +@@ -4115,10 +4131,19 @@ static OS_STATUS lwpmudrv_Start(void) + control->switches |= (1 << CORE_PMU_COUNTING); + } + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, ++ status = acrn_hypercall2(HC_PROFILING_OPS, ++ PROFILING_SET_CONTROL_SWITCH, + virt_to_phys(control)); ++ + control = CONTROL_Free_Memory(control); + ++ if (status != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "[ACRN][HC:SET_CONTROL_SWITCH][%s]: Failed to set control switch info", ++ __func__); ++ return status; ++ } ++ + lwpmudrv_ACRN_Flush_Start_Timer(); + #endif + +@@ -4145,6 +4170,7 @@ static OS_STATUS lwpmudrv_Start(void) + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); ++ + return status; + } + +@@ -4247,8 +4273,12 @@ static OS_STATUS lwpmudrv_Prepare_Stop(void) + BUG_ON(!virt_addr_valid(control)); + control->collector_id = COLLECTOR_SEP; + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_CONTROL_SWITCH, +- virt_to_phys(control)); ++ if (acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_CONTROL_SWITCH, ++ virt_to_phys(control)) != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR( ++ "[ACRN][HC:GET_CONTROL_SWITCH][%s]: Failed to get control info", ++ __func__); ++ } + + SEP_DRV_LOG_TRACE("ACRN profiling collection running 0x%llx\n", + control->switches); +@@ -4260,10 +4290,13 @@ static OS_STATUS lwpmudrv_Prepare_Stop(void) + control->switches &= ~(1 << CORE_PMU_COUNTING); + } + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, +- virt_to_phys(control)); ++ if (acrn_hypercall2(HC_PROFILING_OPS, PROFILING_SET_CONTROL_SWITCH, ++ virt_to_phys(control)) != OS_SUCCESS) { 
++ SEP_DRV_LOG_ERROR( ++ "[ACRN][HC:SET_CONTROL_SWITCH][%s]: Failed to set control info", ++ __func__); ++ } + control = CONTROL_Free_Memory(control); +- + lwpmudrv_ACRN_Flush_Stop_Timer(); + SEP_DRV_LOG_TRACE("Calling final PMI_Buffer_Handler\n"); + +@@ -5995,8 +6028,14 @@ static OS_STATUS lwpmudrv_Get_Sample_Drop_Info(IOCTL_ARGS args) + memset(stats, 0, GLOBAL_STATE_num_cpus(driver_state)* + sizeof(struct profiling_status)); + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_STATUS, +- virt_to_phys(stats)); ++ if (acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_STATUS, ++ virt_to_phys(stats)) != OS_SUCCESS) { ++ stats = CONTROL_Free_Memory(stats); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "[ACRN][HC:GET_STATUS][%s]: Failed to get sample drop info", ++ __func__); ++ return OS_INVALID; ++ } + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state) + && size < MAX_SAMPLE_DROP_NODES; i++) { +@@ -6370,9 +6409,13 @@ static OS_STATUS lwpmudrv_Get_Num_Of_Vms(IOCTL_ARGS args) + + BUG_ON(!virt_addr_valid(vm_info_list)); + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, +- virt_to_phys(vm_info_list)); +- ++ if (acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, ++ virt_to_phys(vm_info_list)) != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "[ACRN][HC:GET_VMINFO][%s]: Failed to get VM info", ++ __func__); ++ return OS_INVALID; ++ } + vm_map.num_vms = 0; + for (i = 0; i < vm_info_list->num_vms; i++) { + if (vm_info_list->vm_list[i].num_vcpus != 0) { +@@ -7296,8 +7339,14 @@ static int lwpmu_Load(void) + + BUG_ON(!virt_addr_valid(vm_info_list)); + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, ++ status = acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_VMINFO, + virt_to_phys(vm_info_list)); ++ if (status != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "[ACRN][HC:GET_VMINFO][%s]: Failed to get VM information", ++ __func__); ++ return OS_INVALID; ++ } + #endif + + #if !defined(CONFIG_XEN_HAVE_VPMU) +diff --git a/drivers/platform/x86/sepdk/sep/perfver4.c 
b/drivers/platform/x86/sepdk/sep/perfver4.c +index ae8fa717f4bf..7995c3d20f49 100755 +--- a/drivers/platform/x86/sepdk/sep/perfver4.c ++++ b/drivers/platform/x86/sepdk/sep/perfver4.c +@@ -409,9 +409,12 @@ static VOID perfver4_Write_PMU(VOID *param) + + BUG_ON(!virt_addr_valid(pmi_config)); + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_CONFIG_PMI, +- virt_to_phys(pmi_config)); +- ++ if (acrn_hypercall2(HC_PROFILING_OPS, PROFILING_CONFIG_PMI, ++ virt_to_phys(pmi_config)) != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR( ++ "[ACRN][HC:CONFIG_PMI][%s]: Failed to write PMI config info", ++ __func__); ++ } + pmi_config = CONTROL_Free_Memory(pmi_config); + #endif + +@@ -767,9 +770,14 @@ static void perfver4_Read_PMU_Data(PVOID param) + + BUG_ON(!virt_addr_valid(msr_list)); + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, +- virt_to_phys(msr_list)); +- ++ if (acrn_hypercall2(HC_PROFILING_OPS, PROFILING_MSR_OPS, ++ virt_to_phys(msr_list)) != OS_SUCCESS) { ++ msr_list = CONTROL_Free_Memory(msr_list); ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "[ACRN][HC:MSR_OPS][%s]: MSR operation failed", ++ __func__); ++ return; ++ } + for (cpu_idx = 0; cpu_idx < GLOBAL_STATE_num_cpus(driver_state); + cpu_idx++) { + pcpu = &pcb[cpu_idx]; +diff --git a/drivers/platform/x86/sepdk/sep/utility.c b/drivers/platform/x86/sepdk/sep/utility.c +index cc4f0cba5e9e..65fa379ff0a8 100755 +--- a/drivers/platform/x86/sepdk/sep/utility.c ++++ b/drivers/platform/x86/sepdk/sep/utility.c +@@ -126,8 +126,13 @@ VOID UTILITY_Read_Cpuid(U64 cpuid_function, U64 *rax_value, + + BUG_ON(!virt_addr_valid(&pcpuid)); + +- acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_PCPUID, +- virt_to_phys(&pcpuid)); ++ if (acrn_hypercall2(HC_PROFILING_OPS, PROFILING_GET_PCPUID, ++ virt_to_phys(&pcpuid)) != OS_SUCCESS) { ++ SEP_DRV_LOG_ERROR_FLOW_OUT( ++ "[ACRN][HC:GET_PCPUID][%s]: Failed to get CPUID info", ++ __func__); ++ return; ++ } + + if (rax_value != NULL) { + *rax_value = pcpuid.eax; +-- +2.17.1 + diff --git 
a/patches/0025-console-introduce-exit-callback.lpss b/patches/0025-console-introduce-exit-callback.lpss new file mode 100644 index 0000000000..8df716bf5e --- /dev/null +++ b/patches/0025-console-introduce-exit-callback.lpss @@ -0,0 +1,45 @@ +From 9df3a5829e566bbd324602507a252a81c092991a Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Wed, 23 Nov 2016 17:04:16 +0200 +Subject: [PATCH 25/40] console: introduce ->exit() callback + +Some consoles might require special operations on unregistering. For example, +serial console, when registered in the kernel, keeps power on for entire time, +until it gets unregistered. For such cases to have a balance we would provide +->exit() callback. + +Signed-off-by: Andy Shevchenko +--- + include/linux/console.h | 1 + + kernel/printk/printk.c | 3 +++ + 2 files changed, 4 insertions(+) + +diff --git a/include/linux/console.h b/include/linux/console.h +index d09951d5a94e..13c26a1db3f5 100644 +--- a/include/linux/console.h ++++ b/include/linux/console.h +@@ -149,6 +149,7 @@ struct console { + struct tty_driver *(*device)(struct console *, int *); + void (*unblank)(void); + int (*setup)(struct console *, char *); ++ void (*exit)(struct console *); + int (*match)(struct console *, char *name, int idx, char *options); + short flags; + short index; +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index ca65327a6de8..28349e725dc4 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -2850,6 +2850,9 @@ int unregister_console(struct console *console) + if (console_drivers != NULL && console->flags & CON_CONSDEV) + console_drivers->flags |= CON_CONSDEV; + ++ if (console->exit) ++ console->exit(console); ++ + console->flags &= ~CON_ENABLED; + console_unlock(); + console_sysfs_notify(); +-- +2.17.1 + diff --git a/patches/0025-drm-i915-Unconfuse-pipe-vs.-crtc-index-in-i915_get_crt.drm b/patches/0025-drm-i915-Unconfuse-pipe-vs.-crtc-index-in-i915_get_crt.drm new file mode 100644 index 0000000000..01f610c3b6 --- 
/dev/null +++ b/patches/0025-drm-i915-Unconfuse-pipe-vs.-crtc-index-in-i915_get_crt.drm @@ -0,0 +1,53 @@ +From 35977d5d93ecc6b1c8b7bf908b43c313d4a0d276 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= +Date: Wed, 21 Aug 2019 20:30:30 +0300 +Subject: [PATCH 025/690] drm/i915: Unconfuse pipe vs. crtc->index in + i915_get_crtc_scanoutpos() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The "pipe" argument passed in by the vblank code is in fact the crtc +index. Don't assume that is the same as the pipe. + +Signed-off-by: Ville Syrjälä +Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-2-ville.syrjala@linux.intel.com +Reviewed-by: Jani Nikula +--- + drivers/gpu/drm/i915/i915_irq.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 77391d8325bf..8ac6f6849981 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -942,14 +942,14 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) + return (position + crtc->scanline_offset) % vtotal; + } + +-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, ++bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) + { + struct drm_i915_private *dev_priv = to_i915(dev); +- struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, +- pipe); ++ struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index)); ++ enum pipe pipe = crtc->pipe; + int position; + int vbl_start, vbl_end, hsync_start, htotal, vtotal; + unsigned long irqflags; +@@ -992,7 +992,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + /* No obvious pixelcount register. 
Only query vertical + * scanout position from Display scan line register. + */ +- position = __intel_get_crtc_scanline(intel_crtc); ++ position = __intel_get_crtc_scanline(crtc); + } else { + /* Have access to pixelcount since start of frame. + * We can split this into vertical and horizontal +-- +2.17.1 + diff --git a/patches/0025-net-stmmac-Add-support-for-HW-accelerated-VLA.connectivity b/patches/0025-net-stmmac-Add-support-for-HW-accelerated-VLA.connectivity new file mode 100644 index 0000000000..1238a7c4c0 --- /dev/null +++ b/patches/0025-net-stmmac-Add-support-for-HW-accelerated-VLA.connectivity @@ -0,0 +1,261 @@ +From b0d9fb01e4d021c26ee55cf10f15bd8ee5bfd06e Mon Sep 17 00:00:00 2001 +From: "Chuah, Kim Tatt" +Date: Tue, 26 Jun 2018 06:14:17 +0800 +Subject: [PATCH 025/108] net: stmmac: Add support for HW-accelerated VLAN + stripping + +Currently, VLAN tag stripping is done by driver in stmmac_rx_vlan(). +Add support for VLAN tag stripping by the MAC hardware for MAC drivers +that support it. This is done by adding rx_hw_van() and set_hw_vlan_mode() +callbacks at stmmac_ops struct which are called if registered by the MAC +driver. 
+ +Signed-off-by: Chuah, Kim Tatt +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 10 +++++ + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 40 +++++++++++++++++++ + .../ethernet/stmicro/stmmac/dwmac4_descs.c | 13 ++++++ + drivers/net/ethernet/stmicro/stmmac/hwif.h | 16 ++++++++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 18 ++++++++- + 5 files changed, 96 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +index b960d14014f7..29dd67a48a15 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -82,6 +82,16 @@ + #define GMAC_VLAN_VLC GENMASK(17, 16) + #define GMAC_VLAN_VLC_SHIFT 16 + ++/* MAC VLAN Tag Control */ ++#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21) ++#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21 ++#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24) ++ ++#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) ++#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) ++#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) ++#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) ++ + /* MAC RX Queue Enable */ + #define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) + #define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 56a0e858c9cd..e29e189cec8c 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include "stmmac.h" + #include "stmmac_pcs.h" + #include "intel_serdes.h" +@@ -806,6 +807,39 @@ static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en, + writel(value, ioaddr + GMAC_CONFIG); + } + ++static void dwmac4_rx_hw_vlan(struct 
net_device *dev, ++ struct mac_device_info *hw, ++ struct dma_desc *rx_desc, struct sk_buff *skb) ++{ ++ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ++ hw->desc->get_rx_vlan_valid(rx_desc)) { ++ u16 vid = (u16)hw->desc->get_rx_vlan_tci(rx_desc); ++ ++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); ++ } ++} ++ ++static void dwmac4_set_hw_vlan_mode(void __iomem *ioaddr, ++ netdev_features_t features) ++{ ++ u32 val; ++ ++ val = readl(ioaddr + GMAC_VLAN_TAG); ++ val &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK; ++ ++ if (features & NETIF_F_HW_VLAN_CTAG_RX) ++ /* Always strip VLAN on Receive */ ++ val |= GMAC_VLAN_TAG_STRIP_ALL; ++ else ++ /* Do not strip VLAN on Receive */ ++ val |= GMAC_VLAN_TAG_STRIP_NONE; ++ ++ /* Enable outer VLAN Tag in Rx DMA descriptor */ ++ val |= GMAC_VLAN_TAG_CTRL_EVLRXS; ++ ++ writel(val, ioaddr + GMAC_VLAN_TAG); ++} ++ + const struct stmmac_ops dwmac4_ops = { + .core_init = dwmac4_core_init, + .set_mac = stmmac_set_mac, +@@ -840,6 +874,8 @@ const struct stmmac_ops dwmac4_ops = { + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, ++ .rx_hw_vlan = dwmac4_rx_hw_vlan, ++ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode, + }; + + const struct stmmac_ops dwmac410_ops = { +@@ -876,6 +912,8 @@ const struct stmmac_ops dwmac410_ops = { + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, ++ .rx_hw_vlan = dwmac4_rx_hw_vlan, ++ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode, + }; + + const struct stmmac_ops dwmac510_ops = { +@@ -917,6 +955,8 @@ const struct stmmac_ops dwmac510_ops = { + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, ++ .rx_hw_vlan = dwmac4_rx_hw_vlan, ++ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode, + }; + + int dwmac4_setup(struct stmmac_priv *priv) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +index 15eb1abba91d..4677dd96148a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +@@ -200,6 +200,17 @@ static int dwmac4_get_tx_ls(struct dma_desc *p) + >> TDES3_LAST_DESCRIPTOR_SHIFT; + } + ++static inline int dwmac4_wrback_get_rx_vlan_tci(struct dma_desc *p) ++{ ++ return (le32_to_cpu(p->des0) & RDES0_VLAN_TAG_MASK); ++} ++ ++static inline bool dwmac4_wrback_get_rx_vlan_valid(struct dma_desc *p) ++{ ++ return ((le32_to_cpu(p->des3) & RDES3_LAST_DESCRIPTOR) && ++ (le32_to_cpu(p->des3) & RDES3_RDES0_VALID)); ++} ++ + static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe) + { + return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK); +@@ -500,6 +511,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { + .set_tx_owner = dwmac4_set_tx_owner, + .set_rx_owner = dwmac4_set_rx_owner, + .get_tx_ls = dwmac4_get_tx_ls, ++ .get_rx_vlan_tci = dwmac4_wrback_get_rx_vlan_tci, ++ .get_rx_vlan_valid = dwmac4_wrback_get_rx_vlan_valid, + .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, + .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, + .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 591c227cd50e..f0e3dd59f1ae 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -54,6 +54,10 @@ struct stmmac_desc_ops { + void (*set_tx_ic)(struct dma_desc *p); + /* Last tx segment reports the transmit status */ + int (*get_tx_ls)(struct dma_desc *p); ++ /* RX VLAN TCI */ ++ int (*get_rx_vlan_tci)(struct dma_desc *p); ++ /* RX VLAN valid */ ++ bool (*get_rx_vlan_valid)(struct dma_desc *p); + /* Return the transmit status looking at the TDES1 */ + int (*tx_status)(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr); +@@ -115,6 +119,10 
@@ struct stmmac_desc_ops { + stmmac_do_void_callback(__priv, desc, set_tx_ic, __args) + #define stmmac_get_tx_ls(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_ls, __args) ++#define stmmac_get_rx_vlan_tci(__priv, __args...) \ ++ stmmac_do_callback(__priv, desc, get_rx_vlan_tci, __args) ++#define stmmac_get_rx_vlan_valid(__priv, __args...) \ ++ stmmac_do_callback(__priv, desc, get_rx_vlan_valid, __args) + #define stmmac_tx_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, tx_status, __args) + #define stmmac_get_tx_len(__priv, __args...) \ +@@ -359,6 +367,10 @@ struct stmmac_ops { + void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash, + bool is_double); + void (*enable_vlan)(struct mac_device_info *hw, u32 type); ++ void (*rx_hw_vlan)(struct net_device *dev, struct mac_device_info *hw, ++ struct dma_desc *rx_desc, struct sk_buff *skb); ++ void (*set_hw_vlan_mode)(void __iomem *ioaddr, ++ netdev_features_t features); + /* TX Timestamp */ + int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts); + /* Source Address Insertion / Replacement */ +@@ -447,6 +459,10 @@ struct stmmac_ops { + stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args) + #define stmmac_enable_vlan(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, enable_vlan, __args) ++#define stmmac_rx_hw_vlan(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args) ++#define stmmac_set_hw_vlan_mode(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args) + #define stmmac_get_mac_tx_timestamp(__priv, __args...) \ + stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args) + #define stmmac_sarc_configure(__priv, __args...) 
\ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 2fccc9af4073..ae49261ff89a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2649,6 +2649,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) + /* Start the ball rolling... */ + stmmac_start_all_dma(priv); + ++ /* Set HW VLAN stripping mode */ ++ stmmac_set_hw_vlan_mode(priv, priv->ioaddr, dev->features); ++ + return 0; + } + +@@ -3714,6 +3717,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) + unsigned int sec_len; + int entry; + u32 hash; ++ int ret; + + if (!count && rx_q->state_saved) { + skb = rx_q->state.skb; +@@ -3862,7 +3866,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) + /* Got entire packet into SKB. Finish it. */ + + stmmac_get_rx_hwtstamp(priv, p, np, skb); +- stmmac_rx_vlan(priv->dev, skb); ++ ++ /* Use HW to strip VLAN header before fallback to SW. 
*/ ++ ret = stmmac_rx_hw_vlan(priv, priv->dev, ++ priv->hw, p, skb); ++ if (ret == -EINVAL) ++ stmmac_rx_vlan(priv->dev, skb); ++ + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) +@@ -4032,6 +4042,9 @@ static int stmmac_set_features(struct net_device *netdev, + struct stmmac_priv *priv = netdev_priv(netdev); + bool sph_en; + u32 chan; ++ netdev_features_t changed; ++ ++ changed = netdev->features ^ features; + + /* Keep the COE Type in case of csum is supporting */ + if (features & NETIF_F_RXCSUM) +@@ -4047,6 +4060,9 @@ static int stmmac_set_features(struct net_device *netdev, + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + ++ if (changed & NETIF_F_HW_VLAN_CTAG_RX) ++ stmmac_set_hw_vlan_mode(priv, priv->ioaddr, features); ++ + netdev->features = features; + + return 0; +-- +2.17.1 + diff --git a/patches/0025-rpmb-add-rpmb-multiplexor-kernel-module.security b/patches/0025-rpmb-add-rpmb-multiplexor-kernel-module.security new file mode 100644 index 0000000000..53b3808eda --- /dev/null +++ b/patches/0025-rpmb-add-rpmb-multiplexor-kernel-module.security @@ -0,0 +1,814 @@ +From dd92fa10b036724f700a987d212952f6ead9cf20 Mon Sep 17 00:00:00 2001 +From: "Huang, Yang" +Date: Tue, 24 Jul 2018 13:52:54 +0800 +Subject: [PATCH 25/65] rpmb: add rpmb multiplexor kernel module + +This module owns RPMB authentication key in ACRN SOS kernel. +It receives the requests from SOS/DM and replaces with real +contents such as MAC calculated by real RPMB auth key. +And it will forward the requests by calling RPMB kernel driver +directly. 
+ +Change-Id: I326f1a04b45a2bc450be4b120635489ec102a0e6 +Signed-off-by: Huang, Yang +Signed-off-by: Tomas Winkler +--- + drivers/char/rpmb/Kconfig | 2 + + drivers/char/rpmb/Makefile | 2 + + drivers/char/rpmb/mux/Kconfig | 16 + + drivers/char/rpmb/mux/Makefile | 6 + + drivers/char/rpmb/mux/mux.c | 726 +++++++++++++++++++++++++++++++++ + 5 files changed, 752 insertions(+) + create mode 100644 drivers/char/rpmb/mux/Kconfig + create mode 100644 drivers/char/rpmb/mux/Makefile + create mode 100644 drivers/char/rpmb/mux/mux.c + +diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig +index 1cfcd287a665..8fdc98145586 100644 +--- a/drivers/char/rpmb/Kconfig ++++ b/drivers/char/rpmb/Kconfig +@@ -42,3 +42,5 @@ config VIRTIO_RPMB + Say yes here if you want to access virtio RPMB from user space + via character device interface /dev/vrpmb. + This device interface is only for guest/frontend virtio driver. ++ ++source "drivers/char/rpmb/mux/Kconfig" +diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile +index 7306e42f5be2..5600cef232b2 100644 +--- a/drivers/char/rpmb/Makefile ++++ b/drivers/char/rpmb/Makefile +@@ -8,3 +8,5 @@ obj-$(CONFIG_RPMB_SIM) += rpmb_sim.o + obj-$(CONFIG_VIRTIO_RPMB) += virtio_rpmb.o + + ccflags-y += -D__CHECK_ENDIAN__ ++ ++obj-$(CONFIG_RPMB_MUX) += mux/ +diff --git a/drivers/char/rpmb/mux/Kconfig b/drivers/char/rpmb/mux/Kconfig +new file mode 100644 +index 000000000000..cea084fca1a1 +--- /dev/null ++++ b/drivers/char/rpmb/mux/Kconfig +@@ -0,0 +1,16 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# Copyright (c) 2018-2019, Intel Corporation. ++ ++config RPMB_MUX ++ tristate "RPMB Mux kernel module interface /dev/rpmbmux" ++ default n ++ select RPMB ++ select CRYPTO_SHA256 ++ select CRYPTO_HMAC ++ help ++ Say yes here if you want to access RPMB from user space ++ via character device interface /dev/rpmbmux, which acts ++ as a multiplexor above RPMB native driver. 
++ ++ RPMB MUX owns RPMB authentication key internally for RPMB ++ virtualization usage. +diff --git a/drivers/char/rpmb/mux/Makefile b/drivers/char/rpmb/mux/Makefile +new file mode 100644 +index 000000000000..127a3bf2df64 +--- /dev/null ++++ b/drivers/char/rpmb/mux/Makefile +@@ -0,0 +1,6 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# Copyright (c) 2018-2019, Intel Corporation. ++ ++obj-$(CONFIG_RPMB_MUX) += mux.o ++ ++ccflags-y += -D__CHECK_ENDIAN__ +diff --git a/drivers/char/rpmb/mux/mux.c b/drivers/char/rpmb/mux/mux.c +new file mode 100644 +index 000000000000..59c7ed2c8944 +--- /dev/null ++++ b/drivers/char/rpmb/mux/mux.c +@@ -0,0 +1,726 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (c) 2018-2019 Intel Corporation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/** ++ * struct rpmb_mux_dev - device which can support RPMB partition ++ * @lock : the device lock ++ * @rdev : point to the rpmb device ++ * @cdev : character dev ++ * @rpmb_interface : rpmb class interface ++ * @write_counter : write counter of RPMB ++ * @wc_inited : write counter is initialized ++ * @rpmb_key : RPMB authentication key ++ * @hash_desc : hmac(sha256) shash descriptor ++ */ ++struct rpmb_mux_dev { ++ struct mutex lock; /* device serialization lock */ ++ struct rpmb_dev *rdev; ++ struct cdev cdev; ++ struct class_interface rpmb_interface; ++ ++ u32 write_counter; ++ u32 wc_inited; ++ u8 rpmb_key[32]; ++ struct shash_desc *hash_desc; ++}; ++ ++static dev_t rpmb_mux_devt; ++static struct rpmb_mux_dev *__mux_dev; ++static struct class *rpmb_mux_class; ++/* from MMC_IOC_MAX_CMDS */ ++#define RPMB_MAX_FRAMES 255 ++ ++static int rpmb_mux_open(struct inode *inode, struct file *fp) ++{ ++ struct rpmb_mux_dev *mux_dev; ++ ++ mux_dev = container_of(inode->i_cdev, struct rpmb_mux_dev, cdev); ++ if (!mux_dev) ++ return -ENODEV; ++ ++ mutex_lock(&mux_dev->lock); ++ ++ fp->private_data = mux_dev; ++ ++ mutex_unlock(&mux_dev->lock); ++ 
++ return nonseekable_open(inode, fp); ++} ++ ++static int rpmb_mux_release(struct inode *inode, struct file *fp) ++{ ++ return 0; ++} ++ ++static int rpmb_key_retrieval(void *rpmb_key) ++{ ++ /* hard code */ ++ memset(rpmb_key, 0x31, 32); ++ return 0; ++} ++ ++static int rpmb_mux_hmac_256_alloc(struct rpmb_mux_dev *mux_dev) ++{ ++ struct shash_desc *desc; ++ struct crypto_shash *tfm; ++ ++ tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); ++ if (IS_ERR(tfm)) ++ return PTR_ERR(tfm); ++ ++ desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); ++ if (!desc) { ++ crypto_free_shash(tfm); ++ return -ENOMEM; ++ } ++ ++ desc->tfm = tfm; ++ mux_dev->hash_desc = desc; ++ ++ return 0; ++} ++ ++static void rpmb_mux_hmac_256_free(struct rpmb_mux_dev *mux_dev) ++{ ++ struct shash_desc *desc = mux_dev->hash_desc; ++ ++ crypto_free_shash(desc->tfm); ++ kfree(desc); ++ ++ mux_dev->hash_desc = NULL; ++} ++ ++static int rpmb_mux_calc_hmac(struct rpmb_mux_dev *mux_dev, ++ struct rpmb_frame_jdec *frames, ++ unsigned int blks, u8 *mac) ++{ ++ struct shash_desc *desc = mux_dev->hash_desc; ++ int ret; ++ unsigned int i; ++ ++ ret = crypto_shash_init(desc); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < blks; i++) { ++ ret = crypto_shash_update(desc, frames[i].data, ++ rpmb_jdec_hmac_data_len); ++ if (ret) ++ return ret; ++ } ++ ++ ret = crypto_shash_final(desc, mac); ++ ++ return ret; ++} ++ ++static int rpmb_program_key(struct rpmb_mux_dev *mux_dev) ++{ ++ struct rpmb_frame_jdec *frame_write, *frame_rel, *frame_out; ++ struct rpmb_cmd *cmds; ++ int ret; ++ ++ frame_write = kzalloc(sizeof(*frame_write), GFP_KERNEL); ++ frame_rel = kzalloc(sizeof(*frame_rel), GFP_KERNEL); ++ frame_out = kzalloc(sizeof(*frame_out), GFP_KERNEL); ++ cmds = kcalloc(3, sizeof(*cmds), GFP_KERNEL); ++ if (!frame_write || !frame_rel || !frame_out || !cmds) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ /* fill rel write frame */ ++ memcpy(frame_rel->key_mac, mux_dev->rpmb_key, ++ 
sizeof(mux_dev->rpmb_key)); ++ frame_rel->req_resp = cpu_to_be16(RPMB_PROGRAM_KEY); ++ ++ /* fill write frame */ ++ frame_write->req_resp = cpu_to_be16(RPMB_RESULT_READ); ++ ++ /* fill io cmd */ ++ cmds[0].flags = RPMB_F_WRITE | RPMB_F_REL_WRITE; ++ cmds[0].nframes = 1; ++ cmds[0].frames = frame_rel; ++ cmds[1].flags = RPMB_F_WRITE; ++ cmds[1].nframes = 1; ++ cmds[1].frames = frame_write; ++ cmds[2].flags = 0; ++ cmds[2].nframes = 1; ++ cmds[2].frames = frame_out; ++ ++ ret = rpmb_cmd_seq(mux_dev->rdev, cmds, 3); ++ if (ret) ++ goto out; ++ ++ if (be16_to_cpu(frame_out->result) != RPMB_ERR_OK) { ++ ret = -EPERM; ++ dev_err(&mux_dev->rdev->dev, "rpmb program key failed(0x%X).\n", ++ be16_to_cpu(frame_out->result)); ++ } ++ ++out: ++ kfree(frame_write); ++ kfree(frame_rel); ++ kfree(frame_out); ++ kfree(cmds); ++ ++ return ret; ++} ++ ++static int rpmb_get_counter(struct rpmb_mux_dev *mux_dev) ++{ ++ struct rpmb_frame_jdec *in_frame, *out_frame; ++ struct rpmb_cmd *cmds; ++ int ret; ++ u8 mac[32]; ++ ++ in_frame = kzalloc(sizeof(*in_frame), GFP_KERNEL); ++ out_frame = kzalloc(sizeof(*out_frame), GFP_KERNEL); ++ cmds = kcalloc(2, sizeof(*cmds), GFP_KERNEL); ++ if (!in_frame || !out_frame || !cmds) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ in_frame->req_resp = cpu_to_be16(RPMB_GET_WRITE_COUNTER); ++ cmds[0].flags = RPMB_F_WRITE; ++ cmds[0].nframes = 1; ++ cmds[0].frames = in_frame; ++ cmds[1].flags = 0; ++ cmds[1].nframes = 1; ++ cmds[1].frames = out_frame; ++ ++ ret = rpmb_cmd_seq(mux_dev->rdev, cmds, 2); ++ if (ret) ++ goto out; ++ ++ ret = rpmb_mux_calc_hmac(mux_dev, out_frame, 1, mac); ++ if (ret) { ++ dev_err(&mux_dev->rdev->dev, "MAC calculation failed for read counter\n"); ++ goto out; ++ } ++ ++ if (memcmp(mac, out_frame->key_mac, sizeof(mac))) { ++ ret = -EPERM; ++ dev_err(&mux_dev->rdev->dev, "MAC check failed for read counter\n"); ++ goto out; ++ } ++ ++ if (be16_to_cpu(out_frame->result) == RPMB_ERR_NO_KEY) { ++ dev_dbg(&mux_dev->rdev->dev, "Start to 
program key...\n"); ++ ret = rpmb_program_key(mux_dev); ++ if (ret) ++ goto out; ++ } else if (be16_to_cpu(out_frame->result) != RPMB_ERR_OK) { ++ ret = -EPERM; ++ dev_err(&mux_dev->rdev->dev, "get rpmb counter failed(0x%X).\n", ++ be16_to_cpu(out_frame->result)); ++ goto out; ++ } ++ ++ mux_dev->write_counter = be32_to_cpu(out_frame->write_counter); ++ ++out: ++ kfree(in_frame); ++ kfree(out_frame); ++ kfree(cmds); ++ ++ return ret; ++} ++ ++static size_t rpmb_ioc_frames_len(struct rpmb_dev *rdev, size_t nframes) ++{ ++ return rpmb_ioc_frames_len_jdec(nframes); ++} ++ ++/** ++ * rpmb_mux_copy_from_user - copy rpmb command from the user space ++ * ++ * @rdev: rpmb device ++ * @cmd: internal cmd structure ++ * @ucmd: user space cmd structure ++ * ++ * Return: 0 on success, <0 on error ++ */ ++static int rpmb_mux_copy_from_user(struct rpmb_dev *rdev, ++ struct rpmb_cmd *cmd, ++ struct rpmb_ioc_cmd __user *ucmd) ++{ ++ void *frames; ++ u64 frames_ptr; ++ ++ if (get_user(cmd->flags, &ucmd->flags)) ++ return -EFAULT; ++ ++ if (get_user(cmd->nframes, &ucmd->nframes)) ++ return -EFAULT; ++ ++ if (cmd->nframes > RPMB_MAX_FRAMES) ++ return -EOVERFLOW; ++ ++ /* some archs have issues with 64bit get_user */ ++ if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) ++ return -EFAULT; ++ ++ frames = memdup_user(u64_to_user_ptr(frames_ptr), ++ rpmb_ioc_frames_len(rdev, cmd->nframes)); ++ if (IS_ERR(frames)) ++ return PTR_ERR(frames); ++ ++ cmd->frames = frames; ++ return 0; ++} ++ ++/** ++ * rpmb_mux_copy_to_user - copy rpmb command to the user space ++ * ++ * @rdev: rpmb device ++ * @ucmd: user space cmd structure ++ * @cmd: internal cmd structure ++ * ++ * Return: 0 on success, <0 on error ++ */ ++static int rpmb_mux_copy_to_user(struct rpmb_dev *rdev, ++ struct rpmb_ioc_cmd __user *ucmd, ++ struct rpmb_cmd *cmd) ++{ ++ u64 frames_ptr; ++ ++ if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) ++ return -EFAULT; ++ ++ /* some archs have 
issues with 64bit get_user */ ++ if (copy_to_user(u64_to_user_ptr(frames_ptr), cmd->frames, ++ rpmb_ioc_frames_len(rdev, cmd->nframes))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int rpmb_replace_write_frame(struct rpmb_mux_dev *mux_dev, ++ struct rpmb_cmd *cmds, u32 ncmd) ++{ ++ u32 i; ++ u32 frame_cnt; ++ __be32 write_counter; ++ struct rpmb_frame_jdec *in_frames = cmds[0].frames; ++ ++ if (in_frames->req_resp != cpu_to_be16(RPMB_WRITE_DATA)) { ++ dev_err(&mux_dev->rdev->dev, "rpmb ioctl frame is unsupported(0x%X).\n", ++ in_frames->req_resp); ++ return -EINVAL; ++ } ++ ++ frame_cnt = cmds[0].nframes; ++ write_counter = cpu_to_be32(mux_dev->write_counter); ++ for (i = 0; i < frame_cnt; i++) ++ in_frames[i].write_counter = write_counter; ++ ++ if (rpmb_mux_calc_hmac(mux_dev, in_frames, frame_cnt, ++ in_frames[frame_cnt - 1].key_mac)) { ++ dev_err(&mux_dev->rdev->dev, "MAC calculation failed for rpmb write\n"); ++ return -ERANGE; ++ } ++ ++ return 0; ++} ++ ++static int rpmb_check_mac(struct rpmb_mux_dev *mux_dev, struct rpmb_cmd *cmds) ++{ ++ u32 frame_cnt; ++ u8 mac[32]; ++ struct rpmb_frame_jdec *in_frames = cmds[0].frames; ++ ++ frame_cnt = cmds[0].nframes; ++ ++ if (rpmb_mux_calc_hmac(mux_dev, in_frames, frame_cnt, mac)) { ++ dev_err(&mux_dev->rdev->dev, "MAC calculation failed for rpmb write\n"); ++ return -ERANGE; ++ } ++ ++ if (memcmp(mac, in_frames[frame_cnt - 1].key_mac, sizeof(mac))) { ++ dev_err(&mux_dev->rdev->dev, "MAC check failed for write data\n"); ++ return -EPERM; ++ } ++ ++ return 0; ++} ++ ++static int rpmb_check_result(struct rpmb_mux_dev *mux_dev, ++ struct rpmb_cmd *cmds, u32 ncmd) ++{ ++ struct rpmb_frame_jdec *out_frames = cmds[ncmd - 1].frames; ++ int ret; ++ ++ ret = rpmb_check_mac(mux_dev, cmds); ++ if (ret) { ++ dev_err(&mux_dev->rdev->dev, "rpmb check mac fail!\n"); ++ return ret; ++ } ++ ++ /* write retry */ ++ if (out_frames->result == cpu_to_be16(RPMB_ERR_COUNTER)) { ++ dev_err(&mux_dev->rdev->dev, "rpmb counter error, 
write retry!\n"); ++ memset(out_frames, 0, sizeof(*out_frames)); ++ ++ ret = rpmb_get_counter(mux_dev); ++ if (ret) { ++ dev_err(&mux_dev->rdev->dev, "rpmb_get_counter failed!\n"); ++ return ret; ++ } ++ ++ /* Since phy_counter has changed, ++ * so we have to generate mac again ++ */ ++ ret = rpmb_replace_write_frame(mux_dev, cmds, ncmd); ++ if (ret) { ++ dev_err(&mux_dev->rdev->dev, "rpmb replace write frame failed\n"); ++ return ret; ++ } ++ ++ ret = rpmb_cmd_seq(mux_dev->rdev, cmds, ncmd); ++ if (ret) { ++ dev_err(&mux_dev->rdev->dev, "rpmb write retry failed\n"); ++ return ret; ++ } ++ ++ ret = rpmb_check_mac(mux_dev, cmds); ++ if (ret) { ++ dev_err(&mux_dev->rdev->dev, "write retry rpmb check mac fail!\n"); ++ return ret; ++ } ++ } ++ ++ if (out_frames->result == cpu_to_be16(RPMB_ERR_OK)) { ++ dev_dbg(&mux_dev->rdev->dev, "write_counter =%d\n", ++ mux_dev->write_counter); ++ mux_dev->write_counter++; ++ } else { ++ dev_err(&mux_dev->rdev->dev, "ERR result is 0x%X.\n", ++ be16_to_cpu(out_frames->result)); ++ } ++ ++ return 0; ++} ++ ++/** ++ * rpmb_ioctl_seq_cmd() - issue an rpmb command sequence ++ * @mux_dev: rpmb mux_device ++ * @ptr: rpmb cmd sequence ++ * ++ * RPMB_IOC_SEQ_CMD handler ++ * ++ * Return: 0 on success, <0 on error ++ */ ++static long rpmb_ioctl_seq_cmd(struct rpmb_mux_dev *mux_dev, ++ struct rpmb_ioc_seq_cmd __user *ptr) ++{ ++ struct rpmb_dev *rdev = mux_dev->rdev; ++ __u64 ncmds; ++ struct rpmb_cmd *cmds; ++ struct rpmb_ioc_cmd __user *ucmds; ++ unsigned int i; ++ int ret; ++ ++ /* The caller must have CAP_SYS_RAWIO, like mmc ioctl */ ++ if (!capable(CAP_SYS_RAWIO)) ++ return -EPERM; ++ ++ /* some archs have issues with 64bit get_user */ ++ if (copy_from_user(&ncmds, &ptr->num_of_cmds, sizeof(ncmds))) ++ return -EFAULT; ++ ++ if (ncmds > 3) { ++ dev_err(&rdev->dev, "supporting up to 3 packets (%llu)\n", ++ ncmds); ++ return -EINVAL; ++ } ++ ++ cmds = kcalloc(ncmds, sizeof(*cmds), GFP_KERNEL); ++ if (!cmds) ++ return -ENOMEM; ++ ++ ucmds = 
(struct rpmb_ioc_cmd __user *)ptr->cmds; ++ for (i = 0; i < ncmds; i++) { ++ ret = rpmb_mux_copy_from_user(rdev, &cmds[i], &ucmds[i]); ++ if (ret) ++ goto out; ++ } ++ ++ if (cmds->flags & RPMB_F_REL_WRITE) { ++ ret = rpmb_replace_write_frame(mux_dev, cmds, ncmds); ++ if (ret) ++ goto out; ++ } ++ ++ ret = rpmb_cmd_seq(rdev, cmds, ncmds); ++ if (ret) ++ goto out; ++ ++ if (cmds->flags & RPMB_F_REL_WRITE) { ++ ret = rpmb_check_result(mux_dev, cmds, ncmds); ++ if (ret) ++ goto out; ++ } ++ ++ for (i = 0; i < ncmds; i++) { ++ ret = rpmb_mux_copy_to_user(rdev, &ucmds[i], &cmds[i]); ++ if (ret) ++ goto out; ++ } ++ ++out: ++ for (i = 0; i < ncmds; i++) ++ kfree(cmds[i].frames); ++ kfree(cmds); ++ ++ return ret; ++} ++ ++static long rpmb_mux_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ long ret; ++ struct rpmb_mux_dev *mux_dev = fp->private_data; ++ void __user *ptr = (void __user *)arg; ++ ++ mutex_lock(&mux_dev->lock); ++ ++ if (!mux_dev->rdev) { ++ pr_err("rpmb dev is NULL!\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (!mux_dev->wc_inited) { ++ ret = rpmb_get_counter(mux_dev); ++ if (ret) { ++ dev_err(&mux_dev->rdev->dev, ++ "init counter failed = %ld\n", ret); ++ goto out; ++ } ++ ++ mux_dev->wc_inited = true; ++ } ++ ++ switch (cmd) { ++ case RPMB_IOC_SEQ_CMD: ++ ret = rpmb_ioctl_seq_cmd(mux_dev, ptr); ++ break; ++ default: ++ dev_err(&mux_dev->rdev->dev, "unsupport:0x%X!!!\n", cmd); ++ ret = -ENOIOCTLCMD; ++ } ++ ++out: ++ mutex_unlock(&mux_dev->lock); ++ ++ return ret; ++} ++ ++static int rpmb_mux_start(struct rpmb_mux_dev *mux_dev, struct rpmb_dev *rdev) ++{ ++ if (mux_dev->rdev == rdev) ++ return 0; ++ ++ if (mux_dev->rdev) { ++ dev_err(&rdev->dev, "rpmb device already registered\n"); ++ return -EEXIST; ++ } ++ ++ mux_dev->rdev = rpmb_dev_get(rdev); ++ dev_dbg(&rdev->dev, "rpmb partition created\n"); ++ return 0; ++} ++ ++static int rpmb_mux_stop(struct rpmb_mux_dev *mux_dev, struct rpmb_dev *rdev) ++{ ++ if (!mux_dev->rdev) { ++ 
dev_err(&rdev->dev, "Already stopped\n"); ++ return -EPROTO; ++ } ++ ++ if (rdev && mux_dev->rdev != rdev) { ++ dev_err(&rdev->dev, "Wrong RPMB on stop\n"); ++ return -EINVAL; ++ } ++ ++ rpmb_dev_put(mux_dev->rdev); ++ mux_dev->rdev = NULL; ++ ++ dev_dbg(&rdev->dev, "rpmb partition removed\n"); ++ return 0; ++} ++ ++static int rpmb_add_device(struct device *dev, struct class_interface *intf) ++{ ++ struct rpmb_mux_dev *mux_dev; ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ int ret; ++ ++ mux_dev = container_of(intf, struct rpmb_mux_dev, rpmb_interface); ++ ++ if (!rdev->ops) ++ return -EINVAL; ++ ++ if (rdev->ops->type != RPMB_TYPE_EMMC) { ++ dev_err(&rdev->dev, "support RPMB_TYPE_EMMC only.\n"); ++ return -ENOENT; ++ } ++ ++ mutex_lock(&mux_dev->lock); ++ ++ ret = rpmb_mux_start(mux_dev, rdev); ++ if (ret) { ++ dev_err(&rdev->dev, "fail in rpmb_mux_start.\n"); ++ mutex_unlock(&mux_dev->lock); ++ return ret; ++ } ++ ++ mutex_unlock(&mux_dev->lock); ++ ++ return 0; ++} ++ ++static void rpmb_remove_device(struct device *dev, struct class_interface *intf) ++{ ++ struct rpmb_mux_dev *mux_dev; ++ struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ ++ mux_dev = container_of(intf, struct rpmb_mux_dev, rpmb_interface); ++ ++ mutex_lock(&mux_dev->lock); ++ if (rpmb_mux_stop(mux_dev, rdev)) ++ dev_err(&rdev->dev, "fail in rpmb_mux_stop.\n"); ++ mutex_unlock(&mux_dev->lock); ++} ++ ++static const struct file_operations rpmb_mux_fops = { ++ .open = rpmb_mux_open, ++ .release = rpmb_mux_release, ++ .unlocked_ioctl = rpmb_mux_ioctl, ++ .llseek = noop_llseek, ++ .owner = THIS_MODULE, ++}; ++ ++static int __init rpmb_mux_init(void) ++{ ++ int ret; ++ struct device *class_dev; ++ struct rpmb_mux_dev *mux_dev; ++ ++ ret = alloc_chrdev_region(&rpmb_mux_devt, 0, MINORMASK, "rpmbmux"); ++ if (ret < 0) { ++ pr_err("unable to allocate char dev region\n"); ++ return ret; ++ } ++ ++ mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL); ++ if (!mux_dev) { ++ ret = -ENOMEM; ++ goto err_kzalloc; ++ } 
++ __mux_dev = mux_dev; ++ ++ cdev_init(&mux_dev->cdev, &rpmb_mux_fops); ++ mux_dev->cdev.owner = THIS_MODULE; ++ ret = cdev_add(&mux_dev->cdev, rpmb_mux_devt, 1); ++ if (ret) { ++ pr_err("unable to cdev_add.\n"); ++ goto err_cdev_add; ++ } ++ ++ rpmb_mux_class = class_create(THIS_MODULE, "rpmbmux"); ++ if (IS_ERR(rpmb_mux_class)) { ++ ret = PTR_ERR(rpmb_mux_class); ++ goto err_class_create; ++ } ++ ++ class_dev = device_create(rpmb_mux_class, NULL, ++ rpmb_mux_devt, mux_dev, "rpmbmux"); ++ if (IS_ERR(class_dev)) { ++ pr_err("failed to device_create!!!\n"); ++ ret = PTR_ERR(class_dev); ++ goto err_device_create; ++ } ++ ++ ret = rpmb_mux_hmac_256_alloc(mux_dev); ++ if (ret) { ++ pr_err("failed to set rpmb_mux_hmac_256_alloc.\n"); ++ goto err_rpmb_mux_hmac_256_alloc; ++ } ++ ++ ret = rpmb_key_retrieval(mux_dev->rpmb_key); ++ if (ret) { ++ pr_err("rpmb_key_retrieval failed.\n"); ++ goto err_rpmb_key_retrieval; ++ } ++ ++ ret = crypto_shash_setkey(mux_dev->hash_desc->tfm, ++ mux_dev->rpmb_key, 32); ++ if (ret) { ++ pr_err("set key failed = %d\n", ret); ++ goto err_crypto_shash_setkey; ++ } ++ ++ mux_dev->rpmb_interface.add_dev = rpmb_add_device; ++ mux_dev->rpmb_interface.remove_dev = rpmb_remove_device; ++ mux_dev->rpmb_interface.class = &rpmb_class; ++ ++ ret = class_interface_register(&mux_dev->rpmb_interface); ++ if (ret) { ++ pr_err("Can't register interface\n"); ++ goto err_class_interface_register; ++ } ++ ++ return 0; ++ ++err_class_interface_register: ++err_crypto_shash_setkey: ++ memset(mux_dev->rpmb_key, 0, sizeof(mux_dev->rpmb_key)); ++err_rpmb_key_retrieval: ++ rpmb_mux_hmac_256_free(mux_dev); ++err_rpmb_mux_hmac_256_alloc: ++ device_destroy(rpmb_mux_class, rpmb_mux_devt); ++err_device_create: ++ class_destroy(rpmb_mux_class); ++err_class_create: ++ cdev_del(&mux_dev->cdev); ++err_cdev_add: ++ kfree(mux_dev); ++err_kzalloc: ++ unregister_chrdev_region(rpmb_mux_devt, 0); ++ return ret; ++} ++ ++static void __exit rpmb_mux_exit(void) ++{ ++ struct 
rpmb_mux_dev *mux_dev = __mux_dev; ++ ++ class_interface_unregister(&mux_dev->rpmb_interface); ++ device_destroy(rpmb_mux_class, rpmb_mux_devt); ++ class_destroy(rpmb_mux_class); ++ cdev_del(&mux_dev->cdev); ++ unregister_chrdev_region(rpmb_mux_devt, 0); ++ ++ rpmb_mux_hmac_256_free(mux_dev); ++ memset(mux_dev->rpmb_key, 0, sizeof(mux_dev->rpmb_key)); ++ kfree(mux_dev); ++} ++ ++module_init(rpmb_mux_init); ++module_exit(rpmb_mux_exit); ++ ++MODULE_AUTHOR("Intel Corporation"); ++MODULE_DESCRIPTION("RPMB Mux kernel module"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0025-trusty-log-Add-vmm-panic-notifier-for-vmm-deadloop-.trusty b/patches/0025-trusty-log-Add-vmm-panic-notifier-for-vmm-deadloop-.trusty new file mode 100644 index 0000000000..bf1528f244 --- /dev/null +++ b/patches/0025-trusty-log-Add-vmm-panic-notifier-for-vmm-deadloop-.trusty @@ -0,0 +1,209 @@ +From 5316324ca43146736474614f13764e199181dd18 Mon Sep 17 00:00:00 2001 +From: "Yan, Shaoou" +Date: Fri, 9 Dec 2016 05:32:20 +0000 +Subject: [PATCH 25/63] trusty-log: Add vmm panic notifier for vmm deadloop + dumping + +register a new vmcall TRUSTY_VMCALL_DUMP_INIT. 
+ +Change-Id: Icee169358f30c64da44894dc5816ce5f3020fc70 +Tracked-On: OAM-40748 +Signed-off-by: syan10 +Reviewed-by: Ilkka Koskinen +--- + drivers/trusty/trusty-log.c | 107 ++++++++++++++++++++++++++++++++++++ + drivers/trusty/trusty-log.h | 22 ++++++++ + 2 files changed, 129 insertions(+) + +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +index 112287cd4739..a066481c4f1d 100644 +--- a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -26,6 +26,8 @@ + #define TRUSTY_LOG_SIZE (PAGE_SIZE * 2) + #define TRUSTY_LINE_BUFFER_SIZE 256 + ++static uint64_t g_vmm_debug_buf; ++ + struct trusty_log_state { + struct device *dev; + struct device *trusty_dev; +@@ -135,6 +137,72 @@ static int trusty_log_panic_notify(struct notifier_block *nb, + return NOTIFY_OK; + } + ++static void trusty_vmm_dump_header(struct deadloop_dump *dump) ++{ ++ struct dump_header *header; ++ ++ if (!dump) ++ return; ++ ++ header = &(dump->header); ++ pr_info("VMM version = %s\n", header->vmm_version); ++ pr_info("Signature = %s\n", header->signature); ++ pr_info("Error_info = %s\n", header->error_info); ++ pr_info("Cpuid = %d\n", header->cpuid); ++} ++ ++static void trusty_vmm_dump_data(struct deadloop_dump *dump) ++{ ++ struct dump_data *dump_data; ++ int i; ++ ++ if (!dump) ++ return; ++ ++ dump_data = &(dump->data); ++ ++ for (i = 0; i < dump_data->length; i++) ++ pr_info("%c", dump_data->data[i]); ++} ++ ++static int trusty_vmm_panic_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct deadloop_dump *dump_info; ++ ++ if (g_vmm_debug_buf) { ++ dump_info = (struct deadloop_dump *)g_vmm_debug_buf; ++ ++ if (dump_info->is_valid) { ++ pr_info("trusty-vmm panic start!\n"); ++ trusty_vmm_dump_header(dump_info); ++ trusty_vmm_dump_data(dump_info); ++ pr_info("trusty-vmm panic dump end!\n"); ++ } ++ } ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block trusty_vmm_panic_nb = { ++ .notifier_call = trusty_vmm_panic_notify, ++ 
.priority = 0, ++}; ++ ++#define TRUSTY_VMCALL_DUMP_INIT 0x74727507 ++static int trusty_vmm_dump_init(void *gva) ++{ ++ int ret = -1; ++ ++ __asm__ __volatile__( ++ "vmcall" ++ : "=a"(ret) ++ : "a"(TRUSTY_VMCALL_DUMP_INIT), "D"(gva) ++ ); ++ ++ return ret; ++} ++ + static bool trusty_supports_logging(struct device *device) + { + int result; +@@ -164,6 +232,7 @@ static int trusty_log_probe(struct platform_device *pdev) + struct trusty_log_state *s; + int result; + phys_addr_t pa; ++ struct deadloop_dump *dump; + + dev_dbg(&pdev->dev, "%s\n", __func__); + if (!trusty_supports_logging(pdev->dev.parent)) { +@@ -216,10 +285,45 @@ static int trusty_log_probe(struct platform_device *pdev) + "failed to register panic notifier\n"); + goto error_panic_notifier; + } ++ ++ /* allocate debug buffer for vmm panic dump */ ++ g_vmm_debug_buf = get_zeroed_page(GFP_KERNEL); ++ if (!g_vmm_debug_buf) { ++ result = -ENOMEM; ++ goto error_alloc_vmm; ++ } ++ ++ dump = (struct deadloop_dump *)g_vmm_debug_buf; ++ dump->version_of_this_struct = VMM_DUMP_VERSION; ++ dump->size_of_this_struct = sizeof(struct deadloop_dump); ++ dump->is_valid = false; ++ ++ /* shared the buffer to vmm by VMCALL */ ++ result = trusty_vmm_dump_init(dump); ++ if (result < 0) { ++ dev_err(&pdev->dev, ++ "failed to share the dump buffer to VMM\n"); ++ goto error_vmm_panic_notifier; ++ } ++ ++ /* register the panic notifier for vmm */ ++ result = atomic_notifier_chain_register(&panic_notifier_list, ++ &trusty_vmm_panic_nb); ++ if (result < 0) { ++ dev_err(&pdev->dev, ++ "failed to register vmm panic notifier\n"); ++ goto error_vmm_panic_notifier; ++ } ++ + platform_set_drvdata(pdev, s); + + return 0; + ++error_vmm_panic_notifier: ++ free_page(g_vmm_debug_buf); ++error_alloc_vmm: ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &s->panic_notifier); + error_panic_notifier: + trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); + error_call_notifier: +@@ -241,6 +345,8 @@ static int 
trusty_log_remove(struct platform_device *pdev) + + dev_dbg(&pdev->dev, "%s\n", __func__); + ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &trusty_vmm_panic_nb); + atomic_notifier_chain_unregister(&panic_notifier_list, + &s->panic_notifier); + trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); +@@ -253,6 +359,7 @@ static int trusty_log_remove(struct platform_device *pdev) + } + __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE)); + kfree(s); ++ free_page(g_vmm_debug_buf); + + return 0; + } +diff --git a/drivers/trusty/trusty-log.h b/drivers/trusty/trusty-log.h +index 09f60213e1f6..587bc7aaa145 100644 +--- a/drivers/trusty/trusty-log.h ++++ b/drivers/trusty/trusty-log.h +@@ -18,5 +18,27 @@ struct log_rb { + + #define TRUSTY_LOG_API_VERSION 1 + ++#define VMM_DUMP_VERSION 1 ++ ++struct dump_data { ++ uint32_t length; ++ uint8_t data[0]; ++} __packed; ++ ++struct dump_header { ++ uint8_t vmm_version[64]; /* version of the vmm */ ++ uint8_t signature[16]; /* signature for the dump structure */ ++ uint8_t error_info[32]; /* filename:linenum */ ++ uint16_t cpuid; ++} __packed; ++ ++struct deadloop_dump { ++ uint16_t size_of_this_struct; ++ uint16_t version_of_this_struct; ++ uint32_t is_valid; ++ struct dump_header header; ++ struct dump_data data; ++} __packed; ++ + #endif + +-- +2.17.1 + diff --git a/patches/0025-vhm-cleanup-ioctls.acrn b/patches/0025-vhm-cleanup-ioctls.acrn new file mode 100644 index 0000000000..e00476816c --- /dev/null +++ b/patches/0025-vhm-cleanup-ioctls.acrn @@ -0,0 +1,76 @@ +From 2b196504cca831cbf557d8f745a133da269f966f Mon Sep 17 00:00:00 2001 +From: Edwin Zhai +Date: Fri, 31 Aug 2018 10:58:57 +0800 +Subject: [PATCH 025/150] vhm: cleanup ioctls + +Redefine ioctl command number + +Change-Id: I555cdbdd03c50f9fa5b66eb95d61c8d83c60a276 +Tracked-On: 212688 +Signed-off-by: Edwin Zhai +--- + include/linux/vhm/vhm_ioctl_defs.h | 27 +++++++++++++++------------ + 1 file changed, 15 insertions(+), 12 deletions(-) + +diff 
--git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 60bfb299e040..d00b6588f296 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -53,26 +53,29 @@ + * Commmon IOCTL ID defination for VHM/DM + */ + #define _IC_ID(x, y) (((x)<<24)|(y)) +-#define IC_ID 0x5FUL ++#define IC_ID 0x43UL ++ ++/* General */ ++#define IC_ID_GEN_BASE 0x0UL ++#define IC_GET_API_VERSION _IC_ID(IC_ID, IC_ID_GEN_BASE + 0x00) + + /* VM management */ +-#define IC_ID_VM_BASE 0x0UL +-#define IC_GET_API_VERSION _IC_ID(IC_ID, IC_ID_VM_BASE + 0x00) +-#define IC_CREATE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x01) +-#define IC_DESTROY_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x02) +-#define IC_START_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03) +-#define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) +-#define IC_CREATE_VCPU _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) ++#define IC_ID_VM_BASE 0x10UL ++#define IC_CREATE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x00) ++#define IC_DESTROY_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x01) ++#define IC_START_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x02) ++#define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03) ++#define IC_CREATE_VCPU _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) + + /* IRQ and Interrupts */ +-#define IC_ID_IRQ_BASE 0x100UL ++#define IC_ID_IRQ_BASE 0x20UL + #define IC_ASSERT_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x00) + #define IC_DEASSERT_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x01) + #define IC_PULSE_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x02) + #define IC_INJECT_MSI _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x03) + + /* DM ioreq management */ +-#define IC_ID_IOREQ_BASE 0x200UL ++#define IC_ID_IOREQ_BASE 0x30UL + #define IC_SET_IOREQ_BUFFER _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x00) + #define IC_NOTIFY_REQUEST_FINISH _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x01) + #define IC_CREATE_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x02) +@@ -81,12 +84,12 @@ + + + /* Guest memory management */ +-#define IC_ID_MEM_BASE 0x300UL ++#define IC_ID_MEM_BASE 
0x40UL + #define IC_ALLOC_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x00) + #define IC_SET_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x01) + + /* PCI assignment*/ +-#define IC_ID_PCI_BASE 0x400UL ++#define IC_ID_PCI_BASE 0x50UL + #define IC_ASSIGN_PTDEV _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x00) + #define IC_DEASSIGN_PTDEV _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x01) + #define IC_VM_PCI_MSIX_REMAP _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x02) +-- +2.17.1 + diff --git a/patches/0026-ASoC-Intel-Expose-ACPI-loading-members.audio b/patches/0026-ASoC-Intel-Expose-ACPI-loading-members.audio new file mode 100644 index 0000000000..811398bbcd --- /dev/null +++ b/patches/0026-ASoC-Intel-Expose-ACPI-loading-members.audio @@ -0,0 +1,122 @@ +From 40a235186586f64cd6c737223ddd64f76a61a68d Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Fri, 16 Aug 2019 17:22:12 +0200 +Subject: [PATCH 026/193] ASoC: Intel: Expose ACPI loading members + +No framework should know upfront about specifics of its inheriting +members. sst-acpi contains Baytrail and Haswell specific data. In order +to prevent circular dependency, it's compiled into separate module. + +Let's do it right and obey inheritance rule. As a first step, elevate +sst-acpi members so they could be shared by Haswell and +Baytrail-specific handlers - this is to prevent code duplication. 
+ +Change-Id: Icc19830b2f97852550000c4ebe817a840f26f358 +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/common/sst-acpi.c | 27 ++++++--------------------- + sound/soc/intel/common/sst-dsp.h | 21 +++++++++++++++++++++ + 2 files changed, 27 insertions(+), 21 deletions(-) + +diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c +index 5854868650b9..3547e923033a 100644 +--- a/sound/soc/intel/common/sst-acpi.c ++++ b/sound/soc/intel/common/sst-acpi.c +@@ -19,23 +19,6 @@ + #define SST_WPT_DSP_DMA_ADDR_OFFSET 0x0FE000 + #define SST_LPT_DSP_DMA_SIZE (1024 - 1) + +-/* Descriptor for setting up SST platform data */ +-struct sst_acpi_desc { +- const char *drv_name; +- struct snd_soc_acpi_mach *machines; +- /* Platform resource indexes. Must set to -1 if not used */ +- int resindex_lpe_base; +- int resindex_pcicfg_base; +- int resindex_fw_base; +- int irqindex_host_ipc; +- int resindex_dma_base; +- /* Unique number identifying the SST core on platform */ +- int sst_id; +- /* DMA only valid when resindex_dma_base != -1*/ +- int dma_engine; +- int dma_size; +-}; +- + struct sst_acpi_priv { + struct platform_device *pdev_mach; + struct platform_device *pdev_pcm; +@@ -71,7 +54,7 @@ static void sst_acpi_fw_cb(const struct firmware *fw, void *context) + return; + } + +-static int sst_acpi_probe(struct platform_device *pdev) ++int sst_dsp_acpi_probe(struct platform_device *pdev) + { + const struct acpi_device_id *id; + struct device *dev = &pdev->dev; +@@ -158,8 +141,9 @@ static int sst_acpi_probe(struct platform_device *pdev) + + return ret; + } ++EXPORT_SYMBOL_GPL(sst_dsp_acpi_probe); + +-static int sst_acpi_remove(struct platform_device *pdev) ++int sst_dsp_acpi_remove(struct platform_device *pdev) + { + struct sst_acpi_priv *sst_acpi = platform_get_drvdata(pdev); + struct sst_pdata *sst_pdata = &sst_acpi->sst_pdata; +@@ -171,6 +155,7 @@ static int sst_acpi_remove(struct platform_device *pdev) + + return 0; + } 
++EXPORT_SYMBOL_GPL(sst_dsp_acpi_remove); + + static struct sst_acpi_desc sst_acpi_haswell_desc = { + .drv_name = "haswell-pcm-audio", +@@ -222,8 +207,8 @@ static const struct acpi_device_id sst_acpi_match[] = { + MODULE_DEVICE_TABLE(acpi, sst_acpi_match); + + static struct platform_driver sst_acpi_driver = { +- .probe = sst_acpi_probe, +- .remove = sst_acpi_remove, ++ .probe = sst_dsp_acpi_probe, ++ .remove = sst_dsp_acpi_remove, + .driver = { + .name = "sst-acpi", + .acpi_match_table = ACPI_PTR(sst_acpi_match), +diff --git a/sound/soc/intel/common/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h +index 63c29bb45cf1..6326c7ba10b8 100644 +--- a/sound/soc/intel/common/sst-dsp.h ++++ b/sound/soc/intel/common/sst-dsp.h +@@ -166,6 +166,27 @@ + #define SST_PMCS_PS_MASK 0x3 + + struct sst_dsp; ++struct platform_device; ++ ++/* Descriptor for setting up SST platform data */ ++struct sst_acpi_desc { ++ const char *drv_name; ++ struct snd_soc_acpi_mach *machines; ++ /* Platform resource indexes. Must set to -1 if not used */ ++ int resindex_lpe_base; ++ int resindex_pcicfg_base; ++ int resindex_fw_base; ++ int irqindex_host_ipc; ++ int resindex_dma_base; ++ /* Unique number identifying the SST core on platform */ ++ int sst_id; ++ /* DMA only valid when resindex_dma_base != -1*/ ++ int dma_engine; ++ int dma_size; ++}; ++ ++int sst_dsp_acpi_probe(struct platform_device *pdev); ++int sst_dsp_acpi_remove(struct platform_device *pdev); + + /* + * SST Platform Data. 
+-- +2.17.1 + diff --git a/patches/0026-VHM-check-HV-api-version-for-VHM-module-init.acrn b/patches/0026-VHM-check-HV-api-version-for-VHM-module-init.acrn new file mode 100644 index 0000000000..89d0158867 --- /dev/null +++ b/patches/0026-VHM-check-HV-api-version-for-VHM-module-init.acrn @@ -0,0 +1,51 @@ +From cecc1aa16d200900f711d908c82341f644bb2195 Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:57 +0800 +Subject: [PATCH 026/150] VHM: check HV api version for VHM module init + +Change-Id: I8d49db28e235fe643380b4e8b82fb629e89accaf +Tracked-On: 218802 +Signed-off-by: Jason Chen CJ +Signed-off-by: Yonghua Huang +--- + drivers/char/vhm/vhm_dev.c | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index a3510b722dab..a87324b6f2af 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -470,12 +470,30 @@ static const struct file_operations fops = { + .poll = vhm_dev_poll, + }; + ++#define SUPPORT_HV_API_VERSION_MAJOR 1 ++#define SUPPORT_HV_API_VERSION_MINOR 0 + static int __init vhm_init(void) + { + unsigned long flag; ++ struct hc_api_version api_version = {0, 0}; + + pr_info("vhm: initializing\n"); + ++ if (hcall_get_api_version(virt_to_phys(&api_version)) < 0) { ++ pr_err("vhm: failed to get api version from Hypervisor !\n"); ++ return -EINVAL; ++ } ++ ++ if (api_version.major_version == SUPPORT_HV_API_VERSION_MAJOR && ++ api_version.minor_version == SUPPORT_HV_API_VERSION_MINOR) { ++ pr_info("vhm: hv api version %d.%d\n", ++ api_version.major_version, api_version.minor_version); ++ } else { ++ pr_err("vhm: not support hv api version %d.%d!\n", ++ api_version.major_version, api_version.minor_version); ++ return -EINVAL; ++ } ++ + /* Try to dynamically allocate a major number for the device */ + major = register_chrdev(0, DEVICE_NAME, &fops); + if (major < 0) { +-- +2.17.1 + diff --git a/patches/0026-drm-i915-Use-enum-pipe-consistently.drm 
b/patches/0026-drm-i915-Use-enum-pipe-consistently.drm new file mode 100644 index 0000000000..9789da7fa3 --- /dev/null +++ b/patches/0026-drm-i915-Use-enum-pipe-consistently.drm @@ -0,0 +1,356 @@ +From 94c8855a71ea1613d00f9f8f1ad82fb0be30c68d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= +Date: Wed, 21 Aug 2019 20:30:31 +0300 +Subject: [PATCH 026/690] drm/i915: Use enum pipe consistently +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Replace all "int pipe"s with "enum pipe pipe"s to make it clear +what we're dealing with. + +Signed-off-by: Ville Syrjälä +Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-3-ville.syrjala@linux.intel.com +Reviewed-by: Jani Nikula +--- + drivers/gpu/drm/i915/display/intel_display.c | 42 +++++++++---------- + .../drm/i915/display/intel_display_types.h | 2 +- + drivers/gpu/drm/i915/display/intel_dvo.c | 2 +- + drivers/gpu/drm/i915/display/intel_lvds.c | 2 +- + drivers/gpu/drm/i915/display/vlv_dsi.c | 2 +- + drivers/gpu/drm/i915/i915_debugfs.c | 2 +- + drivers/gpu/drm/i915/i915_irq.c | 11 ++--- + drivers/gpu/drm/i915/intel_pm.c | 2 +- + 8 files changed, 33 insertions(+), 32 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c +index 6bbf04bbb2db..231741fe7098 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.c ++++ b/drivers/gpu/drm/i915/display/intel_display.c +@@ -490,7 +490,7 @@ static const struct intel_limit intel_limits_bxt = { + + /* WA Display #0827: Gen9:all */ + static void +-skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable) ++skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) + { + if (enable) + I915_WRITE(CLKGATE_DIS_PSL(pipe), +@@ -4434,7 +4434,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc) + { + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); +- int 
pipe = crtc->pipe; ++ enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 temp; + +@@ -4477,7 +4477,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc, + { + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); +- int pipe = crtc->pipe; ++ enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 temp, tries; + +@@ -4578,7 +4578,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, + { + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); +- int pipe = crtc->pipe; ++ enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 temp, i, retry; + +@@ -4711,7 +4711,7 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, + { + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); +- int pipe = crtc->pipe; ++ enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 temp, i, j; + +@@ -4829,7 +4829,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) + { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + i915_reg_t reg; + u32 temp; + +@@ -4866,7 +4866,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) + { + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + i915_reg_t reg; + u32 temp; + +@@ -4897,7 +4897,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + i915_reg_t reg; + u32 temp; + +@@ -5212,7 +5212,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, + struct 
intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); +- int pipe = crtc->pipe; ++ enum pipe pipe = crtc->pipe; + u32 temp; + + assert_pch_transcoder_disabled(dev_priv, pipe); +@@ -5307,7 +5307,7 @@ static void lpt_pch_enable(const struct intel_atomic_state *state, + lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); + } + +-static void cpt_verify_modeset(struct drm_device *dev, int pipe) ++static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe) + { + struct drm_i915_private *dev_priv = to_i915(dev); + i915_reg_t dslreg = PIPEDSL(pipe); +@@ -5646,7 +5646,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) + { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); +- int pipe = crtc->pipe; ++ enum pipe pipe = crtc->pipe; + + if (crtc_state->pch_pfit.enabled) { + /* Force use of hard-coded filter coefficients +@@ -5759,7 +5759,7 @@ intel_post_enable_primary(struct drm_crtc *crtc, + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + + /* + * Gen2 reports pipe underruns whenever all planes are disabled. +@@ -5783,7 +5783,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + + /* + * Gen2 reports pipe underruns whenever all planes are disabled. 
+@@ -6306,7 +6306,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + + if (WARN_ON(intel_crtc->active)) + return; +@@ -6439,7 +6439,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, + struct drm_crtc *crtc = pipe_config->base.crtc; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- int pipe = intel_crtc->pipe, hsw_workaround_pipe; ++ enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe; + enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + bool psl_clkgate_wa; + +@@ -6565,7 +6565,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + + /* + * Sometimes spurious CPU pipe underruns happen when the +@@ -6852,7 +6852,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + + if (WARN_ON(intel_crtc->active)) + return; +@@ -6984,7 +6984,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + + /* + * On gen2 planes are double buffered but the pipe isn't, so we must +@@ -8560,7 +8560,7 @@ static void vlv_crtc_clock_get(struct 
intel_crtc *crtc, + { + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); +- int pipe = pipe_config->cpu_transcoder; ++ enum pipe pipe = crtc->pipe; + struct dpll clock; + u32 mdiv; + int refclk = 100000; +@@ -8670,7 +8670,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, + { + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); +- int pipe = pipe_config->cpu_transcoder; ++ enum pipe pipe = crtc->pipe; + enum dpio_channel port = vlv_pipe_to_channel(pipe); + struct dpll clock; + u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; +@@ -11282,7 +11282,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, + { + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); +- int pipe = pipe_config->cpu_transcoder; ++ enum pipe pipe = crtc->pipe; + u32 dpll = pipe_config->dpll_hw_state.dpll; + u32 fp; + struct dpll clock; +diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h +index 12523456143f..96514dcc7812 100644 +--- a/drivers/gpu/drm/i915/display/intel_display_types.h ++++ b/drivers/gpu/drm/i915/display/intel_display_types.h +@@ -1509,7 +1509,7 @@ intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) + drm_wait_one_vblank(&dev_priv->drm, pipe); + } + static inline void +-intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe) ++intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe) + { + const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + +diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c +index 93baf366692e..34193d04597a 100644 +--- a/drivers/gpu/drm/i915/display/intel_dvo.c ++++ b/drivers/gpu/drm/i915/display/intel_dvo.c +@@ -280,7 +280,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder, + struct intel_crtc *crtc = 
to_intel_crtc(pipe_config->base.crtc); + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); +- int pipe = crtc->pipe; ++ enum pipe pipe = crtc->pipe; + u32 dvo_val; + i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; + i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg; +diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c +index b7c459a8931c..c786abdc3336 100644 +--- a/drivers/gpu/drm/i915/display/intel_lvds.c ++++ b/drivers/gpu/drm/i915/display/intel_lvds.c +@@ -232,7 +232,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder, + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; +- int pipe = crtc->pipe; ++ enum pipe pipe = crtc->pipe; + u32 temp; + + if (HAS_PCH_SPLIT(dev_priv)) { +diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c +index a71b22bdd95b..50064cde0724 100644 +--- a/drivers/gpu/drm/i915/display/vlv_dsi.c ++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c +@@ -749,7 +749,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, + struct drm_crtc *crtc = pipe_config->base.crtc; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- int pipe = intel_crtc->pipe; ++ enum pipe pipe = intel_crtc->pipe; + enum port port; + u32 val; + bool glk_cold_boot = false; +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index 8c1d70425424..5c1a2b1e7d34 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -376,7 +376,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) + static void gen8_display_interrupt_info(struct seq_file *m) + { + struct drm_i915_private 
*dev_priv = node_to_i915(m->private); +- int pipe; ++ enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + enum intel_display_power_domain power_domain; +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 8ac6f6849981..3f1b6ee157ba 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -1716,7 +1716,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) + static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) + { +- int pipe; ++ enum pipe pipe; + + spin_lock(&dev_priv->irq_lock); + +@@ -1741,6 +1741,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, + status_mask = PIPE_FIFO_UNDERRUN_STATUS; + + switch (pipe) { ++ default: + case PIPE_A: + iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; + break; +@@ -2136,7 +2137,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, + + static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) + { +- int pipe; ++ enum pipe pipe; + u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; + + ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); +@@ -2222,7 +2223,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) + + static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) + { +- int pipe; ++ enum pipe pipe; + u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; + + ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); +@@ -3246,7 +3247,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) + static void gen8_irq_reset(struct drm_i915_private *dev_priv) + { + struct intel_uncore *uncore = &dev_priv->uncore; +- int pipe; ++ enum pipe pipe; + + gen8_master_intr_disable(dev_priv->uncore.regs); + +@@ -3271,7 +3272,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) + static void gen11_irq_reset(struct drm_i915_private *dev_priv) + { + struct intel_uncore *uncore 
= &dev_priv->uncore; +- int pipe; ++ enum pipe pipe; + + gen11_master_intr_disable(dev_priv->uncore.regs); + +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index 09f29a337313..437cd50e5d06 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -8858,7 +8858,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) + + static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) + { +- int pipe; ++ enum pipe pipe; + u32 val; + + /* +-- +2.17.1 + diff --git a/patches/0026-net-stmmac-Add-support-for-VLAN-Rx-filtering.connectivity b/patches/0026-net-stmmac-Add-support-for-VLAN-Rx-filtering.connectivity new file mode 100644 index 0000000000..7ed12804fb --- /dev/null +++ b/patches/0026-net-stmmac-Add-support-for-VLAN-Rx-filtering.connectivity @@ -0,0 +1,399 @@ +From 80ee4b0bbaafbf83b5ae3759a0b27f49e873339f Mon Sep 17 00:00:00 2001 +From: "Chuah, Kim Tatt" +Date: Tue, 26 Jun 2018 07:35:03 +0800 +Subject: [PATCH 026/108] net: stmmac: Add support for VLAN Rx filtering + +Add support for VLAN ID-based filtering by the MAC controller for MAC +drivers that support it. Only the 12-bit VID field is used. 
+ +Signed-off-by: Chuah, Kim Tatt +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 2 + + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 15 ++ + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 196 ++++++++++++++++++ + drivers/net/ethernet/stmicro/stmmac/hwif.h | 14 ++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 10 + + 5 files changed, 237 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index a2ad992be4e2..88fa60f1ab91 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -474,6 +474,8 @@ struct mac_device_info { + unsigned int ps; + bool mdio_intr_en; + wait_queue_head_t mdio_busy_wait; ++ unsigned int num_vlan; ++ u32 vlan_filter[32]; + }; + + struct stmmac_rx_routing { +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +index 29dd67a48a15..f842807edce8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -17,6 +17,7 @@ + #define GMAC_PACKET_FILTER 0x00000008 + #define GMAC_HASH_TAB(x) (0x10 + (x) * 4) + #define GMAC_VLAN_TAG 0x00000050 ++#define GMAC_VLAN_TAG_DATA 0x00000054 + #define GMAC_VLAN_HASH_TABLE 0x00000058 + #define GMAC_RX_FLOW_CTRL 0x00000090 + #define GMAC_VLAN_INCL 0x00000060 +@@ -82,7 +83,15 @@ + #define GMAC_VLAN_VLC GENMASK(17, 16) + #define GMAC_VLAN_VLC_SHIFT 16 + ++/* MAC VLAN Tag */ ++#define GMAC_VLAN_TAG_VID GENMASK(15, 0) ++#define GMAC_VLAN_TAG_ETV BIT(16) ++ + /* MAC VLAN Tag Control */ ++#define GMAC_VLAN_TAG_CTRL_OB BIT(0) ++#define GMAC_VLAN_TAG_CTRL_CT BIT(1) ++#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2) ++#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2 + #define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21) + #define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21 + #define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24) +@@ -92,6 +101,11 @@ + #define GMAC_VLAN_TAG_STRIP_FAIL 
(0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) + #define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) + ++/* MAC VLAN Tag Data/Filter */ ++#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0) ++#define GMAC_VLAN_TAG_DATA_VEN BIT(16) ++#define GMAC_VLAN_TAG_DATA_ETV BIT(17) ++ + /* MAC RX Queue Enable */ + #define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) + #define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) +@@ -234,6 +248,7 @@ enum power_event { + #define GMAC_HW_FEAT_FRPBS GENMASK(12, 11) + #define GMAC_HW_FEAT_FRPSEL BIT(10) + #define GMAC_HW_FEAT_DVLAN BIT(5) ++#define GMAC_HW_FEAT_NRVF GENMASK(2, 0) + + /* MAC HW ADDR regs */ + #define GMAC_HI_DCS GENMASK(18, 16) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index e29e189cec8c..432af7009575 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -399,6 +399,156 @@ static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) + writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL); + } + ++static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid) ++{ ++ void __iomem *ioaddr = (void __iomem *)dev->base_addr; ++ u32 val; ++ ++ val = readl(ioaddr + GMAC_VLAN_TAG); ++ val &= ~GMAC_VLAN_TAG_VID; ++ val |= GMAC_VLAN_TAG_ETV | vid; ++ ++ writel(val, ioaddr + GMAC_VLAN_TAG); ++} ++ ++static int dwmac4_write_vlan_filter(struct net_device *dev, ++ struct mac_device_info *hw, ++ u8 index, u32 data) ++{ ++ void __iomem *ioaddr = (void __iomem *)dev->base_addr; ++ int i, timeout = 10; ++ u32 val; ++ ++ if (index >= hw->num_vlan) ++ return -EINVAL; ++ ++ writel(data, ioaddr + GMAC_VLAN_TAG_DATA); ++ ++ val = readl(ioaddr + GMAC_VLAN_TAG); ++ val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK | ++ GMAC_VLAN_TAG_CTRL_CT | ++ GMAC_VLAN_TAG_CTRL_OB); ++ val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB; ++ ++ writel(val, ioaddr + 
GMAC_VLAN_TAG); ++ ++ for (i = 0; i < timeout; i++) { ++ val = readl(ioaddr + GMAC_VLAN_TAG); ++ if (!(val & GMAC_VLAN_TAG_CTRL_OB)) ++ return 0; ++ udelay(1); ++ } ++ ++ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n"); ++ ++ return -EBUSY; ++} ++ ++static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev, ++ struct mac_device_info *hw, ++ __be16 proto, u16 vid) ++{ ++ int index = -1; ++ u32 val = 0; ++ int i, ret; ++ ++ if (vid > 4095) ++ return -EINVAL; ++ ++ /* Single Rx VLAN Filter */ ++ if (hw->num_vlan == 1) { ++ /* For single VLAN filter, VID 0 means VLAN promiscuous */ ++ if (vid == 0) { ++ netdev_warn(dev, "Adding VLAN ID 0 is not supported\n"); ++ return -EPERM; ++ } ++ ++ if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) { ++ netdev_err(dev, "Only single VLAN ID supported\n"); ++ return -EPERM; ++ } ++ ++ hw->vlan_filter[0] = vid; ++ dwmac4_write_single_vlan(dev, vid); ++ ++ return 0; ++ } ++ ++ /* Extended Rx VLAN Filter Enable */ ++ val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid; ++ ++ for (i = 0; i < hw->num_vlan; i++) { ++ if (hw->vlan_filter[i] == val) ++ return 0; ++ else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN)) ++ index = i; ++ } ++ ++ if (index == -1) { ++ netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n", ++ hw->num_vlan); ++ return -EPERM; ++ } ++ ++ ret = dwmac4_write_vlan_filter(dev, hw, index, val); ++ ++ if (!ret) ++ hw->vlan_filter[index] = val; ++ ++ return ret; ++} ++ ++static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev, ++ struct mac_device_info *hw, ++ __be16 proto, u16 vid) ++{ ++ int i, ret = 0; ++ ++ /* Single Rx VLAN Filter */ ++ if (hw->num_vlan == 1) { ++ if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) { ++ hw->vlan_filter[0] = 0; ++ dwmac4_write_single_vlan(dev, 0); ++ } ++ return 0; ++ } ++ ++ /* Extended Rx VLAN Filter Enable */ ++ for (i = 0; i < hw->num_vlan; i++) { ++ if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) { ++ ret = dwmac4_write_vlan_filter(dev, hw, 
i, 0); ++ ++ if (!ret) ++ hw->vlan_filter[i] = 0; ++ else ++ return ret; ++ } ++ } ++ ++ return ret; ++} ++ ++static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev, ++ struct mac_device_info *hw) ++{ ++ u32 val; ++ int i; ++ ++ /* Single Rx VLAN Filter */ ++ if (hw->num_vlan == 1) { ++ dwmac4_write_single_vlan(dev, hw->vlan_filter[0]); ++ return; ++ } ++ ++ /* Extended Rx VLAN Filter Enable */ ++ for (i = 0; i < hw->num_vlan; i++) { ++ if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) { ++ val = hw->vlan_filter[i]; ++ dwmac4_write_vlan_filter(dev, hw, i, val); ++ } ++ } ++} ++ + static void dwmac4_set_filter(struct mac_device_info *hw, + struct net_device *dev) + { +@@ -474,6 +624,10 @@ static void dwmac4_set_filter(struct mac_device_info *hw, + } + } + ++ /* VLAN filtering */ ++ if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) ++ value |= GMAC_PACKET_FILTER_VTFE; ++ + writel(value, ioaddr + GMAC_PACKET_FILTER); + } + +@@ -876,6 +1030,9 @@ const struct stmmac_ops dwmac4_ops = { + .set_arp_offload = dwmac4_set_arp_offload, + .rx_hw_vlan = dwmac4_rx_hw_vlan, + .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode, ++ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, ++ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, ++ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, + }; + + const struct stmmac_ops dwmac410_ops = { +@@ -914,6 +1071,9 @@ const struct stmmac_ops dwmac410_ops = { + .set_arp_offload = dwmac4_set_arp_offload, + .rx_hw_vlan = dwmac4_rx_hw_vlan, + .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode, ++ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, ++ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, ++ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, + }; + + const struct stmmac_ops dwmac510_ops = { +@@ -957,8 +1117,42 @@ const struct stmmac_ops dwmac510_ops = { + .set_arp_offload = dwmac4_set_arp_offload, + .rx_hw_vlan = dwmac4_rx_hw_vlan, + .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode, ++ .add_hw_vlan_rx_fltr = 
dwmac4_add_hw_vlan_rx_fltr, ++ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, ++ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, + }; + ++static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) ++{ ++ u32 val, num_vlan; ++ ++ val = readl(ioaddr + GMAC_HW_FEATURE3); ++ switch (val & GMAC_HW_FEAT_NRVF) { ++ case 0: ++ num_vlan = 1; ++ break; ++ case 1: ++ num_vlan = 4; ++ break; ++ case 2: ++ num_vlan = 8; ++ break; ++ case 3: ++ num_vlan = 16; ++ break; ++ case 4: ++ num_vlan = 24; ++ break; ++ case 5: ++ num_vlan = 32; ++ break; ++ default: ++ num_vlan = 1; ++ } ++ ++ return num_vlan; ++} ++ + int dwmac4_setup(struct stmmac_priv *priv) + { + struct mac_device_info *mac = priv->hw; +@@ -988,5 +1182,7 @@ int dwmac4_setup(struct stmmac_priv *priv) + mac->mii.clk_csr_shift = 8; + mac->mii.clk_csr_mask = GENMASK(11, 8); + ++ mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr); ++ + return 0; + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index f0e3dd59f1ae..73829f89cc49 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -371,6 +371,14 @@ struct stmmac_ops { + struct dma_desc *rx_desc, struct sk_buff *skb); + void (*set_hw_vlan_mode)(void __iomem *ioaddr, + netdev_features_t features); ++ int (*add_hw_vlan_rx_fltr)(struct net_device *dev, ++ struct mac_device_info *hw, ++ __be16 proto, u16 vid); ++ int (*del_hw_vlan_rx_fltr)(struct net_device *dev, ++ struct mac_device_info *hw, ++ __be16 proto, u16 vid); ++ void (*restore_hw_vlan_rx_fltr)(struct net_device *dev, ++ struct mac_device_info *hw); + /* TX Timestamp */ + int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts); + /* Source Address Insertion / Replacement */ +@@ -463,6 +471,12 @@ struct stmmac_ops { + stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args) + #define stmmac_set_hw_vlan_mode(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args) ++#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args) ++#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args) ++#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args) + #define stmmac_get_mac_tx_timestamp(__priv, __args...) \ + stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args) + #define stmmac_sarc_configure(__priv, __args...) \ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index ae49261ff89a..329ce0af8a05 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -4624,6 +4624,8 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid + return ret; + } + ++ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); ++ + return ret; + } + +@@ -4631,6 +4633,7 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi + { + struct stmmac_priv *priv = netdev_priv(ndev); + bool is_double = false; ++ int ret; + + if (!priv->dma_cap.vlhash) + return -EOPNOTSUPP; +@@ -4638,6 +4641,11 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi + is_double = true; + + clear_bit(vid, priv->active_vlans); ++ ++ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); ++ if(ret) ++ return ret; ++ + return stmmac_vlan_update(priv, is_double); + } + +@@ -5223,6 +5231,8 @@ int stmmac_resume(struct device *dev) + stmmac_init_coalesce(priv); + stmmac_set_rx_mode(ndev); + ++ stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); ++ + stmmac_enable_all_queues(priv); + + stmmac_start_all_queues(priv); +-- +2.17.1 + diff --git 
a/patches/0026-platform-x86-SEP-bug-fix-use-of-undeclared-va.sep-socwatch b/patches/0026-platform-x86-SEP-bug-fix-use-of-undeclared-va.sep-socwatch new file mode 100644 index 0000000000..3a93f89f0a --- /dev/null +++ b/patches/0026-platform-x86-SEP-bug-fix-use-of-undeclared-va.sep-socwatch @@ -0,0 +1,37 @@ +From dd37de7bbc21ee6e6a6c16882021facc951f246f Mon Sep 17 00:00:00 2001 +From: Manisha Chinthapally +Date: Tue, 2 Jul 2019 18:15:44 +0000 +Subject: [PATCH 26/27] platform/x86: SEP bug fix, use of undeclared var + +Typo in variable name used in debug message + +Signed-off-by: Manisha Chinthapally +--- + drivers/platform/x86/sepdk/sep/cpumon.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/sep/cpumon.c b/drivers/platform/x86/sepdk/sep/cpumon.c +index ac8ade14f106..0547894daf4c 100755 +--- a/drivers/platform/x86/sepdk/sep/cpumon.c ++++ b/drivers/platform/x86/sepdk/sep/cpumon.c +@@ -206,7 +206,7 @@ DRV_BOOL CPUMON_is_Offline_Allowed(void) + * @fn VOID CPUMON_Online_Cpu( + * PVOID param) + * +- * @param PVOID parm ++ * @param PVOID param + * + * @return None + * +@@ -219,7 +219,7 @@ VOID CPUMON_Online_Cpu(PVOID param) + S32 this_cpu; + CPU_STATE pcpu; + +- SEP_DRV_LOG_TRACE_IN("Dummy parm: %p.", parm); ++ SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + if (param == NULL) { + preempt_disable(); +-- +2.17.1 + diff --git a/patches/0026-rpmb-enable-built-in-components.security b/patches/0026-rpmb-enable-built-in-components.security new file mode 100644 index 0000000000..0fd47a5901 --- /dev/null +++ b/patches/0026-rpmb-enable-built-in-components.security @@ -0,0 +1,62 @@ +From 6f47f294da2b9a8c4204cb3dbecef9b5a612fa74 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Tue, 21 May 2019 10:57:39 +0300 +Subject: [PATCH 26/65] rpmb: enable built in components + +This is a preparation for enabling +receiving key material from the Linux cmdline. + +Boot parameters handling has to be compiled into +kernel. 
+To enable built-in compilation we add RPMB_SUPPORT +boolean switch that governs RPMB compilation. + +Change-Id: Id18912416b956df8d95df78e25636573f5b36373 +Signed-off-by: Tomas Winkler +--- + drivers/char/Makefile | 2 +- + drivers/char/rpmb/Kconfig | 5 +++++ + drivers/char/rpmb/Makefile | 2 +- + 3 files changed, 7 insertions(+), 2 deletions(-) + +diff --git a/drivers/char/Makefile b/drivers/char/Makefile +index f9f7b5de84a3..8fa355ee83d2 100644 +--- a/drivers/char/Makefile ++++ b/drivers/char/Makefile +@@ -52,4 +52,4 @@ js-rtc-y = rtc.o + obj-$(CONFIG_XILLYBUS) += xillybus/ + obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o + obj-$(CONFIG_ADI) += adi.o +-obj-$(CONFIG_RPMB) += rpmb/ ++obj-$(CONFIG_RPMB_SUPPORT) += rpmb/ +diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig +index 8fdc98145586..6a92b5b6a544 100644 +--- a/drivers/char/rpmb/Kconfig ++++ b/drivers/char/rpmb/Kconfig +@@ -1,8 +1,13 @@ + # SPDX-License-Identifier: GPL-2.0 + # Copyright (c) 2015-2019, Intel Corporation. + ++# RPMB_SUPPORT always gets to for built-in components ++config RPMB_SUPPORT ++ bool ++ + config RPMB + tristate "RPMB partition interface" ++ select RPMB_SUPPORT + help + Unified RPMB partition interface for eMMC and UFS. 
+ Provides interface for in kernel security controllers to +diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile +index 5600cef232b2..d964ecc7570a 100644 +--- a/drivers/char/rpmb/Makefile ++++ b/drivers/char/rpmb/Makefile +@@ -9,4 +9,4 @@ obj-$(CONFIG_VIRTIO_RPMB) += virtio_rpmb.o + + ccflags-y += -D__CHECK_ENDIAN__ + +-obj-$(CONFIG_RPMB_MUX) += mux/ ++obj-$(CONFIG_RPMB_SUPPORT) += mux/ +-- +2.17.1 + diff --git a/patches/0026-serial-8250_port-Don-t-use-power-management-for-kerne.lpss b/patches/0026-serial-8250_port-Don-t-use-power-management-for-kerne.lpss new file mode 100644 index 0000000000..a26f474309 --- /dev/null +++ b/patches/0026-serial-8250_port-Don-t-use-power-management-for-kerne.lpss @@ -0,0 +1,131 @@ +From 0f51a435b19e7a034321fd8dcd21c00957697bc9 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Thu, 17 Nov 2016 13:12:45 +0200 +Subject: [PATCH 26/40] serial: 8250_port: Don't use power management for + kernel console + +Doing any kind of power management for kernel console is really bad idea. + +First of all, it runs in poll and atomic mode. This fact attaches a limitation +on the functions that might be called. For example, pm_runtime_get_sync() might +sleep and thus can't be used. On the other hand pm_runtime_get() doesn't +guarantee that device becames powered on (fast enough). On ACPI enabled +platforms it might even call firmware for a job. + +Besides that, imagine the case when console is about to print a kernel oops and +it's powered off. In such an emergency case calling the complex functions is +not the best what we can do, taking into consideration that user wants to see +at least something of the last kernel word before it passes away. + +Here we modify the 8250 console code to prevent runtime power management. 
+ +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_core.c | 9 +++++++++ + drivers/tty/serial/8250/8250_port.c | 22 ++++++++++++++++++---- + include/linux/serial_8250.h | 1 + + 3 files changed, 28 insertions(+), 4 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c +index e682390ce0de..374ad3947b4f 100644 +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -609,6 +609,14 @@ static int univ8250_console_setup(struct console *co, char *options) + return retval; + } + ++static void univ8250_console_exit(struct console *co) ++{ ++ struct uart_port *port; ++ ++ port = &serial8250_ports[co->index].port; ++ serial8250_console_exit(port); ++} ++ + /** + * univ8250_console_match - non-standard console matching + * @co: registering console +@@ -667,6 +675,7 @@ static struct console univ8250_console = { + .write = univ8250_console_write, + .device = uart_console_device, + .setup = univ8250_console_setup, ++ .exit = univ8250_console_exit, + .match = univ8250_console_match, + .flags = CON_PRINTBUFFER | CON_ANYTIME, + .index = -1, +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index 8407166610ce..f8c6bd2b4c23 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -3137,6 +3137,9 @@ static void serial8250_console_restore(struct uart_8250_port *up) + * any possible real use of the port... + * + * The console_lock must be held when we get here. ++ * ++ * Doing runtime PM is really a bad idea for the kernel console. ++ * Thus we assume that the function called when device is powered on. 
+ */ + void serial8250_console_write(struct uart_8250_port *up, const char *s, + unsigned int count) +@@ -3148,8 +3151,6 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, + + touch_nmi_watchdog(); + +- serial8250_rpm_get(up); +- + if (oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else +@@ -3192,7 +3193,6 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, + + if (locked) + spin_unlock_irqrestore(&port->lock, flags); +- serial8250_rpm_put(up); + } + + static unsigned int probe_baud(struct uart_port *port) +@@ -3216,6 +3216,7 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) + int bits = 8; + int parity = 'n'; + int flow = 'n'; ++ int ret; + + if (!port->iobase && !port->membase) + return -ENODEV; +@@ -3225,7 +3226,20 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) + else if (probe) + baud = probe_baud(port); + +- return uart_set_options(port, port->cons, baud, parity, bits, flow); ++ ret = uart_set_options(port, port->cons, baud, parity, bits, flow); ++ if (ret) ++ return ret; ++ ++ if (port->dev) ++ pm_runtime_get_noresume(port->dev); ++ ++ return 0; ++} ++ ++void serial8250_console_exit(struct uart_port *port) ++{ ++ if (port->dev) ++ pm_runtime_put_noidle(port->dev); + } + + #endif /* CONFIG_SERIAL_8250_CONSOLE */ +diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h +index bb2bc99388ca..c78d8b9bd707 100644 +--- a/include/linux/serial_8250.h ++++ b/include/linux/serial_8250.h +@@ -175,6 +175,7 @@ void serial8250_set_defaults(struct uart_8250_port *up); + void serial8250_console_write(struct uart_8250_port *up, const char *s, + unsigned int count); + int serial8250_console_setup(struct uart_port *port, char *options, bool probe); ++void serial8250_console_exit(struct uart_port *port); + + extern void serial8250_set_isa_configurator(void (*v) + (int port, struct uart_port *up, +-- +2.17.1 + diff --git 
a/patches/0026-trusty-fix-rcu_preempt-soft-lockup-crash-issue.trusty b/patches/0026-trusty-fix-rcu_preempt-soft-lockup-crash-issue.trusty new file mode 100644 index 0000000000..84b84ad9fe --- /dev/null +++ b/patches/0026-trusty-fix-rcu_preempt-soft-lockup-crash-issue.trusty @@ -0,0 +1,157 @@ +From 1db4e202cc14684de9f1d04cdeebcdcac09ffdf6 Mon Sep 17 00:00:00 2001 +From: "Yan, Shaoou" +Date: Fri, 9 Dec 2016 05:33:22 +0000 +Subject: [PATCH 26/63] trusty: fix rcu_preempt soft lockup crash issue + +since we'll run a long TEE/Trusty task, e.g generate 3K RSA key pair, +the previous API not meet the requirement of "must be fast and non-blocking +for smp_call_function_single()", we replace smp_call_function_single() +with work_on_cpu() to bind cpu #0, which can fix the rcu_preempt softup +crash issue. + +Change-Id: I63225c16be50b1ff21accb2ae51114d377c45059 +Signed-off-by: Zhu, Bing +Signed-off-by: Yan, shaopu +Reviewed-by: Ilkka Koskinen +--- + drivers/trusty/trusty.c | 101 ++++++++++++++++++++-------------------- + 1 file changed, 50 insertions(+), 51 deletions(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index cfef965402c4..679c5a9a7acf 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -203,59 +203,22 @@ static ulong trusty_std_call_inner(struct device *dev, ulong smcnr, + return ret; + } + +-static void trusty_std_call_inner_wrapper_remote(void *args) +-{ +- struct trusty_smc_interface *p_args = args; +- struct device *dev = p_args->dev; +- ulong smcnr = p_args->args[0]; +- ulong a0 = p_args->args[1]; +- ulong a1 = p_args->args[2]; +- ulong a2 = p_args->args[3]; +- struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); +- ulong ret; +- unsigned long flags; +- +- local_irq_save(flags); +- atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE, +- NULL); +- ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2); +- atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED, +- NULL); +- 
local_irq_restore(flags); +- +- p_args->args[4] = ret; +-} +- +-static ulong trusty_std_call_inner_wrapper(struct device *dev, ulong smcnr, +- ulong a0, ulong a1, ulong a2) +-{ +- int cpu = 0; +- int ret = 0; +- struct trusty_smc_interface s; +- s.dev = dev; +- s.args[0] = smcnr; +- s.args[1] = a0; +- s.args[2] = a1; +- s.args[3] = a2; +- s.args[4] = 0; +- +- ret = smp_call_function_single(cpu, trusty_std_call_inner_wrapper_remote, (void *)&s, 1); +- +- if (ret) { +- pr_err("%s: smp_call_function_single failed: %d\n", __func__, ret); +- } +- +- return s.args[4]; +-} +- + static ulong trusty_std_call_helper(struct device *dev, ulong smcnr, + ulong a0, ulong a1, ulong a2) + { + ulong ret; + int sleep_time = 1; ++ unsigned long flags; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + while (true) { +- ret = trusty_std_call_inner_wrapper(dev, smcnr, a0, a1, a2); ++ local_irq_save(flags); ++ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE, ++ NULL); ++ ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2); ++ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED, ++ NULL); ++ local_irq_restore(flags); + + if ((int)ret != SM_ERR_BUSY) + break; +@@ -292,13 +255,33 @@ static void trusty_std_call_cpu_idle(struct trusty_state *s) + } + } + +-/* must set CONFIG_DEBUG_ATOMIC_SLEEP=n +-** otherwise mutex_lock() will fail and crash +-*/ +-s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) ++ ++struct trusty_std_call32_args { ++ struct device *dev; ++ u32 smcnr; ++ u32 a0; ++ u32 a1; ++ u32 a2; ++}; ++ ++static long trusty_std_call32_work(void *args) + { + int ret; +- struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ struct device *dev; ++ u32 smcnr, a0, a1, a2; ++ struct trusty_state *s; ++ struct trusty_std_call32_args *work_args; ++ ++ BUG_ON(!args); ++ ++ work_args = args; ++ dev = work_args->dev; ++ s = platform_get_drvdata(to_platform_device(dev)); ++ ++ smcnr = 
work_args->smcnr; ++ a0 = work_args->a0; ++ a1 = work_args->a1; ++ a2 = work_args->a2; + + BUG_ON(SMC_IS_FASTCALL(smcnr)); + BUG_ON(SMC_IS_SMC64(smcnr)); +@@ -334,6 +317,22 @@ s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + + return ret; + } ++ ++s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) ++{ ++ const int cpu = 0; ++ struct trusty_std_call32_args args = { ++ .dev = dev, ++ .smcnr = smcnr, ++ .a0 = a0, ++ .a1 = a1, ++ .a2 = a2, ++ }; ++ ++ /* bind cpu 0 for now since trusty OS is running on physical cpu #0*/ ++ return work_on_cpu(cpu, trusty_std_call32_work, (void *) &args); ++} ++ + EXPORT_SYMBOL(trusty_std_call32); + + int trusty_call_notifier_register(struct device *dev, struct notifier_block *n) +-- +2.17.1 + diff --git a/patches/0027-ASoC-Intel-Haswell-Define-separate-ACPI-loader.audio b/patches/0027-ASoC-Intel-Haswell-Define-separate-ACPI-loader.audio new file mode 100644 index 0000000000..9ff3b2d41d --- /dev/null +++ b/patches/0027-ASoC-Intel-Haswell-Define-separate-ACPI-loader.audio @@ -0,0 +1,185 @@ +From c2fea37bb7c5edc70cbc3490d6845e793d65d908 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Fri, 16 Aug 2019 17:33:25 +0200 +Subject: [PATCH 027/193] ASoC: Intel: Haswell: Define separate ACPI loader + +With common acpi code exposed, separate Haswell specific code from other +legacy platforms. To reduce diff delta, it's still loaded with separate +module called: hsw-acpi. This may be address later, once heavy hitters +are gone. 
+ +Change-Id: I43fd42cc41eccc6d8a768c7bc76b4d8441464511 +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/Kconfig | 6 ++- + sound/soc/intel/common/sst-acpi.c | 32 ---------------- + sound/soc/intel/haswell/Makefile | 2 + + sound/soc/intel/haswell/acpi.c | 63 +++++++++++++++++++++++++++++++ + 4 files changed, 70 insertions(+), 33 deletions(-) + create mode 100644 sound/soc/intel/haswell/acpi.c + +diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig +index 01c99750212a..15deda49fb89 100644 +--- a/sound/soc/intel/Kconfig ++++ b/sound/soc/intel/Kconfig +@@ -31,6 +31,10 @@ config SND_SST_IPC_ACPI + # This option controls the ACPI-based IPC for HiFi2 platforms + # (Baytrail, Cherrytrail) + ++config SND_SOC_INTEL_HASWELL_ACPI ++ tristate ++ # This option controls the ACPI-based on HSW/BDW platforms ++ + config SND_SOC_INTEL_SST_ACPI + tristate + # This option controls ACPI-based probing on +@@ -52,7 +56,7 @@ config SND_SOC_INTEL_HASWELL + depends on SND_DMA_SGBUF + depends on DMADEVICES && ACPI + select SND_SOC_INTEL_SST +- select SND_SOC_INTEL_SST_ACPI ++ select SND_SOC_INTEL_HASWELL_ACPI + select SND_SOC_INTEL_SST_FIRMWARE + select SND_SOC_ACPI_INTEL_MATCH + help +diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c +index 3547e923033a..077070daf6d8 100644 +--- a/sound/soc/intel/common/sst-acpi.c ++++ b/sound/soc/intel/common/sst-acpi.c +@@ -15,10 +15,6 @@ + #include + #include + +-#define SST_LPT_DSP_DMA_ADDR_OFFSET 0x0F0000 +-#define SST_WPT_DSP_DMA_ADDR_OFFSET 0x0FE000 +-#define SST_LPT_DSP_DMA_SIZE (1024 - 1) +- + struct sst_acpi_priv { + struct platform_device *pdev_mach; + struct platform_device *pdev_pcm; +@@ -157,32 +153,6 @@ int sst_dsp_acpi_remove(struct platform_device *pdev) + } + EXPORT_SYMBOL_GPL(sst_dsp_acpi_remove); + +-static struct sst_acpi_desc sst_acpi_haswell_desc = { +- .drv_name = "haswell-pcm-audio", +- .machines = snd_soc_acpi_intel_haswell_machines, +- .resindex_lpe_base = 0, +- .resindex_pcicfg_base 
= 1, +- .resindex_fw_base = -1, +- .irqindex_host_ipc = 0, +- .sst_id = SST_DEV_ID_LYNX_POINT, +- .dma_engine = SST_DMA_TYPE_DW, +- .resindex_dma_base = SST_LPT_DSP_DMA_ADDR_OFFSET, +- .dma_size = SST_LPT_DSP_DMA_SIZE, +-}; +- +-static struct sst_acpi_desc sst_acpi_broadwell_desc = { +- .drv_name = "haswell-pcm-audio", +- .machines = snd_soc_acpi_intel_broadwell_machines, +- .resindex_lpe_base = 0, +- .resindex_pcicfg_base = 1, +- .resindex_fw_base = -1, +- .irqindex_host_ipc = 0, +- .sst_id = SST_DEV_ID_WILDCAT_POINT, +- .dma_engine = SST_DMA_TYPE_DW, +- .resindex_dma_base = SST_WPT_DSP_DMA_ADDR_OFFSET, +- .dma_size = SST_LPT_DSP_DMA_SIZE, +-}; +- + #if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI) + static struct sst_acpi_desc sst_acpi_baytrail_desc = { + .drv_name = "baytrail-pcm-audio", +@@ -197,8 +167,6 @@ static struct sst_acpi_desc sst_acpi_baytrail_desc = { + #endif + + static const struct acpi_device_id sst_acpi_match[] = { +- { "INT33C8", (unsigned long)&sst_acpi_haswell_desc }, +- { "INT3438", (unsigned long)&sst_acpi_broadwell_desc }, + #if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI) + { "80860F28", (unsigned long)&sst_acpi_baytrail_desc }, + #endif +diff --git a/sound/soc/intel/haswell/Makefile b/sound/soc/intel/haswell/Makefile +index ad2341aea8ae..0af852d0577b 100644 +--- a/sound/soc/intel/haswell/Makefile ++++ b/sound/soc/intel/haswell/Makefile +@@ -1,5 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0-only + snd-soc-sst-haswell-pcm-objs := \ + sst-haswell-ipc.o sst-haswell-pcm.o sst-haswell-dsp.o ++snd-soc-hsw-acpi-objs := acpi.o + + obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += snd-soc-sst-haswell-pcm.o ++obj-$(CONFIG_SND_SOC_INTEL_HASWELL_ACPI) += snd-soc-hsw-acpi.o +diff --git a/sound/soc/intel/haswell/acpi.c b/sound/soc/intel/haswell/acpi.c +new file mode 100644 +index 000000000000..0c7da697437c +--- /dev/null ++++ b/sound/soc/intel/haswell/acpi.c +@@ -0,0 +1,63 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Intel HSW loader on ACPI systems ++ * ++ * Copyright 
(C) 2019, Intel Corporation. All rights reserved. ++ */ ++ ++#include ++#include ++#include ++#include ++#include "../common/sst-dsp.h" ++ ++#define SST_LPT_DSP_DMA_ADDR_OFFSET 0x0F0000 ++#define SST_WPT_DSP_DMA_ADDR_OFFSET 0x0FE000 ++#define SST_LPT_DSP_DMA_SIZE (1024 - 1) ++ ++static struct sst_acpi_desc hsw_acpi_desc = { ++ .drv_name = "haswell-pcm-audio", ++ .machines = snd_soc_acpi_intel_haswell_machines, ++ .resindex_lpe_base = 0, ++ .resindex_pcicfg_base = 1, ++ .resindex_fw_base = -1, ++ .irqindex_host_ipc = 0, ++ .sst_id = SST_DEV_ID_LYNX_POINT, ++ .dma_engine = SST_DMA_TYPE_DW, ++ .resindex_dma_base = SST_LPT_DSP_DMA_ADDR_OFFSET, ++ .dma_size = SST_LPT_DSP_DMA_SIZE, ++}; ++ ++static struct sst_acpi_desc bdw_acpi_desc = { ++ .drv_name = "haswell-pcm-audio", ++ .machines = snd_soc_acpi_intel_broadwell_machines, ++ .resindex_lpe_base = 0, ++ .resindex_pcicfg_base = 1, ++ .resindex_fw_base = -1, ++ .irqindex_host_ipc = 0, ++ .sst_id = SST_DEV_ID_WILDCAT_POINT, ++ .dma_engine = SST_DMA_TYPE_DW, ++ .resindex_dma_base = SST_WPT_DSP_DMA_ADDR_OFFSET, ++ .dma_size = SST_LPT_DSP_DMA_SIZE, ++}; ++ ++static const struct acpi_device_id hsw_acpi_ids[] = { ++ { "INT33C8", (unsigned long)&hsw_acpi_desc }, ++ { "INT3438", (unsigned long)&bdw_acpi_desc }, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, hsw_acpi_ids); ++ ++static struct platform_driver hsw_acpi_driver = { ++ .probe = sst_dsp_acpi_probe, ++ .remove = sst_dsp_acpi_remove, ++ .driver = { ++ .name = "hsw-acpi", ++ .acpi_match_table = ACPI_PTR(hsw_acpi_ids), ++ }, ++}; ++module_platform_driver(hsw_acpi_driver); ++ ++MODULE_AUTHOR("Cezary Rojewski "); ++MODULE_DESCRIPTION("Intel HSW loader on ACPI systems"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0027-VHM-add-VHM-api-version-support.acrn b/patches/0027-VHM-add-VHM-api-version-support.acrn new file mode 100644 index 0000000000..7e2af93981 --- /dev/null +++ b/patches/0027-VHM-add-VHM-api-version-support.acrn @@ -0,0 +1,64 @@ +From 
1b329bf45d3c6e3ee0a367e61786324ff0c2210e Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:58 +0800 +Subject: [PATCH 027/150] VHM: add VHM api version support + +Change-Id: I36dd051d0cc04720ab8d69817392ff97f1e5ad34 +Tracked-On: 218802 +Signed-off-by: Jason Chen CJ +--- + drivers/char/vhm/vhm_dev.c | 16 ++++++++++++++++ + include/linux/vhm/vhm_ioctl_defs.h | 5 +++++ + 2 files changed, 21 insertions(+) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index a87324b6f2af..ddafa040c15a 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -86,6 +86,9 @@ + #define DEVICE_NAME "acrn_vhm" + #define CLASS_NAME "vhm" + ++#define VHM_API_VERSION_MAJOR 1 ++#define VHM_API_VERSION_MINOR 0 ++ + static int major; + static struct class *vhm_class; + static struct device *vhm_device; +@@ -144,6 +147,19 @@ static long vhm_dev_ioctl(struct file *filep, + + trace_printk("[%s] ioctl_num=0x%x\n", __func__, ioctl_num); + ++ if (ioctl_num == IC_GET_API_VERSION) { ++ struct api_version api_version; ++ ++ api_version.major_version = VHM_API_VERSION_MAJOR; ++ api_version.minor_version = VHM_API_VERSION_MINOR; ++ ++ if (copy_to_user((void *)ioctl_param, &api_version, ++ sizeof(struct api_version))) ++ return -EFAULT; ++ ++ return 0; ++ } ++ + memset(&hc_pt_irq, 0, sizeof(hc_pt_irq)); + memset(&ic_pt_irq, 0, sizeof(ic_pt_irq)); + vm = (struct vhm_vm *)filep->private_data; +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index d00b6588f296..258ec3982da9 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -159,4 +159,9 @@ struct ioreq_notify { + uint32_t vcpu; + }; + ++struct api_version { ++ uint32_t major_version; ++ uint32_t minor_version; ++}; ++ + #endif /* VHM_IOCTL_DEFS_H */ +-- +2.17.1 + diff --git a/patches/0027-drm-i915-s-num_active_crtcs-num_active_pipes.drm b/patches/0027-drm-i915-s-num_active_crtcs-num_active_pipes.drm new file 
mode 100644 index 0000000000..7bbcf2f93f --- /dev/null +++ b/patches/0027-drm-i915-s-num_active_crtcs-num_active_pipes.drm @@ -0,0 +1,73 @@ +From f39f3fc106b59e970623de30598e08abad2c32f0 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= +Date: Wed, 21 Aug 2019 20:30:32 +0300 +Subject: [PATCH 027/690] drm/i915: s/num_active_crtcs/num_active_pipes/ +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Set a good example and talk about pipes rather than crtcs. + +Signed-off-by: Ville Syrjälä +Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-4-ville.syrjala@linux.intel.com +Reviewed-by: Jani Nikula +--- + drivers/gpu/drm/i915/intel_pm.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index 437cd50e5d06..b4b9609db092 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -1490,7 +1490,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, + struct g4x_wm_values *wm) + { + struct intel_crtc *crtc; +- int num_active_crtcs = 0; ++ int num_active_pipes = 0; + + wm->cxsr = true; + wm->hpll_en = true; +@@ -1509,10 +1509,10 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, + if (!wm_state->fbc_en) + wm->fbc_en = false; + +- num_active_crtcs++; ++ num_active_pipes++; + } + +- if (num_active_crtcs != 1) { ++ if (num_active_pipes != 1) { + wm->cxsr = false; + wm->hpll_en = false; + wm->fbc_en = false; +@@ -2098,7 +2098,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, + struct vlv_wm_values *wm) + { + struct intel_crtc *crtc; +- int num_active_crtcs = 0; ++ int num_active_pipes = 0; + + wm->level = dev_priv->wm.max_level; + wm->cxsr = true; +@@ -2112,14 +2112,14 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, + if (!wm_state->cxsr) + wm->cxsr = false; + +- num_active_crtcs++; ++ num_active_pipes++; + 
wm->level = min_t(int, wm->level, wm_state->num_levels - 1); + } + +- if (num_active_crtcs != 1) ++ if (num_active_pipes != 1) + wm->cxsr = false; + +- if (num_active_crtcs > 1) ++ if (num_active_pipes > 1) + wm->level = VLV_WM_LEVEL_PM2; + + for_each_intel_crtc(&dev_priv->drm, crtc) { +-- +2.17.1 + diff --git a/patches/0027-net-stmmac-Add-support-for-VLAN-promiscuous-m.connectivity b/patches/0027-net-stmmac-Add-support-for-VLAN-promiscuous-m.connectivity new file mode 100644 index 0000000000..fe39d266a2 --- /dev/null +++ b/patches/0027-net-stmmac-Add-support-for-VLAN-promiscuous-m.connectivity @@ -0,0 +1,107 @@ +From 05a5a4603fe831d47d553a8d696dc105dbb1ab7c Mon Sep 17 00:00:00 2001 +From: "Chuah, Kim Tatt" +Date: Tue, 26 Jun 2018 09:05:05 +0800 +Subject: [PATCH 027/108] net: stmmac: Add support for VLAN promiscuous mode + +For dwmac4, enable VLAN promiscuity when MAC controller is requested to +enter promiscuous mode. + +Signed-off-by: Chuah, Kim Tatt +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 1 + + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 45 +++++++++++++++++++ + 2 files changed, 46 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index 88fa60f1ab91..782fd503d623 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -476,6 +476,7 @@ struct mac_device_info { + wait_queue_head_t mdio_busy_wait; + unsigned int num_vlan; + u32 vlan_filter[32]; ++ unsigned int promisc; + }; + + struct stmmac_rx_routing { +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 432af7009575..0f8fa8b9c53b 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -455,6 +455,12 @@ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev, + if (vid > 4095) + return -EINVAL; 
+ ++ if (hw->promisc) { ++ netdev_err(dev, ++ "Adding VLAN in promisc mode not supported\n"); ++ return -EPERM; ++ } ++ + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + /* For single VLAN filter, VID 0 means VLAN promiscuous */ +@@ -504,6 +510,12 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev, + { + int i, ret = 0; + ++ if (hw->promisc) { ++ netdev_err(dev, ++ "Deleting VLAN in promisc mode not supported\n"); ++ return -EPERM; ++ } ++ + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) { +@@ -528,6 +540,27 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev, + return ret; + } + ++static void dwmac4_vlan_promisc_enable(struct net_device *dev, ++ struct mac_device_info *hw) ++{ ++ u32 val; ++ int i; ++ ++ /* Single Rx VLAN Filter */ ++ if (hw->num_vlan == 1) { ++ dwmac4_write_single_vlan(dev, 0); ++ return; ++ } ++ ++ /* Extended Rx VLAN Filter Enable */ ++ for (i = 0; i < hw->num_vlan; i++) { ++ if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) { ++ val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN; ++ dwmac4_write_vlan_filter(dev, hw, i, val); ++ } ++ } ++} ++ + static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw) + { +@@ -629,6 +662,18 @@ static void dwmac4_set_filter(struct mac_device_info *hw, + value |= GMAC_PACKET_FILTER_VTFE; + + writel(value, ioaddr + GMAC_PACKET_FILTER); ++ ++ if (dev->flags & IFF_PROMISC) { ++ if (!hw->promisc) { ++ hw->promisc = 1; ++ dwmac4_vlan_promisc_enable(dev, hw); ++ } ++ } else { ++ if (hw->promisc) { ++ hw->promisc = 0; ++ dwmac4_restore_hw_vlan_rx_fltr(dev, hw); ++ } ++ } + } + + static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, +-- +2.17.1 + diff --git a/patches/0027-platform-x86-SEP-BUG-fix-for-lbrcollection.sep-socwatch b/patches/0027-platform-x86-SEP-BUG-fix-for-lbrcollection.sep-socwatch new file mode 100644 index 0000000000..77fd6d170e --- /dev/null +++ 
b/patches/0027-platform-x86-SEP-BUG-fix-for-lbrcollection.sep-socwatch @@ -0,0 +1,55 @@ +From 8f13b7d6224c78be70d5093358d6dfa448a06e81 Mon Sep 17 00:00:00 2001 +From: Manisha +Date: Wed, 19 Jun 2019 01:37:02 +0000 +Subject: [PATCH 27/27] platform/x86: SEP BUG fix for lbrcollection + +Error in macro is causing to skip setting the lbr filter value + +Signed-off-by: Manisha +--- + drivers/platform/x86/sepdk/inc/ecb_iterators.h | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/platform/x86/sepdk/inc/ecb_iterators.h b/drivers/platform/x86/sepdk/inc/ecb_iterators.h +index 10527535925f..e02a3e194af5 100644 +--- a/drivers/platform/x86/sepdk/inc/ecb_iterators.h ++++ b/drivers/platform/x86/sepdk/inc/ecb_iterators.h +@@ -118,7 +118,7 @@ extern "C" { + if ((pecb)) { \ + for ((idx) = ECB_escr_start(pecb); \ + (idx) < \ +- (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (ECB_escr_start(pecb) + ECB_escr_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ +@@ -141,7 +141,7 @@ extern "C" { + if ((pecb)) { \ + for ((idx) = ECB_escr_start(pecb); \ + (idx) < \ +- (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (ECB_escr_start(pecb) + ECB_escr_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ +@@ -165,7 +165,7 @@ extern "C" { + if ((pecb)) { \ + for ((idx) = ECB_data_start(pecb); \ + (idx) < \ +- (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (ECB_data_start(pecb) + ECB_data_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ +@@ -211,7 +211,7 @@ extern "C" { + if ((pecb)) { \ + for ((idx) = ECB_data_start(pecb); \ + (idx) < \ +- (ECB_cccr_start(pecb) + ECB_cccr_pop(pecb)); \ ++ (ECB_data_start(pecb) + ECB_data_pop(pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ +-- +2.17.1 + diff --git a/patches/0027-rpmb-mux-add-HKDF-for-RPMB-Mux-module.security 
b/patches/0027-rpmb-mux-add-HKDF-for-RPMB-Mux-module.security new file mode 100644 index 0000000000..5d042088db --- /dev/null +++ b/patches/0027-rpmb-mux-add-HKDF-for-RPMB-Mux-module.security @@ -0,0 +1,248 @@ +From b7d2ac7366b0837208fa8386913b8d67355e974f Mon Sep 17 00:00:00 2001 +From: Qi Yadong +Date: Thu, 18 Oct 2018 15:41:11 +0800 +Subject: [PATCH 27/65] rpmb: mux: add HKDF for RPMB Mux module + +Implement a HKDF (hash based key derivation function) +for RPMB key derivation. + +Change-Id: Ic1a0d270c32d9ba101369ef10065b93f5c7ec479 +Signed-off-by: Huang, Yang +Signed-off-by: Wei, Xinghai +Signed-off-by: Qi Yadong +Signed-off-by: Tomas Winkler +--- + drivers/char/rpmb/mux/Kconfig | 8 ++ + drivers/char/rpmb/mux/Makefile | 5 +- + drivers/char/rpmb/mux/mux_hkdf.c | 166 +++++++++++++++++++++++++++++++ + drivers/char/rpmb/mux/mux_hkdf.h | 14 +++ + 4 files changed, 192 insertions(+), 1 deletion(-) + create mode 100644 drivers/char/rpmb/mux/mux_hkdf.c + create mode 100644 drivers/char/rpmb/mux/mux_hkdf.h + +diff --git a/drivers/char/rpmb/mux/Kconfig b/drivers/char/rpmb/mux/Kconfig +index cea084fca1a1..b64efe6d5846 100644 +--- a/drivers/char/rpmb/mux/Kconfig ++++ b/drivers/char/rpmb/mux/Kconfig +@@ -14,3 +14,11 @@ config RPMB_MUX + + RPMB MUX owns RPMB authentication key internally for RPMB + virtualization usage. ++ ++config RPMB_MUX_KEY ++ bool "RPMB MUX Key handling" ++ depends on RPMB_SUPPORT ++ select CRYPTO_SHA256 ++ select CRYPTO_HMAC ++ help ++ Say yes here if you want to access RPMB MUX Key handling. +diff --git a/drivers/char/rpmb/mux/Makefile b/drivers/char/rpmb/mux/Makefile +index 127a3bf2df64..165309fc35de 100644 +--- a/drivers/char/rpmb/mux/Makefile ++++ b/drivers/char/rpmb/mux/Makefile +@@ -1,6 +1,9 @@ + # SPDX-License-Identifier: GPL-2.0 + # Copyright (c) 2018-2019, Intel Corporation. 
+ +-obj-$(CONFIG_RPMB_MUX) += mux.o ++obj-$(CONFIG_RPMB_MUX) += rpmb_mux.o ++rpmb_mux-objs := mux.o ++ ++obj-$(CONFIG_RPMB_KEY) += mux_hkdf.o + + ccflags-y += -D__CHECK_ENDIAN__ +diff --git a/drivers/char/rpmb/mux/mux_hkdf.c b/drivers/char/rpmb/mux/mux_hkdf.c +new file mode 100644 +index 000000000000..3be271a218a4 +--- /dev/null ++++ b/drivers/char/rpmb/mux/mux_hkdf.c +@@ -0,0 +1,166 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * RPMB Mux HKDF ++ * ++ * Copyright (c) 2018-2019 Intel Corporation. ++ */ ++ ++#include ++#include ++#include "mux_hkdf.h" ++ ++static int mux_sha256_extract(u8 *out_key, size_t out_len, ++ struct shash_desc *desc, ++ const u8 *secret, size_t secret_len, ++ const u8 *salt, size_t salt_len) ++{ ++ int ret; ++ u8 salt0[SHA256_HASH_SIZE]; ++ ++ if (!salt || !salt_len) { ++ memset(salt0, 0, sizeof(salt0)); ++ salt = salt0; ++ salt_len = sizeof(salt0); ++ } ++ ++ ret = crypto_shash_setkey(desc->tfm, salt, salt_len); ++ if (ret) { ++ pr_err("set key failed = %d\n", ret); ++ goto out; ++ } ++ ++ ret = crypto_shash_init(desc); ++ if (ret) ++ goto out; ++ ++ ret = crypto_shash_update(desc, secret, secret_len); ++ if (ret) ++ goto out; ++ ++ ret = crypto_shash_final(desc, out_key); ++ if (ret) ++ goto out; ++ ++out: ++ return ret; ++} ++ ++static int mux_sha256_expand(u8 *out_key, size_t out_len, ++ struct shash_desc *desc, ++ const u8 *prk, size_t prk_len, ++ const u8 *info, size_t info_len) ++{ ++ const size_t digest_len = SHA256_HASH_SIZE; ++ u8 previous[SHA256_HASH_SIZE]; ++ size_t n, done = 0; ++ unsigned int i; ++ int ret = 0; ++ ++ n = (out_len + digest_len - 1) / digest_len; ++ ++ /* check for possible integer overflow */ ++ if (out_len + digest_len < out_len) ++ return 0; ++ ++ if (n > 255) ++ return 0; ++ ++ for (i = 0; i < n; i++) { ++ u8 ctr = i + 1; ++ size_t todo; ++ ++ ret = crypto_shash_setkey(desc->tfm, prk, prk_len); ++ if (ret) ++ goto out; ++ ++ ret = crypto_shash_init(desc); ++ if (ret) ++ goto out; ++ ++ if (i != 0 && 
crypto_shash_update(desc, previous, digest_len)) ++ goto out; ++ ++ if (crypto_shash_update(desc, info, info_len) || ++ crypto_shash_update(desc, &ctr, 1) || ++ crypto_shash_final(desc, previous)) { ++ ret = -EPERM; ++ goto out; ++ } ++ ++ todo = digest_len; ++ /* Check if the length of left buffer is smaller than ++ * 32 to make sure no buffer overflow in below memcpy ++ */ ++ if (done + todo > out_len) ++ todo = out_len - done; ++ ++ memcpy(out_key + done, previous, todo); ++ done += todo; ++ } ++ ++out: ++ memset(previous, 0, sizeof(previous)); ++ ++ return ret; ++} ++ ++static struct shash_desc *mux_hkdf_init_hmac_sha256_desc(void) ++{ ++ struct shash_desc *desc; ++ struct crypto_shash *tfm; ++ ++ tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); ++ if (IS_ERR(tfm)) ++ return ERR_PTR(-EFAULT); ++ ++ desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); ++ if (!desc) { ++ crypto_free_shash(tfm); ++ return ERR_PTR(-ENOMEM); ++ } ++ desc->tfm = tfm; ++ ++ return desc; ++} ++ ++int mux_hkdf_sha256(u8 *out_key, size_t out_len, ++ const u8 *secret, size_t secret_len, ++ const u8 *salt, size_t salt_len, ++ const u8 *info, size_t info_len) ++{ ++ u8 prk[SHA256_HASH_SIZE]; ++ size_t prk_len = SHA256_HASH_SIZE; ++ int ret; ++ struct shash_desc *desc; ++ ++ if (!out_key || !out_len) ++ return -EINVAL; ++ ++ if (!secret || !secret_len) ++ return -EINVAL; ++ ++ if (!info && info_len) ++ return -EINVAL; ++ ++ desc = mux_hkdf_init_hmac_sha256_desc(); ++ if (IS_ERR(desc)) ++ return PTR_ERR(desc); ++ ++ memset(prk, 0, sizeof(prk)); ++ ++ ret = mux_sha256_extract(prk, prk_len, desc, ++ secret, secret_len, ++ salt, salt_len); ++ if (ret) ++ goto err_free_shash; ++ ++ ret = mux_sha256_expand(out_key, out_len, desc, ++ prk, prk_len, ++ info, info_len); ++ ++err_free_shash: ++ crypto_free_shash(desc->tfm); ++ kfree(desc); ++ ++ return ret; ++} +diff --git a/drivers/char/rpmb/mux/mux_hkdf.h b/drivers/char/rpmb/mux/mux_hkdf.h +new file mode 100644 +index 
000000000000..eab1b4566439 +--- /dev/null ++++ b/drivers/char/rpmb/mux/mux_hkdf.h +@@ -0,0 +1,14 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2018 Intel Corp. ++ */ ++#ifndef _MUX_HKDF_H ++#define _MUX_HKDF_H ++ ++#define SHA256_HASH_SIZE 32 ++ ++int mux_hkdf_sha256(u8 *out_key, size_t out_len, ++ const u8 *secret, size_t secret_len, ++ const u8 *salt, size_t salt_len, ++ const u8 *info, size_t info_len); ++#endif /* !_MUX_HKDF_H */ +-- +2.17.1 + diff --git a/patches/0027-serial-8250_port-Disable-DMA-operations-for-kernel-co.lpss b/patches/0027-serial-8250_port-Disable-DMA-operations-for-kernel-co.lpss new file mode 100644 index 0000000000..44301de795 --- /dev/null +++ b/patches/0027-serial-8250_port-Disable-DMA-operations-for-kernel-co.lpss @@ -0,0 +1,54 @@ +From 8dbf9dc68156abd341e9ce7bdf3d6ed2b06d538b Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Sun, 27 Nov 2016 20:21:23 +0200 +Subject: [PATCH 27/40] serial: 8250_port: Disable DMA operations for kernel + console + +It would be too tricky and error prone to allow DMA operations on +kernel console. + +One of the concern is when DMA is a separate device, for example on +Intel CherryTrail platforms, and might need special work around to be +functional, see the commit + + eebb3e8d8aaf ("ACPI / LPSS: override power state for LPSS DMA device") + +for more information. + +Another one is that kernel console is used in atomic context, e.g. +when printing crucial information to the user (Oops or crash), +and DMA may not serve due to power management complications +including non-atomic ACPI calls but not limited to it (see above). + +Besides that, other concerns are described in the commit + + 84b40e3b57ee ("serial: 8250: omap: Disable DMA for console UART") + +done for OMAP UART and may be repeated here. + +Disable any kind of DMA operations on kernel console due to above concerns. 
+ +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_port.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index f8c6bd2b4c23..aa5357366f9c 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -2305,7 +2305,10 @@ int serial8250_do_startup(struct uart_port *port) + * Request DMA channels for both RX and TX. + */ + if (up->dma) { +- retval = serial8250_request_dma(up); ++ if (uart_console(port)) ++ retval = -ENXIO; ++ else ++ retval = serial8250_request_dma(up); + if (retval) { + pr_warn_ratelimited("%s - failed to request DMA\n", + port->name); +-- +2.17.1 + diff --git a/patches/0027-trusty-Add-VMM-PANIC-dump-data.trusty b/patches/0027-trusty-Add-VMM-PANIC-dump-data.trusty new file mode 100644 index 0000000000..daa5cc70dc --- /dev/null +++ b/patches/0027-trusty-Add-VMM-PANIC-dump-data.trusty @@ -0,0 +1,66 @@ +From a72d8db5aa5f37a76d7c6a26dadad315081d26af Mon Sep 17 00:00:00 2001 +From: "Yan, Xiangyang" +Date: Wed, 11 Jan 2017 01:26:27 +0000 +Subject: [PATCH 27/63] trusty: Add VMM PANIC dump data. + +1. Increase the alloced size of dump data field to 4 page; + +2. 
Kick off '\r' character in dump data which is outputted + from mon_vsprintf_s() in evmm code; + +Change-Id: I255d97c2a7e898c8d4e1f15777ddd7f7c11af2b0 +Tracked-On: OAM-34720 +Signed-off-by: Yan, Xiangyang +Reviewed-by: Gross, Mark +--- + drivers/trusty/trusty-log.c | 21 +++++++++++++++++---- + 1 file changed, 17 insertions(+), 4 deletions(-) + +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +index a066481c4f1d..8091a596a5e3 100644 +--- a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -154,15 +154,28 @@ static void trusty_vmm_dump_header(struct deadloop_dump *dump) + static void trusty_vmm_dump_data(struct deadloop_dump *dump) + { + struct dump_data *dump_data; +- int i; ++ char *p, *pstr; + + if (!dump) + return; + + dump_data = &(dump->data); + +- for (i = 0; i < dump_data->length; i++) +- pr_info("%c", dump_data->data[i]); ++ pstr = (char *)dump_data->data; ++ for (p = pstr; p < ((char *)dump_data->data + dump_data->length); p++) { ++ if (*p == '\r') { ++ *p = 0x00; ++ } else if (*p == '\n') { ++ *p = 0x00; ++ pr_info("%s\n", pstr); ++ pstr = (char *)(p + 1); ++ } ++ } ++ /* dump the characters in the last line */ ++ if ((pstr - (char *)(dump_data->data)) < dump_data->length) { ++ *p = 0x00; ++ pr_info("%s\n", pstr); ++ } + } + + static int trusty_vmm_panic_notify(struct notifier_block *nb, +@@ -287,7 +300,7 @@ static int trusty_log_probe(struct platform_device *pdev) + } + + /* allocate debug buffer for vmm panic dump */ +- g_vmm_debug_buf = get_zeroed_page(GFP_KERNEL); ++ g_vmm_debug_buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); + if (!g_vmm_debug_buf) { + result = -ENOMEM; + goto error_alloc_vmm; +-- +2.17.1 + diff --git a/patches/0028-ASoC-Intel-Baytrail-Define-separate-ACPI-loader.audio b/patches/0028-ASoC-Intel-Baytrail-Define-separate-ACPI-loader.audio new file mode 100644 index 0000000000..aee52cb581 --- /dev/null +++ b/patches/0028-ASoC-Intel-Baytrail-Define-separate-ACPI-loader.audio @@ -0,0 +1,195 @@ 
+From 7316b387672e0281622c479e8bf41c88fa923064 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Fri, 16 Aug 2019 17:50:34 +0200 +Subject: [PATCH 028/193] ASoC: Intel: Baytrail: Define separate ACPI loader + +With common acpi code exposed, separate Baytrail specific code from +other legacy platforms. To reduce diff delta, it's still loaded with +separate module called: byt-acpi. This may be address later, once +heavy hitters are gone. Since this is the second of two, separation +carries removal of common sst-acpi module with it. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/Kconfig | 8 ++-- + sound/soc/intel/baytrail/Makefile | 2 + + sound/soc/intel/baytrail/acpi.c | 43 +++++++++++++++++++++ + sound/soc/intel/baytrail/sst-baytrail-dsp.c | 1 - + sound/soc/intel/common/Makefile | 4 +- + sound/soc/intel/common/sst-acpi.c | 36 ----------------- + 6 files changed, 49 insertions(+), 45 deletions(-) + create mode 100644 sound/soc/intel/baytrail/acpi.c + +diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig +index 15deda49fb89..ee7c44f1c4d0 100644 +--- a/sound/soc/intel/Kconfig ++++ b/sound/soc/intel/Kconfig +@@ -35,11 +35,9 @@ config SND_SOC_INTEL_HASWELL_ACPI + tristate + # This option controls the ACPI-based on HSW/BDW platforms + +-config SND_SOC_INTEL_SST_ACPI ++config SND_SOC_INTEL_BAYTRAIL_ACPI + tristate +- # This option controls ACPI-based probing on +- # Haswell/Broadwell/Baytrail legacy and will be set +- # when these platforms are enabled ++ # This option controls the ACPI-based on BYT platforms + + config SND_SOC_INTEL_SST + tristate +@@ -68,7 +66,7 @@ config SND_SOC_INTEL_BAYTRAIL + tristate "Baytrail (legacy) Platforms" + depends on DMADEVICES && ACPI && SND_SST_ATOM_HIFI2_PLATFORM=n + select SND_SOC_INTEL_SST +- select SND_SOC_INTEL_SST_ACPI ++ select SND_SOC_INTEL_BAYTRAIL_ACPI + select SND_SOC_INTEL_SST_FIRMWARE + select SND_SOC_ACPI_INTEL_MATCH + help +diff --git a/sound/soc/intel/baytrail/Makefile 
b/sound/soc/intel/baytrail/Makefile +index 4d0806aac6bd..b59d4893b46b 100644 +--- a/sound/soc/intel/baytrail/Makefile ++++ b/sound/soc/intel/baytrail/Makefile +@@ -1,5 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0-only + snd-soc-sst-baytrail-pcm-objs := \ + sst-baytrail-ipc.o sst-baytrail-pcm.o sst-baytrail-dsp.o ++snd-soc-byt-acpi-objs := acpi.o + + obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += snd-soc-sst-baytrail-pcm.o ++obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL_ACPI) += snd-soc-byt-acpi.o +diff --git a/sound/soc/intel/baytrail/acpi.c b/sound/soc/intel/baytrail/acpi.c +new file mode 100644 +index 000000000000..ba6590309a1f +--- /dev/null ++++ b/sound/soc/intel/baytrail/acpi.c +@@ -0,0 +1,43 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Intel BYT loader on ACPI systems ++ * ++ * Copyright (C) 2019, Intel Corporation. All rights reserved. ++ */ ++ ++#include ++#include ++#include ++#include ++#include "../common/sst-dsp.h" ++ ++static struct sst_acpi_desc byt_acpi_desc = { ++ .drv_name = "baytrail-pcm-audio", ++ .machines = snd_soc_acpi_intel_baytrail_legacy_machines, ++ .resindex_lpe_base = 0, ++ .resindex_pcicfg_base = 1, ++ .resindex_fw_base = 2, ++ .irqindex_host_ipc = 5, ++ .sst_id = SST_DEV_ID_BYT, ++ .resindex_dma_base = -1, ++}; ++ ++static const struct acpi_device_id byt_acpi_ids[] = { ++ { "80860F28", (unsigned long)&byt_acpi_desc }, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, byt_acpi_ids); ++ ++static struct platform_driver byt_acpi_driver = { ++ .probe = sst_dsp_acpi_probe, ++ .remove = sst_dsp_acpi_remove, ++ .driver = { ++ .name = "byt-acpi", ++ .acpi_match_table = ACPI_PTR(byt_acpi_ids), ++ }, ++}; ++module_platform_driver(byt_acpi_driver); ++ ++MODULE_AUTHOR("Cezary Rojewski "); ++MODULE_DESCRIPTION("Intel BYT loader on ACPI systems"); ++MODULE_LICENSE("GPL v2"); +diff --git a/sound/soc/intel/baytrail/sst-baytrail-dsp.c b/sound/soc/intel/baytrail/sst-baytrail-dsp.c +index 4869e18116eb..81bc05d62d5d 100644 +--- 
a/sound/soc/intel/baytrail/sst-baytrail-dsp.c ++++ b/sound/soc/intel/baytrail/sst-baytrail-dsp.c +@@ -13,7 +13,6 @@ + #include + #include + #include +- + #include "../common/sst-dsp.h" + #include "../common/sst-dsp-priv.h" + #include "sst-baytrail-ipc.h" +diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile +index 18d9630ae9a2..e5ab82208f24 100644 +--- a/sound/soc/intel/common/Makefile ++++ b/sound/soc/intel/common/Makefile +@@ -1,6 +1,5 @@ + # SPDX-License-Identifier: GPL-2.0 +-snd-soc-sst-dsp-objs := sst-dsp.o +-snd-soc-sst-acpi-objs := sst-acpi.o ++snd-soc-sst-dsp-objs := sst-dsp.o sst-acpi.o + snd-soc-sst-ipc-objs := sst-ipc.o + snd-soc-sst-firmware-objs := sst-firmware.o + snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-match.o \ +@@ -12,6 +11,5 @@ snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-m + soc-acpi-intel-hda-match.o + + obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o +-obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o + obj-$(CONFIG_SND_SOC_INTEL_SST_FIRMWARE) += snd-soc-sst-firmware.o + obj-$(CONFIG_SND_SOC_ACPI_INTEL_MATCH) += snd-soc-acpi-intel-match.o +diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c +index 077070daf6d8..3b3c8894a65a 100644 +--- a/sound/soc/intel/common/sst-acpi.c ++++ b/sound/soc/intel/common/sst-acpi.c +@@ -13,7 +13,6 @@ + + #include "sst-dsp.h" + #include +-#include + + struct sst_acpi_priv { + struct platform_device *pdev_mach; +@@ -152,38 +151,3 @@ int sst_dsp_acpi_remove(struct platform_device *pdev) + return 0; + } + EXPORT_SYMBOL_GPL(sst_dsp_acpi_remove); +- +-#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI) +-static struct sst_acpi_desc sst_acpi_baytrail_desc = { +- .drv_name = "baytrail-pcm-audio", +- .machines = snd_soc_acpi_intel_baytrail_legacy_machines, +- .resindex_lpe_base = 0, +- .resindex_pcicfg_base = 1, +- .resindex_fw_base = 2, +- .irqindex_host_ipc = 5, +- .sst_id 
= SST_DEV_ID_BYT, +- .resindex_dma_base = -1, +-}; +-#endif +- +-static const struct acpi_device_id sst_acpi_match[] = { +-#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI) +- { "80860F28", (unsigned long)&sst_acpi_baytrail_desc }, +-#endif +- { } +-}; +-MODULE_DEVICE_TABLE(acpi, sst_acpi_match); +- +-static struct platform_driver sst_acpi_driver = { +- .probe = sst_dsp_acpi_probe, +- .remove = sst_dsp_acpi_remove, +- .driver = { +- .name = "sst-acpi", +- .acpi_match_table = ACPI_PTR(sst_acpi_match), +- }, +-}; +-module_platform_driver(sst_acpi_driver); +- +-MODULE_AUTHOR("Jarkko Nikula "); +-MODULE_DESCRIPTION("Intel SST loader on ACPI systems"); +-MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0028-Modify-Trusty-drivers-so-as-to-compatible-with-Kern.trusty b/patches/0028-Modify-Trusty-drivers-so-as-to-compatible-with-Kern.trusty new file mode 100644 index 0000000000..761e3bb47b --- /dev/null +++ b/patches/0028-Modify-Trusty-drivers-so-as-to-compatible-with-Kern.trusty @@ -0,0 +1,232 @@ +From 1ddb20269460a98f94831866693d5d939e7952df Mon Sep 17 00:00:00 2001 +From: weideng +Date: Tue, 28 Mar 2017 01:40:53 +0000 +Subject: [PATCH 28/63] Modify Trusty drivers so as to compatible with Kernel + 4.11 + +Cpu_hotplug_register/unregister APIs are removed from Kernel +4.11. Add this patch to fix these issues for kernel change. 
+ +Change-Id: I0ecafaff20128dd53f80fbdc357918ef69a36da7 +Signed-off-by: weideng +--- + drivers/trusty/trusty-ipc.c | 3 +- + drivers/trusty/trusty-irq.c | 96 +++++++++++++++++++--------------- + drivers/trusty/trusty-virtio.c | 3 +- + 3 files changed, 58 insertions(+), 44 deletions(-) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index d6765f1d4510..363b0239310a 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1549,7 +1550,7 @@ static int tipc_virtio_probe(struct virtio_device *vdev) + vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0'; + + /* find tx virtqueues (rx and tx and in this order) */ +- err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names); ++ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL); + if (err) + goto err_find_vqs; + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index 6c510a65e784..363b302dec0a 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -59,10 +59,13 @@ struct trusty_irq_state { + spinlock_t normal_irqs_lock; + struct trusty_irq_irqset __percpu *percpu_irqs; + struct notifier_block trusty_call_notifier; +- struct notifier_block cpu_notifier; ++ /* CPU hotplug instances for online */ ++ struct hlist_node node; + struct workqueue_struct *wq; + }; + ++static enum cpuhp_state trusty_irq_online; ++ + #define TRUSTY_VMCALL_PENDING_INTR 0x74727505 + static inline void set_pending_intr_to_lk(uint8_t vector) + { +@@ -252,49 +255,30 @@ irqreturn_t trusty_irq_handler(int irq, void *data) + return IRQ_HANDLED; + } + +-static void trusty_irq_cpu_up(void *info) ++static int trusty_irq_cpu_up(unsigned int cpu, struct hlist_node *node) + { + unsigned long irq_flags; +- struct trusty_irq_state *is = info; ++ struct trusty_irq_state *is = hlist_entry_safe(node, struct trusty_irq_state, node); + + dev_dbg(is->dev, "%s: cpu 
%d\n", __func__, smp_processor_id()); + + local_irq_save(irq_flags); + trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs)); + local_irq_restore(irq_flags); ++ return 0; + } + +-static void trusty_irq_cpu_down(void *info) ++static int trusty_irq_cpu_down(unsigned int cpu, struct hlist_node *node) + { + unsigned long irq_flags; +- struct trusty_irq_state *is = info; ++ struct trusty_irq_state *is = hlist_entry_safe(node, struct trusty_irq_state, node); + + dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id()); + + local_irq_save(irq_flags); + trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs)); + local_irq_restore(irq_flags); +-} +- +-static int trusty_irq_cpu_notify(struct notifier_block *nb, +- unsigned long action, void *hcpu) +-{ +- struct trusty_irq_state *is; +- +- is = container_of(nb, struct trusty_irq_state, cpu_notifier); +- +- dev_dbg(is->dev, "%s: 0x%lx\n", __func__, action); +- +- switch (action & ~CPU_TASKS_FROZEN) { +- case CPU_UP_PREPARE: +- trusty_irq_cpu_up(is); +- break; +- case CPU_DEAD: +- trusty_irq_cpu_down(is); +- break; +- } +- +- return NOTIFY_OK; ++ return 0; + } + + static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq) +@@ -580,6 +564,20 @@ static void trusty_irq_free_irqs(struct trusty_irq_state *is) + } */ + } + ++static int trusty_irq_cpu_notif_add(struct trusty_irq_state *is) ++{ ++ int ret; ++ ++ ret = cpuhp_state_add_instance(trusty_irq_online, &is->node); ++ ++ return ret; ++} ++ ++static void trusty_irq_cpu_notif_remove(struct trusty_irq_state *is) ++{ ++ cpuhp_state_remove_instance(trusty_irq_online, &is->node); ++} ++ + static int trusty_irq_probe(struct platform_device *pdev) + { + int ret; +@@ -646,23 +644,14 @@ static int trusty_irq_probe(struct platform_device *pdev) + for (irq = 0; irq >= 0;) + irq = trusty_irq_init_one(is, irq, false); + +- is->cpu_notifier.notifier_call = trusty_irq_cpu_notify; +- ret = register_hotcpu_notifier(&is->cpu_notifier); ++ ret = 
trusty_irq_cpu_notif_add(is); + if (ret) { + dev_err(&pdev->dev, "register_cpu_notifier failed %d\n", ret); + goto err_register_hotcpu_notifier; + } +- ret = on_each_cpu(trusty_irq_cpu_up, is, 0); +- if (ret) { +- dev_err(&pdev->dev, "register_cpu_notifier failed %d\n", ret); +- goto err_on_each_cpu; +- } + + return 0; + +-err_on_each_cpu: +- unregister_hotcpu_notifier(&is->cpu_notifier); +- on_each_cpu(trusty_irq_cpu_down, is, 1); + err_register_hotcpu_notifier: + spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); + trusty_irq_disable_irqset(is, &is->normal_irqs); +@@ -692,17 +681,13 @@ static int trusty_irq_probe(struct platform_device *pdev) + + static int trusty_irq_remove(struct platform_device *pdev) + { +- int ret; + unsigned int cpu; + unsigned long irq_flags; + struct trusty_irq_state *is = platform_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s\n", __func__); + +- unregister_hotcpu_notifier(&is->cpu_notifier); +- ret = on_each_cpu(trusty_irq_cpu_down, is, 1); +- if (ret) +- dev_err(&pdev->dev, "on_each_cpu failed %d\n", ret); ++ trusty_irq_cpu_notif_remove(is); + spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); + trusty_irq_disable_irqset(is, &is->normal_irqs); + spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); +@@ -742,8 +727,35 @@ static struct platform_driver trusty_irq_driver = { + }, + }; + +-module_platform_driver(trusty_irq_driver); ++static int __init trusty_irq_driver_init(void) ++{ ++ int ret; ++ ++ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "x86/trustyirq:online", ++ trusty_irq_cpu_up, trusty_irq_cpu_down); ++ if (ret < 0) ++ goto out; ++ trusty_irq_online = ret; ++ ++ ret = platform_driver_register(&trusty_irq_driver); ++ if (ret) ++ goto err_dead; ++ ++ return 0; ++err_dead: ++ cpuhp_remove_multi_state(trusty_irq_online); ++out: ++ return ret; ++} ++ ++static void __exit trusty_irq_driver_exit(void) ++{ ++ cpuhp_remove_multi_state(trusty_irq_online); ++ platform_driver_unregister(&trusty_irq_driver); ++} + 
++module_init(trusty_irq_driver_init); ++module_exit(trusty_irq_driver_exit); + + MODULE_LICENSE("GPL v2"); + +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index eaeb020e98f4..3d1a9aabef83 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -349,7 +349,8 @@ static struct virtqueue *_find_vq(struct virtio_device *vdev, + static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], +- const char * const names[]) ++ const char * const names[], ++ struct irq_affinity *desc) + { + uint i; + int ret; +-- +2.17.1 + diff --git a/patches/0028-drm-i915-Use-hweight8-for-8bit-masks.drm b/patches/0028-drm-i915-Use-hweight8-for-8bit-masks.drm new file mode 100644 index 0000000000..5afc5ffe49 --- /dev/null +++ b/patches/0028-drm-i915-Use-hweight8-for-8bit-masks.drm @@ -0,0 +1,92 @@ +From 8c19a74a40aeb46b01d1eaf06fff38f314febc15 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= +Date: Wed, 21 Aug 2019 20:30:33 +0300 +Subject: [PATCH 028/690] drm/i915: Use hweight8() for 8bit masks +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Use hweight8() instead of hweight32() for 8bit masks. Doesn't actually +matter for us since the arch code will go for hweight32() anyway, but +maybe we stil want to do this for documentation purposes? 
+ +Signed-off-by: Ville Syrjälä +Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-5-ville.syrjala@linux.intel.com +Reviewed-by: Jani Nikula +--- + drivers/gpu/drm/i915/intel_pm.c | 20 ++++++++++---------- + 1 file changed, 10 insertions(+), 10 deletions(-) + +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index b4b9609db092..4fa9bc83c8b4 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -1327,8 +1327,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) + struct intel_atomic_state *state = + to_intel_atomic_state(crtc_state->base.state); + struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; +- int num_active_planes = hweight32(crtc_state->active_planes & +- ~BIT(PLANE_CURSOR)); ++ int num_active_planes = hweight8(crtc_state->active_planes & ++ ~BIT(PLANE_CURSOR)); + const struct g4x_pipe_wm *raw; + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; +@@ -1659,7 +1659,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) + &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; + struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; + unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); +- int num_active_planes = hweight32(active_planes); ++ int num_active_planes = hweight8(active_planes); + const int fifo_size = 511; + int fifo_extra, fifo_left = fifo_size; + int sprite0_fifo_extra = 0; +@@ -1848,8 +1848,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) + struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; + const struct vlv_fifo_state *fifo_state = + &crtc_state->wm.vlv.fifo_state; +- int num_active_planes = hweight32(crtc_state->active_planes & +- ~BIT(PLANE_CURSOR)); ++ int num_active_planes = hweight8(crtc_state->active_planes & ++ ~BIT(PLANE_CURSOR)); + bool needs_modeset = 
drm_atomic_crtc_needs_modeset(&crtc_state->base); + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; +@@ -3761,14 +3761,14 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) + /* + * If there are no active CRTCs, no additional checks need be performed + */ +- if (hweight32(state->active_pipes) == 0) ++ if (hweight8(state->active_pipes) == 0) + return true; + + /* + * SKL+ workaround: bspec recommends we disable SAGV when we have + * more then one pipe enabled + */ +- if (hweight32(state->active_pipes) > 1) ++ if (hweight8(state->active_pipes) > 1) + return false; + + /* Since we're now guaranteed to only have one active CRTC... */ +@@ -3867,14 +3867,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, + if (WARN_ON(!state) || !crtc_state->base.active) { + alloc->start = 0; + alloc->end = 0; +- *num_active = hweight32(dev_priv->active_pipes); ++ *num_active = hweight8(dev_priv->active_pipes); + return; + } + + if (intel_state->active_pipe_changes) +- *num_active = hweight32(intel_state->active_pipes); ++ *num_active = hweight8(intel_state->active_pipes); + else +- *num_active = hweight32(dev_priv->active_pipes); ++ *num_active = hweight8(dev_priv->active_pipes); + + ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate, + *num_active, ddb); +-- +2.17.1 + diff --git a/patches/0028-net-stmmac-Add-option-for-VLAN-filter-fail-qu.connectivity b/patches/0028-net-stmmac-Add-option-for-VLAN-filter-fail-qu.connectivity new file mode 100644 index 0000000000..207e42d7f1 --- /dev/null +++ b/patches/0028-net-stmmac-Add-option-for-VLAN-filter-fail-qu.connectivity @@ -0,0 +1,134 @@ +From 7e15ffd930e5e8122eabb51c0a606c5cfcb13752 Mon Sep 17 00:00:00 2001 +From: "Chuah, Kim Tatt" +Date: Wed, 18 Jul 2018 08:14:16 +0800 +Subject: [PATCH 028/108] net: stmmac: Add option for VLAN filter fail queue + enable + +Add option in plat_stmmacenet_data struct to enable VLAN Filter Fail +Queuing. 
This option allows packets that fail VLAN filter to be routed +to a specific Rx queue when Receive All is also set. + +When this option is enabled: +- Enable VFFQ only when entering promiscuous mode, because Receive All + will pass up all rx packets that failed address filtering (similar to + promiscuous mode). +- VLAN-promiscuous mode is never entered to allow rx packet to fail VLAN + filters and get routed to selected VFFQ Rx queue. + +Reviewed-by: Voon Weifeng +Reviewed-by: Ong Boon Leong +Signed-off-by: Chuah, Kim Tatt +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 2 ++ + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1 + + drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 15 +++++++++++++-- + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 6 ++++++ + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3 +++ + include/linux/stmmac.h | 2 ++ + 6 files changed, 27 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index 782fd503d623..f564c48c651a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -477,6 +477,8 @@ struct mac_device_info { + unsigned int num_vlan; + u32 vlan_filter[32]; + unsigned int promisc; ++ bool vlan_fail_q_en; ++ u8 vlan_fail_q; + }; + + struct stmmac_rx_routing { +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +index f842807edce8..b7a7967b376a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -68,6 +68,7 @@ + #define GMAC_PACKET_FILTER_PCF BIT(7) + #define GMAC_PACKET_FILTER_HPF BIT(10) + #define GMAC_PACKET_FILTER_VTFE BIT(16) ++#define GMAC_PACKET_FILTER_RA BIT(31) + + #define GMAC_MAX_PERFECT_ADDRESSES 128 + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 
0f8fa8b9c53b..da4e6595cc65 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -601,7 +601,18 @@ static void dwmac4_set_filter(struct mac_device_info *hw, + value &= ~GMAC_PACKET_FILTER_PM; + value &= ~GMAC_PACKET_FILTER_PR; + if (dev->flags & IFF_PROMISC) { +- value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF; ++ /* VLAN Tag Filter Fail Packets Queuing */ ++ if (hw->vlan_fail_q_en) { ++ value = readl(ioaddr + GMAC_RXQ_CTRL4); ++ value &= ~GMAC_RXQCTRL_VFFQ_MASK; ++ value |= GMAC_RXQCTRL_VFFQE | ++ (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT); ++ writel(value, ioaddr + GMAC_RXQ_CTRL4); ++ ++ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA; ++ } else { ++ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF; ++ } + } else if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > hw->multicast_filter_bins)) { + /* Pass all multi */ +@@ -663,7 +674,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, + + writel(value, ioaddr + GMAC_PACKET_FILTER); + +- if (dev->flags & IFF_PROMISC) { ++ if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) { + if (!hw->promisc) { + hw->promisc = 1; + dwmac4_vlan_promisc_enable(dev, hw); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index 48550d617b01..e62181bf50ec 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -75,6 +75,12 @@ + /* MDIO interrupt enable in MAC_Interrupt_Enable register */ + #define GMAC_INT_MDIO_EN BIT(18) + ++/* EQoS version 5.xx VLAN Tag Filter Fail Packets Queuing */ ++#define GMAC_RXQ_CTRL4 0x00000094 ++#define GMAC_RXQCTRL_VFFQ_MASK GENMASK(19, 17) ++#define GMAC_RXQCTRL_VFFQ_SHIFT 17 ++#define GMAC_RXQCTRL_VFFQE BIT(16) ++ + int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); + int dwmac5_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, unsigned int 
asp, +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 329ce0af8a05..8892fb485773 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -4777,6 +4777,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) + if (priv->dma_cap.tsoen) + dev_info(priv->device, "TSO supported\n"); + ++ priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; ++ priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; ++ + /* Run HW quirks, if any */ + if (priv->hwif_quirks) { + ret = priv->hwif_quirks(priv); +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 735116a0af02..dd9676cdbdde 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -195,5 +195,7 @@ struct plat_stmmacenet_data { + int msi_sfty_ue_vec; + int msi_rx_base_vec; + int msi_tx_base_vec; ++ bool vlan_fail_q_en; ++ u8 vlan_fail_q; + }; + #endif +-- +2.17.1 + diff --git a/patches/0028-rpmb-mux-add-key-retrieval-for-RPMB-multiplexor.security b/patches/0028-rpmb-mux-add-key-retrieval-for-RPMB-multiplexor.security new file mode 100644 index 0000000000..4f65015c88 --- /dev/null +++ b/patches/0028-rpmb-mux-add-key-retrieval-for-RPMB-multiplexor.security @@ -0,0 +1,634 @@ +From 2fce233a1b0700ca101f65bf0169243b924179f9 Mon Sep 17 00:00:00 2001 +From: Qi Yadong +Date: Wed, 17 Oct 2018 15:10:41 +0800 +Subject: [PATCH 28/65] rpmb: mux: add key retrieval for RPMB multiplexor + +Retrieve a RPMB key from a bootloader. +Currently automotive bootloader and slim bootloader are +supported. 
+ +Change-Id: If5ab4024fc1cf02967fdc88f097f6918d3833b2b +Signed-off-by: Qi Yadong +Signed-off-by: Huang, Yang +Signed-off-by: Tomas Winkler +--- + .../admin-guide/kernel-parameters.txt | 4 + + drivers/char/rpmb/mux/Makefile | 5 +- + drivers/char/rpmb/mux/key.c | 62 +++++++ + drivers/char/rpmb/mux/key.h | 33 ++++ + drivers/char/rpmb/mux/key_abl.c | 141 +++++++++++++++ + drivers/char/rpmb/mux/key_abl.h | 10 ++ + drivers/char/rpmb/mux/key_sbl.c | 164 ++++++++++++++++++ + drivers/char/rpmb/mux/key_sbl.h | 10 ++ + drivers/char/rpmb/mux/mux.c | 60 ++++--- + 9 files changed, 464 insertions(+), 25 deletions(-) + create mode 100644 drivers/char/rpmb/mux/key.c + create mode 100644 drivers/char/rpmb/mux/key.h + create mode 100644 drivers/char/rpmb/mux/key_abl.c + create mode 100644 drivers/char/rpmb/mux/key_abl.h + create mode 100644 drivers/char/rpmb/mux/key_sbl.c + create mode 100644 drivers/char/rpmb/mux/key_sbl.h + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index a84a83f8881e..f75f499c7390 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -756,6 +756,10 @@ + Format: , + See also Documentation/input/devices/joystick-parport.rst + ++ dev_sec_info.param_addr= ++ [BOOT] address of automotive bootloader (abl) ++ security parameters. ++ + ddebug_query= [KNL,DYNAMIC_DEBUG] Enable debug messages at early boot + time. 
See + Documentation/admin-guide/dynamic-debug-howto.rst for +diff --git a/drivers/char/rpmb/mux/Makefile b/drivers/char/rpmb/mux/Makefile +index 165309fc35de..f27f2fb20df4 100644 +--- a/drivers/char/rpmb/mux/Makefile ++++ b/drivers/char/rpmb/mux/Makefile +@@ -4,6 +4,9 @@ + obj-$(CONFIG_RPMB_MUX) += rpmb_mux.o + rpmb_mux-objs := mux.o + +-obj-$(CONFIG_RPMB_KEY) += mux_hkdf.o ++obj-$(CONFIG_RPMB_MUX_KEY) += mux_hkdf.o ++obj-$(CONFIG_RPMB_MUX_KEY) += key_abl.o ++obj-$(CONFIG_RPMB_MUX_KEY) += key_sbl.o ++obj-$(CONFIG_RPMB_MUX_KEY) += key.o + + ccflags-y += -D__CHECK_ENDIAN__ +diff --git a/drivers/char/rpmb/mux/key.c b/drivers/char/rpmb/mux/key.c +new file mode 100644 +index 000000000000..af73c9db101d +--- /dev/null ++++ b/drivers/char/rpmb/mux/key.c +@@ -0,0 +1,62 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * RPMB Key management: key retrieval ++ * ++ * Copyright (c) 2018-2019 Intel Corporation. ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ ++ ++#include ++#include ++#include ++ ++#include "key.h" ++#include "key_sbl.h" ++#include "key_abl.h" ++ ++static ulong sbl_params_addr; ++static ulong abl_params_addr; ++ ++static int __init get_sbl_params_addr(char *str) ++{ ++ if (kstrtoul(str, 16, &sbl_params_addr)) { ++ pr_err("Failed to parse ImageBootParamsAddr\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++__setup("ImageBootParamsAddr=", get_sbl_params_addr); ++ ++static int __init get_abl_params_addr(char *str) ++{ ++ if (kstrtoul(str, 16, &abl_params_addr)) { ++ pr_err("Failed to parse dev_sec_info.param\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++__setup("dev_sec_info.param_addr=", get_abl_params_addr); ++ ++int rpmb_key_get(const u8 *dev_id, size_t dev_id_len, ++ size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]) ++{ ++ int ret; ++ ++ if (sbl_params_addr) ++ ret = rpmb_key_sbl_get(sbl_params_addr, max_partition_num, ++ rpmb_key); ++ else if (abl_params_addr) ++ ret = rpmb_key_abl_get(abl_params_addr, dev_id, dev_id_len, ++ 
max_partition_num, rpmb_key); ++ else ++ ret = -EINVAL; ++ ++ if (ret < 0) ++ pr_err("Failed to get boot_params from the command line!\n"); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(rpmb_key_get); +diff --git a/drivers/char/rpmb/mux/key.h b/drivers/char/rpmb/mux/key.h +new file mode 100644 +index 000000000000..816c0a8346b3 +--- /dev/null ++++ b/drivers/char/rpmb/mux/key.h +@@ -0,0 +1,33 @@ ++/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ ++/* ++ * RPMB Key management: retrieve and distribute ++ * ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. ++ */ ++ ++#ifndef __RPMB_KEY_H__ ++#define __RPMB_KEY_H__ ++ ++/* ++ * Storage may support multiple rpmb partitions, but the specification ++ * does not specify the max number of rpmb partitions. ++ * Here we use 6 for now. In future, this may need to be expanded ++ * dynamically. ++ */ ++#define RPMB_MAX_PARTITION_NUMBER 6U ++ ++#define RPMB_KEY_LENGTH 64U ++ ++#ifdef CONFIG_RPMB_MUX_KEY ++int rpmb_key_get(const u8 *dev_id, size_t dev_id_len, ++ size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]); ++#else ++static inline ++int rpmb_key_get(const u8 *dev_id, size_t dev_id_len, ++ size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]) ++{ ++ return -EOPNOTSUPP; ++} ++#endif ++ ++#endif /* !__RPMB_KEY_H__ */ +diff --git a/drivers/char/rpmb/mux/key_abl.c b/drivers/char/rpmb/mux/key_abl.c +new file mode 100644 +index 000000000000..e5b063e55df5 +--- /dev/null ++++ b/drivers/char/rpmb/mux/key_abl.c +@@ -0,0 +1,141 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Parse legacy seed from ABL(Automotive Bootloader). Derive a rpmb key ++ * with the legacy seed. ++ * ++ * Copyright (c) 2018-2019 Intel Corporation. 
++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++ ++#include "key.h" ++#include "key_abl.h" ++#include "mux_hkdf.h" ++ ++#define ABL_SEED_LEN 32U ++#define ABL_SEED_LIST_MAX 4U ++#define EMMC_SERIAL_LEN 15U ++ ++struct abl_seed_info { ++ u8 svn; ++ u8 reserved[3]; ++ u8 seed[ABL_SEED_LEN]; ++}; ++ ++struct dev_sec_info { ++ u32 size_of_this_struct; ++ u32 version; ++ u32 num_seeds; ++ struct abl_seed_info seed_list[ABL_SEED_LIST_MAX]; ++}; ++ ++/* ++ * The output serial is concatenation of mmc product name with a string ++ * representation of PSN. ++ */ ++static int rpmb_key_abl_build_serial(const u8 *cid, u8 *serial) ++{ ++ u32 psn; ++ ++ if (!cid || !serial) ++ return -EFAULT; ++ ++ psn = (cid[9] << 24) | (cid[8] << 16) | (cid[15] << 8) | cid[14]; ++ ++ serial[0] = cid[0]; ++ serial[1] = cid[7]; ++ serial[2] = cid[6]; ++ serial[3] = cid[5]; ++ serial[4] = cid[4]; ++ serial[5] = cid[11]; ++ ++ snprintf(&serial[6], 9, "%08x", psn); ++ ++ return 0; ++} ++ ++int rpmb_key_abl_get(ulong params_addr, const u8 *dev_id, size_t dev_id_len, ++ size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]) ++{ ++ u32 i, legacy_seed_index = 0; ++ struct dev_sec_info *sec_info; ++ struct abl_seed_info *seed_list; ++ u8 serial[EMMC_SERIAL_LEN] = {0}; ++ int ret; ++ ++ if (!params_addr || !dev_id || !dev_id_len || !max_partition_num) { ++ pr_err("Invalid input params!\n"); ++ return -EFAULT; ++ } ++ ++ ret = rpmb_key_abl_build_serial(dev_id, serial); ++ if (ret) { ++ pr_err("Failed to build serial from cid\n"); ++ return -EFAULT; ++ } ++ ++ sec_info = memremap(params_addr, sizeof(*sec_info), MEMREMAP_WB); ++ if (!sec_info) { ++ pr_err("Remap params_addr failed!\n"); ++ return -EFAULT; ++ } ++ seed_list = &sec_info->seed_list[0]; ++ ++ /* ++ * The seed_list must contain at least 2 seeds: 1 is legacy ++ * seed and others are SVN based seed. 
++ */ ++ if (sec_info->num_seeds < 2U || ++ sec_info->num_seeds > ABL_SEED_LIST_MAX) { ++ pr_err("Invalid seed number!\n"); ++ memunmap(sec_info); ++ return -EFAULT; ++ } ++ ++ /* ++ * The seed_list from ABL contains several seeds which based on SVN ++ * and one legacy seed which is not based on SVN. The legacy seed's ++ * svn value is minimum in the seed list. And CSE ensures at least two ++ * seeds will be generated which will contain the legacy seed. ++ * Here find the legacy seed index first. ++ */ ++ for (i = 1; i < sec_info->num_seeds; i++) { ++ if (seed_list[i].svn < seed_list[legacy_seed_index].svn) ++ legacy_seed_index = i; ++ } ++ ++ /* ++ * The eMMC Field Firmware Update would impact below fields of ++ * CID(Card Identification): ++ * CID[6]:PRV (Product Revision) ++ * CID[0]:CRC (CRC7 checksum) ++ * Mapping relation between CID and eMMC serial: ++ * serial[0] = CID[0] ++ * serial[2] = CID[6] ++ * So mask off serial[0]/serial[2] fields when using eMMC serial ++ * to derive rpmb key. ++ */ ++ serial[0] ^= serial[0]; ++ serial[2] ^= serial[2]; ++ ++ /* ++ * Derive RPMB key from legacy seed with storage serial number. ++ * Currently, only support eMMC storage device, UFS storage device is ++ * not supported. ++ */ ++ ret = mux_hkdf_sha256(&rpmb_key[0][0], SHA256_HASH_SIZE, ++ (const u8 *)&seed_list[legacy_seed_index].seed[0], ++ ABL_SEED_LEN, ++ NULL, 0, ++ (const u8 *)serial, sizeof(serial)); ++ ++ memset(&seed_list[legacy_seed_index], 0, sizeof(struct abl_seed_info)); ++ memunmap(sec_info); ++ ++ return ret; ++} +diff --git a/drivers/char/rpmb/mux/key_abl.h b/drivers/char/rpmb/mux/key_abl.h +new file mode 100644 +index 000000000000..136c12ab2219 +--- /dev/null ++++ b/drivers/char/rpmb/mux/key_abl.h +@@ -0,0 +1,10 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (c) 2018-2019 Intel Corporation. 
*/ ++ ++#ifndef __RPMB_KEY_ABL__ ++#define __RPMB_KEY_ABL__ ++ ++int rpmb_key_abl_get(ulong params_addr, const u8 *dev_id, size_t dev_id_len, ++ size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]); ++ ++#endif /* !__RPMB_KEY_ABL__ */ +diff --git a/drivers/char/rpmb/mux/key_sbl.c b/drivers/char/rpmb/mux/key_sbl.c +new file mode 100644 +index 000000000000..0959a2769cb5 +--- /dev/null ++++ b/drivers/char/rpmb/mux/key_sbl.c +@@ -0,0 +1,164 @@ ++// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 ++/* ++ * Parse RPMB key from SBL(SlimBootloader). ++ * ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ ++ ++#include ++#include ++#include ++ ++#include "key.h" ++#include "key_sbl.h" ++ ++#define SEED_ENTRY_TYPE_SVNSEED 0x1U ++#define SEED_ENTRY_TYPE_RPMBSEED 0x2U ++ ++#define SEED_ENTRY_USAGE_BASE_ON_SERIAL 0x1U ++#define SEED_ENTRY_USAGE_NOT_BASE_ON_SERIAL 0x2U ++ ++struct image_boot_params { ++ u32 size_of_this_struct; ++ u32 version; ++ u64 p_seed_list; ++ u64 p_platform_info; ++ u64 reserved; ++}; ++ ++struct seed_entry { ++ /* SVN based seed or RPMB seed or attestation key_box */ ++ u8 type; ++ /* For SVN seed: useed or dseed ++ * For RPMB seed: serial number based or not ++ */ ++ u8 usage; ++ /* index for the same type and usage seed */ ++ u8 index; ++ u8 reserved; ++ /* reserved for future use */ ++ u16 flags; ++ /* Total size of this seed entry */ ++ u16 seed_entry_size; ++ /* SVN seed: struct seed_info ++ * RPMB seed: u8 rpmb_seed[key_len] ++ */ ++ u8 seed[0]; ++}; ++ ++struct seed_list_hob { ++ u8 revision; ++ u8 rsvd0[3]; ++ u32 buffer_size; ++ u8 total_seed_count; ++ u8 rsvd1[3]; ++ struct seed_entry entry[0]; ++}; ++ ++static int rpmb_key_sbl_parse_seed_list(struct seed_list_hob *seed_hob, ++ size_t max_partition_num, ++ u8 rpmb_seed[][RPMB_KEY_LENGTH]) ++{ ++ u8 i; ++ u8 index = 0U; ++ struct seed_entry *entry; ++ ++ if (!seed_hob || !max_partition_num) { ++ 
pr_warn("Invalid input parameters!\n"); ++ goto fail; ++ } ++ ++ if (seed_hob->total_seed_count == 0U) { ++ pr_warn("Total seed count is 0.\n"); ++ goto fail; ++ } ++ ++ entry = seed_hob->entry; ++ ++ for (i = 0U; i < seed_hob->total_seed_count; i++) { ++ if ((u8 *)entry >= (u8 *)seed_hob + seed_hob->buffer_size) { ++ pr_warn("Exceed memory boundray!\n"); ++ goto fail; ++ } ++ ++ /* retrieve rpmb seed */ ++ if (entry->type == SEED_ENTRY_TYPE_RPMBSEED) { ++ if (entry->index != 0) { ++ pr_warn("RPMB usage mismatch!\n"); ++ goto fail; ++ } ++ ++ /* The seed_entry with same type/usage are always ++ * arranged by index in order of 0~3. ++ */ ++ if (entry->index != index) { ++ pr_warn("Index mismatch.\n"); ++ goto fail; ++ } ++ ++ if (entry->index > max_partition_num) { ++ pr_warn("Index exceed max number!\n"); ++ goto fail; ++ } ++ ++ memcpy(&rpmb_seed[index], entry->seed, RPMB_KEY_LENGTH); ++ index++; ++ ++ /* erase original seed in seed entry */ ++ memset(entry->seed, 0U, RPMB_KEY_LENGTH); ++ } ++ ++ entry = (struct seed_entry *)((u8 *)entry + ++ entry->seed_entry_size); ++ } ++ ++ return 0; ++ ++fail: ++ return -EFAULT; ++} ++ ++int rpmb_key_sbl_get(ulong params_addr, size_t max_partition_num, ++ u8 rpmb_key[][RPMB_KEY_LENGTH]) ++{ ++ struct image_boot_params *boot_params = NULL; ++ struct seed_list_hob *seed_list = NULL; ++ u32 remap_buffer_size = 0; ++ ++ if (!params_addr || !max_partition_num) { ++ pr_err("Invalid input params!\n"); ++ goto fail; ++ } ++ ++ boot_params = memremap(params_addr, sizeof(*boot_params), MEMREMAP_WB); ++ if (!boot_params) { ++ pr_err("Remap params_addr failed!\n"); ++ goto fail; ++ } ++ ++ seed_list = memremap(boot_params->p_seed_list, ++ sizeof(*seed_list), MEMREMAP_WB); ++ if (!seed_list) { ++ pr_err("Remap seed_list failed!\n"); ++ goto fail; ++ } ++ ++ remap_buffer_size = seed_list->buffer_size; ++ memunmap(seed_list); ++ ++ /* Remap with actual buffer size */ ++ seed_list = memremap(boot_params->p_seed_list, ++ remap_buffer_size, 
MEMREMAP_WB); ++ ++ return rpmb_key_sbl_parse_seed_list(seed_list, max_partition_num, ++ rpmb_key); ++ ++fail: ++ if (seed_list) ++ memunmap(seed_list); ++ if (boot_params) ++ memunmap(boot_params); ++ return -EFAULT; ++} +diff --git a/drivers/char/rpmb/mux/key_sbl.h b/drivers/char/rpmb/mux/key_sbl.h +new file mode 100644 +index 000000000000..90f6b060e708 +--- /dev/null ++++ b/drivers/char/rpmb/mux/key_sbl.h +@@ -0,0 +1,10 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (c) 2018-2019 Intel Corporation. */ ++ ++#ifndef __RPMB_KEY_SBL__ ++#define __RPMB_KEY_SBL__ ++ ++int rpmb_key_sbl_get(ulong params_addr, size_t max_partition_num, ++ u8 rpmb_key[][RPMB_KEY_LENGTH]); ++ ++#endif /* __RPMB_KEY_SBL__ */ +diff --git a/drivers/char/rpmb/mux/mux.c b/drivers/char/rpmb/mux/mux.c +index 59c7ed2c8944..4b7edde38084 100644 +--- a/drivers/char/rpmb/mux/mux.c ++++ b/drivers/char/rpmb/mux/mux.c +@@ -3,6 +3,8 @@ + * Copyright (c) 2018-2019 Intel Corporation. + */ + ++#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ ++ + #include + #include + #include +@@ -12,6 +14,8 @@ + #include + #include + ++#include "key.h" ++ + /** + * struct rpmb_mux_dev - device which can support RPMB partition + * @lock : the device lock +@@ -63,13 +67,6 @@ static int rpmb_mux_release(struct inode *inode, struct file *fp) + return 0; + } + +-static int rpmb_key_retrieval(void *rpmb_key) +-{ +- /* hard code */ +- memset(rpmb_key, 0x31, 32); +- return 0; +-} +- + static int rpmb_mux_hmac_256_alloc(struct rpmb_mux_dev *mux_dev) + { + struct shash_desc *desc; +@@ -566,6 +563,7 @@ static int rpmb_add_device(struct device *dev, struct class_interface *intf) + { + struct rpmb_mux_dev *mux_dev; + struct rpmb_dev *rdev = to_rpmb_dev(dev); ++ u8 rpmb_key[RPMB_MAX_PARTITION_NUMBER][RPMB_KEY_LENGTH]; + int ret; + + mux_dev = container_of(intf, struct rpmb_mux_dev, rpmb_interface); +@@ -589,7 +587,38 @@ static int rpmb_add_device(struct device *dev, struct class_interface *intf) + + 
mutex_unlock(&mux_dev->lock); + ++ memset(rpmb_key, 0, sizeof(rpmb_key)); ++ ret = rpmb_key_get(mux_dev->rdev->ops->dev_id, ++ mux_dev->rdev->ops->dev_id_len, ++ RPMB_MAX_PARTITION_NUMBER, ++ rpmb_key); ++ if (ret) { ++ dev_err(&rdev->dev, "rpmb_key_get failed: %d.\n", ret); ++ goto err_rpmb_key_get; ++ } ++ memcpy(mux_dev->rpmb_key, &rpmb_key[0], sizeof(mux_dev->rpmb_key)); ++ memset(rpmb_key, 0, sizeof(rpmb_key)); ++ ++ ret = crypto_shash_setkey(mux_dev->hash_desc->tfm, ++ mux_dev->rpmb_key, 32); ++ if (ret) { ++ dev_err(&rdev->dev, "set key failed = %d\n", ret); ++ goto err_crypto_shash_setkey; ++ } ++ + return 0; ++ ++err_crypto_shash_setkey: ++ memset(mux_dev->rpmb_key, 0, sizeof(mux_dev->rpmb_key)); ++err_rpmb_key_get: ++ rpmb_mux_hmac_256_free(mux_dev); ++ device_destroy(rpmb_mux_class, rpmb_mux_devt); ++ class_destroy(rpmb_mux_class); ++ cdev_del(&mux_dev->cdev); ++ kfree(mux_dev); ++ unregister_chrdev_region(rpmb_mux_devt, 0); ++ ++ return ret; + } + + static void rpmb_remove_device(struct device *dev, struct class_interface *intf) +@@ -660,19 +689,6 @@ static int __init rpmb_mux_init(void) + goto err_rpmb_mux_hmac_256_alloc; + } + +- ret = rpmb_key_retrieval(mux_dev->rpmb_key); +- if (ret) { +- pr_err("rpmb_key_retrieval failed.\n"); +- goto err_rpmb_key_retrieval; +- } +- +- ret = crypto_shash_setkey(mux_dev->hash_desc->tfm, +- mux_dev->rpmb_key, 32); +- if (ret) { +- pr_err("set key failed = %d\n", ret); +- goto err_crypto_shash_setkey; +- } +- + mux_dev->rpmb_interface.add_dev = rpmb_add_device; + mux_dev->rpmb_interface.remove_dev = rpmb_remove_device; + mux_dev->rpmb_interface.class = &rpmb_class; +@@ -686,10 +702,6 @@ static int __init rpmb_mux_init(void) + return 0; + + err_class_interface_register: +-err_crypto_shash_setkey: +- memset(mux_dev->rpmb_key, 0, sizeof(mux_dev->rpmb_key)); +-err_rpmb_key_retrieval: +- rpmb_mux_hmac_256_free(mux_dev); + err_rpmb_mux_hmac_256_alloc: + device_destroy(rpmb_mux_class, rpmb_mux_devt); + err_device_create: +-- 
+2.17.1 + diff --git a/patches/0028-serial-8250_port-properly-handle-runtime-PM-in-IRQ.lpss b/patches/0028-serial-8250_port-properly-handle-runtime-PM-in-IRQ.lpss new file mode 100644 index 0000000000..a96cbc618f --- /dev/null +++ b/patches/0028-serial-8250_port-properly-handle-runtime-PM-in-IRQ.lpss @@ -0,0 +1,48 @@ +From f825990bebfd0f249c72d531ee431088c89035fb Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 12 Sep 2016 13:55:22 +0300 +Subject: [PATCH 28/40] serial: 8250_port: properly handle runtime PM in IRQ + +We can't and basically don't need to call runtime PM in IRQ handler. If IRQ is +ours, device must be powered on. Otherwise check if the device is powered off +and return immediately. + +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_port.c | 16 +++++++++------- + 1 file changed, 9 insertions(+), 7 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index aa5357366f9c..b16c93c6b567 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -1840,17 +1840,19 @@ EXPORT_SYMBOL_GPL(serial8250_handle_irq); + + static int serial8250_default_handle_irq(struct uart_port *port) + { +- struct uart_8250_port *up = up_to_u8250p(port); + unsigned int iir; +- int ret; + +- serial8250_rpm_get(up); ++ /* ++ * The IRQ might be shared with other peripherals so we must first ++ * check that are we RPM suspended or not. If we are we assume that ++ * the IRQ was not for us (we shouldn't be RPM suspended when the ++ * interrupt is enabled). 
++ */ ++ if (pm_runtime_suspended(port->dev)) ++ return 0; + + iir = serial_port_in(port, UART_IIR); +- ret = serial8250_handle_irq(port, iir); +- +- serial8250_rpm_put(up); +- return ret; ++ return serial8250_handle_irq(port, iir); + } + + /* +-- +2.17.1 + diff --git a/patches/0028-virtio-framework-support-ACRN-virtio-devices.acrn b/patches/0028-virtio-framework-support-ACRN-virtio-devices.acrn new file mode 100644 index 0000000000..89cbff8091 --- /dev/null +++ b/patches/0028-virtio-framework-support-ACRN-virtio-devices.acrn @@ -0,0 +1,166 @@ +From 102822070b80e719c5653f6feeac5852997c9662 Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Fri, 31 Aug 2018 10:58:58 +0800 +Subject: [PATCH 028/150] virtio framework: support ACRN virtio devices + +To support ACRN virtio devices which use Intel VID:DID, +relax virtio device probing conditions in frontend virtio framework. + +Change-Id: I9a49ad3fbdbd0a615398218382624031d6908526 +Tracked-On: 219551 +Signed-off-by: Hao Li +Reviewed-on: +Reviewed-by: Liu, Fuzhong +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/virtio/Kconfig | 17 +++++++++++++++++ + drivers/virtio/virtio_pci_common.c | 29 +++++++++++++++++++++++++++++ + drivers/virtio/virtio_pci_legacy.c | 10 ++++++++++ + drivers/virtio/virtio_pci_modern.c | 15 +++++++++++++++ + include/uapi/linux/virtio_ids.h | 10 ++++++++++ + 5 files changed, 81 insertions(+) + +diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig +index 078615cf2afc..72b0b6ea5dd6 100644 +--- a/drivers/virtio/Kconfig ++++ b/drivers/virtio/Kconfig +@@ -95,4 +95,21 @@ config VIRTIO_MMIO_CMDLINE_DEVICES + + If unsure, say 'N'. + ++config ACRN_VIRTIO_DEVICES ++ bool "Support for ACRN virtio devices drivers in frontend/guest" ++ default n ++ depends on VIRTIO_PCI ++ ---help--- ++ ACRN virtio devices support in frontend/guest. 
++ ++ This option enables support for ACRN virtio devices which use Intel ++ vendor ID and device IDs, by extending virtio frontend framework ++ a little bit so that virtio PCI driver could be loaded for these ++ devices. ++ ++ Eventually if all devices obtain virtio VID and DIDs, we don't ++ need this option anymore. ++ ++ If unsure, say 'N'. ++ + endif # VIRTIO_MENU +diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c +index f2862f66c2ac..24d1056a2ba8 100644 +--- a/drivers/virtio/virtio_pci_common.c ++++ b/drivers/virtio/virtio_pci_common.c +@@ -493,6 +493,35 @@ static const struct dev_pm_ops virtio_pci_pm_ops = { + /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ + static const struct pci_device_id virtio_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) }, ++#ifdef CONFIG_ACRN_VIRTIO_DEVICES ++ /* ++ * To support ACRN virtio devices which haven't obtained valid ++ * virtio VID:DID in time, we relax the probing conditions a little. 
++ */ ++#define ACRN_VIRTIO_DEVICE_ID_RPMB 0x8601 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, ACRN_VIRTIO_DEVICE_ID_RPMB) }, ++ ++#define ACRN_VIRTIO_DEVICE_ID_HECI 0x8602 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, ACRN_VIRTIO_DEVICE_ID_HECI) }, ++ ++#define ACRN_VIRTIO_DEVICE_ID_AUDIO 0x8603 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, ACRN_VIRTIO_DEVICE_ID_AUDIO) }, ++ ++#define ACRN_VIRTIO_DEVICE_ID_IPU 0x8604 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, ACRN_VIRTIO_DEVICE_ID_IPU) }, ++ ++#define ACRN_VIRTIO_DEVICE_ID_TSN 0x8605 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, ACRN_VIRTIO_DEVICE_ID_TSN) }, ++ ++#define ACRN_VIRTIO_DEVICE_ID_HYPERDMABUF 0x8606 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, ACRN_VIRTIO_DEVICE_ID_HYPERDMABUF) }, ++ ++#define ACRN_VIRTIO_DEVICE_ID_HDCP 0x8607 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, ACRN_VIRTIO_DEVICE_ID_HDCP) }, ++ ++#define ACRN_VIRTIO_DEVICE_ID_COREU 0x8608 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, ACRN_VIRTIO_DEVICE_ID_COREU) }, ++#endif /* CONFIG_ACRN_VIRTIO_DEVICES */ + { 0 } + }; + +diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c +index d62e9835aeec..e9e31d9b50c3 100644 +--- a/drivers/virtio/virtio_pci_legacy.c ++++ b/drivers/virtio/virtio_pci_legacy.c +@@ -214,9 +214,19 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev) + struct pci_dev *pci_dev = vp_dev->pci_dev; + int rc; + ++#ifdef CONFIG_ACRN_VIRTIO_DEVICES ++ /* ++ * To support ACRN virtio devices which haven't obtained valid ++ * virtio VID:DID in time, we relax the probing conditions a little. ++ */ ++ if (pci_dev->vendor == PCI_VENDOR_ID_REDHAT_QUMRANET && ++ (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)) ++ return -ENODEV; ++#else + /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. 
*/ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) + return -ENODEV; ++#endif /* CONFIG_ACRN_VIRTIO_DEVICES */ + + if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { + printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", +diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c +index 7abcc50838b8..cb00704c1973 100644 +--- a/drivers/virtio/virtio_pci_modern.c ++++ b/drivers/virtio/virtio_pci_modern.c +@@ -587,11 +587,26 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) + + check_offsets(); + ++#ifdef CONFIG_ACRN_VIRTIO_DEVICES ++ /* ++ * To support ACRN virtio devices which haven't obtained valid ++ * virtio VID:DID in time, we relax the probing conditions a little. ++ */ ++ if (pci_dev->vendor == PCI_VENDOR_ID_REDHAT_QUMRANET && ++ (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)) ++ return -ENODEV; ++ ++ if ((pci_dev->vendor == PCI_VENDOR_ID_REDHAT_QUMRANET && ++ pci_dev->device < 0x1040) || ++ (pci_dev->vendor == PCI_VENDOR_ID_INTEL && ++ pci_dev->device < 0x8640)) { ++#else + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) + return -ENODEV; + + if (pci_dev->device < 0x1040) { ++#endif /* CONFIG_ACRN_VIRTIO_DEVICES */ + /* Transitional devices: use the PCI subsystem device id as + * virtio device id, same as legacy driver always did. 
+ */ +diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h +index 585e07b27333..9c3c83256f78 100644 +--- a/include/uapi/linux/virtio_ids.h ++++ b/include/uapi/linux/virtio_ids.h +@@ -47,4 +47,14 @@ + #define VIRTIO_ID_FS 26 /* virtio filesystem */ + #define VIRTIO_ID_PMEM 27 /* virtio pmem */ + ++/* ACRN virtio device types */ ++#define VIRTIO_ID_RPMB 0xFFFF ++#define VIRTIO_ID_HECI 0xFFFE ++#define VIRTIO_ID_AUDIO 0xFFFD ++#define VIRTIO_ID_IPU 0xFFFC ++#define VIRTIO_ID_TSN 0xFFFB ++#define VIRTIO_ID_HYPERDMABUF 0xFFFA ++#define VIRTIO_ID_HDCP 0xFFF9 ++#define VIRTIO_ID_COREU 0xFFF8 ++ + #endif /* _LINUX_VIRTIO_IDS_H */ +-- +2.17.1 + diff --git a/patches/0029-ASoC-Intel-Refactor-probing-of-ACPI-devices.audio b/patches/0029-ASoC-Intel-Refactor-probing-of-ACPI-devices.audio new file mode 100644 index 0000000000..da7df25c9e --- /dev/null +++ b/patches/0029-ASoC-Intel-Refactor-probing-of-ACPI-devices.audio @@ -0,0 +1,309 @@ +From 06fe49d45d661f107099bbdb09a1745e53cd1130 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Fri, 16 Aug 2019 18:17:38 +0200 +Subject: [PATCH 029/193] ASoC: Intel: Refactor probing of ACPI devices + +Baytrail and Haswell ACPI loading is now separated and no longer +clutters common code. Let's improve the loading procedure and remove +some superfluous members. + +This change removes sst_pdata::resindex_dma_base as it is a duplication +of dma_base. dma_base field has had it's type changed to allow for -1 +(not used) value. + +ACPI descriptor: sst_acpi_desc loses machines field and sst_id - now +accessed via sst_pdata::boards and sst_pdata::id respectively. +Cleanup consists mainly of legacy platform-specific probe routines +being provided for each descendant. Prevents code duplications, +especially for HSW/ BDW case while not losing any readability. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/baytrail/acpi.c | 43 +++++++++++----- + sound/soc/intel/common/sst-acpi.c | 21 +++----- + sound/soc/intel/common/sst-dsp.h | 8 +-- + sound/soc/intel/common/sst-firmware.c | 2 +- + sound/soc/intel/haswell/acpi.c | 65 +++++++++++++++---------- + sound/soc/intel/skylake/skl-sst-utils.c | 2 +- + 6 files changed, 82 insertions(+), 59 deletions(-) + +diff --git a/sound/soc/intel/baytrail/acpi.c b/sound/soc/intel/baytrail/acpi.c +index ba6590309a1f..b1c2f3369427 100644 +--- a/sound/soc/intel/baytrail/acpi.c ++++ b/sound/soc/intel/baytrail/acpi.c +@@ -11,25 +11,46 @@ + #include + #include "../common/sst-dsp.h" + +-static struct sst_acpi_desc byt_acpi_desc = { +- .drv_name = "baytrail-pcm-audio", +- .machines = snd_soc_acpi_intel_baytrail_legacy_machines, +- .resindex_lpe_base = 0, +- .resindex_pcicfg_base = 1, +- .resindex_fw_base = 2, +- .irqindex_host_ipc = 5, +- .sst_id = SST_DEV_ID_BYT, +- .resindex_dma_base = -1, ++static struct sst_pdata byt_desc = { ++ .id = SST_DEV_ID_BYT, ++ .fw_name = "intel/fw_sst_0f28.bin-48kHz_i2s_master", ++ .boards = snd_soc_acpi_intel_baytrail_legacy_machines, ++ .dma_base = -1, + }; + + static const struct acpi_device_id byt_acpi_ids[] = { +- { "80860F28", (unsigned long)&byt_acpi_desc }, ++ { "80860F28", (unsigned long)&byt_desc }, + { } + }; + MODULE_DEVICE_TABLE(acpi, byt_acpi_ids); + ++static int byt_acpi_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct sst_acpi_desc *acpi_desc; ++ const struct acpi_device_id *id; ++ ++ id = acpi_match_device(dev->driver->acpi_match_table, dev); ++ if (!id) ++ return -ENODEV; ++ ++ acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); ++ if (!acpi_desc) ++ return -ENOMEM; ++ ++ acpi_desc->drv_name = "baytrail-pcm-audio"; ++ acpi_desc->pdata = (struct sst_pdata *)id->driver_data; ++ acpi_desc->resindex_lpe_base = 0; ++ acpi_desc->resindex_pcicfg_base = 1; ++ acpi_desc->resindex_fw_base = 2; ++ 
acpi_desc->irqindex_host_ipc = 5; ++ platform_set_drvdata(pdev, acpi_desc); ++ ++ return sst_dsp_acpi_probe(pdev); ++} ++ + static struct platform_driver byt_acpi_driver = { +- .probe = sst_dsp_acpi_probe, ++ .probe = byt_acpi_probe, + .remove = sst_dsp_acpi_remove, + .driver = { + .name = "byt-acpi", +diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c +index 3b3c8894a65a..6f0526b33429 100644 +--- a/sound/soc/intel/common/sst-acpi.c ++++ b/sound/soc/intel/common/sst-acpi.c +@@ -17,7 +17,6 @@ + struct sst_acpi_priv { + struct platform_device *pdev_mach; + struct platform_device *pdev_pcm; +- struct sst_pdata sst_pdata; + struct sst_acpi_desc *desc; + struct snd_soc_acpi_mach *mach; + }; +@@ -27,8 +26,8 @@ static void sst_acpi_fw_cb(const struct firmware *fw, void *context) + struct platform_device *pdev = context; + struct device *dev = &pdev->dev; + struct sst_acpi_priv *sst_acpi = platform_get_drvdata(pdev); +- struct sst_pdata *sst_pdata = &sst_acpi->sst_pdata; + struct sst_acpi_desc *desc = sst_acpi->desc; ++ struct sst_pdata *sst_pdata = desc->pdata; + struct snd_soc_acpi_mach *mach = sst_acpi->mach; + + sst_pdata->fw = fw; +@@ -51,7 +50,6 @@ static void sst_acpi_fw_cb(const struct firmware *fw, void *context) + + int sst_dsp_acpi_probe(struct platform_device *pdev) + { +- const struct acpi_device_id *id; + struct device *dev = &pdev->dev; + struct sst_acpi_priv *sst_acpi; + struct sst_pdata *sst_pdata; +@@ -64,27 +62,20 @@ int sst_dsp_acpi_probe(struct platform_device *pdev) + if (sst_acpi == NULL) + return -ENOMEM; + +- id = acpi_match_device(dev->driver->acpi_match_table, dev); +- if (!id) +- return -ENODEV; +- +- desc = (struct sst_acpi_desc *)id->driver_data; +- mach = snd_soc_acpi_find_machine(desc->machines); ++ desc = platform_get_drvdata(pdev); ++ sst_pdata = desc->pdata; ++ mach = snd_soc_acpi_find_machine(sst_pdata->boards); + if (mach == NULL) { + dev_err(dev, "No matching ASoC machine driver found\n"); + return 
-ENODEV; + } + +- sst_pdata = &sst_acpi->sst_pdata; +- sst_pdata->id = desc->sst_id; + sst_pdata->dma_dev = dev; + sst_acpi->desc = desc; + sst_acpi->mach = mach; + +- sst_pdata->resindex_dma_base = desc->resindex_dma_base; +- if (desc->resindex_dma_base >= 0) { ++ if (sst_pdata->dma_base >= 0) { + sst_pdata->dma_engine = desc->dma_engine; +- sst_pdata->dma_base = desc->resindex_dma_base; + sst_pdata->dma_size = desc->dma_size; + } + +@@ -141,7 +132,7 @@ EXPORT_SYMBOL_GPL(sst_dsp_acpi_probe); + int sst_dsp_acpi_remove(struct platform_device *pdev) + { + struct sst_acpi_priv *sst_acpi = platform_get_drvdata(pdev); +- struct sst_pdata *sst_pdata = &sst_acpi->sst_pdata; ++ struct sst_pdata *sst_pdata = sst_acpi->desc->pdata; + + platform_device_unregister(sst_acpi->pdev_mach); + if (!IS_ERR_OR_NULL(sst_acpi->pdev_pcm)) +diff --git a/sound/soc/intel/common/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h +index 6326c7ba10b8..0f0388fe4efe 100644 +--- a/sound/soc/intel/common/sst-dsp.h ++++ b/sound/soc/intel/common/sst-dsp.h +@@ -171,16 +171,13 @@ struct platform_device; + /* Descriptor for setting up SST platform data */ + struct sst_acpi_desc { + const char *drv_name; +- struct snd_soc_acpi_mach *machines; ++ struct sst_pdata *pdata; + /* Platform resource indexes. 
Must set to -1 if not used */ + int resindex_lpe_base; + int resindex_pcicfg_base; + int resindex_fw_base; + int irqindex_host_ipc; + int resindex_dma_base; +- /* Unique number identifying the SST core on platform */ +- int sst_id; +- /* DMA only valid when resindex_dma_base != -1*/ + int dma_engine; + int dma_size; + }; +@@ -205,8 +202,7 @@ struct sst_pdata { + const struct firmware *fw; + + /* DMA */ +- int resindex_dma_base; /* other fields invalid if equals to -1 */ +- u32 dma_base; ++ int dma_base; /* other fields invalid if equals to -1 */ + u32 dma_size; + int dma_engine; + struct device *dma_dev; +diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c +index 6b6af11c32c3..61d3e6e46b98 100644 +--- a/sound/soc/intel/common/sst-firmware.c ++++ b/sound/soc/intel/common/sst-firmware.c +@@ -268,7 +268,7 @@ static int sst_dma_new(struct sst_dsp *sst) + struct resource mem; + int ret = 0; + +- if (sst->pdata->resindex_dma_base == -1) ++ if (sst->pdata->dma_base == -1) + /* DMA is not used, return and squelsh error messages */ + return 0; + +diff --git a/sound/soc/intel/haswell/acpi.c b/sound/soc/intel/haswell/acpi.c +index 0c7da697437c..b9e1b88c0fce 100644 +--- a/sound/soc/intel/haswell/acpi.c ++++ b/sound/soc/intel/haswell/acpi.c +@@ -15,41 +15,56 @@ + #define SST_WPT_DSP_DMA_ADDR_OFFSET 0x0FE000 + #define SST_LPT_DSP_DMA_SIZE (1024 - 1) + +-static struct sst_acpi_desc hsw_acpi_desc = { +- .drv_name = "haswell-pcm-audio", +- .machines = snd_soc_acpi_intel_haswell_machines, +- .resindex_lpe_base = 0, +- .resindex_pcicfg_base = 1, +- .resindex_fw_base = -1, +- .irqindex_host_ipc = 0, +- .sst_id = SST_DEV_ID_LYNX_POINT, +- .dma_engine = SST_DMA_TYPE_DW, +- .resindex_dma_base = SST_LPT_DSP_DMA_ADDR_OFFSET, +- .dma_size = SST_LPT_DSP_DMA_SIZE, ++static struct sst_pdata hsw_desc = { ++ .id = SST_DEV_ID_LYNX_POINT, ++ .fw_name = "intel/IntcSST1.bin", ++ .boards = snd_soc_acpi_intel_haswell_machines, ++ .dma_base = 
SST_LPT_DSP_DMA_ADDR_OFFSET, + }; + +-static struct sst_acpi_desc bdw_acpi_desc = { +- .drv_name = "haswell-pcm-audio", +- .machines = snd_soc_acpi_intel_broadwell_machines, +- .resindex_lpe_base = 0, +- .resindex_pcicfg_base = 1, +- .resindex_fw_base = -1, +- .irqindex_host_ipc = 0, +- .sst_id = SST_DEV_ID_WILDCAT_POINT, +- .dma_engine = SST_DMA_TYPE_DW, +- .resindex_dma_base = SST_WPT_DSP_DMA_ADDR_OFFSET, +- .dma_size = SST_LPT_DSP_DMA_SIZE, ++static struct sst_pdata bdw_desc = { ++ .id = SST_DEV_ID_WILDCAT_POINT, ++ .fw_name = "intel/IntcSST2.bin", ++ .boards = snd_soc_acpi_intel_broadwell_machines, ++ .dma_base = SST_WPT_DSP_DMA_ADDR_OFFSET, + }; + + static const struct acpi_device_id hsw_acpi_ids[] = { +- { "INT33C8", (unsigned long)&hsw_acpi_desc }, +- { "INT3438", (unsigned long)&bdw_acpi_desc }, ++ { "INT33C8", (unsigned long)&hsw_desc }, ++ { "INT3438", (unsigned long)&bdw_desc }, + { } + }; + MODULE_DEVICE_TABLE(acpi, hsw_acpi_ids); + ++static int hsw_acpi_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct sst_acpi_desc *acpi_desc; ++ const struct acpi_device_id *id; ++ ++ id = acpi_match_device(dev->driver->acpi_match_table, dev); ++ if (!id) ++ return -ENODEV; ++ ++ acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); ++ if (!acpi_desc) ++ return -ENOMEM; ++ ++ acpi_desc->drv_name = "haswell-pcm-audio"; ++ acpi_desc->pdata = (struct sst_pdata *)id->driver_data; ++ acpi_desc->resindex_lpe_base = 0; ++ acpi_desc->resindex_pcicfg_base = 1; ++ acpi_desc->resindex_fw_base = -1; ++ acpi_desc->irqindex_host_ipc = 0; ++ acpi_desc->dma_engine = SST_DMA_TYPE_DW; ++ acpi_desc->dma_size = SST_LPT_DSP_DMA_SIZE; ++ platform_set_drvdata(pdev, acpi_desc); ++ ++ return sst_dsp_acpi_probe(pdev); ++} ++ + static struct platform_driver hsw_acpi_driver = { +- .probe = sst_dsp_acpi_probe, ++ .probe = hsw_acpi_probe, + .remove = sst_dsp_acpi_remove, + .driver = { + .name = "hsw-acpi", +diff --git 
a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index 67ff31102d6e..f6d354811dd0 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -403,7 +403,7 @@ int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, + + pdata->id = skl->pci->device; + pdata->irq = irq; +- pdata->resindex_dma_base = -1; ++ pdata->dma_base = -1; + skl->dev = dev; + pdata->dsp = skl; + INIT_LIST_HEAD(&skl->module_list); +-- +2.17.1 + diff --git a/patches/0029-Limit-to-output-trusty-lk-log-on-debug-version.trusty b/patches/0029-Limit-to-output-trusty-lk-log-on-debug-version.trusty new file mode 100644 index 0000000000..87d28e9917 --- /dev/null +++ b/patches/0029-Limit-to-output-trusty-lk-log-on-debug-version.trusty @@ -0,0 +1,70 @@ +From bf27ef0993efb5acc6f5e6322f3844c26c24515c Mon Sep 17 00:00:00 2001 +From: yingbinx +Date: Wed, 22 Feb 2017 14:28:03 +0800 +Subject: [PATCH 29/63] Limit to output trusty/lk log on debug version + + Modified trusty_dump_log() to only output lk side's log on debug version. + This is to avoid the condition that tipc drivers will print lots of info/log + from lk side at one time to serial console on release version. + Details may reference OAM-42979. 
+ +Change-Id: I28681a97a037d08a97d13b8314ab05f4f13b2309 +Tracked-On: OAM-43042 +Tracked-On: OAM-42979 +Signed-off-by: yingbinx +Signed-off-by: weideng +Reviewed-on: #569747 +--- + drivers/trusty/trusty-log.c | 15 +++++++++++---- + 1 file changed, 11 insertions(+), 4 deletions(-) + +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +index 8091a596a5e3..4200e901d925 100644 +--- a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -63,7 +63,7 @@ static int log_read_line(struct trusty_log_state *s, int put, int get) + return i; + } + +-static void trusty_dump_logs(struct trusty_log_state *s) ++static void trusty_dump_logs(struct trusty_log_state *s, bool dump_panic_log) + { + struct log_rb *log = s->log; + uint32_t get, put, alloc; +@@ -99,7 +99,10 @@ static void trusty_dump_logs(struct trusty_log_state *s) + get = alloc - log->sz; + continue; + } +- pr_info("trusty: %s", s->line_buffer); ++ ++ if (dump_panic_log) ++ pr_info("trusty: %s", s->line_buffer); ++ + get += read_chars; + } + s->get = get; +@@ -116,7 +119,11 @@ static int trusty_log_call_notify(struct notifier_block *nb, + + s = container_of(nb, struct trusty_log_state, call_notifier); + spin_lock_irqsave(&s->lock, flags); +- trusty_dump_logs(s); ++#ifdef CONFIG_DEBUG_INFO ++ trusty_dump_logs(s, true); ++#else ++ trusty_dump_logs(s, false); ++#endif + spin_unlock_irqrestore(&s->lock, flags); + return NOTIFY_OK; + } +@@ -133,7 +140,7 @@ static int trusty_log_panic_notify(struct notifier_block *nb, + s = container_of(nb, struct trusty_log_state, panic_notifier); + pr_info("trusty-log panic notifier - trusty version %s", + trusty_version_str_get(s->trusty_dev)); +- trusty_dump_logs(s); ++ trusty_dump_logs(s, true); + return NOTIFY_OK; + } + +-- +2.17.1 + diff --git a/patches/0029-VHM-sync-public-header-file-acrn_common.h.acrn b/patches/0029-VHM-sync-public-header-file-acrn_common.h.acrn new file mode 100644 index 0000000000..17189b80d9 --- /dev/null +++ 
b/patches/0029-VHM-sync-public-header-file-acrn_common.h.acrn @@ -0,0 +1,42 @@ +From eb153bc5553c5f9454887093d3897caa05178f3c Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:58 +0800 +Subject: [PATCH 029/150] VHM: sync public header file acrn_common.h + +Change-Id: I1e0ac4d26b22cda4d1db81a83dca8d8806405a8c +Tracked-On: 212688 +Signed-off-by: Jason Chen CJ +--- + include/linux/vhm/acrn_common.h | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 29af96a7ee95..e34970656b56 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -150,7 +150,7 @@ struct acrn_create_vm { + int32_t vmid; /* OUT: return vmid to VHM. Keep it first field */ + uint32_t vcpu_num; /* IN: VM vcpu number */ + uint8_t GUID[16]; /* IN: GUID of this vm */ +- uint8_t trusty_enabled;/* IN: whether trusty is enabled */ ++ uint8_t secure_world_enabled;/* IN: whether Secure World is enabled */ + uint8_t reserved[31]; /* Reserved for future use */ + } __attribute__((aligned(8))); + +@@ -203,4 +203,13 @@ struct acrn_vm_pci_msix_remap { + uint32_t vector_ctl; + } __attribute__((aligned(8))); + ++/* It's designed to support passing DM config data pointer, based on it, ++ * hypervisor would parse then pass DM defined configration to GUEST vcpu ++ * when booting guest VM. ++ * the address 0xd0000 here is designed by DM, as it arranged all memory ++ * layout below 1M, DM should make sure there is no overlap for the address ++ * 0xd0000 usage. 
++ */ ++#define GUEST_CFG_OFFSET 0xd0000 ++ + #endif /* ACRN_COMMON_H */ +-- +2.17.1 + diff --git a/patches/0029-drm-i915-gtt-Preallocate-Braswell-top-level-page-direc.drm b/patches/0029-drm-i915-gtt-Preallocate-Braswell-top-level-page-direc.drm new file mode 100644 index 0000000000..ba2f6e3818 --- /dev/null +++ b/patches/0029-drm-i915-gtt-Preallocate-Braswell-top-level-page-direc.drm @@ -0,0 +1,92 @@ +From 0e028d72a38863cc0b17ee20452420316436ab26 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Fri, 23 Aug 2019 15:14:21 +0100 +Subject: [PATCH 029/690] drm/i915/gtt: Preallocate Braswell top-level page + directory + +In order for the Braswell top-level PD to remain the same from the time +of request construction to its submission onto HW, as we may be +asynchronously rewriting the page tables (thus changing the expected +register state after having already stored the old addresses in the +request), the top level PD must be preallocated. + +So wave goodbye to our lazy allocation of those 4x2 pages. + +v2: A little bit of write-flushing required (presumably it always has +been required, but now we are more susceptible and it is showing up!) + +v3: Put back the forced-PD-reload on every batch, we can't survive +without it and explicitly marking the context for PD reload makes +Braswell turn nasty. 
+ +Signed-off-by: Chris Wilson +Cc: Mika Kuoppala +Reviewed-by: Mika Kuoppala +Link: https://patchwork.freedesktop.org/patch/msgid/20190823141421.2398-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gem/i915_gem_context.c | 8 +++++++- + drivers/gpu/drm/i915/i915_gem_gtt.c | 10 +++++----- + 2 files changed, 12 insertions(+), 6 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c +index 1cdfe05514c3..1f735ca9b173 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c +@@ -1003,12 +1003,18 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) + intel_ring_advance(rq, cs); + } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); ++ int err; ++ ++ /* Magic required to prevent forcewake errors! */ ++ err = engine->emit_flush(rq, EMIT_INVALIDATE); ++ if (err) ++ return err; + + cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + +- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES); ++ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; + for (i = GEN8_3LVL_PDPES; i--; ) { + const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); + +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c +index 135f5494463a..0db82921fb38 100644 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c +@@ -168,6 +168,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma, + pte_flags |= PTE_READ_ONLY; + + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); ++ wmb(); + + return 0; + } +@@ -1428,6 +1429,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) + set_pd_entry(pd, idx, pde); + atomic_inc(px_used(pde)); /* keep pinned */ + } ++ wmb(); + + return 0; + } +@@ -1515,11 +1517,9 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct 
drm_i915_private *i915) + } + + if (!i915_vm_is_4lvl(&ppgtt->vm)) { +- if (intel_vgpu_active(i915)) { +- err = gen8_preallocate_top_level_pdp(ppgtt); +- if (err) +- goto err_free_pd; +- } ++ err = gen8_preallocate_top_level_pdp(ppgtt); ++ if (err) ++ goto err_free_pd; + } + + ppgtt->vm.insert_entries = gen8_ppgtt_insert; +-- +2.17.1 + diff --git a/patches/0029-net-stmmac-Bugfix-on-stmmac_interrupt-for-WOL.connectivity b/patches/0029-net-stmmac-Bugfix-on-stmmac_interrupt-for-WOL.connectivity new file mode 100644 index 0000000000..483404a40b --- /dev/null +++ b/patches/0029-net-stmmac-Bugfix-on-stmmac_interrupt-for-WOL.connectivity @@ -0,0 +1,33 @@ +From 29e099586ea03a980c381cf2ebe9582582b97598 Mon Sep 17 00:00:00 2001 +From: "Tan, Tee Min" +Date: Mon, 25 Jun 2018 10:34:14 +0800 +Subject: [PATCH 029/108] net: stmmac: Bugfix on stmmac_interrupt() for WOL + +Modify pm_wakeup_event to pm_wakeup_hard_event. + +With the newly introduced pm_wakeup_hard_event function, +WOL only able to functions properly with using this new +function instead of pm_wakeup_event. + +Signed-off-by: Tan, Tee Min +Signed-off-by: Voon Weifeng +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 8892fb485773..9b654ec6a2a5 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -4080,7 +4080,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) + queues_count = (rx_cnt > tx_cnt) ? 
rx_cnt : tx_cnt; + + if (priv->irq_wake) +- pm_wakeup_event(priv->device, 0); ++ pm_wakeup_hard_event(priv->device); + + /* To handle GMAC own interrupts */ + if ((priv->plat->has_gmac) || xmac) { +-- +2.17.1 + diff --git a/patches/0029-rpmb-mux-search-additional-argument-when-parsing-.security b/patches/0029-rpmb-mux-search-additional-argument-when-parsing-.security new file mode 100644 index 0000000000..80a37b6b9b --- /dev/null +++ b/patches/0029-rpmb-mux-search-additional-argument-when-parsing-.security @@ -0,0 +1,48 @@ +From e0c7f2a431bb07642f5189ec21c086be903eba6b Mon Sep 17 00:00:00 2001 +From: Qi Yadong +Date: Mon, 25 Feb 2019 16:55:41 +0800 +Subject: [PATCH 29/65] rpmb: mux: search additional argument when parsing seed + from ABL + +Due to ABL design change, it will reword the "dev_sec_info.param_addr=" +to "ABL.svnseed=". + +Change-Id: I6364023fcb16a746f7174f1f97c0725404224546 +Signed-off-by: Qi Yadong +Signed-off-by: Tomas Winkler +--- + drivers/char/rpmb/mux/key.c | 13 +++++++++++-- + 1 file changed, 11 insertions(+), 2 deletions(-) + +diff --git a/drivers/char/rpmb/mux/key.c b/drivers/char/rpmb/mux/key.c +index af73c9db101d..c3fcadcd36b2 100644 +--- a/drivers/char/rpmb/mux/key.c ++++ b/drivers/char/rpmb/mux/key.c +@@ -32,13 +32,22 @@ __setup("ImageBootParamsAddr=", get_sbl_params_addr); + static int __init get_abl_params_addr(char *str) + { + if (kstrtoul(str, 16, &abl_params_addr)) { +- pr_err("Failed to parse dev_sec_info.param\n"); ++ pr_err("Failed to parse seed address from ABL\n"); + return -EINVAL; + } + + return 0; + } +-__setup("dev_sec_info.param_addr=", get_abl_params_addr); ++__setup_param("ABL.svnseed=", get_abl_params_addr, ++ get_abl_params_addr, 0); ++/* ++ * The "dev_sec_info.param_addr=" will be reworded to "ABL.svnseed=" ++ * for new version of ABL. ++ * "dev_sec_info.param_addr" is still kept here in order to be compatible ++ * with old version of ABL. 
++ */ ++__setup_param("dev_sec_info.param_addr=", get_abl_params_addr_compat, ++ get_abl_params_addr, 0); + + int rpmb_key_get(const u8 *dev_id, size_t dev_id_len, + size_t max_partition_num, u8 rpmb_key[][RPMB_KEY_LENGTH]) +-- +2.17.1 + diff --git a/patches/0029-serial-8250_port-factor-out-serial8250_do_restore_con.lpss b/patches/0029-serial-8250_port-factor-out-serial8250_do_restore_con.lpss new file mode 100644 index 0000000000..7d5d27bb97 --- /dev/null +++ b/patches/0029-serial-8250_port-factor-out-serial8250_do_restore_con.lpss @@ -0,0 +1,149 @@ +From 98d7db1b182b8a1e1bc2b46b988c1f2605356042 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Tue, 11 Oct 2016 18:33:54 +0300 +Subject: [PATCH 29/40] serial: 8250_port: factor out + serial8250_do_restore_context() + +The new function serial8250_do_restore_context() is used to write the saved +register values to the hardware. It is used in serial8250_do_set_termios() and +will be used by the individual drivers to restore context when resuming. 
+ +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_port.c | 65 +++++++++++++++++++---------- + include/linux/serial_8250.h | 5 +++ + 2 files changed, 47 insertions(+), 23 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index b16c93c6b567..3302b5d81c17 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -2551,6 +2551,42 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, + port->uartclk); + } + ++void serial8250_do_restore_context(struct uart_port *port) ++{ ++ struct uart_8250_port *up = up_to_u8250p(port); ++ ++ /* Write extended features at first */ ++ if (up->capabilities & UART_CAP_EFR) { ++ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); ++ if (port->flags & UPF_EXAR_EFR) ++ serial_port_out(port, UART_XR_EFR, up->efr); ++ else ++ serial_port_out(port, UART_EFR, up->efr); ++ } ++ ++ serial8250_set_divisor(port, up->baud, up->quot, up->frac); ++ ++ /* ++ * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR ++ * is written without DLAB set, this mode will be disabled. 
++ */ ++ if (port->type == PORT_16750) ++ serial_port_out(port, UART_FCR, up->fcr); ++ ++ serial_port_out(port, UART_LCR, up->lcr); /* reset DLAB */ ++ if (port->type != PORT_16750) { ++ /* emulated UARTs (Lucent Venus 167x) need two steps */ ++ if (up->fcr & UART_FCR_ENABLE_FIFO) ++ serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO); ++ serial_port_out(port, UART_FCR, up->fcr); /* set fcr */ ++ } ++ serial8250_set_mctrl(port, port->mctrl); ++ ++ /* Enable interrupts at last */ ++ serial_port_out(port, UART_IER, up->ier); ++} ++EXPORT_SYMBOL_GPL(serial8250_do_restore_context); ++ + void + serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) +@@ -2579,6 +2615,9 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, + spin_lock_irqsave(&port->lock, flags); + + up->lcr = cval; /* Save computed LCR */ ++ up->baud = baud; /* Save baud rate */ ++ up->quot = quot; /* Save quot */ ++ up->frac = frac; /* Save fraction */ + + if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) { + /* NOTE: If fifo_bug is not set, a user can set RX_trigger. */ +@@ -2644,8 +2683,6 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, + if (up->capabilities & UART_CAP_RTOIE) + up->ier |= UART_IER_RTOIE; + +- serial_port_out(port, UART_IER, up->ier); +- + if (up->capabilities & UART_CAP_EFR) { + unsigned char efr = 0; + /* +@@ -2656,30 +2693,12 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, + if (termios->c_cflag & CRTSCTS) + efr |= UART_EFR_CTS; + +- serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); +- if (port->flags & UPF_EXAR_EFR) +- serial_port_out(port, UART_XR_EFR, efr); +- else +- serial_port_out(port, UART_EFR, efr); ++ up->efr = efr; + } + +- serial8250_set_divisor(port, baud, quot, frac); ++ /* Write saved values to the registers */ ++ serial8250_do_restore_context(port); + +- /* +- * LCR DLAB must be set to enable 64-byte FIFO mode. 
If the FCR +- * is written without DLAB set, this mode will be disabled. +- */ +- if (port->type == PORT_16750) +- serial_port_out(port, UART_FCR, up->fcr); +- +- serial_port_out(port, UART_LCR, up->lcr); /* reset DLAB */ +- if (port->type != PORT_16750) { +- /* emulated UARTs (Lucent Venus 167x) need two steps */ +- if (up->fcr & UART_FCR_ENABLE_FIFO) +- serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO); +- serial_port_out(port, UART_FCR, up->fcr); /* set fcr */ +- } +- serial8250_set_mctrl(port, port->mctrl); + spin_unlock_irqrestore(&port->lock, flags); + serial8250_rpm_put(up); + +diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h +index c78d8b9bd707..a6db669860f9 100644 +--- a/include/linux/serial_8250.h ++++ b/include/linux/serial_8250.h +@@ -98,6 +98,7 @@ struct uart_8250_port { + bool fifo_bug; /* min RX trigger if enabled */ + unsigned int tx_loadsz; /* transmit fifo load size */ + unsigned char acr; ++ unsigned char efr; + unsigned char fcr; + unsigned char ier; + unsigned char lcr; +@@ -106,6 +107,9 @@ struct uart_8250_port { + unsigned char mcr_force; /* mask of forced bits */ + unsigned char cur_iotype; /* Running I/O type */ + unsigned int rpm_tx_active; ++ unsigned int baud; ++ unsigned int quot; ++ unsigned int frac; + unsigned char canary; /* non-zero during system sleep + * if no_console_suspend + */ +@@ -151,6 +155,7 @@ extern int early_serial_setup(struct uart_port *port); + + extern int early_serial8250_setup(struct earlycon_device *device, + const char *options); ++void serial8250_do_restore_context(struct uart_port *port); + extern void serial8250_do_set_termios(struct uart_port *port, + struct ktermios *termios, struct ktermios *old); + extern void serial8250_do_set_ldisc(struct uart_port *port, +-- +2.17.1 + diff --git a/patches/0030-ASoC-Intel-Skylake-Simplify-skl_sst_ctx_init-declara.audio b/patches/0030-ASoC-Intel-Skylake-Simplify-skl_sst_ctx_init-declara.audio new file mode 100644 index 0000000000..1edf808e75 --- 
/dev/null +++ b/patches/0030-ASoC-Intel-Skylake-Simplify-skl_sst_ctx_init-declara.audio @@ -0,0 +1,151 @@ +From d981c7e1a13bf8b51f87066f4a7953f27defc504 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 19:35:51 +0200 +Subject: [PATCH 030/193] ASoC: Intel: Skylake: Simplify skl_sst_ctx_init + declaration + +'irq' and 'dsp' are redundant parameters of skl_sst_ctx_init. Simplify +its declaration and update each invokes. This yet another step In quest +for simplification of Skylake initialization process. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 4 ++-- + sound/soc/intel/skylake/cnl-sst.c | 4 ++-- + sound/soc/intel/skylake/skl-sst-dsp.h | 3 +-- + sound/soc/intel/skylake/skl-sst-utils.c | 18 +++++++----------- + sound/soc/intel/skylake/skl-sst.c | 4 ++-- + sound/soc/intel/skylake/skl.c | 1 + + 6 files changed, 15 insertions(+), 19 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index af2d18333afc..2c826b3d7c12 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -552,13 +552,13 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + struct sst_dsp *sst; + int ret; + +- ret = skl_sst_ctx_init(dev, irq, fw_name, dsp, &skl_dev); ++ skl = *dsp; ++ ret = skl_sst_ctx_init(skl, fw_name, &skl_dev); + if (ret < 0) { + dev_err(dev, "%s: no device\n", __func__); + return ret; + } + +- skl = *dsp; + sst = skl->dsp; + sst->fw_ops = bxt_fw_ops; + sst->addr.lpe = mmio_base; +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index a206bc140279..90cc1e5a63fc 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -428,13 +428,13 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + struct sst_dsp *sst; + int ret; + +- ret = skl_sst_ctx_init(dev, irq, fw_name, dsp, &cnl_dev); ++ cnl = *dsp; ++ ret = skl_sst_ctx_init(cnl, 
fw_name, &cnl_dev); + if (ret < 0) { + dev_err(dev, "%s: no device\n", __func__); + return ret; + } + +- cnl = *dsp; + sst = cnl->dsp; + sst->fw_ops = cnl_fw_ops; + sst->addr.lpe = mmio_base; +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index a2122577e8c3..68f62c06743b 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -240,8 +240,7 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw); + + void skl_dsp_set_astate_cfg(struct skl_dev *skl, u32 cnt, void *data); + +-int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, +- struct skl_dev **dsp, ++int skl_sst_ctx_init(struct skl_dev *skl, const char *fw_name, + struct sst_pdata *pdata); + int skl_prepare_lib_load(struct skl_dev *skl, struct skl_lib_info *linfo, + struct firmware *stripped_fw, +diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index f6d354811dd0..880c7f75d717 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -394,30 +394,26 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw) + return 0; + } + +-int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name, +- struct skl_dev **dsp, ++int skl_sst_ctx_init(struct skl_dev *skl, const char *fw_name, + struct sst_pdata *pdata) + { +- struct skl_dev *skl = *dsp; + struct sst_dsp *sst; ++ struct device *dev = skl->dev; + + pdata->id = skl->pci->device; +- pdata->irq = irq; ++ pdata->irq = skl->pci->irq; + pdata->dma_base = -1; +- skl->dev = dev; + pdata->dsp = skl; + INIT_LIST_HEAD(&skl->module_list); +- skl->dsp = sst_dsp_new(dev, pdata); +- if (!skl->dsp) { +- dev_err(skl->dev, "%s: no device\n", __func__); ++ sst = sst_dsp_new(dev, pdata); ++ if (!sst) { ++ dev_err(dev, "%s: no device\n", __func__); + return -ENODEV; + } + +- sst = skl->dsp; ++ skl->dsp = sst; + sst->fw_name = fw_name; + 
init_waitqueue_head(&skl->mod_load_wait); +- INIT_LIST_HEAD(&sst->module_list); +- + skl->is_first_boot = true; + + return 0; +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 30d7f5169550..4cf89730b064 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -523,13 +523,13 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, + struct sst_dsp *sst; + int ret; + +- ret = skl_sst_ctx_init(dev, irq, fw_name, dsp, &skl_dev); ++ skl = *dsp; ++ ret = skl_sst_ctx_init(skl, fw_name, &skl_dev); + if (ret < 0) { + dev_err(dev, "%s: no device\n", __func__); + return ret; + } + +- skl = *dsp; + sst = skl->dsp; + sst->addr.lpe = mmio_base; + sst->addr.shim = mmio_base; +diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c +index 141dbbf975ac..54e1f957121d 100644 +--- a/sound/soc/intel/skylake/skl.c ++++ b/sound/soc/intel/skylake/skl.c +@@ -890,6 +890,7 @@ static int skl_create(struct pci_dev *pci, + snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, ext_ops); + bus->use_posbuf = 1; + skl->pci = pci; ++ skl->dev = &pci->dev; + INIT_WORK(&skl->probe_work, skl_probe_work); + bus->bdl_pos_adj = 0; + +-- +2.17.1 + diff --git a/patches/0030-Check-x86_hyper-type-before-doing-hypercall.acrn b/patches/0030-Check-x86_hyper-type-before-doing-hypercall.acrn new file mode 100644 index 0000000000..26f237269b --- /dev/null +++ b/patches/0030-Check-x86_hyper-type-before-doing-hypercall.acrn @@ -0,0 +1,68 @@ +From 5db92a5cd891b541f0a6d45d2ec48803f0420e0e Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:58 +0800 +Subject: [PATCH 030/150] Check x86_hyper type before doing hypercall + +this is to fix native boot failure issue with CONFIG_ACRN + +Change-Id: I735283cbf462c8b79d9742d64950685d6ae552c1 +Tracked-On: +Signed-off-by: Jason Chen CJ +--- + drivers/acrn/acrn_trace.c | 7 +++++++ + drivers/char/vhm/vhm_dev.c | 7 +++++++ + 2 files changed, 14 
insertions(+) + +diff --git a/drivers/acrn/acrn_trace.c b/drivers/acrn/acrn_trace.c +index 31470a3de6ac..856ab650acfd 100644 +--- a/drivers/acrn/acrn_trace.c ++++ b/drivers/acrn/acrn_trace.c +@@ -63,6 +63,8 @@ + #include + #include + ++#include ++ + #include "sbuf.h" + + +@@ -211,6 +213,11 @@ static int __init acrn_trace_init(void) + int ret = 0; + int i, cpu; + ++ if (x86_hyper_type != X86_HYPER_ACRN) { ++ pr_err("acrn_trace: not support acrn hypervisor!\n"); ++ return -EINVAL; ++ } ++ + /* TBD: we could get the native cpu number by hypercall later */ + pr_info("%s, cpu_num %d\n", __func__, nr_cpus); + if (nr_cpus > MAX_NR_CPUS) { +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index ddafa040c15a..200fb0c0ebb8 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -83,6 +83,8 @@ + #include + #include + ++#include ++ + #define DEVICE_NAME "acrn_vhm" + #define CLASS_NAME "vhm" + +@@ -495,6 +497,11 @@ static int __init vhm_init(void) + + pr_info("vhm: initializing\n"); + ++ if (x86_hyper_type != X86_HYPER_ACRN) { ++ pr_err("vhm: not support acrn hypervisor!\n"); ++ return -EINVAL; ++ } ++ + if (hcall_get_api_version(virt_to_phys(&api_version)) < 0) { + pr_err("vhm: failed to get api version from Hypervisor !\n"); + return -EINVAL; +-- +2.17.1 + diff --git a/patches/0030-drm-i915-uc-define-GuC-and-HuC-FWs-for-EHL.drm b/patches/0030-drm-i915-uc-define-GuC-and-HuC-FWs-for-EHL.drm new file mode 100644 index 0000000000..05d0a95bf3 --- /dev/null +++ b/patches/0030-drm-i915-uc-define-GuC-and-HuC-FWs-for-EHL.drm @@ -0,0 +1,46 @@ +From d6fade9fe2d223758e1b85b124127ec34d1d66c6 Mon Sep 17 00:00:00 2001 +From: Daniele Ceraolo Spurio +Date: Mon, 19 Aug 2019 18:23:27 -0700 +Subject: [PATCH 030/690] drm/i915/uc: define GuC and HuC FWs for EHL + +First uc firmware release for EHL. 
+ +Signed-off-by: Daniele Ceraolo Spurio +Cc: Matt Roper +Cc: Anusha Srivatsa +Cc: Michal Wajdeczko +Reviewed-by: Stuart Summers +Tested-by: Matt Roper +Reviewed-by: Matt Roper +Link: https://patchwork.freedesktop.org/patch/msgid/20190820012327.36443-1-daniele.ceraolospurio@intel.com +--- + drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 13 +++++++------ + 1 file changed, 7 insertions(+), 6 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +index bd22bf11adad..296a82603be0 100644 +--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c ++++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +@@ -39,12 +39,13 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, + * Must be ordered based on platform + revid, from newer to older. + */ + #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ +- fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \ +- fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ +- fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \ +- fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ +- fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \ +- fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398)) ++ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ ++ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \ ++ fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ ++ fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \ ++ fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ ++ fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \ ++ fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398)) + + #define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \ + "i915/" \ +-- +2.17.1 + diff --git 
a/patches/0030-net-phy-configure-xpcs-2.5G-speed-mode.connectivity b/patches/0030-net-phy-configure-xpcs-2.5G-speed-mode.connectivity new file mode 100644 index 0000000000..c42cd9daa2 --- /dev/null +++ b/patches/0030-net-phy-configure-xpcs-2.5G-speed-mode.connectivity @@ -0,0 +1,102 @@ +From 90425347269a38c40967a77e0efa711fb1c74ec8 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Sun, 4 Aug 2019 06:47:07 +0800 +Subject: [PATCH 030/108] net: phy: configure xpcs 2.5G speed mode + +Besides setting 2.5G configuration, this patch will also disable +automatic speed mode change. This is due to the 2.5G mode is +using the same functionality as 1G mode except the clock rate is +2.5 times the original rate. Hence, auto-negotiation is disable +to make sure it will only be in 2.5G mode. + +Signed-off-by: Voon Weifeng +--- + drivers/net/phy/dwxpcs.c | 27 +++++++++++++++++++++++++++ + include/linux/dwxpcs.h | 3 +++ + 2 files changed, 30 insertions(+) + +diff --git a/drivers/net/phy/dwxpcs.c b/drivers/net/phy/dwxpcs.c +index f0003cec6871..ebbefb7ba43d 100644 +--- a/drivers/net/phy/dwxpcs.c ++++ b/drivers/net/phy/dwxpcs.c +@@ -16,6 +16,7 @@ + #define XPCS_MDIO_MII_MMD MDIO_MMD_VEND2 + + /* MII MMD registers offsets */ ++#define MDIO_MII_MMD_CTRL 0x0000 /* SR Control */ + #define MDIO_MII_MMD_DIGITAL_CTRL_1 0x8000 /* Digital Control 1 */ + #define MDIO_MII_MMD_AN_CTRL 0x8001 /* AN Control */ + #define MDIO_MII_MMD_AN_STAT 0x8002 /* AN Status */ +@@ -32,6 +33,9 @@ + #define MDIO_MII_MMD_PSE_SYM 0x2 + #define MDIO_MII_MMD_PSE_BOTH 0x3 + ++/* Enable 2.5G Mode */ ++#define MDIO_MII_MMD_DIGI_CTRL_1_EN_2_5G_MODE BIT(2) ++ + /* Automatic Speed Mode Change for MAC side SGMII AN */ + #define MDIO_MII_MMD_DIGI_CTRL_1_MAC_AUTO_SW BIT(9) + +@@ -55,6 +59,11 @@ + #define AN_STAT_SGMII_AN_1000MBPS 0x2 /* 1000 Mbps */ + #define AN_STAT_SGMII_AN_LNKSTS BIT(4) /* Link Status */ + ++/* SR MII MMD Control defines */ ++#define AN_CL37_EN BIT(12) /* Enable Clause 37 auto-nego */ ++#define 
SGMII_SPEED_SS13 BIT(13) /* SGMII speed along with SS6 */ ++#define SGMII_SPEED_SS6 BIT(6) /* SGMII speed along with SS13 */ ++ + enum dwxpcs_state_t { + __DWXPCS_REMOVING, + __DWXPCS_TASK_SCHED, +@@ -95,8 +104,26 @@ static void dwxpcs_init(struct dwxpcs_priv *priv) + struct mii_bus *bus = priv->mdiodev->bus; + int xpcs_addr = priv->mdiodev->addr; + int pcs_mode = priv->pdata->mode; ++ bool speed_2500_en = priv->pdata->speed_2500_en; + int phydata; + ++ if (speed_2500_en) { ++ phydata = xpcs_read(XPCS_MDIO_MII_MMD, ++ MDIO_MII_MMD_DIGITAL_CTRL_1); ++ phydata |= MDIO_MII_MMD_DIGI_CTRL_1_EN_2_5G_MODE; ++ phydata &= ~MDIO_MII_MMD_DIGI_CTRL_1_MAC_AUTO_SW; ++ xpcs_write(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_DIGITAL_CTRL_1, ++ phydata); ++ ++ phydata = xpcs_read(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_CTRL); ++ phydata &= ~AN_CL37_EN; ++ phydata |= SGMII_SPEED_SS6; ++ phydata &= ~SGMII_SPEED_SS13; ++ xpcs_write(XPCS_MDIO_MII_MMD, MDIO_MII_MMD_CTRL, phydata); ++ ++ return; ++ } ++ + if (pcs_mode == DWXPCS_MODE_SGMII_AN) { + /* For AN for SGMII mode, the settings are :- + * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN) +diff --git a/include/linux/dwxpcs.h b/include/linux/dwxpcs.h +index 2082e800ee04..85c2efcecc5a 100644 +--- a/include/linux/dwxpcs.h ++++ b/include/linux/dwxpcs.h +@@ -2,6 +2,8 @@ + #ifndef __LINUX_DWXPCS_H + #define __LINUX_DWXPCS_H + ++#include ++ + enum dwxpcs_pcs_mode { + DWXPCS_MODE_SGMII_AN, + DWXPCS_MODE_1000BASEX_AN, +@@ -11,6 +13,7 @@ struct dwxpcs_platform_data { + int irq; + enum dwxpcs_pcs_mode mode; + int ext_phy_addr; ++ bool speed_2500_en; + }; + + #endif +-- +2.17.1 + diff --git a/patches/0030-rpmb-vritio-FW-fix-rpmb-freeze-restore-code.security b/patches/0030-rpmb-vritio-FW-fix-rpmb-freeze-restore-code.security new file mode 100644 index 0000000000..c6bd30ea23 --- /dev/null +++ b/patches/0030-rpmb-vritio-FW-fix-rpmb-freeze-restore-code.security @@ -0,0 +1,169 @@ +From 6ef0ee75ddb23c6c1a3aa3bb131a65e14e5e4395 Mon Sep 17 00:00:00 2001 +From: Tomas 
Winkler +Date: Tue, 17 Sep 2019 01:21:58 +0300 +Subject: [PATCH 30/65] rpmb: vritio FW: fix rpmb freeze restore code + +We should not rip off the whole memory upon pm freeze, +what is needed so to just release virtio queues. + +Change-Id: I24df3c760bb746e976e2bdc4fe3fed9bb5854979 +Signed-off-by: Tomas Winkler +--- + drivers/char/rpmb/virtio_rpmb.c | 92 +++++++++++++++++++++------------ + 1 file changed, 59 insertions(+), 33 deletions(-) + +diff --git a/drivers/char/rpmb/virtio_rpmb.c b/drivers/char/rpmb/virtio_rpmb.c +index dbfeeeccec0b..ec14165a092a 100644 +--- a/drivers/char/rpmb/virtio_rpmb.c ++++ b/drivers/char/rpmb/virtio_rpmb.c +@@ -57,8 +57,8 @@ static int rpmb_virtio_cmd_seq(struct device *dev, u8 target, + struct virtio_device *vdev = dev_to_virtio(dev); + struct virtio_rpmb_info *vi = vdev->priv; + unsigned int i; +- struct virtio_rpmb_ioc *vio_cmd; +- struct rpmb_ioc_seq_cmd *seq_cmd; ++ struct virtio_rpmb_ioc *vio_cmd = NULL; ++ struct rpmb_ioc_seq_cmd *seq_cmd = NULL; + size_t seq_cmd_sz; + struct scatterlist vio_ioc, vio_seq, frame[3]; + struct scatterlist *sgs[5]; +@@ -71,6 +71,10 @@ static int rpmb_virtio_cmd_seq(struct device *dev, u8 target, + return -EINVAL; + + mutex_lock(&vi->lock); ++ if (IS_ERR(vi->vq)) { ++ ret = PTR_ERR(vi->vq); ++ goto out; ++ } + + vio_cmd = kzalloc(sizeof(*vio_cmd), GFP_KERNEL); + seq_cmd_sz = sizeof(*seq_cmd) + sizeof(struct rpmb_ioc_cmd) * ncmds; +@@ -204,41 +208,39 @@ static int rpmb_virtio_dev_init(struct virtio_rpmb_info *vi) + return ret; + } + +-static int virtio_rpmb_init(struct virtio_device *vdev) ++static int virtio_rpmb_init_vq(struct virtio_device *vdev) + { +- int ret; + struct virtio_rpmb_info *vi; + +- vi = kzalloc(sizeof(*vi), GFP_KERNEL); ++ vi = vdev->priv; + if (!vi) +- return -ENOMEM; +- +- init_waitqueue_head(&vi->have_data); +- mutex_init(&vi->lock); +- vdev->priv = vi; ++ return -EINVAL; + +- /* We expect a single virtqueue. 
*/ + vi->vq = virtio_find_single_vq(vdev, virtio_rpmb_recv_done, "request"); + if (IS_ERR(vi->vq)) { + dev_err(&vdev->dev, "get single vq failed!\n"); +- ret = PTR_ERR(vi->vq); +- goto err; ++ return PTR_ERR(vi->vq); + } ++ return 0; ++} + +- /* create vrpmb device. */ +- ret = rpmb_virtio_dev_init(vi); +- if (ret) { +- dev_err(&vdev->dev, "create vrpmb device failed.\n"); +- goto err; +- } ++static int virtio_rpmb_del_vq(struct virtio_device *vdev) ++{ ++ struct virtio_rpmb_info *vi; + +- dev_info(&vdev->dev, "init done!\n"); ++ vi = vdev->priv; ++ if (!vi) ++ return -EINVAL; + +- return 0; ++ if (vdev->config->reset) ++ vdev->config->reset(vdev); + +-err: +- kfree(vi); +- return ret; ++ if (vdev->config->del_vqs) ++ vdev->config->del_vqs(vdev); ++ ++ vi->vq = ERR_PTR(-EAGAIN); ++ ++ return 0; + } + + static void virtio_rpmb_remove(struct virtio_device *vdev) +@@ -254,30 +256,54 @@ static void virtio_rpmb_remove(struct virtio_device *vdev) + + rpmb_dev_unregister(vi->rdev); + +- if (vdev->config->reset) +- vdev->config->reset(vdev); +- +- if (vdev->config->del_vqs) +- vdev->config->del_vqs(vdev); ++ virtio_rpmb_del_vq(vdev); + ++ vdev->priv = NULL; + kfree(vi); + } + + static int virtio_rpmb_probe(struct virtio_device *vdev) + { +- return virtio_rpmb_init(vdev); ++ int ret; ++ struct virtio_rpmb_info *vi; ++ ++ vi = kzalloc(sizeof(*vi), GFP_KERNEL); ++ if (!vi) ++ return -ENOMEM; ++ ++ init_waitqueue_head(&vi->have_data); ++ mutex_init(&vi->lock); ++ vdev->priv = vi; ++ ++ ret = virtio_rpmb_init_vq(vdev); ++ if (ret) ++ goto err; ++ ++ /* create vrpmb device. 
*/ ++ ret = rpmb_virtio_dev_init(vi); ++ if (ret) { ++ dev_err(&vdev->dev, "create vrpmb device failed.\n"); ++ goto err; ++ } ++ ++ dev_info(&vdev->dev, "init done!\n"); ++ ++ return 0; ++err: ++ vdev->priv = NULL; ++ kfree(vi); ++ return ret; + } + + #ifdef CONFIG_PM_SLEEP + static int virtio_rpmb_freeze(struct virtio_device *vdev) + { +- virtio_rpmb_remove(vdev); +- return 0; ++ return virtio_rpmb_del_vq(vdev); + } + + static int virtio_rpmb_restore(struct virtio_device *vdev) + { +- return virtio_rpmb_init(vdev); ++ return virtio_rpmb_init_vq(vdev); + } + #endif + +-- +2.17.1 + diff --git a/patches/0030-serial-8250_port-Remove-calls-to-runtime-PM.lpss b/patches/0030-serial-8250_port-Remove-calls-to-runtime-PM.lpss new file mode 100644 index 0000000000..ecef508694 --- /dev/null +++ b/patches/0030-serial-8250_port-Remove-calls-to-runtime-PM.lpss @@ -0,0 +1,257 @@ +From 0879b4e5aa39a5b5e77366e44f261c5f5b68cee7 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 29 Jul 2019 17:30:00 +0300 +Subject: [PATCH 30/40] serial: 8250_port: Remove calls to runtime PM + +Since we are going to have runtime PM calls in serial_core.c the individual +drivers do not need them anymore. + +Remove runtime PM calls in 8250 driver. + +Note, the individual drivers, that are using runtime PM facilities, are +prepared for this previously. + +TODO: Prepare OMAP (8250_omap) driver. 
+ +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_port.c | 61 ++++++----------------------- + 1 file changed, 12 insertions(+), 49 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index 3302b5d81c17..e65bb8a8cdc6 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -683,7 +683,12 @@ void serial8250_rpm_get_tx(struct uart_8250_port *p) + rpm_active = xchg(&p->rpm_tx_active, 1); + if (rpm_active) + return; +- pm_runtime_get_sync(p->port.dev); ++ /* ++ * Device has to be powered on at this point. Here we just increase ++ * reference count to prevent autosuspend until the TX FIFO becomes ++ * empty. See also a comment in serial8250_tx_chars(). ++ */ ++ pm_runtime_get_noresume(p->port.dev); + } + EXPORT_SYMBOL_GPL(serial8250_rpm_get_tx); + +@@ -711,8 +716,6 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) + { + unsigned char lcr = 0, efr = 0; + +- serial8250_rpm_get(p); +- + if (p->capabilities & UART_CAP_SLEEP) { + if (p->capabilities & UART_CAP_EFR) { + lcr = serial_in(p, UART_LCR); +@@ -728,8 +731,6 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) + serial_out(p, UART_LCR, lcr); + } + } +- +- serial8250_rpm_put(p); + } + + #ifdef CONFIG_SERIAL_8250_RSA +@@ -1386,13 +1387,9 @@ static void serial8250_stop_rx(struct uart_port *port) + { + struct uart_8250_port *up = up_to_u8250p(port); + +- serial8250_rpm_get(up); +- + up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); + up->port.read_status_mask &= ~UART_LSR_DR; + serial_port_out(port, UART_IER, up->ier); +- +- serial8250_rpm_put(up); + } + + static void __do_stop_tx_rs485(struct uart_8250_port *p) +@@ -1489,7 +1486,6 @@ static void serial8250_stop_tx(struct uart_port *port) + { + struct uart_8250_port *up = up_to_u8250p(port); + +- serial8250_rpm_get(up); + __stop_tx(up); + + /* +@@ -1499,7 +1495,6 @@ static void serial8250_stop_tx(struct uart_port *port) + 
up->acr |= UART_ACR_TXDIS; + serial_icr_write(up, UART_ACR, up->acr); + } +- serial8250_rpm_put(up); + } + + static inline void __start_tx(struct uart_port *port) +@@ -1631,9 +1626,7 @@ static void serial8250_enable_ms(struct uart_port *port) + + up->ier |= UART_IER_MSI; + +- serial8250_rpm_get(up); + serial_port_out(port, UART_IER, up->ier); +- serial8250_rpm_put(up); + } + + void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr) +@@ -1885,15 +1878,11 @@ static unsigned int serial8250_tx_empty(struct uart_port *port) + unsigned long flags; + unsigned int lsr; + +- serial8250_rpm_get(up); +- + spin_lock_irqsave(&port->lock, flags); + lsr = serial_port_in(port, UART_LSR); + up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + spin_unlock_irqrestore(&port->lock, flags); + +- serial8250_rpm_put(up); +- + return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; + } + +@@ -1903,9 +1892,7 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port) + unsigned int status; + unsigned int val; + +- serial8250_rpm_get(up); + status = serial8250_modem_status(up); +- serial8250_rpm_put(up); + + val = serial8250_MSR_to_TIOCM(status); + if (up->gpios) +@@ -1948,7 +1935,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) + struct uart_8250_port *up = up_to_u8250p(port); + unsigned long flags; + +- serial8250_rpm_get(up); + spin_lock_irqsave(&port->lock, flags); + if (break_state == -1) + up->lcr |= UART_LCR_SBC; +@@ -1956,7 +1942,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) + up->lcr &= ~UART_LCR_SBC; + serial_port_out(port, UART_LCR, up->lcr); + spin_unlock_irqrestore(&port->lock, flags); +- serial8250_rpm_put(up); + } + + /* +@@ -2001,33 +1986,21 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits) + + static int serial8250_get_poll_char(struct uart_port *port) + { +- struct uart_8250_port *up = up_to_u8250p(port); + unsigned char lsr; +- int status; +- +- serial8250_rpm_get(up); + + lsr 
= serial_port_in(port, UART_LSR); ++ if (!(lsr & UART_LSR_DR)) ++ return NO_POLL_CHAR; + +- if (!(lsr & UART_LSR_DR)) { +- status = NO_POLL_CHAR; +- goto out; +- } +- +- status = serial_port_in(port, UART_RX); +-out: +- serial8250_rpm_put(up); +- return status; ++ return serial_port_in(port, UART_RX); + } + +- + static void serial8250_put_poll_char(struct uart_port *port, + unsigned char c) + { + unsigned int ier; + struct uart_8250_port *up = up_to_u8250p(port); + +- serial8250_rpm_get(up); + /* + * First save the IER then disable the interrupts + */ +@@ -2049,7 +2022,6 @@ static void serial8250_put_poll_char(struct uart_port *port, + */ + wait_for_xmitr(up, BOTH_EMPTY); + serial_port_out(port, UART_IER, ier); +- serial8250_rpm_put(up); + } + + #endif /* CONFIG_CONSOLE_POLL */ +@@ -2072,7 +2044,6 @@ int serial8250_do_startup(struct uart_port *port) + if (port->iotype != up->cur_iotype) + set_io_from_upio(port); + +- serial8250_rpm_get(up); + if (port->type == PORT_16C950) { + /* Wake up and initialize UART */ + up->acr = 0; +@@ -2152,8 +2123,7 @@ int serial8250_do_startup(struct uart_port *port) + if (!(port->flags & UPF_BUGGY_UART) && + (serial_port_in(port, UART_LSR) == 0xff)) { + pr_info_ratelimited("%s: LSR safety check engaged!\n", port->name); +- retval = -ENODEV; +- goto out; ++ return -ENODEV; + } + + /* +@@ -2235,7 +2205,7 @@ int serial8250_do_startup(struct uart_port *port) + + retval = up->ops->setup_irq(up); + if (retval) +- goto out; ++ return retval; + + /* + * Now, initialize the UART +@@ -2334,10 +2304,7 @@ int serial8250_do_startup(struct uart_port *port) + outb_p(0x80, icp); + inb_p(icp); + } +- retval = 0; +-out: +- serial8250_rpm_put(up); +- return retval; ++ return 0; + } + EXPORT_SYMBOL_GPL(serial8250_do_startup); + +@@ -2353,7 +2320,6 @@ void serial8250_do_shutdown(struct uart_port *port) + struct uart_8250_port *up = up_to_u8250p(port); + unsigned long flags; + +- serial8250_rpm_get(up); + /* + * Disable interrupts from this port + */ +@@ 
-2397,7 +2363,6 @@ void serial8250_do_shutdown(struct uart_port *port) + * the IRQ chain. + */ + serial_port_in(port, UART_RX); +- serial8250_rpm_put(up); + + up->ops->release_irq(up); + } +@@ -2611,7 +2576,6 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, + * Ok, we're now changing the port state. Do it with + * interrupts disabled. + */ +- serial8250_rpm_get(up); + spin_lock_irqsave(&port->lock, flags); + + up->lcr = cval; /* Save computed LCR */ +@@ -2700,7 +2664,6 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, + serial8250_do_restore_context(port); + + spin_unlock_irqrestore(&port->lock, flags); +- serial8250_rpm_put(up); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) +-- +2.17.1 + diff --git a/patches/0030-trusty-ipc-tipc_msg_hdr-structure-support-large-mes.trusty b/patches/0030-trusty-ipc-tipc_msg_hdr-structure-support-large-mes.trusty new file mode 100644 index 0000000000..0132bcb540 --- /dev/null +++ b/patches/0030-trusty-ipc-tipc_msg_hdr-structure-support-large-mes.trusty @@ -0,0 +1,37 @@ +From de79fb3a871b6e3165e2797486e013fc8b0ce443 Mon Sep 17 00:00:00 2001 +From: "Yan, Xiangyang" +Date: Tue, 21 Mar 2017 13:31:33 +0800 +Subject: [PATCH 30/63] trusty-ipc:tipc_msg_hdr structure: support large + message transfer + +len field type of tipc_msg_hdr structure is u16 which will only + handle message length of less than 64K. Change it to u32 to support +larger message. 
+ +Change-Id: I9f08d699842723224a10242d19165fa748a8c8b4 +Tracked-On: OAM-43927 +Signed-off-by: Yan, Xiangyang +Reviewed-on: 575370 +--- + drivers/trusty/trusty-ipc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index 363b0239310a..44843eb811bd 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -71,9 +71,9 @@ struct tipc_dev_config { + struct tipc_msg_hdr { + u32 src; + u32 dst; +- u32 reserved; +- u16 len; ++ u32 len; + u16 flags; ++ u16 reserved; + u8 data[0]; + } __packed; + +-- +2.17.1 + diff --git a/patches/0031-ASoC-Intel-Skylake-Simplify-all-sst_dsp_init-declara.audio b/patches/0031-ASoC-Intel-Skylake-Simplify-all-sst_dsp_init-declara.audio new file mode 100644 index 0000000000..6acd6694b3 --- /dev/null +++ b/patches/0031-ASoC-Intel-Skylake-Simplify-all-sst_dsp_init-declara.audio @@ -0,0 +1,271 @@ +From 511ba512fa04437cbf2790fc83e7ce3688b613a1 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 20:12:31 +0200 +Subject: [PATCH 031/193] ASoC: Intel: Skylake: Simplify all sst_dsp_init + declarations + +SST initializers for Skylake, Apollolake and Cannonlake descendants have +many redundant parameters within their declaration. Simplify them. +Currently, this involves duplication the PCI ioremap code. This will be +addresses on a later note, but is needed to keep SSTs sane during +initialization overhaul. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 19 +++++++++---------- + sound/soc/intel/skylake/cnl-sst-dsp.h | 3 +-- + sound/soc/intel/skylake/cnl-sst.c | 19 +++++++++---------- + sound/soc/intel/skylake/skl-messages.c | 26 ++++---------------------- + sound/soc/intel/skylake/skl-sst-dsp.h | 6 ++---- + sound/soc/intel/skylake/skl-sst.c | 22 ++++++++++------------ + sound/soc/intel/skylake/skl.h | 5 ++--- + 7 files changed, 37 insertions(+), 63 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 2c826b3d7c12..34635c2ff56c 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -545,30 +545,29 @@ static struct sst_pdata skl_dev = { + .ops = &skl_ops, + }; + +-int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dev **dsp) ++int bxt_sst_dsp_init(struct skl_dev *skl, const char *fw_name) + { +- struct skl_dev *skl; + struct sst_dsp *sst; ++ void __iomem *mmio; + int ret; + +- skl = *dsp; + ret = skl_sst_ctx_init(skl, fw_name, &skl_dev); +- if (ret < 0) { +- dev_err(dev, "%s: no device\n", __func__); ++ if (ret) + return ret; +- } + + sst = skl->dsp; + sst->fw_ops = bxt_fw_ops; +- sst->addr.lpe = mmio_base; +- sst->addr.shim = mmio_base; ++ mmio = pci_ioremap_bar(skl->pci, 4); ++ if (!mmio) ++ return -ENXIO; ++ sst->addr.lpe = mmio; ++ sst->addr.shim = mmio; + + sst_dsp_mailbox_init(sst, + (BXT_ADSP_SRAM0_BASE + SKL_FW_REGS_SIZE), SKL_MAILBOX_SIZE, + BXT_ADSP_SRAM1_BASE, SKL_MAILBOX_SIZE); + +- ret = skl_ipc_init(dev, skl); ++ ret = skl_ipc_init(skl->dev, skl); + if (ret) { + skl_dsp_free(sst); + return ret; +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.h b/sound/soc/intel/skylake/cnl-sst-dsp.h +index a465cc42b7e8..02e070fae2ce 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.h ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.h +@@ -87,7 +87,6 @@ void cnl_ipc_op_int_enable(struct sst_dsp *ctx); 
+ void cnl_ipc_op_int_disable(struct sst_dsp *ctx); + bool cnl_ipc_int_status(struct sst_dsp *ctx); + +-int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dev **dsp); ++int cnl_sst_dsp_init(struct skl_dev *skl, const char *fw_name); + + #endif /*__CNL_SST_DSP_H__*/ +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 90cc1e5a63fc..ce966112137b 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -421,30 +421,29 @@ static struct sst_pdata cnl_dev = { + .ops = &cnl_ops, + }; + +-int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dev **dsp) ++int cnl_sst_dsp_init(struct skl_dev *cnl, const char *fw_name) + { +- struct skl_dev *cnl; + struct sst_dsp *sst; ++ void __iomem *mmio; + int ret; + +- cnl = *dsp; + ret = skl_sst_ctx_init(cnl, fw_name, &cnl_dev); +- if (ret < 0) { +- dev_err(dev, "%s: no device\n", __func__); ++ if (ret < 0) + return ret; +- } + + sst = cnl->dsp; + sst->fw_ops = cnl_fw_ops; +- sst->addr.lpe = mmio_base; +- sst->addr.shim = mmio_base; ++ mmio = pci_ioremap_bar(cnl->pci, 4); ++ if (!mmio) ++ return -ENXIO; ++ sst->addr.lpe = mmio; ++ sst->addr.shim = mmio; + + sst_dsp_mailbox_init(sst, + (CNL_ADSP_SRAM0_BASE + SKL_FW_REGS_SIZE), SKL_MAILBOX_SIZE, + CNL_ADSP_SRAM1_BASE, SKL_MAILBOX_SIZE); + +- ret = cnl_ipc_init(dev, cnl); ++ ret = cnl_ipc_init(cnl->dev, cnl); + if (ret) { + skl_dsp_free(sst); + return ret; +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index c52d0d11767d..5f0fb124c0fb 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -189,9 +189,7 @@ const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id) + + int skl_init_dsp(struct skl_dev *skl) + { +- void __iomem *mmio_base; + struct hdac_bus *bus = skl_to_bus(skl); +- int irq = bus->irq; + const struct 
skl_dsp_ops *ops; + int ret; + +@@ -199,34 +197,18 @@ int skl_init_dsp(struct skl_dev *skl) + snd_hdac_ext_bus_ppcap_enable(bus, true); + snd_hdac_ext_bus_ppcap_int_enable(bus, true); + +- /* read the BAR of the ADSP MMIO */ +- mmio_base = pci_ioremap_bar(skl->pci, 4); +- if (mmio_base == NULL) { +- dev_err(bus->dev, "ioremap error\n"); +- return -ENXIO; +- } +- + ops = skl_get_dsp_ops(skl->pci->device); +- if (!ops) { +- ret = -EIO; +- goto unmap_mmio; +- } +- +- ret = ops->init(bus->dev, mmio_base, irq, +- skl->fw_name, &skl); ++ if (!ops) ++ return -EIO; + ++ ret = ops->init(skl, skl->fw_name); + if (ret < 0) +- goto unmap_mmio; ++ return ret; + + skl->dsp_ops = ops; + dev_dbg(bus->dev, "dsp registration status=%d\n", ret); + + return 0; +- +-unmap_mmio: +- iounmap(mmio_base); +- +- return ret; + } + + int skl_free_dsp(struct skl_dev *skl) +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index 68f62c06743b..cfb31b736274 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -219,10 +219,8 @@ int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id); + int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id); + + int skl_dsp_boot(struct sst_dsp *ctx); +-int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dev **dsp); +-int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dev **dsp); ++int skl_sst_dsp_init(struct skl_dev *skl, const char *fw_name); ++int bxt_sst_dsp_init(struct skl_dev *skl, const char *fw_name); + int bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, + int lib_count); + +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 4cf89730b064..8c78c07e28a8 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -516,36 +516,34 @@ static struct 
sst_pdata skl_dev = { + .ops = &skl_ops, + }; + +-int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, +- const char *fw_name, struct skl_dev **dsp) ++int skl_sst_dsp_init(struct skl_dev *skl, const char *fw_name) + { +- struct skl_dev *skl; + struct sst_dsp *sst; ++ void __iomem *mmio; + int ret; + +- skl = *dsp; + ret = skl_sst_ctx_init(skl, fw_name, &skl_dev); +- if (ret < 0) { +- dev_err(dev, "%s: no device\n", __func__); ++ if (ret < 0) + return ret; +- } + + sst = skl->dsp; +- sst->addr.lpe = mmio_base; +- sst->addr.shim = mmio_base; ++ sst->fw_ops = skl_fw_ops; ++ mmio = pci_ioremap_bar(skl->pci, 4); ++ if (!mmio) ++ return -ENXIO; ++ sst->addr.lpe = mmio; ++ sst->addr.shim = mmio; + + sst_dsp_mailbox_init(sst, + (SKL_ADSP_SRAM0_BASE + SKL_FW_REGS_SIZE), SKL_MAILBOX_SIZE, + SKL_ADSP_SRAM1_BASE, SKL_MAILBOX_SIZE); + +- ret = skl_ipc_init(dev, skl); ++ ret = skl_ipc_init(skl->dev, skl); + if (ret) { + skl_dsp_free(sst); + return ret; + } + +- sst->fw_ops = skl_fw_ops; +- + return 0; + } + EXPORT_SYMBOL_GPL(skl_sst_dsp_init); +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index 45e13240a989..c7c908a4df1d 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -12,6 +12,7 @@ + #ifndef __SOUND_SOC_SKL_H + #define __SOUND_SOC_SKL_H + ++#include + #include + #include + #include +@@ -154,9 +155,7 @@ struct skl_machine_pdata { + + struct skl_dsp_ops { + int id; +- int (*init)(struct device *dev, void __iomem *mmio_base, +- int irq, const char *fw_name, +- struct skl_dev **skl_sst); ++ int (*init)(struct skl_dev *skl, const char *fw_name); + }; + + int skl_platform_unregister(struct device *dev); +-- +2.17.1 + diff --git a/patches/0031-VHM-replace-function-name-update_mmio_map-with-update.acrn b/patches/0031-VHM-replace-function-name-update_mmio_map-with-update.acrn new file mode 100644 index 0000000000..71c473defd --- /dev/null +++ 
b/patches/0031-VHM-replace-function-name-update_mmio_map-with-update.acrn @@ -0,0 +1,48 @@ +From 86e086f68532ed03fd78b7a219bd560ceda989da Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:58 +0800 +Subject: [PATCH 031/150] VHM: replace function name update_mmio_map with + update_memmap_attr + +Change-Id: Ia4e4c621d4a8bc6738042cede93b9b145af291f9 +Tracked-On: 212688 +Signed-off-by: Jason Chen CJ +--- + drivers/vhm/vhm_mm.c | 4 ++-- + include/linux/vhm/acrn_vhm_mm.h | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index b475aa91a348..712a905040a4 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -191,11 +191,11 @@ int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, + prot, MAP_UNMAP); + } + +-int update_mmio_map(unsigned long vmid, unsigned long guest_gpa, ++int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, unsigned int prot) + { + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, +- prot, MAP_MMIO); ++ prot, MAP_MEM); + } + + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index f0401ac6a942..2ff1e25b22ce 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -69,7 +69,7 @@ int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, unsigned int prot); + int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, unsigned int prot); +-int update_mmio_map(unsigned long vmid, unsigned long guest_gpa, ++int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, unsigned int prot); + + int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma); +-- +2.17.1 + diff --git 
a/patches/0031-drm-i915-Keep-drm_i915_file_private-around-under-RCU.drm b/patches/0031-drm-i915-Keep-drm_i915_file_private-around-under-RCU.drm new file mode 100644 index 0000000000..87c2b99387 --- /dev/null +++ b/patches/0031-drm-i915-Keep-drm_i915_file_private-around-under-RCU.drm @@ -0,0 +1,125 @@ +From c685ede8177f5992e2743b7dcb275ccd9ed46085 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Fri, 23 Aug 2019 19:14:55 +0100 +Subject: [PATCH 031/690] drm/i915: Keep drm_i915_file_private around under RCU + +Ensure that the drm_i915_file_private continues to exist as we attempt +to remove a request from its list, which may race with the destruction +of the file. + +<6> [38.380714] [IGT] gem_ctx_create: starting subtest basic-files +<0> [42.201329] BUG: spinlock bad magic on CPU#0, kworker/u16:0/7 +<4> [42.201356] general protection fault: 0000 [#1] PREEMPT SMP PTI +<4> [42.201371] CPU: 0 PID: 7 Comm: kworker/u16:0 Tainted: G U 5.3.0-rc5-CI-Patchwork_14169+ #1 +<4> [42.201391] Hardware name: Dell Inc. 
OptiPlex 745 /0GW726, BIOS 2.3.1 05/21/2007 +<4> [42.201594] Workqueue: i915 retire_work_handler [i915] +<4> [42.201614] RIP: 0010:spin_dump+0x5a/0x90 +<4> [42.201625] Code: 00 48 8d 88 c0 06 00 00 48 c7 c7 00 71 09 82 e8 35 ef 00 00 48 85 db 44 8b 4d 08 41 b8 ff ff ff ff 48 c7 c1 0b cd 0f 82 74 0e <44> 8b 83 e0 04 00 00 48 8d 8b c0 06 00 00 8b 55 04 48 89 ee 48 c7 +<4> [42.201660] RSP: 0018:ffffc9000004bd80 EFLAGS: 00010202 +<4> [42.201673] RAX: 0000000000000031 RBX: 6b6b6b6b6b6b6b6b RCX: ffffffff820fcd0b +<4> [42.201688] RDX: 0000000000000000 RSI: ffff88803de266f8 RDI: 00000000ffffffff +<4> [42.201703] RBP: ffff888038381ff8 R08: 00000000ffffffff R09: 000000006b6b6b6b +<4> [42.201718] R10: 0000000041cb0b89 R11: 646162206b636f6c R12: ffff88802a618500 +<4> [42.201733] R13: ffff88802b32c288 R14: ffff888038381ff8 R15: ffff88802b32c250 +<4> [42.201748] FS: 0000000000000000(0000) GS:ffff88803de00000(0000) knlGS:0000000000000000 +<4> [42.201765] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +<4> [42.201778] CR2: 00007f2cefc6d180 CR3: 00000000381ee000 CR4: 00000000000006f0 +<4> [42.201793] Call Trace: +<4> [42.201805] do_raw_spin_lock+0x66/0xb0 +<4> [42.201898] i915_request_retire+0x548/0x7c0 [i915] +<4> [42.201989] retire_requests+0x4d/0x60 [i915] +<4> [42.202078] i915_retire_requests+0x144/0x2e0 [i915] +<4> [42.202169] retire_work_handler+0x10/0x40 [i915] + +Recently, in commit 44c22f3f1a0a ("drm/i915: Serialize insertion into the +file->mm.request_list"), we fixed a race on insertion. Now, it appears +we also have a race with destruction! 
+ +Signed-off-by: Chris Wilson +Cc: Matthew Auld +Reviewed-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190823181455.31910-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 4 +--- + drivers/gpu/drm/i915/i915_drv.c | 2 +- + drivers/gpu/drm/i915/i915_drv.h | 6 +++++- + drivers/gpu/drm/i915/i915_request.c | 13 +++++++------ + 4 files changed, 14 insertions(+), 11 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c +index 1e372420771b..540ef0551789 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c +@@ -50,10 +50,8 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, + if (time_after_eq(request->emitted_jiffies, recent_enough)) + break; + +- if (target) { ++ if (target && xchg(&target->file_priv, NULL)) + list_del(&target->client_link); +- target->file_priv = NULL; +- } + + target = request; + } +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c +index e080151c0696..3acc378a80b2 100644 +--- a/drivers/gpu/drm/i915/i915_drv.c ++++ b/drivers/gpu/drm/i915/i915_drv.c +@@ -1699,7 +1699,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) + i915_gem_release(dev, file); + mutex_unlock(&dev->struct_mutex); + +- kfree(file_priv); ++ kfree_rcu(file_priv, rcu); + + /* Catch up with all the deferred frees from "this" client */ + i915_gem_flush_free_objects(to_i915(dev)); +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index 82b919e51896..b42651a387d9 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -185,7 +185,11 @@ struct i915_mmu_object; + + struct drm_i915_file_private { + struct drm_i915_private *dev_priv; +- struct drm_file *file; ++ ++ union { ++ struct drm_file *file; ++ struct rcu_head rcu; ++ }; + + struct { + spinlock_t lock; +diff --git 
a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c +index 1c5506822dc7..eef70dc68934 100644 +--- a/drivers/gpu/drm/i915/i915_request.c ++++ b/drivers/gpu/drm/i915/i915_request.c +@@ -169,16 +169,17 @@ remove_from_client(struct i915_request *request) + { + struct drm_i915_file_private *file_priv; + +- file_priv = READ_ONCE(request->file_priv); +- if (!file_priv) ++ if (!READ_ONCE(request->file_priv)) + return; + +- spin_lock(&file_priv->mm.lock); +- if (request->file_priv) { ++ rcu_read_lock(); ++ file_priv = xchg(&request->file_priv, NULL); ++ if (file_priv) { ++ spin_lock(&file_priv->mm.lock); + list_del(&request->client_link); +- request->file_priv = NULL; ++ spin_unlock(&file_priv->mm.lock); + } +- spin_unlock(&file_priv->mm.lock); ++ rcu_read_unlock(); + } + + static void free_capture_list(struct i915_request *request) +-- +2.17.1 + diff --git a/patches/0031-mei-buf-drop-running-hook-debug-messages.security b/patches/0031-mei-buf-drop-running-hook-debug-messages.security new file mode 100644 index 0000000000..0cdd47321c --- /dev/null +++ b/patches/0031-mei-buf-drop-running-hook-debug-messages.security @@ -0,0 +1,67 @@ +From dbf00c96b8c2f61c07cbd1b3f14a7038ce9037f4 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Fri, 4 Oct 2019 21:26:59 +0300 +Subject: [PATCH 31/65] mei: buf: drop 'running hook' debug messages. + +Drop 'running hook' debug messages, as this info +can be already retrieved via ftrace. 
+ +Signed-off-by: Tomas Winkler +Link: https://lore.kernel.org/r/20191004182659.2933-1-tomas.winkler@intel.com +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit 780ee709bdb49c0d3562890855d7ff7919e64075) +--- + drivers/misc/mei/bus-fixup.c | 9 --------- + 1 file changed, 9 deletions(-) + +diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c +index 0a2b99e1af45..9ad9c01ddf41 100644 +--- a/drivers/misc/mei/bus-fixup.c ++++ b/drivers/misc/mei/bus-fixup.c +@@ -46,8 +46,6 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO; + */ + static void number_of_connections(struct mei_cl_device *cldev) + { +- dev_dbg(&cldev->dev, "running hook %s\n", __func__); +- + if (cldev->me_cl->props.max_number_of_connections > 1) + cldev->do_match = 0; + } +@@ -59,8 +57,6 @@ static void number_of_connections(struct mei_cl_device *cldev) + */ + static void blacklist(struct mei_cl_device *cldev) + { +- dev_dbg(&cldev->dev, "running hook %s\n", __func__); +- + cldev->do_match = 0; + } + +@@ -71,8 +67,6 @@ static void blacklist(struct mei_cl_device *cldev) + */ + static void whitelist(struct mei_cl_device *cldev) + { +- dev_dbg(&cldev->dev, "running hook %s\n", __func__); +- + cldev->do_match = 1; + } + +@@ -256,7 +250,6 @@ static void mei_wd(struct mei_cl_device *cldev) + { + struct pci_dev *pdev = to_pci_dev(cldev->dev.parent); + +- dev_dbg(&cldev->dev, "running hook %s\n", __func__); + if (pdev->device == MEI_DEV_ID_WPT_LP || + pdev->device == MEI_DEV_ID_SPT || + pdev->device == MEI_DEV_ID_SPT_H) +@@ -410,8 +403,6 @@ static void mei_nfc(struct mei_cl_device *cldev) + + bus = cldev->bus; + +- dev_dbg(&cldev->dev, "running hook %s\n", __func__); +- + mutex_lock(&bus->device_lock); + /* we need to connect to INFO GUID */ + cl = mei_cl_alloc_linked(bus); +-- +2.17.1 + diff --git a/patches/0031-net-stmmac-enable-2.5Gbps-link-speed.connectivity b/patches/0031-net-stmmac-enable-2.5Gbps-link-speed.connectivity new file mode 100644 index 
0000000000..097d3e2a53 --- /dev/null +++ b/patches/0031-net-stmmac-enable-2.5Gbps-link-speed.connectivity @@ -0,0 +1,240 @@ +From f8ae126dce694772b1a82ad71b2d76d8d8dfbee0 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Sun, 4 Aug 2019 07:05:17 +0800 +Subject: [PATCH 031/108] net: stmmac: enable 2.5Gbps link speed + +The MAC support 2.5G mode when the PCS is in 1000BASE-T mode. The +2.5G mode of operation is functionally same as 1000BASE-T mode, +except that the clock rate is 2.5 times the original rate. +In this mode, the serdes/PHY operates at a serial baud rate of +3.125 Gbps and the PCS data path and GMII interface of the MAC +operate at 312.5 MH instead of 125 MHz. + +The MAC running in 10/100/1G mode or 2.5G mode is depends on +the link speed mode in the serdes. + +Signed-off-by: Voon Weifeng +--- + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 1 + + drivers/net/ethernet/stmicro/stmmac/hwif.h | 3 ++ + .../ethernet/stmicro/stmmac/intel_serdes.c | 38 +++++++++++++++++++ + .../ethernet/stmicro/stmmac/intel_serdes.h | 13 +++++++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 25 +++++++++++- + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 3 +- + include/linux/stmmac.h | 3 +- + 7 files changed, 83 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index da4e6595cc65..8af95b024dcc 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -1228,6 +1228,7 @@ int dwmac4_setup(struct stmmac_priv *priv) + mac->link.speed10 = GMAC_CONFIG_PS; + mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS; + mac->link.speed1000 = 0; ++ mac->link.speed2500 = GMAC_CONFIG_FES; + mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS; + mac->mii.addr = GMAC_MDIO_ADDR; + mac->mii.data = GMAC_MDIO_DATA; +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 
73829f89cc49..4a3c25f45de1 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -492,12 +492,15 @@ struct stmmac_ops { + struct stmmac_serdes_ops { + int (*serdes_powerup)(struct net_device *ndev); + int (*serdes_powerdown)(struct net_device *ndev); ++ int (*speed_mode_2500)(struct net_device *ndev); + }; + + #define stmmac_serdes_powerup(__priv, __args...) \ + stmmac_do_callback(__priv, serdes, serdes_powerup, __args) + #define stmmac_serdes_powerdown(__priv, __args...) \ + stmmac_do_callback(__priv, serdes, serdes_powerdown, __args) ++#define stmmac_speed_mode_2500(__priv, __args...) \ ++ stmmac_do_callback(__priv, serdes, speed_mode_2500, __args) + + /* PTP and HW Timer helpers */ + struct stmmac_hwtimestamp { +diff --git a/drivers/net/ethernet/stmicro/stmmac/intel_serdes.c b/drivers/net/ethernet/stmicro/stmmac/intel_serdes.c +index f3c9b9892229..794503521789 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/intel_serdes.c ++++ b/drivers/net/ethernet/stmicro/stmmac/intel_serdes.c +@@ -36,6 +36,22 @@ static int intel_serdes_powerup(struct net_device *ndev) + + serdes_phy_addr = priv->plat->intel_adhoc_addr; + ++ /* Set the serdes rate and the PCLK rate */ ++ data = mdiobus_read(priv->mii, serdes_phy_addr, ++ SERDES_GCR0); ++ ++ data &= ~SERDES_RATE_MASK; ++ data &= ~SERDES_PCLK_MASK; ++ ++ if (priv->plat->speed_2500_en) ++ data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT | ++ SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT; ++ else ++ data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT | ++ SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT; ++ ++ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); ++ + /* assert clk_req */ + data = mdiobus_read(priv->mii, serdes_phy_addr, + SERDES_GCR0); +@@ -175,7 +191,29 @@ static int intel_serdes_powerdown(struct net_device *ndev) + return 0; + } + ++static int intel_speed_mode_2500(struct net_device *ndev) ++{ ++ struct stmmac_priv *priv = netdev_priv(ndev); ++ int 
serdes_phy_addr = 0; ++ u32 data = 0; ++ ++ serdes_phy_addr = priv->plat->intel_adhoc_addr; ++ ++ /* Determine the link speed mode: 2.5Gbps/1Gbps */ ++ data = mdiobus_read(priv->mii, serdes_phy_addr, ++ SERDES_GCR); ++ ++ if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) == ++ SERDES_LINK_MODE_2G5) { ++ dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n"); ++ return 1; ++ } else { ++ return 0; ++ } ++} ++ + const struct stmmac_serdes_ops intel_serdes_ops = { + .serdes_powerup = intel_serdes_powerup, + .serdes_powerdown = intel_serdes_powerdown, ++ .speed_mode_2500 = intel_speed_mode_2500, + }; +diff --git a/drivers/net/ethernet/stmicro/stmmac/intel_serdes.h b/drivers/net/ethernet/stmicro/stmmac/intel_serdes.h +index 22b0b71b657b..caecc4b2f8da 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/intel_serdes.h ++++ b/drivers/net/ethernet/stmicro/stmmac/intel_serdes.h +@@ -9,6 +9,7 @@ + #define POLL_DELAY_US 8 + + /* SERDES Register */ ++#define SERDES_GCR 0x0 /* Global Conguration */ + #define SERDES_GSR0 0x5 /* Global Status Reg0 */ + #define SERDES_GCR0 0xb /* Global Configuration Reg0 */ + +@@ -16,8 +17,20 @@ + #define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */ + #define SERDES_RST BIT(2) /* Serdes Reset */ + #define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/ ++#define SERDES_RATE_MASK GENMASK(9, 8) ++#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */ ++#define SERDES_LINK_MODE_MASK GENMASK(2, 1) ++#define SERDES_LINK_MODE_SHIFT 1 + #define SERDES_PWR_ST_SHIFT 4 + #define SERDES_PWR_ST_P0 0x0 + #define SERDES_PWR_ST_P3 0x3 ++#define SERDES_LINK_MODE_2G5 0x3 ++#define SERSED_LINK_MODE_1G 0x2 ++#define SERDES_PCLK_37p5MHZ 0x0 ++#define SERDES_PCLK_70MHZ 0x1 ++#define SERDES_RATE_PCIE_GEN1 0x0 ++#define SERDES_RATE_PCIE_GEN2 0x1 ++#define SERDES_RATE_PCIE_SHIFT 8 ++#define SERDES_PCLK_SHIFT 12 + + #endif /* __INTEL_SERDES_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 9b654ec6a2a5..1443d4b6e878 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -833,6 +833,25 @@ static void stmmac_validate(struct phylink_config *config, + phylink_set(mac_supported, Asym_Pause); + phylink_set_port_modes(mac_supported); + ++ if (priv->plat->has_gmac || ++ priv->plat->has_gmac4 || ++ priv->plat->has_xgmac) { ++ phylink_set(mac_supported, 1000baseT_Half); ++ phylink_set(mac_supported, 1000baseT_Full); ++ phylink_set(mac_supported, 1000baseKX_Full); ++ } ++ ++ /* 2.5G mode only support 2500baseT full duplex only */ ++ if (priv->plat->has_gmac4 && priv->plat->speed_2500_en) { ++ phylink_set(mac_supported, 2500baseT_Full); ++ phylink_set(mask, 10baseT_Half); ++ phylink_set(mask, 10baseT_Full); ++ phylink_set(mask, 100baseT_Half); ++ phylink_set(mask, 100baseT_Full); ++ phylink_set(mask, 1000baseT_Half); ++ phylink_set(mask, 1000baseT_Full); ++ } ++ + /* Cut down 1G if asked to */ + if ((max_speed > 0) && (max_speed < 1000)) { + phylink_set(mask, 1000baseT_Full); +@@ -2959,7 +2978,8 @@ static int stmmac_open(struct net_device *dev) + /* Start phy converter after MDIO bus IRQ handling is up */ + if (priv->plat->setup_phy_conv) { + ret = priv->plat->setup_phy_conv(priv->mii, priv->phy_conv_irq, +- priv->plat->phy_addr); ++ priv->plat->phy_addr, ++ priv->plat->speed_2500_en); + + if (ret < 0) { + netdev_err(priv->dev, +@@ -5024,6 +5044,9 @@ int stmmac_dvr_probe(struct device *device, + } + } + ++ if (priv->plat->has_serdes) ++ priv->plat->speed_2500_en = stmmac_speed_mode_2500(priv, ndev); ++ + ret = stmmac_phy_setup(priv); + if (ret) { + netdev_err(ndev, "failed to setup phy (%d)\n", ret); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index b7cc71ca53c4..64a6861c7176 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -122,12 +122,13 @@ static struct mdio_board_info intel_mgbe_bdinfo = { + }; + + static int setup_intel_mgbe_phy_conv(struct mii_bus *bus, int irq, +- int phy_addr) ++ int phy_addr, bool speed_2500_en) + { + struct dwxpcs_platform_data *pdata = &intel_mgbe_pdata; + + pdata->irq = irq; + pdata->ext_phy_addr = phy_addr; ++ pdata->speed_2500_en = speed_2500_en; + + return mdiobus_create_device(bus, &intel_mgbe_bdinfo); + } +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index dd9676cdbdde..76b63aaa3c58 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -167,7 +167,7 @@ struct plat_stmmacenet_data { + void (*exit)(struct platform_device *pdev, void *priv); + struct mac_device_info *(*setup)(void *priv); + int (*setup_phy_conv)(struct mii_bus *bus, int irq, +- int phy_addr); ++ int phy_addr, bool speed_2500_en); + int (*remove_phy_conv)(struct mii_bus *bus); + void *bsp_priv; + struct clk *stmmac_clk; +@@ -197,5 +197,6 @@ struct plat_stmmacenet_data { + int msi_tx_base_vec; + bool vlan_fail_q_en; + u8 vlan_fail_q; ++ bool speed_2500_en; + }; + #endif +-- +2.17.1 + diff --git a/patches/0031-serial-core-add-support-of-runtime-PM.lpss b/patches/0031-serial-core-add-support-of-runtime-PM.lpss new file mode 100644 index 0000000000..db146e45f2 --- /dev/null +++ b/patches/0031-serial-core-add-support-of-runtime-PM.lpss @@ -0,0 +1,444 @@ +From 3194fa6e1ab9945a712586f8c9394360bc1b78c2 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Wed, 16 Nov 2016 19:38:22 +0200 +Subject: [PATCH 31/40] serial: core: add support of runtime PM + +8250 driver has wrong implementation of runtime PM, e.g. it uses an irq_safe +flag. This patch adds runtime PM calls to serial core once for all UART +drivers. + +The drivers without runtime PM support will not change behaviour. The rest has +been prepared already. Thus, we are on the safe side to make a change right +now. 
+ +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/serial_core.c | 154 +++++++++++++++++++++++++++---- + 1 file changed, 134 insertions(+), 20 deletions(-) + +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index c4a414a46c7f..119488d7f2d4 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -53,15 +54,32 @@ static int uart_dcd_enabled(struct uart_port *uport) + return !!(uport->status & UPSTAT_DCD_ENABLE); + } + +-static inline struct uart_port *uart_port_ref(struct uart_state *state) ++static inline struct uart_port *uart_port_ref_no_rpm(struct uart_state *state) + { + if (atomic_add_unless(&state->refcount, 1, 0)) + return state->uart_port; + return NULL; + } + ++static inline void uart_port_deref_no_rpm(struct uart_port *uport) ++{ ++ if (atomic_dec_and_test(&uport->state->refcount)) ++ wake_up(&uport->state->remove_wait); ++} ++ ++static inline struct uart_port *uart_port_ref(struct uart_state *state) ++{ ++ if (atomic_add_unless(&state->refcount, 1, 0)) { ++ pm_runtime_get_sync(state->uart_port->dev); ++ return state->uart_port; ++ } ++ return NULL; ++} ++ + static inline void uart_port_deref(struct uart_port *uport) + { ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + if (atomic_dec_and_test(&uport->state->refcount)) + wake_up(&uport->state->remove_wait); + } +@@ -142,12 +160,15 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) + unsigned long flags; + unsigned int old; + ++ pm_runtime_get_sync(port->dev); + spin_lock_irqsave(&port->lock, flags); + old = port->mctrl; + port->mctrl = (old & ~clear) | set; + if (old != port->mctrl) + port->ops->set_mctrl(port, port->mctrl); + spin_unlock_irqrestore(&port->lock, flags); ++ pm_runtime_mark_last_busy(port->dev); ++ pm_runtime_put_autosuspend(port->dev); + } + + #define 
uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0) +@@ -216,7 +237,11 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, + free_page(page); + } + ++ pm_runtime_get_sync(uport->dev); + retval = uport->ops->startup(uport); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); ++ + if (retval == 0) { + if (uart_console(uport) && uport->cons->cflag) { + tty->termios.c_cflag = uport->cons->cflag; +@@ -512,6 +537,8 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, + return; + + termios = &tty->termios; ++ ++ pm_runtime_get_sync(uport->dev); + uport->ops->set_termios(uport, termios, old_termios); + + /* +@@ -540,6 +567,8 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, + __uart_start(tty); + } + spin_unlock_irq(&uport->lock); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + } + + static int uart_put_char(struct tty_struct *tty, unsigned char c) +@@ -550,19 +579,19 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c) + unsigned long flags; + int ret = 0; + +- circ = &state->xmit; +- port = uart_port_lock(state, flags); +- if (!circ->buf) { +- uart_port_unlock(port, flags); ++ port = uart_port_ref_no_rpm(state); ++ if (!port) + return 0; +- } + +- if (port && uart_circ_chars_free(circ) != 0) { ++ spin_lock_irqsave(&port->lock, flags); ++ circ = &state->xmit; ++ if (circ->buf && uart_circ_chars_free(circ) != 0) { + circ->buf[circ->head] = c; + circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); + ret = 1; + } +- uart_port_unlock(port, flags); ++ spin_unlock_irqrestore(&port->lock, flags); ++ uart_port_deref_no_rpm(port); + return ret; + } + +@@ -589,10 +618,15 @@ static int uart_write(struct tty_struct *tty, + return -EL3HLT; + } + +- port = uart_port_lock(state, flags); ++ port = uart_port_ref_no_rpm(state); ++ if (!port) ++ return 0; ++ ++ spin_lock_irqsave(&port->lock, flags); + circ 
= &state->xmit; + if (!circ->buf) { +- uart_port_unlock(port, flags); ++ spin_unlock_irqrestore(&port->lock, flags); ++ uart_port_deref_no_rpm(port); + return 0; + } + +@@ -610,7 +644,8 @@ static int uart_write(struct tty_struct *tty, + } + + __uart_start(tty); +- uart_port_unlock(port, flags); ++ spin_unlock_irqrestore(&port->lock, flags); ++ uart_port_deref_no_rpm(port); + return ret; + } + +@@ -621,9 +656,17 @@ static int uart_write_room(struct tty_struct *tty) + unsigned long flags; + int ret; + +- port = uart_port_lock(state, flags); ++ if (!state->xmit.buf) ++ return 0; ++ ++ port = uart_port_ref_no_rpm(state); ++ if (!port) ++ return 0; ++ ++ spin_lock_irqsave(&port->lock, flags); + ret = uart_circ_chars_free(&state->xmit); +- uart_port_unlock(port, flags); ++ spin_unlock_irqrestore(&port->lock, flags); ++ uart_port_deref_no_rpm(port); + return ret; + } + +@@ -634,9 +677,17 @@ static int uart_chars_in_buffer(struct tty_struct *tty) + unsigned long flags; + int ret; + +- port = uart_port_lock(state, flags); ++ if (!state->xmit.buf) ++ return 0; ++ ++ port = uart_port_ref_no_rpm(state); ++ if (!port) ++ return 0; ++ ++ spin_lock_irqsave(&port->lock, flags); + ret = uart_circ_chars_pending(&state->xmit); +- uart_port_unlock(port, flags); ++ spin_unlock_irqrestore(&port->lock, flags); ++ uart_port_deref_no_rpm(port); + return ret; + } + +@@ -1038,7 +1089,10 @@ static int uart_get_lsr_info(struct tty_struct *tty, + struct uart_port *uport = uart_port_check(state); + unsigned int result; + ++ pm_runtime_get_sync(uport->dev); + result = uport->ops->tx_empty(uport); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + + /* + * If we're about to load something into the transmit +@@ -1068,9 +1122,13 @@ static int uart_tiocmget(struct tty_struct *tty) + + if (!tty_io_error(tty)) { + result = uport->mctrl; ++ ++ pm_runtime_get_sync(uport->dev); + spin_lock_irq(&uport->lock); + result |= uport->ops->get_mctrl(uport); + 
spin_unlock_irq(&uport->lock); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + } + out: + mutex_unlock(&port->mutex); +@@ -1111,8 +1169,11 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state) + if (!uport) + goto out; + ++ pm_runtime_get_sync(uport->dev); + if (uport->type != PORT_UNKNOWN) + uport->ops->break_ctl(uport, break_state); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + ret = 0; + out: + mutex_unlock(&port->mutex); +@@ -1161,7 +1222,10 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state) + * This will claim the ports resources if + * a port is found. + */ ++ pm_runtime_get_sync(uport->dev); + uport->ops->config_port(uport, flags); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + + ret = uart_startup(tty, state, 1); + if (ret == 0) +@@ -1257,13 +1321,13 @@ static int uart_get_icount(struct tty_struct *tty, + struct uart_icount cnow; + struct uart_port *uport; + +- uport = uart_port_ref(state); ++ uport = uart_port_ref_no_rpm(state); + if (!uport) + return -EIO; + spin_lock_irq(&uport->lock); + memcpy(&cnow, &uport->icount, sizeof(struct uart_icount)); + spin_unlock_irq(&uport->lock); +- uart_port_deref(uport); ++ uart_port_deref_no_rpm(uport); + + icount->cts = cnow.cts; + icount->dsr = cnow.dsr; +@@ -1468,8 +1532,12 @@ static void uart_set_ldisc(struct tty_struct *tty) + + mutex_lock(&state->port.mutex); + uport = uart_port_check(state); +- if (uport && uport->ops->set_ldisc) ++ if (uport && uport->ops->set_ldisc) { ++ pm_runtime_get_sync(uport->dev); + uport->ops->set_ldisc(uport, &tty->termios); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); ++ } + mutex_unlock(&state->port.mutex); + } + +@@ -1568,9 +1636,12 @@ static void uart_tty_port_shutdown(struct tty_port *port) + if (WARN(!uport, "detached port still initialized!\n")) + return; + ++ 
pm_runtime_get_sync(uport->dev); + spin_lock_irq(&uport->lock); + uport->ops->stop_rx(uport); + spin_unlock_irq(&uport->lock); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + + uart_port_shutdown(port); + +@@ -1699,8 +1770,12 @@ static void uart_port_shutdown(struct tty_port *port) + /* + * Free the IRQ and disable the port. + */ +- if (uport) ++ if (uport) { ++ pm_runtime_get_sync(uport->dev); + uport->ops->shutdown(uport); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); ++ } + + /* + * Ensure that the IRQ handler isn't running on another CPU. +@@ -1847,9 +1922,12 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) + pm_state = state->pm_state; + if (pm_state != UART_PM_STATE_ON) + uart_change_pm(state, UART_PM_STATE_ON); ++ pm_runtime_get_sync(uport->dev); + spin_lock_irq(&uport->lock); + status = uport->ops->get_mctrl(uport); + spin_unlock_irq(&uport->lock); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + if (pm_state != UART_PM_STATE_ON) + uart_change_pm(state, pm_state); + +@@ -2099,7 +2177,15 @@ uart_set_options(struct uart_port *port, struct console *co, + */ + port->mctrl |= TIOCM_DTR; + +- port->ops->set_termios(port, &termios, &dummy); ++ /* At early stage device is not created yet, we can't do PM */ ++ if (port->dev) { ++ pm_runtime_get_sync(port->dev); ++ port->ops->set_termios(port, &termios, &dummy); ++ pm_runtime_mark_last_busy(port->dev); ++ pm_runtime_put_autosuspend(port->dev); ++ } else ++ port->ops->set_termios(port, &termios, &dummy); ++ + /* + * Allow the setting of the UART parameters with a NULL console + * too: +@@ -2178,11 +2264,14 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) + tty_port_set_suspended(port, 1); + tty_port_set_initialized(port, 0); + ++ pm_runtime_get_sync(uport->dev); + spin_lock_irq(&uport->lock); + ops->stop_tx(uport); + ops->set_mctrl(uport, 0); + 
ops->stop_rx(uport); + spin_unlock_irq(&uport->lock); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + + /* + * Wait for the transmitter to empty. +@@ -2193,7 +2282,10 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) + dev_err(uport->dev, "%s: Unable to drain transmitter\n", + uport->name); + ++ pm_runtime_get_sync(uport->dev); + ops->shutdown(uport); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + } + + /* +@@ -2248,7 +2340,12 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) + + if (console_suspend_enabled) + uart_change_pm(state, UART_PM_STATE_ON); ++ ++ pm_runtime_get_sync(uport->dev); + uport->ops->set_termios(uport, &termios, NULL); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); ++ + if (console_suspend_enabled) + console_start(uport->cons); + } +@@ -2258,20 +2355,31 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) + int ret; + + uart_change_pm(state, UART_PM_STATE_ON); ++ pm_runtime_get_sync(uport->dev); + spin_lock_irq(&uport->lock); + ops->set_mctrl(uport, 0); + spin_unlock_irq(&uport->lock); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); ++ + if (console_suspend_enabled || !uart_console(uport)) { + /* Protected by port mutex for now */ + struct tty_struct *tty = port->tty; ++ ++ pm_runtime_get_sync(uport->dev); + ret = ops->startup(uport); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + if (ret == 0) { + if (tty) + uart_change_speed(tty, state, NULL); ++ pm_runtime_get_sync(uport->dev); + spin_lock_irq(&uport->lock); + ops->set_mctrl(uport, uport->mctrl); + ops->start_tx(uport); + spin_unlock_irq(&uport->lock); ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + tty_port_set_initialized(port, 1); + } else { + /* +@@ -2365,9 +2473,12 @@ 
uart_configure_port(struct uart_driver *drv, struct uart_state *state, + * keep the DTR setting that is set in uart_set_options() + * We probably don't need a spinlock around this, but + */ ++ pm_runtime_get_sync(port->dev); + spin_lock_irqsave(&port->lock, flags); + port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR); + spin_unlock_irqrestore(&port->lock, flags); ++ pm_runtime_mark_last_busy(port->dev); ++ pm_runtime_put_autosuspend(port->dev); + + /* + * If this driver supports console, and it hasn't been +@@ -3032,6 +3143,7 @@ void uart_handle_cts_change(struct uart_port *uport, unsigned int status) + uport->icount.cts++; + + if (uart_softcts_mode(uport)) { ++ pm_runtime_get_sync(uport->dev); + if (uport->hw_stopped) { + if (status) { + uport->hw_stopped = 0; +@@ -3044,6 +3156,8 @@ void uart_handle_cts_change(struct uart_port *uport, unsigned int status) + uport->ops->stop_tx(uport); + } + } ++ pm_runtime_mark_last_busy(uport->dev); ++ pm_runtime_put_autosuspend(uport->dev); + + } + } +-- +2.17.1 + diff --git a/patches/0031-trusty-ipc-change-DEFAULT_MSG_BUF_SIZE-to-68K.trusty b/patches/0031-trusty-ipc-change-DEFAULT_MSG_BUF_SIZE-to-68K.trusty new file mode 100644 index 0000000000..74e125c9cb --- /dev/null +++ b/patches/0031-trusty-ipc-change-DEFAULT_MSG_BUF_SIZE-to-68K.trusty @@ -0,0 +1,36 @@ +From 8dcbc7c1cc134ca17ce7f1efab748958192b779c Mon Sep 17 00:00:00 2001 +From: "Yan, Shaopu" +Date: Mon, 13 Mar 2017 13:22:21 +0800 +Subject: [PATCH 31/63] trusty-ipc: change DEFAULT_MSG_BUF_SIZE to 68K + +after hw-backed keymaster enabled, the cts cases of testLargeMsgKat and +testLongMsgKat both will failed due to the default CHUNK size(64K) is +exceed the channel buffer size in trusty keymaster which use the 4K as +default. In order to fix the failed cases, we will enlarge the default +channel buffer size to 68K. 
+ +Change-Id: I2bfb0174430962c6e66c08033be958aaffeca515 +Tracked-On: OAM-43928 +Signed-off-by: Yan, Shaopu +Reviewed-on: #575371 +--- + drivers/trusty/trusty-ipc.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index 44843eb811bd..9d6f6bf94f97 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -45,7 +45,8 @@ + #define MAX_SRV_NAME_LEN 256 + #define MAX_DEV_NAME_LEN 32 + +-#define DEFAULT_MSG_BUF_SIZE PAGE_SIZE ++#define DEFAULT_MSG_BUF_SIZE (68*1024) ++ + #define DEFAULT_MSG_BUF_ALIGN PAGE_SIZE + + #define TIPC_CTRL_ADDR 53 +-- +2.17.1 + diff --git a/patches/0032-ASoC-Intel-Skylake-Define-platform-descriptors.audio b/patches/0032-ASoC-Intel-Skylake-Define-platform-descriptors.audio new file mode 100644 index 0000000000..e0a97de7a3 --- /dev/null +++ b/patches/0032-ASoC-Intel-Skylake-Define-platform-descriptors.audio @@ -0,0 +1,164 @@ +From 890460b9c52b1d1b8c57e53943454115b74b51f6 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Fri, 16 Aug 2019 19:48:13 +0200 +Subject: [PATCH 032/193] ASoC: Intel: Skylake: Define platform descriptors + +Make use of sst_pdata and declare platform descriptors for all existing +cAVS platforms. Each carries information about base_fw filename, +platform specific operations and boards supported. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 4 ++-- + sound/soc/intel/skylake/cnl-sst.c | 4 ++-- + sound/soc/intel/skylake/skl-sst.c | 4 ++-- + sound/soc/intel/skylake/skl.c | 38 ++++++++++++++++++++++++++++++- + sound/soc/intel/skylake/skl.h | 3 +++ + 5 files changed, 46 insertions(+), 7 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 34635c2ff56c..22223bc01899 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -531,7 +531,7 @@ static const struct skl_dsp_fw_ops bxt_fw_ops = { + .load_library = bxt_load_library, + }; + +-static struct sst_ops skl_ops = { ++struct sst_ops apl_sst_ops = { + .irq_handler = skl_dsp_sst_interrupt, + .thread_fn = skl_dsp_irq_thread_handler, + .write = sst_shim32_write, +@@ -542,7 +542,7 @@ static struct sst_ops skl_ops = { + }; + + static struct sst_pdata skl_dev = { +- .ops = &skl_ops, ++ .ops = &apl_sst_ops, + }; + + int bxt_sst_dsp_init(struct skl_dev *skl, const char *fw_name) +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index ce966112137b..c977a6f08a8f 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -407,7 +407,7 @@ static int cnl_ipc_init(struct device *dev, struct skl_dev *cnl) + return 0; + } + +-static struct sst_ops cnl_ops = { ++struct sst_ops cnl_sst_ops = { + .irq_handler = cnl_dsp_sst_interrupt, + .thread_fn = cnl_dsp_irq_thread_handler, + .write = sst_shim32_write, +@@ -418,7 +418,7 @@ static struct sst_ops cnl_ops = { + }; + + static struct sst_pdata cnl_dev = { +- .ops = &cnl_ops, ++ .ops = &cnl_sst_ops, + }; + + int cnl_sst_dsp_init(struct skl_dev *cnl, const char *fw_name) +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 8c78c07e28a8..490df6dfb7b8 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -502,7 +502,7 @@ static 
const struct skl_dsp_fw_ops skl_fw_ops = { + .unload_mod = skl_unload_module, + }; + +-static struct sst_ops skl_ops = { ++struct sst_ops skl_sst_ops = { + .irq_handler = skl_dsp_sst_interrupt, + .thread_fn = skl_dsp_irq_thread_handler, + .write = sst_shim32_write, +@@ -513,7 +513,7 @@ static struct sst_ops skl_ops = { + }; + + static struct sst_pdata skl_dev = { +- .ops = &skl_ops, ++ .ops = &skl_sst_ops, + }; + + int skl_sst_dsp_init(struct skl_dev *skl, const char *fw_name) +diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c +index 54e1f957121d..d6d099aba834 100644 +--- a/sound/soc/intel/skylake/skl.c ++++ b/sound/soc/intel/skylake/skl.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include "../common/sst-dsp.h" + #include "skl.h" + #include "skl-sst-dsp.h" + #include "skl-sst-ipc.h" +@@ -1063,7 +1064,6 @@ static int skl_probe(struct pci_dev *pci, + + pci_set_drvdata(skl->pci, bus); + +- + err = skl_find_machine(skl, (void *)pci_id->driver_data); + if (err < 0) { + dev_err(bus->dev, "skl_find_machine failed with err: %d\n", err); +@@ -1153,6 +1153,42 @@ static void skl_remove(struct pci_dev *pci) + dev_set_drvdata(&pci->dev, NULL); + } + ++static struct sst_pdata skl_desc = { ++ .fw_name = "intel/dsp_fw_release.bin", ++ .ops = &skl_sst_ops, ++ .boards = snd_soc_acpi_intel_skl_machines, ++}; ++ ++static struct sst_pdata kbl_desc = { ++ .fw_name = "intel/dsp_fw_kbl.bin", ++ .ops = &skl_sst_ops, ++ .boards = snd_soc_acpi_intel_kbl_machines, ++}; ++ ++static struct sst_pdata apl_desc = { ++ .fw_name = "intel/dsp_fw_bxtn.bin", ++ .ops = &apl_sst_ops, ++ .boards = snd_soc_acpi_intel_bxt_machines, ++}; ++ ++static struct sst_pdata glk_desc = { ++ .fw_name = "intel/dsp_fw_glk.bin", ++ .ops = &apl_sst_ops, ++ .boards = snd_soc_acpi_intel_glk_machines, ++}; ++ ++static struct sst_pdata cnl_desc = { ++ .fw_name = "intel/dsp_fw_cnl.bin", ++ .ops = &cnl_sst_ops, ++ .boards = snd_soc_acpi_intel_cnl_machines, ++}; ++ ++static struct sst_pdata 
icl_desc = { ++ .fw_name = "intel/dsp_fw_icl.bin", ++ .ops = &cnl_sst_ops, ++ .boards = snd_soc_acpi_intel_icl_machines, ++}; ++ + /* PCI IDs */ + static const struct pci_device_id skl_ids[] = { + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKL) +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index c7c908a4df1d..88f15859a8ad 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -42,6 +42,9 @@ + #define AZX_REG_VS_EM2_L1SEN BIT(13) + + struct skl_debug; ++extern struct sst_ops skl_sst_ops; ++extern struct sst_ops apl_sst_ops; ++extern struct sst_ops cnl_sst_ops; + + struct skl_astate_param { + u32 kcps; +-- +2.17.1 + diff --git a/patches/0032-VHM-refine-memory-segment-interface.acrn b/patches/0032-VHM-refine-memory-segment-interface.acrn new file mode 100644 index 0000000000..0060e493fc --- /dev/null +++ b/patches/0032-VHM-refine-memory-segment-interface.acrn @@ -0,0 +1,297 @@ +From 000ef228f3f4b5aab8ba53682f5dda2750829c41 Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:58 +0800 +Subject: [PATCH 032/150] VHM: refine memory segment interface + +- restruct guest_memseg & remove redundant paramters in it +- restruct vm_memmap & remove redundant paramters in it +- remove redundant paramters in vm_memseg + +Change-Id: I7661cfd464bc2748f9d5f1d0751f52782332c97a +Tracked-On: 212688 +Signed-off-by: Jason Chen CJ +--- + drivers/vhm/vhm_mm.c | 90 ++++++++++-------------------- + include/linux/vhm/vhm_ioctl_defs.h | 31 +++------- + 2 files changed, 37 insertions(+), 84 deletions(-) + +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index 712a905040a4..a9ba810a7fd7 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -78,12 +78,9 @@ + + struct guest_memseg { + struct list_head list; +- int segid; +- u64 base; ++ u64 vm0_gpa; + size_t len; +- char name[SPECNAMELEN + 1]; + u64 gpa; +- int prot; /* RWX */ + long vma_count; + }; + +@@ -105,10 +102,10 @@ static u64 
_alloc_memblk(struct device *dev, size_t len) + return 0ULL; + } + +-static bool _free_memblk(struct device *dev, u64 base, size_t len) ++static bool _free_memblk(struct device *dev, u64 vm0_gpa, size_t len) + { + unsigned int count = PAGE_ALIGN(len) >> PAGE_SHIFT; +- struct page *page = pfn_to_page(base >> PAGE_SHIFT); ++ struct page *page = pfn_to_page(vm0_gpa >> PAGE_SHIFT); + + return dma_release_from_contiguous(dev, page, count); + } +@@ -116,32 +113,30 @@ static bool _free_memblk(struct device *dev, u64 base, size_t len) + int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) + { + struct guest_memseg *seg; +- u64 base; ++ u64 vm0_gpa; + int max_gfn; + + seg = kzalloc(sizeof(struct guest_memseg), GFP_KERNEL); + if (seg == NULL) + return -ENOMEM; + +- base = _alloc_memblk(vm->dev, memseg->len); +- if (base == 0ULL) { ++ vm0_gpa = _alloc_memblk(vm->dev, memseg->len); ++ if (vm0_gpa == 0ULL) { + kfree(seg); + return -ENOMEM; + } + +- seg->segid = memseg->segid; +- seg->base = base; ++ seg->vm0_gpa = vm0_gpa; + seg->len = memseg->len; +- strncpy(seg->name, memseg->name, SPECNAMELEN + 1); + seg->gpa = memseg->gpa; + + max_gfn = (seg->gpa + seg->len) >> PAGE_SHIFT; + if (vm->max_gfn < max_gfn) + vm->max_gfn = max_gfn; + +- pr_info("VHM: alloc memseg[%s] with len=0x%lx, base=0x%llx," ++ pr_info("VHM: alloc memseg with len=0x%lx, vm0_gpa=0x%llx," + " and its guest gpa = 0x%llx, vm max_gfn 0x%x\n", +- seg->name, seg->len, seg->base, seg->gpa, vm->max_gfn); ++ seg->len, seg->vm0_gpa, seg->gpa, vm->max_gfn); + + seg->vma_count = 0; + mutex_lock(&vm->seg_lock); +@@ -201,39 +196,34 @@ int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + { + struct guest_memseg *seg = NULL; +- struct vm_set_memmap set_memmap; ++ unsigned int type, prot; ++ unsigned long guest_gpa, host_gpa; + + mutex_lock(&vm->seg_lock); + +- if (memmap->segid != VM_MMIO) { ++ if (memmap->type == VM_SYSMEM) { 
+ list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->segid == memmap->segid +- && seg->gpa == memmap->mem.gpa +- && seg->len == memmap->mem.len) ++ if (seg->gpa == memmap->gpa ++ && seg->len == memmap->len) + break; + } + if (&seg->list == &vm->memseg_list) { + mutex_unlock(&vm->seg_lock); + return -EINVAL; + } +- seg->prot = memmap->mem.prot; +- set_memmap.type = MAP_MEM; +- set_memmap.remote_gpa = seg->gpa; +- set_memmap.vm0_gpa = seg->base; +- set_memmap.length = seg->len; +- set_memmap.prot = seg->prot; +- set_memmap.prot |= MEM_ATTR_WB_CACHE; ++ guest_gpa = seg->gpa; ++ host_gpa = seg->vm0_gpa; ++ prot = memmap->prot | MEM_ATTR_WB_CACHE; ++ type = MAP_MEM; + } else { +- set_memmap.type = MAP_MMIO; +- set_memmap.remote_gpa = memmap->mmio.gpa; +- set_memmap.vm0_gpa = memmap->mmio.hpa; +- set_memmap.length = memmap->mmio.len; +- set_memmap.prot = memmap->mmio.prot; +- set_memmap.prot |= MEM_ATTR_UNCACHED; ++ guest_gpa = memmap->gpa; ++ host_gpa = acrn_hpa2gpa(memmap->hpa); ++ prot = memmap->prot | MEM_ATTR_UNCACHED; ++ type = MAP_MMIO; + } + +- /* hypercall to notify hv the guest EPT setting*/ +- if (hcall_set_memmap(vm->vmid, virt_to_phys(&set_memmap)) < 0) { ++ if (_mem_set_memmap(vm->vmid, guest_gpa, host_gpa, memmap->len, ++ prot, type) < 0) { + pr_err("vhm: failed to set memmap %ld!\n", vm->vmid); + mutex_unlock(&vm->seg_lock); + return -EFAULT; +@@ -241,16 +231,6 @@ int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + + mutex_unlock(&vm->seg_lock); + +- if (memmap->segid != VM_MMIO) +- pr_debug("VHM: set ept for memseg [hvm_gpa=0x%llx," +- "guest_gpa=0x%llx,len=0x%lx, prot=0x%x]\n", +- seg->base, seg->gpa, seg->len, seg->prot); +- else +- pr_debug("VHM: set ept for mmio [hpa=0x%llx," +- "gpa=0x%llx,len=0x%lx, prot=0x%x]\n", +- memmap->mmio.hpa, memmap->mmio.gpa, +- memmap->mmio.len, memmap->mmio.prot); +- + return 0; + } + +@@ -262,7 +242,7 @@ void free_guest_mem(struct vhm_vm *vm) + while (!list_empty(&vm->memseg_list)) { + seg = 
list_first_entry(&vm->memseg_list, + struct guest_memseg, list); +- if (!_free_memblk(vm->dev, seg->base, seg->len)) ++ if (!_free_memblk(vm->dev, seg->vm0_gpa, seg->len)) + pr_warn("failed to free memblk\n"); + list_del(&seg->list); + kfree(seg); +@@ -276,9 +256,6 @@ int check_guest_mem(struct vhm_vm *vm) + + mutex_lock(&vm->seg_lock); + list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->segid != VM_SYSMEM) +- continue; +- + if (seg->vma_count == 0) + continue; + +@@ -324,7 +301,7 @@ static int do_mmap_guest(struct file *file, + unsigned long start_addr; + + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTCOPY; +- pfn = seg->base >> PAGE_SHIFT; ++ pfn = seg->vm0_gpa >> PAGE_SHIFT; + start_addr = vma->vm_start; + while (size > 0) { + page = pfn_to_page(pfn); +@@ -338,9 +315,9 @@ static int do_mmap_guest(struct file *file, + vma->vm_ops = &guest_vm_ops; + vma->vm_private_data = (void *)seg; + +- pr_info("VHM: mmap for memseg [seg base=0x%llx, gpa=0x%llx] " ++ pr_info("VHM: mmap for memseg [seg vm0_gpa=0x%llx, gpa=0x%llx] " + "to start addr 0x%lx\n", +- seg->base, seg->gpa, start_addr); ++ seg->vm0_gpa, seg->gpa, start_addr); + + return 0; + } +@@ -355,9 +332,6 @@ int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma) + + mutex_lock(&vm->seg_lock); + list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->segid != VM_SYSMEM) +- continue; +- + if (seg->gpa != offset || seg->len != len) + continue; + +@@ -375,9 +349,6 @@ static void *do_map_guest_phys(struct vhm_vm *vm, u64 guest_phys, size_t size) + + mutex_lock(&vm->seg_lock); + list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->segid != VM_SYSMEM) +- continue; +- + if (seg->gpa > guest_phys || + guest_phys >= seg->gpa + seg->len) + continue; +@@ -388,7 +359,7 @@ static void *do_map_guest_phys(struct vhm_vm *vm, u64 guest_phys, size_t size) + } + + mutex_unlock(&vm->seg_lock); +- return phys_to_virt(seg->base + guest_phys - seg->gpa); ++ return phys_to_virt(seg->vm0_gpa + 
guest_phys - seg->gpa); + } + mutex_unlock(&vm->seg_lock); + return NULL; +@@ -417,9 +388,6 @@ static int do_unmap_guest_phys(struct vhm_vm *vm, u64 guest_phys) + + mutex_lock(&vm->seg_lock); + list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->segid != VM_SYSMEM) +- continue; +- + if (seg->gpa <= guest_phys && + guest_phys < seg->gpa + seg->len) { + mutex_unlock(&vm->seg_lock); +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 258ec3982da9..494213a9f9f0 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -82,7 +82,6 @@ + #define IC_ATTACH_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x03) + #define IC_DESTROY_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x04) + +- + /* Guest memory management */ + #define IC_ID_MEM_BASE 0x40UL + #define IC_ALLOC_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x00) +@@ -96,35 +95,21 @@ + #define IC_SET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x03) + #define IC_RESET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x04) + +-#define SPECNAMELEN 63 +- +-#define VM_SYSMEM 0 +-#define VM_MMIO 1 +- + struct vm_memseg { +- uint32_t segid; +- uint32_t reserved; + uint64_t len; + uint64_t gpa; +- char name[SPECNAMELEN + 1]; + }; + ++#define VM_SYSMEM 0 ++#define VM_MMIO 1 ++ + struct vm_memmap { +- uint32_t segid; /* memory segment */ ++ uint32_t type; + uint32_t reserved; +- union { +- struct { +- uint64_t gpa; +- uint64_t len; /* mmap length */ +- uint32_t prot; /* RWX */ +- } mem; +- struct { +- uint64_t gpa; +- uint64_t hpa; +- uint64_t len; +- uint32_t prot; +- } mmio; +- }; ++ uint64_t gpa; ++ uint64_t hpa; /* only for type == VM_MMIO */ ++ uint64_t len; /* mmap length */ ++ uint32_t prot; /* RWX */ + }; + + struct ic_ptdev_irq { +-- +2.17.1 + diff --git a/patches/0032-check-CPUID-while-probe-trusty-drivers.trusty b/patches/0032-check-CPUID-while-probe-trusty-drivers.trusty new file mode 100644 index 0000000000..29abd8c031 --- /dev/null 
+++ b/patches/0032-check-CPUID-while-probe-trusty-drivers.trusty @@ -0,0 +1,144 @@ +From cef4140a9931a4c8929840a89b40ab986648c208 Mon Sep 17 00:00:00 2001 +From: yingbinx +Date: Mon, 27 Mar 2017 12:24:04 +0800 +Subject: [PATCH 32/63] check CPUID while probe trusty drivers. + +Trusty ipc drivers only work when eVmm is alive. +So when probe the trusty drivers, we need to call CPUID +to check if eVmm is already existed. + +Change-Id: I295785b0510729aa2e9d212b243d7c242370389f +Tracked-On: OAM-43859 +Signed-off-by: yingbinx +Signed-off-by: weideng +Reviewed-on: #574947 +--- + drivers/trusty/trusty-ipc.c | 7 +++++++ + drivers/trusty/trusty-irq.c | 6 ++++++ + drivers/trusty/trusty-log.c | 6 ++++++ + drivers/trusty/trusty-virtio.c | 6 ++++++ + drivers/trusty/trusty.c | 6 ++++++ + include/linux/trusty/trusty.h | 16 ++++++++++++++++ + 6 files changed, 47 insertions(+) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index 9d6f6bf94f97..a05c5f957146 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -31,6 +31,7 @@ + #include + + #include ++#include + + #define MAX_DEVICES 4 + +@@ -1524,6 +1525,12 @@ static int tipc_virtio_probe(struct virtio_device *vdev) + vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb}; + const char *vq_names[] = { "rx", "tx" }; + ++ err = trusty_check_cpuid(); ++ if (err < 0) { ++ dev_err(&vdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ return -EINVAL; ++ } ++ + dev_dbg(&vdev->dev, "%s:\n", __func__); + + vds = kzalloc(sizeof(*vds), GFP_KERNEL); +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index 363b302dec0a..afdea66c23c2 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -587,6 +587,12 @@ static int trusty_irq_probe(struct platform_device *pdev) + struct trusty_irq_state *is; + work_func_t work_func; + ++ ret = trusty_check_cpuid(); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in 
trusty driver initialization!"); ++ return -EINVAL; ++ } ++ + dev_dbg(&pdev->dev, "%s\n", __func__); + + is = kzalloc(sizeof(*is), GFP_KERNEL); +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +index 4200e901d925..c977d33ccde5 100644 +--- a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -254,6 +254,12 @@ static int trusty_log_probe(struct platform_device *pdev) + phys_addr_t pa; + struct deadloop_dump *dump; + ++ result = trusty_check_cpuid(); ++ if (result < 0) { ++ dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ return -EINVAL; ++ } ++ + dev_dbg(&pdev->dev, "%s\n", __func__); + if (!trusty_supports_logging(pdev->dev.parent)) { + return -ENXIO; +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index 3d1a9aabef83..2ce818cef175 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -628,6 +628,12 @@ static int trusty_virtio_probe(struct platform_device *pdev) + int ret; + struct trusty_ctx *tctx; + ++ ret = trusty_check_cpuid(); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ return -EINVAL; ++ } ++ + dev_info(&pdev->dev, "initializing\n"); + + tctx = kzalloc(sizeof(*tctx), GFP_KERNEL); +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 679c5a9a7acf..93c73882b00c 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -450,6 +450,12 @@ static int trusty_probe(struct platform_device *pdev) + struct trusty_state *s; + struct device_node *node = pdev->dev.of_node; + ++ ret = trusty_check_cpuid(); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ return -EINVAL; ++ } ++ + if (!node) { + dev_err(&pdev->dev, "of_node required\n"); + return -EINVAL; +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index 74598389c308..7dc2dad40daa 100644 +--- 
a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -69,4 +69,20 @@ int trusty_call32_mem_buf(struct device *dev, u32 smcnr, + struct page *page, u32 size, + pgprot_t pgprot); + ++/* CPUID leaf 0x3 is used because eVMM will trap this leaf.*/ ++#define EVMM_RUNNING_SIGNATURE_CORP 0x43544E49 /* "INTC", edx */ ++#define EVMM_RUNNING_SIGNATURE_MON 0x4D4D5645 /* "XMON", ecx */ ++ ++static inline int trusty_check_cpuid(void) ++{ ++ u32 eax, ebx, ecx, edx; ++ ++ cpuid(3, &eax, &ebx, &ecx, &edx); ++ if ((ecx != EVMM_RUNNING_SIGNATURE_MON) || ++ (edx != EVMM_RUNNING_SIGNATURE_CORP)) { ++ return -EINVAL; ++ } ++ ++ return 0; ++} + #endif +-- +2.17.1 + diff --git a/patches/0032-drm-i915-selftests-Teach-igt_gpu_fill_dw-to-take-intel.drm b/patches/0032-drm-i915-selftests-Teach-igt_gpu_fill_dw-to-take-intel.drm new file mode 100644 index 0000000000..a62c4c2ea1 --- /dev/null +++ b/patches/0032-drm-i915-selftests-Teach-igt_gpu_fill_dw-to-take-intel.drm @@ -0,0 +1,609 @@ +From baed4f2fbf8e1965ab3726ffd4c299a27d88d061 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Sat, 24 Aug 2019 00:51:41 +0100 +Subject: [PATCH 032/690] drm/i915/selftests: Teach igt_gpu_fill_dw() to take + intel_context + +Avoid having to pass around (ctx, engine) everywhere by passing the +actual intel_context we intend to use. Today we preach this lesson to +igt_gpu_fill_dw and its callers' callers. + +The immediate benefit for the GEM selftests is that we aim to use the +GEM context as the control, the source of the engines on which to test +the GEM context. 
+ +Signed-off-by: Chris Wilson +Cc: Matthew Auld +Reviewed-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190823235141.31799-1-chris@chris-wilson.co.uk +--- + .../gpu/drm/i915/gem/selftests/huge_pages.c | 103 ++++++++++-------- + .../drm/i915/gem/selftests/i915_gem_context.c | 70 +++++++----- + .../drm/i915/gem/selftests/igt_gem_utils.c | 26 ++--- + .../drm/i915/gem/selftests/igt_gem_utils.h | 13 +-- + 4 files changed, 116 insertions(+), 96 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +index 8de83c6d81f5..c5cea4379216 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c ++++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +@@ -879,9 +879,8 @@ static int igt_mock_ppgtt_64K(void *arg) + return err; + } + +-static int gpu_write(struct i915_vma *vma, +- struct i915_gem_context *ctx, +- struct intel_engine_cs *engine, ++static int gpu_write(struct intel_context *ce, ++ struct i915_vma *vma, + u32 dw, + u32 val) + { +@@ -893,7 +892,7 @@ static int gpu_write(struct i915_vma *vma, + if (err) + return err; + +- return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32), ++ return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32), + vma->size >> PAGE_SHIFT, val); + } + +@@ -929,18 +928,16 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) + return err; + } + +-static int __igt_write_huge(struct i915_gem_context *ctx, +- struct intel_engine_cs *engine, ++static int __igt_write_huge(struct intel_context *ce, + struct drm_i915_gem_object *obj, + u64 size, u64 offset, + u32 dword, u32 val) + { +- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; + unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; + struct i915_vma *vma; + int err; + +- vma = i915_vma_instance(obj, vm, NULL); ++ vma = i915_vma_instance(obj, ce->vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + +@@ -954,7 +951,7 @@ static int __igt_write_huge(struct 
i915_gem_context *ctx, + * The ggtt may have some pages reserved so + * refrain from erroring out. + */ +- if (err == -ENOSPC && i915_is_ggtt(vm)) ++ if (err == -ENOSPC && i915_is_ggtt(ce->vm)) + err = 0; + + goto out_vma_close; +@@ -964,7 +961,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx, + if (err) + goto out_vma_unpin; + +- err = gpu_write(vma, ctx, engine, dword, val); ++ err = gpu_write(ce, vma, dword, val); + if (err) { + pr_err("gpu-write failed at offset=%llx\n", offset); + goto out_vma_unpin; +@@ -987,14 +984,13 @@ static int __igt_write_huge(struct i915_gem_context *ctx, + static int igt_write_huge(struct i915_gem_context *ctx, + struct drm_i915_gem_object *obj) + { +- struct drm_i915_private *i915 = to_i915(obj->base.dev); +- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; +- static struct intel_engine_cs *engines[I915_NUM_ENGINES]; +- struct intel_engine_cs *engine; ++ struct i915_gem_engines *engines; ++ struct i915_gem_engines_iter it; ++ struct intel_context *ce; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + unsigned int max_page_size; +- unsigned int id; ++ unsigned int count; + u64 max; + u64 num; + u64 size; +@@ -1008,19 +1004,18 @@ static int igt_write_huge(struct i915_gem_context *ctx, + if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) + size = round_up(size, I915_GTT_PAGE_SIZE_2M); + +- max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); +- max = div_u64((vm->total - size), max_page_size); +- + n = 0; +- for_each_engine(engine, i915, id) { +- if (!intel_engine_can_store_dword(engine)) { +- pr_info("store-dword-imm not supported on engine=%u\n", +- id); ++ count = 0; ++ max = U64_MAX; ++ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { ++ count++; ++ if (!intel_engine_can_store_dword(ce->engine)) + continue; +- } +- engines[n++] = engine; +- } + ++ max = min(max, ce->vm->total); ++ n++; ++ } ++ i915_gem_context_unlock_engines(ctx); + if (!n) + return 0; + +@@ -1029,23 +1024,30 @@ 
static int igt_write_huge(struct i915_gem_context *ctx, + * randomized order, lets also make feeding to the same engine a few + * times in succession a possibility by enlarging the permutation array. + */ +- order = i915_random_order(n * I915_NUM_ENGINES, &prng); ++ order = i915_random_order(count * count, &prng); + if (!order) + return -ENOMEM; + ++ max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); ++ max = div_u64(max - size, max_page_size); ++ + /* + * Try various offsets in an ascending/descending fashion until we + * timeout -- we want to avoid issues hidden by effectively always using + * offset = 0. + */ + i = 0; ++ engines = i915_gem_context_lock_engines(ctx); + for_each_prime_number_from(num, 0, max) { + u64 offset_low = num * max_page_size; + u64 offset_high = (max - num) * max_page_size; + u32 dword = offset_in_page(num) / 4; ++ struct intel_context *ce; + +- engine = engines[order[i] % n]; +- i = (i + 1) % (n * I915_NUM_ENGINES); ++ ce = engines->engines[order[i] % engines->num_engines]; ++ i = (i + 1) % (count * count); ++ if (!ce || !intel_engine_can_store_dword(ce->engine)) ++ continue; + + /* + * In order to utilize 64K pages we need to both pad the vma +@@ -1057,22 +1059,23 @@ static int igt_write_huge(struct i915_gem_context *ctx, + offset_low = round_down(offset_low, + I915_GTT_PAGE_SIZE_2M); + +- err = __igt_write_huge(ctx, engine, obj, size, offset_low, ++ err = __igt_write_huge(ce, obj, size, offset_low, + dword, num + 1); + if (err) + break; + +- err = __igt_write_huge(ctx, engine, obj, size, offset_high, ++ err = __igt_write_huge(ce, obj, size, offset_high, + dword, num + 1); + if (err) + break; + + if (igt_timeout(end_time, +- "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", +- __func__, engine->id, offset_low, offset_high, ++ "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n", ++ __func__, ce->engine->name, offset_low, offset_high, + max_page_size)) + break; + } ++ 
i915_gem_context_unlock_engines(ctx); + + kfree(order); + +@@ -1316,10 +1319,10 @@ static int igt_ppgtt_pin_update(void *arg) + unsigned long supported = INTEL_INFO(dev_priv)->page_sizes; + struct i915_address_space *vm = ctx->vm; + struct drm_i915_gem_object *obj; ++ struct i915_gem_engines_iter it; ++ struct intel_context *ce; + struct i915_vma *vma; + unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; +- struct intel_engine_cs *engine; +- enum intel_engine_id id; + unsigned int n; + int first, last; + int err; +@@ -1419,14 +1422,18 @@ static int igt_ppgtt_pin_update(void *arg) + */ + + n = 0; +- for_each_engine(engine, dev_priv, id) { +- if (!intel_engine_can_store_dword(engine)) ++ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { ++ if (!intel_engine_can_store_dword(ce->engine)) + continue; + +- err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); ++ err = gpu_write(ce, vma, n++, 0xdeadbeaf); + if (err) +- goto out_unpin; ++ break; + } ++ i915_gem_context_unlock_engines(ctx); ++ if (err) ++ goto out_unpin; ++ + while (n--) { + err = cpu_check(obj, n, 0xdeadbeaf); + if (err) +@@ -1507,8 +1514,8 @@ static int igt_shrink_thp(void *arg) + struct drm_i915_private *i915 = ctx->i915; + struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; + struct drm_i915_gem_object *obj; +- struct intel_engine_cs *engine; +- enum intel_engine_id id; ++ struct i915_gem_engines_iter it; ++ struct intel_context *ce; + struct i915_vma *vma; + unsigned int flags = PIN_USER; + unsigned int n; +@@ -1548,16 +1555,19 @@ static int igt_shrink_thp(void *arg) + goto out_unpin; + + n = 0; +- for_each_engine(engine, i915, id) { +- if (!intel_engine_can_store_dword(engine)) ++ ++ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { ++ if (!intel_engine_can_store_dword(ce->engine)) + continue; + +- err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); ++ err = gpu_write(ce, vma, n++, 0xdeadbeaf); + if (err) +- goto out_unpin; ++ break; + } +- ++ 
i915_gem_context_unlock_engines(ctx); + i915_vma_unpin(vma); ++ if (err) ++ goto out_close; + + /* + * Now that the pages are *unpinned* shrink-all should invoke +@@ -1583,10 +1593,9 @@ static int igt_shrink_thp(void *arg) + while (n--) { + err = cpu_check(obj, n, 0xdeadbeaf); + if (err) +- goto out_unpin; ++ break; + } + +- + out_unpin: + i915_vma_unpin(vma); + out_close: +diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +index 3e6f4a65d356..3adb60c2fd1f 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c ++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +@@ -166,19 +166,17 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj) + return huge_gem_object_dma_size(obj) >> PAGE_SHIFT; + } + +-static int gpu_fill(struct drm_i915_gem_object *obj, +- struct i915_gem_context *ctx, +- struct intel_engine_cs *engine, ++static int gpu_fill(struct intel_context *ce, ++ struct drm_i915_gem_object *obj, + unsigned int dw) + { +- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; + struct i915_vma *vma; + int err; + +- GEM_BUG_ON(obj->base.size > vm->total); +- GEM_BUG_ON(!intel_engine_can_store_dword(engine)); ++ GEM_BUG_ON(obj->base.size > ce->vm->total); ++ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); + +- vma = i915_vma_instance(obj, vm, NULL); ++ vma = i915_vma_instance(obj, ce->vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + +@@ -200,9 +198,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj, + * whilst checking that each context provides a unique view + * into the object. 
+ */ +- err = igt_gpu_fill_dw(vma, +- ctx, +- engine, ++ err = igt_gpu_fill_dw(ce, vma, + (dw * real_page_count(obj)) << PAGE_SHIFT | + (dw * sizeof(u32)), + real_page_count(obj), +@@ -305,22 +301,21 @@ static int file_add_object(struct drm_file *file, + } + + static struct drm_i915_gem_object * +-create_test_object(struct i915_gem_context *ctx, ++create_test_object(struct i915_address_space *vm, + struct drm_file *file, + struct list_head *objects) + { + struct drm_i915_gem_object *obj; +- struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm; + u64 size; + int err; + + /* Keep in GEM's good graces */ +- i915_retire_requests(ctx->i915); ++ i915_retire_requests(vm->i915); + + size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); + size = round_down(size, DW_PER_PAGE * PAGE_SIZE); + +- obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size); ++ obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size); + if (IS_ERR(obj)) + return obj; + +@@ -393,6 +388,7 @@ static int igt_ctx_exec(void *arg) + dw = 0; + while (!time_after(jiffies, end_time)) { + struct i915_gem_context *ctx; ++ struct intel_context *ce; + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { +@@ -400,15 +396,20 @@ static int igt_ctx_exec(void *arg) + goto out_unlock; + } + ++ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); ++ + if (!obj) { +- obj = create_test_object(ctx, file, &objects); ++ obj = create_test_object(ce->vm, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); ++ intel_context_put(ce); + goto out_unlock; + } + } + +- err = gpu_fill(obj, ctx, engine, dw); ++ err = gpu_fill(ce, obj, dw); ++ intel_context_put(ce); ++ + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", + ndwords, dw, max_dwords(obj), +@@ -509,6 +510,7 @@ static int igt_shared_ctx_exec(void *arg) + ncontexts = 0; + while (!time_after(jiffies, end_time)) { + struct i915_gem_context *ctx; ++ struct intel_context *ce; + + ctx = kernel_context(i915); + if (IS_ERR(ctx)) { +@@ -518,22 +520,26 @@ static int igt_shared_ctx_exec(void *arg) + + __assign_ppgtt(ctx, parent->vm); + ++ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); + if (!obj) { +- obj = create_test_object(parent, file, &objects); ++ obj = create_test_object(parent->vm, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); ++ intel_context_put(ce); + kernel_context_close(ctx); + goto out_test; + } + } + +- err = gpu_fill(obj, ctx, engine, dw); ++ err = gpu_fill(ce, obj, dw); ++ intel_context_put(ce); ++ kernel_context_close(ctx); ++ + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->vm), err); +- kernel_context_close(ctx); + goto out_test; + } + +@@ -544,8 +550,6 @@ static int igt_shared_ctx_exec(void *arg) + + ndwords++; + ncontexts++; +- +- kernel_context_close(ctx); + } + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", + ncontexts, engine->name, ndwords); +@@ -604,6 +608,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) + __i915_gem_object_flush_map(obj, 0, 64); + i915_gem_object_unpin_map(obj); + ++ intel_gt_chipset_flush(vma->vm->gt); ++ + vma = i915_vma_instance(obj, vma->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); +@@ -1082,17 +1088,19 @@ static int igt_ctx_readonly(void *arg) + ndwords = 0; + dw = 0; + while (!time_after(jiffies, end_time)) { +- struct intel_engine_cs *engine; +- unsigned int id; ++ struct i915_gem_engines_iter it; ++ struct intel_context *ce; + +- for_each_engine(engine, i915, id) { +- if (!intel_engine_can_store_dword(engine)) ++ for_each_gem_engine(ce, ++ 
i915_gem_context_lock_engines(ctx), it) { ++ if (!intel_engine_can_store_dword(ce->engine)) + continue; + + if (!obj) { +- obj = create_test_object(ctx, file, &objects); ++ obj = create_test_object(ce->vm, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); ++ i915_gem_context_unlock_engines(ctx); + goto out_unlock; + } + +@@ -1100,12 +1108,13 @@ static int igt_ctx_readonly(void *arg) + i915_gem_object_set_readonly(obj); + } + +- err = gpu_fill(obj, ctx, engine, dw); ++ err = gpu_fill(ce, obj, dw); + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + ndwords, dw, max_dwords(obj), +- engine->name, ctx->hw_id, ++ ce->engine->name, ctx->hw_id, + yesno(!!ctx->vm), err); ++ i915_gem_context_unlock_engines(ctx); + goto out_unlock; + } + +@@ -1115,6 +1124,7 @@ static int igt_ctx_readonly(void *arg) + } + ndwords++; + } ++ i915_gem_context_unlock_engines(ctx); + } + pr_info("Submitted %lu dwords (across %u engines)\n", + ndwords, RUNTIME_INFO(i915)->num_engines); +@@ -1197,6 +1207,8 @@ static int write_to_scratch(struct i915_gem_context *ctx, + __i915_gem_object_flush_map(obj, 0, 64); + i915_gem_object_unpin_map(obj); + ++ intel_gt_chipset_flush(engine->gt); ++ + vma = i915_vma_instance(obj, ctx->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); +@@ -1296,6 +1308,8 @@ static int read_from_scratch(struct i915_gem_context *ctx, + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + ++ intel_gt_chipset_flush(engine->gt); ++ + vma = i915_vma_instance(obj, ctx->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); +diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +index 57ece53c1075..ee5dc13a30b3 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c ++++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +@@ -9,6 +9,7 @@ + #include "gem/i915_gem_context.h" + #include "gem/i915_gem_pm.h" + #include 
"gt/intel_context.h" ++#include "gt/intel_gt.h" + #include "i915_vma.h" + #include "i915_drv.h" + +@@ -84,6 +85,8 @@ igt_emit_store_dw(struct i915_vma *vma, + *cmd = MI_BATCH_BUFFER_END; + i915_gem_object_unpin_map(obj); + ++ intel_gt_chipset_flush(vma->vm->gt); ++ + vma = i915_vma_instance(obj, vma->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); +@@ -101,40 +104,35 @@ igt_emit_store_dw(struct i915_vma *vma, + return ERR_PTR(err); + } + +-int igt_gpu_fill_dw(struct i915_vma *vma, +- struct i915_gem_context *ctx, +- struct intel_engine_cs *engine, +- u64 offset, +- unsigned long count, +- u32 val) ++int igt_gpu_fill_dw(struct intel_context *ce, ++ struct i915_vma *vma, u64 offset, ++ unsigned long count, u32 val) + { +- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; + struct i915_request *rq; + struct i915_vma *batch; + unsigned int flags; + int err; + +- GEM_BUG_ON(vma->size > vm->total); +- GEM_BUG_ON(!intel_engine_can_store_dword(engine)); ++ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + + batch = igt_emit_store_dw(vma, offset, count, val); + if (IS_ERR(batch)) + return PTR_ERR(batch); + +- rq = igt_request_alloc(ctx, engine); ++ rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_batch; + } + + flags = 0; +- if (INTEL_GEN(vm->i915) <= 5) ++ if (INTEL_GEN(ce->vm->i915) <= 5) + flags |= I915_DISPATCH_SECURE; + +- err = engine->emit_bb_start(rq, +- batch->node.start, batch->node.size, +- flags); ++ err = rq->engine->emit_bb_start(rq, ++ batch->node.start, batch->node.size, ++ flags); + if (err) + goto err_request; + +diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h +index 361a7ef866b0..4221cf84d175 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h ++++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h +@@ -11,9 +11,11 @@ + + struct i915_request; + struct 
i915_gem_context; +-struct intel_engine_cs; + struct i915_vma; + ++struct intel_context; ++struct intel_engine_cs; ++ + struct i915_request * + igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine); + +@@ -23,11 +25,8 @@ igt_emit_store_dw(struct i915_vma *vma, + unsigned long count, + u32 val); + +-int igt_gpu_fill_dw(struct i915_vma *vma, +- struct i915_gem_context *ctx, +- struct intel_engine_cs *engine, +- u64 offset, +- unsigned long count, +- u32 val); ++int igt_gpu_fill_dw(struct intel_context *ce, ++ struct i915_vma *vma, u64 offset, ++ unsigned long count, u32 val); + + #endif /* __IGT_GEM_UTILS_H__ */ +-- +2.17.1 + diff --git a/patches/0032-mei-me-fix-me_intr_clear-function-name-in-KDoc.security b/patches/0032-mei-me-fix-me_intr_clear-function-name-in-KDoc.security new file mode 100644 index 0000000000..8956e76770 --- /dev/null +++ b/patches/0032-mei-me-fix-me_intr_clear-function-name-in-KDoc.security @@ -0,0 +1,30 @@ +From 83939cf69932bff1e1925a8443955ac079ae85d1 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Tue, 8 Oct 2019 03:57:35 +0300 +Subject: [PATCH 32/65] mei: me: fix me_intr_clear function name in KDoc + +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +Link: https://lore.kernel.org/r/20191008005735.12707-2-tomas.winkler@intel.com +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit 3e917975b7cdef6cfe92931e04677d8cf1d3df98) +--- + drivers/misc/mei/hw-me.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c +index c4f6991d3028..5ef30c7c92b3 100644 +--- a/drivers/misc/mei/hw-me.c ++++ b/drivers/misc/mei/hw-me.c +@@ -269,7 +269,7 @@ static inline void me_intr_disable(struct mei_device *dev, u32 hcsr) + } + + /** +- * mei_me_intr_clear - clear and stop interrupts ++ * me_intr_clear - clear and stop interrupts + * + * @dev: the device structure + * @hcsr: supplied hcsr register value +-- +2.17.1 + diff --git 
a/patches/0032-net-stmmac-Fix-the-EEE-enable-disable.connectivity b/patches/0032-net-stmmac-Fix-the-EEE-enable-disable.connectivity new file mode 100644 index 0000000000..2cdc59972a --- /dev/null +++ b/patches/0032-net-stmmac-Fix-the-EEE-enable-disable.connectivity @@ -0,0 +1,42 @@ +From dc1b166fe4e250fa0c9e9d8bd010b4456bec6649 Mon Sep 17 00:00:00 2001 +From: kajolkhx +Date: Fri, 13 Sep 2019 16:10:21 +0800 +Subject: [PATCH 032/108] net: stmmac: Fix the EEE enable disable + +Fix the EEE disable/enable sequence by using +the correct order of functions.stmmac_eee_init() +should be called only during link up/down sequence +when phy_init_eee() has set the right state for EEE. +When EEE is active and enabled, then EEE timer +should be started, otherwise timer should be deleted. + +Signed-off-by: Kharbanda,Kajol +Signed-off-by: Voon Weifeng +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 4 ---- + 1 file changed, 4 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index 1a768837ca72..4d9e85a3b30f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -669,16 +669,12 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, + * to verify all by invoking the eee_init function. + * In case of failure it will return an error. 
+ */ +- edata->eee_enabled = stmmac_eee_init(priv); +- if (!edata->eee_enabled) +- return -EOPNOTSUPP; + } + + ret = phylink_ethtool_set_eee(priv->phylink, edata); + if (ret) + return ret; + +- priv->eee_enabled = edata->eee_enabled; + priv->tx_lpi_timer = edata->tx_lpi_timer; + return 0; + } +-- +2.17.1 + diff --git a/patches/0032-serial-8250_port-remove-legacy-PM-code.lpss b/patches/0032-serial-8250_port-remove-legacy-PM-code.lpss new file mode 100644 index 0000000000..cf2133ff2d --- /dev/null +++ b/patches/0032-serial-8250_port-remove-legacy-PM-code.lpss @@ -0,0 +1,65 @@ +From 3a892003ed54f6df3ac4c32c09d51507ad740260 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 12 Sep 2016 20:40:33 +0300 +Subject: [PATCH 32/40] serial: 8250_port: remove legacy PM code + +Since the driver has been switched to use runtime PM, remove legacy PM code. + +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_port.c | 12 ------------ + 1 file changed, 12 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index e65bb8a8cdc6..5be65a0e9961 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -584,16 +584,12 @@ EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos); + + void serial8250_rpm_get(struct uart_8250_port *p) + { +- if (!(p->capabilities & UART_CAP_RPM)) +- return; + pm_runtime_get_sync(p->port.dev); + } + EXPORT_SYMBOL_GPL(serial8250_rpm_get); + + void serial8250_rpm_put(struct uart_8250_port *p) + { +- if (!(p->capabilities & UART_CAP_RPM)) +- return; + pm_runtime_mark_last_busy(p->port.dev); + pm_runtime_put_autosuspend(p->port.dev); + } +@@ -677,9 +673,6 @@ void serial8250_rpm_get_tx(struct uart_8250_port *p) + { + unsigned char rpm_active; + +- if (!(p->capabilities & UART_CAP_RPM)) +- return; +- + rpm_active = xchg(&p->rpm_tx_active, 1); + if (rpm_active) + return; +@@ -696,9 +689,6 @@ void serial8250_rpm_put_tx(struct uart_8250_port *p) + { + unsigned 
char rpm_active; + +- if (!(p->capabilities & UART_CAP_RPM)) +- return; +- + rpm_active = xchg(&p->rpm_tx_active, 0); + if (!rpm_active) + return; +@@ -1758,8 +1748,6 @@ void serial8250_tx_chars(struct uart_8250_port *up) + * HW can go idle. So we get here once again with empty FIFO and disable + * the interrupt and RPM in __stop_tx() + */ +- if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM)) +- __stop_tx(up); + } + EXPORT_SYMBOL_GPL(serial8250_tx_chars); + +-- +2.17.1 + diff --git a/patches/0033-ASoC-Intel-Skylake-Update-skl_ids-table.audio b/patches/0033-ASoC-Intel-Skylake-Update-skl_ids-table.audio new file mode 100644 index 0000000000..6b2c122b8e --- /dev/null +++ b/patches/0033-ASoC-Intel-Skylake-Update-skl_ids-table.audio @@ -0,0 +1,90 @@ +From 397952d7a6a9753a1d3e0b2172f51aba48ae8ba7 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 17:36:42 +0200 +Subject: [PATCH 033/193] ASoC: Intel: Skylake: Update skl_ids table + +With platform descriptors defined, update PCI ID table together with +skl_probe to retrieve supported boards from saved sst_pdata objects +instead. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl.c | 20 +++++++++++--------- + 1 file changed, 11 insertions(+), 9 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c +index d6d099aba834..53a6befd5d68 100644 +--- a/sound/soc/intel/skylake/skl.c ++++ b/sound/soc/intel/skylake/skl.c +@@ -985,6 +985,7 @@ static int skl_probe(struct pci_dev *pci, + { + struct skl_dev *skl; + struct hdac_bus *bus = NULL; ++ struct sst_pdata *desc; + int err; + + switch (skl_pci_binding) { +@@ -1064,7 +1065,8 @@ static int skl_probe(struct pci_dev *pci, + + pci_set_drvdata(skl->pci, bus); + +- err = skl_find_machine(skl, (void *)pci_id->driver_data); ++ desc = (struct sst_pdata *)pci_id->driver_data; ++ err = skl_find_machine(skl, desc->boards); + if (err < 0) { + dev_err(bus->dev, "skl_find_machine failed with err: %d\n", err); + goto out_nhlt_free; +@@ -1194,42 +1196,42 @@ static const struct pci_device_id skl_ids[] = { + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKL) + /* Sunrise Point-LP */ + { PCI_DEVICE(0x8086, 0x9d70), +- .driver_data = (unsigned long)&snd_soc_acpi_intel_skl_machines}, ++ .driver_data = (unsigned long)&skl_desc }, + #endif + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_APL) + /* BXT-P */ + { PCI_DEVICE(0x8086, 0x5a98), +- .driver_data = (unsigned long)&snd_soc_acpi_intel_bxt_machines}, ++ .driver_data = (unsigned long)&apl_desc }, + #endif + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_KBL) + /* KBL */ + { PCI_DEVICE(0x8086, 0x9D71), +- .driver_data = (unsigned long)&snd_soc_acpi_intel_kbl_machines}, ++ .driver_data = (unsigned long)&kbl_desc }, + #endif + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_GLK) + /* GLK */ + { PCI_DEVICE(0x8086, 0x3198), +- .driver_data = (unsigned long)&snd_soc_acpi_intel_glk_machines}, ++ .driver_data = (unsigned long)&glk_desc }, + #endif + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL) + /* CNL */ + { PCI_DEVICE(0x8086, 0x9dc8), +- .driver_data = (unsigned long)&snd_soc_acpi_intel_cnl_machines}, ++ .driver_data = 
(unsigned long)&cnl_desc }, + #endif + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_CFL) + /* CFL */ + { PCI_DEVICE(0x8086, 0xa348), +- .driver_data = (unsigned long)&snd_soc_acpi_intel_cnl_machines}, ++ .driver_data = (unsigned long)&cnl_desc }, + #endif + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_CML_LP) + /* CML-LP */ + { PCI_DEVICE(0x8086, 0x02c8), +- .driver_data = (unsigned long)&snd_soc_acpi_intel_cnl_machines}, ++ .driver_data = (unsigned long)&cnl_desc }, + #endif + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_CML_H) + /* CML-H */ + { PCI_DEVICE(0x8086, 0x06c8), +- .driver_data = (unsigned long)&snd_soc_acpi_intel_cnl_machines}, ++ .driver_data = (unsigned long)&cnl_desc }, + #endif + { 0, } + }; +-- +2.17.1 + diff --git a/patches/0033-Fix-the-compile-error-when-update-4.12.trusty b/patches/0033-Fix-the-compile-error-when-update-4.12.trusty new file mode 100644 index 0000000000..418468fda1 --- /dev/null +++ b/patches/0033-Fix-the-compile-error-when-update-4.12.trusty @@ -0,0 +1,51 @@ +From 514c72aa038cb896f5116f6226d066af7987f24d Mon Sep 17 00:00:00 2001 +From: Zhou Furong +Date: Thu, 18 May 2017 16:31:32 +0800 +Subject: [PATCH 33/63] Fix the compile error when update 4.12 + +virtio API updated on 4.12, trusty need update accordingly. 
+ +Change-Id: I6ef8a63a23d19cbce1471f9f3bc6e8a38002ad25 +Tracked-On: +--- + drivers/trusty/trusty-ipc.c | 2 +- + drivers/trusty/trusty-virtio.c | 3 ++- + 2 files changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index a05c5f957146..68f677f91c21 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -1558,7 +1558,7 @@ static int tipc_virtio_probe(struct virtio_device *vdev) + vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0'; + + /* find tx virtqueues (rx and tx and in this order) */ +- err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL); ++ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL, NULL); + if (err) + goto err_find_vqs; + +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index 2ce818cef175..2368c10f1b7b 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -328,7 +328,7 @@ static struct virtqueue *_find_vq(struct virtio_device *vdev, + id, tvr->vaddr, (u64)tvr->paddr, tvr->elem_num, tvr->notifyid); + + tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align, +- vdev, true, tvr->vaddr, ++ vdev, true, true, tvr->vaddr, + trusty_virtio_notify, callback, name); + if (!tvr->vq) { + dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n", +@@ -350,6 +350,7 @@ static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], ++ const bool *ctx, + struct irq_affinity *desc) + { + uint i; +-- +2.17.1 + diff --git a/patches/0033-VBS-K-added-VHM-wrapper-APIs.acrn b/patches/0033-VBS-K-added-VHM-wrapper-APIs.acrn new file mode 100644 index 0000000000..b436cfb249 --- /dev/null +++ b/patches/0033-VBS-K-added-VHM-wrapper-APIs.acrn @@ -0,0 +1,576 @@ +From 8714f45860cfa3ecc49dad0696ad31d274dc84e4 Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Thu, 25 Jan 2018 10:12:26 -0500 +Subject: [PATCH 
033/150] VBS-K: added VHM wrapper APIs + +This patch added 3 VHM wrapper APIs to the VBS-K framework: + - long virtio_dev_register(struct virtio_dev_info *dev); + - long virtio_dev_deregister(struct virtio_dev_info *dev); + - int virtio_vq_index_get(struct virtio_dev_info *dev, int req_cnt); + +VBS-K modules could use the APIs above to register kick callback +handlers to VHM. + +This patch also updated the reference driver with the new APIs +usage. + +Change-Id: I6a92a36eb785d55c1a4aa09bba46c67ed5dd2194 +Signed-off-by: Hao Li +--- + drivers/vbs/vbs.c | 121 ++++++++++++++++++++++++ + drivers/vbs/vbs_rng.c | 199 ++++++++++------------------------------ + include/linux/vbs/vbs.h | 16 +++- + 3 files changed, 181 insertions(+), 155 deletions(-) + +diff --git a/drivers/vbs/vbs.c b/drivers/vbs/vbs.c +index 1e7a9645a353..9d96f45b9644 100644 +--- a/drivers/vbs/vbs.c ++++ b/drivers/vbs/vbs.c +@@ -67,6 +67,127 @@ + #include + #include + ++long virtio_dev_register(struct virtio_dev_info *dev) ++{ ++ struct vm_info info; ++ int ret; ++ ++ pr_debug("vmid is %d\n", dev->_ctx.vmid); ++ ++ if (dev->dev_notify == NULL) { ++ pr_err("%s dev_notify empty!\n", dev->name); ++ goto err; ++ } ++ ++ /* ++ * dev->name is 32 chars while vhm only accepts 16 chars ++ * at most, so we make sure there will be a NULL ++ * terminator for the chars. ++ */ ++ dev->name[15] = '\0'; ++ dev->_ctx.vhm_client_id = ++ acrn_ioreq_create_client(dev->_ctx.vmid, ++ dev->dev_notify, ++ dev->name); ++ if (dev->_ctx.vhm_client_id < 0) { ++ pr_err("failed to create client of acrn ioreq!\n"); ++ goto err; ++ } ++ ++ ret = acrn_ioreq_add_iorange(dev->_ctx.vhm_client_id, ++ dev->io_range_type ? 
REQ_MMIO : REQ_PORTIO, ++ dev->io_range_start, ++ dev->io_range_start + dev->io_range_len - 1); ++ if (ret < 0) { ++ pr_err("failed to add iorange to acrn ioreq!\n"); ++ goto err; ++ } ++ ++ /* feed up max_cpu and req_buf */ ++ ret = vhm_get_vm_info(dev->_ctx.vmid, &info); ++ if (ret < 0) { ++ pr_err("failed in vhm_get_vm_info!\n"); ++ goto range_err; ++ } ++ dev->_ctx.max_vcpu = info.max_vcpu; ++ ++ dev->_ctx.req_buf = acrn_ioreq_get_reqbuf(dev->_ctx.vhm_client_id); ++ if (dev->_ctx.req_buf == NULL) { ++ pr_err("failed in acrn_ioreq_get_reqbuf!\n"); ++ goto range_err; ++ } ++ ++ acrn_ioreq_attach_client(dev->_ctx.vhm_client_id, 0); ++ ++ return 0; ++ ++range_err: ++ acrn_ioreq_del_iorange(dev->_ctx.vhm_client_id, ++ dev->io_range_type ? REQ_MMIO : REQ_PORTIO, ++ dev->io_range_start, ++ dev->io_range_start + dev->io_range_len); ++ ++err: ++ acrn_ioreq_destroy_client(dev->_ctx.vhm_client_id); ++ ++ return -EINVAL; ++} ++ ++long virtio_dev_deregister(struct virtio_dev_info *dev) ++{ ++ acrn_ioreq_del_iorange(dev->_ctx.vhm_client_id, ++ dev->io_range_type ? 
REQ_MMIO : REQ_PORTIO, ++ dev->io_range_start, ++ dev->io_range_start + dev->io_range_len); ++ ++ acrn_ioreq_destroy_client(dev->_ctx.vhm_client_id); ++ ++ return 0; ++} ++ ++int virtio_vq_index_get(struct virtio_dev_info *dev, int req_cnt) ++{ ++ int val = -1; ++ struct vhm_request *req; ++ int i; ++ ++ if (unlikely(req_cnt <= 0)) ++ return -EINVAL; ++ ++ if (dev == NULL) { ++ pr_err("%s: dev is NULL!\n", __func__); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < dev->_ctx.max_vcpu; i++) { ++ req = &dev->_ctx.req_buf[i]; ++ if (req->valid && req->processed == REQ_STATE_PROCESSING && ++ req->client == dev->_ctx.vhm_client_id) { ++ if (req->reqs.pio_request.direction == REQUEST_READ) { ++ /* currently we handle kick only, ++ * so read will return 0 ++ */ ++ pr_debug("%s: read request!\n", __func__); ++ if (dev->io_range_type == PIO_RANGE) ++ req->reqs.pio_request.value = 0; ++ else ++ req->reqs.mmio_request.value = 0; ++ } else { ++ pr_debug("%s: write request! type %d\n", ++ __func__, req->type); ++ if (dev->io_range_type == PIO_RANGE) ++ val = req->reqs.pio_request.value; ++ else ++ val = req->reqs.mmio_request.value; ++ } ++ req->processed = REQ_STATE_SUCCESS; ++ acrn_ioreq_complete_request(dev->_ctx.vhm_client_id, i); ++ } ++ } ++ ++ return val; ++} ++ + static long virtio_vqs_info_set(struct virtio_dev_info *dev, + struct vbs_vqs_info __user *i) + { +diff --git a/drivers/vbs/vbs_rng.c b/drivers/vbs/vbs_rng.c +index f2234e73034d..87965bafbbb3 100644 +--- a/drivers/vbs/vbs_rng.c ++++ b/drivers/vbs/vbs_rng.c +@@ -74,8 +74,6 @@ + + #include + #include +-#include +-#include + #include + + enum { +@@ -96,26 +94,14 @@ enum { + struct vbs_rng { + struct virtio_dev_info dev; + struct virtio_vq_info vqs[VBS_K_RNG_VQ_MAX]; +- int vhm_client_id; + /* Below could be device specific members */ + struct hwrng hwrng; +-}; +- +-/* +- * Each VBS-K module might serve multiple connections from multiple +- * guests/device models/VBS-Us, so better to maintain the connections +- * in a 
list, and here we use hashtalble as an example. +- */ +-struct vbs_rng_client { +- struct vbs_rng *rng; +- int vhm_client_id; +- int max_vcpu; +- struct vhm_request *req_buf; +-}; +- +-/* instances malloced/freed by hashtable routines */ +-struct vbs_rng_hash_entry { +- struct vbs_rng_client *info; ++ /* ++ * Each VBS-K module might serve multiple connections ++ * from multiple guests/device models/VBS-Us, so better ++ * to maintain the connections in a list, and here we ++ * use hashtable as an example. ++ */ + struct hlist_node node; + }; + +@@ -149,30 +135,20 @@ static void vbs_rng_hash_init(void) + vbs_rng_hash_initialized = 1; + } + +-static int vbs_rng_hash_add(struct vbs_rng_client *client) ++static int vbs_rng_hash_add(struct vbs_rng *entry) + { +- struct vbs_rng_hash_entry *entry; +- + if (!vbs_rng_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + +- entry = kmalloc(sizeof(*entry), GFP_KERNEL); +- if (!entry) { +- pr_err("Failed to alloc memory for rng hash entry!\n"); +- return -1; +- } +- +- entry->info = client; +- +- hash_add(HASH_NAME, &entry->node, entry->info->vhm_client_id); ++ hash_add(HASH_NAME, &entry->node, virtio_dev_client_id(&entry->dev)); + return 0; + } + +-static struct vbs_rng_client *vbs_rng_hash_find(int client_id) ++static struct vbs_rng *vbs_rng_hash_find(int client_id) + { +- struct vbs_rng_hash_entry *entry; ++ struct vbs_rng *entry; + int bkt; + + if (!vbs_rng_hash_initialized) { +@@ -181,8 +157,8 @@ static struct vbs_rng_client *vbs_rng_hash_find(int client_id) + } + + hash_for_each(HASH_NAME, bkt, entry, node) +- if (entry->info->vhm_client_id == client_id) +- return entry->info; ++ if (virtio_dev_client_id(&entry->dev) == client_id) ++ return entry; + + pr_err("Not found item matching client_id!\n"); + return NULL; +@@ -190,7 +166,7 @@ static struct vbs_rng_client *vbs_rng_hash_find(int client_id) + + static int vbs_rng_hash_del(int client_id) + { +- struct vbs_rng_hash_entry *entry; ++ 
struct vbs_rng *entry; + int bkt; + + if (!vbs_rng_hash_initialized) { +@@ -199,9 +175,8 @@ static int vbs_rng_hash_del(int client_id) + } + + hash_for_each(HASH_NAME, bkt, entry, node) +- if (entry->info->vhm_client_id == client_id) { ++ if (virtio_dev_client_id(&entry->dev) == client_id) { + hash_del(&entry->node); +- kfree(entry); + return 0; + } + +@@ -212,7 +187,7 @@ static int vbs_rng_hash_del(int client_id) + + static int vbs_rng_hash_del_all(void) + { +- struct vbs_rng_hash_entry *entry; ++ struct vbs_rng *entry; + int bkt; + + if (!vbs_rng_hash_initialized) { +@@ -221,75 +196,11 @@ static int vbs_rng_hash_del_all(void) + } + + hash_for_each(HASH_NAME, bkt, entry, node) +- if (1) { +- hash_del(&entry->node); +- kfree(entry); +- } ++ hash_del(&entry->node); + + return 0; + } + +-static int register_vhm_client(struct virtio_dev_info *dev) +-{ +- unsigned int vmid; +- struct vm_info info; +- struct vbs_rng_client *client; +- int ret; +- +- client = kcalloc(1, sizeof(*client), GFP_KERNEL); +- if (!client) { +- pr_err("failed to malloc vbs_rng_client!\n"); +- return -EINVAL; +- } +- +- client->rng = container_of(dev, struct vbs_rng, dev); +- vmid = dev->_ctx.vmid; +- pr_debug("vmid is %d\n", vmid); +- +- client->vhm_client_id = acrn_ioreq_create_client(vmid, handle_kick, +- "vbs_rng kick init\n"); +- if (client->vhm_client_id < 0) { +- pr_err("failed to create client of acrn ioreq!\n"); +- goto err; +- } +- +- ret = acrn_ioreq_add_iorange(client->vhm_client_id, +- dev->io_range_type ? 
REQ_MMIO : REQ_PORTIO, +- dev->io_range_start, +- dev->io_range_start + dev->io_range_len); +- if (ret < 0) { +- pr_err("failed to add iorange to acrn ioreq!\n"); +- goto err; +- } +- +- /* feed up max_cpu and req_buf */ +- ret = vhm_get_vm_info(vmid, &info); +- if (ret < 0) { +- pr_err("failed in vhm_get_vm_info!\n"); +- goto err; +- } +- client->max_vcpu = info.max_vcpu; +- +- client->req_buf = acrn_ioreq_get_reqbuf(client->vhm_client_id); +- if (client->req_buf == NULL) { +- pr_err("failed in acrn_ioreq_get_reqbuf!\n"); +- goto err; +- } +- +- /* just attach once as vhm will kick kthread */ +- acrn_ioreq_attach_client(client->vhm_client_id, 0); +- +- client->rng->vhm_client_id = client->vhm_client_id; +- vbs_rng_hash_add(client); +- +- return 0; +-err: +- acrn_ioreq_destroy_client(client->vhm_client_id); +- kfree(client); +- +- return -EINVAL; +-} +- + static void handle_vq_kick(struct vbs_rng *rng, int vq_idx) + { + struct iovec iov; +@@ -309,8 +220,6 @@ static void handle_vq_kick(struct vbs_rng *rng, int vq_idx) + + vq = &(sc->vqs[vq_idx]); + +- pr_debug("before vq_has_desc!\n"); +- + while (virtio_vq_has_descs(vq)) { + virtio_vq_getchain(vq, &idx, &iov, 1, NULL); + +@@ -334,47 +243,25 @@ static void handle_vq_kick(struct vbs_rng *rng, int vq_idx) + static int handle_kick(int client_id, int req_cnt) + { + int val = -1; +- struct vhm_request *req; +- struct vbs_rng_client *client; +- int i; ++ struct vbs_rng *rng; + + if (unlikely(req_cnt <= 0)) + return -EINVAL; + +- pr_debug("%s!\n", __func__); ++ pr_debug("%s: handle kick!\n", __func__); + +- client = vbs_rng_hash_find(client_id); +- if (!client) { +- pr_err("Ooops! 
client %d not found!\n", client_id); ++ rng = vbs_rng_hash_find(client_id); ++ if (rng == NULL) { ++ pr_err("%s: client %d not found!\n", ++ __func__, client_id); + return -EINVAL; + } + +- for (i = 0; i < client->max_vcpu; i++) { +- req = &client->req_buf[i]; +- if (req->valid && req->processed == REQ_STATE_PROCESSING && +- req->client == client->vhm_client_id) { +- if (req->reqs.pio_request.direction == REQUEST_READ) +- /* currently we handle kick only, +- * so read will return 0 +- */ +- req->reqs.pio_request.value = 0; +- else +- val = req->reqs.pio_request.value; +- pr_debug("%s: ioreq type %d, direction %d, " +- "addr 0x%lx, size 0x%lx, value 0x%x\n", +- __func__, +- req->type, +- req->reqs.pio_request.direction, +- req->reqs.pio_request.address, +- req->reqs.pio_request.size, +- req->reqs.pio_request.value); +- req->processed = REQ_STATE_SUCCESS; +- acrn_ioreq_complete_request(client->vhm_client_id, i); +- } +- } ++ val = virtio_vq_index_get(&rng->dev, req_cnt); + + if (val >= 0) +- handle_vq_kick(client->rng, val); ++ handle_vq_kick(rng, val); ++ + return 0; + } + +@@ -385,15 +272,15 @@ static int vbs_rng_open(struct inode *inode, struct file *f) + struct virtio_vq_info *vqs; + int i; + +- pr_debug("%s!\n", __func__); +- + rng = kmalloc(sizeof(*rng), GFP_KERNEL); +- if (!rng) { ++ if (rng == NULL) { + pr_err("Failed to allocate memory for vbs_rng!\n"); + return -ENOMEM; + } + + dev = &rng->dev; ++ strncpy(dev->name, "vbs_rng", VBS_NAME_LEN); ++ dev->dev_notify = handle_kick; + vqs = (struct virtio_vq_info *)&rng->vqs; + + for (i = 0; i < VBS_K_RNG_VQ_MAX; i++) { +@@ -411,6 +298,8 @@ static int vbs_rng_open(struct inode *inode, struct file *f) + virtio_dev_init(dev, vqs, VBS_K_RNG_VQ_MAX); + + f->private_data = rng; ++ ++ /* init a hash table to maintain multi-connections */ + vbs_rng_hash_init(); + + return 0; +@@ -419,14 +308,10 @@ static int vbs_rng_open(struct inode *inode, struct file *f) + static int vbs_rng_release(struct inode *inode, struct file *f) 
+ { + struct vbs_rng *rng = f->private_data; +- struct vbs_rng_client *client; + int i; + +- pr_debug("%s!\n", __func__); +- +- client = vbs_rng_hash_find(rng->vhm_client_id); +- if (!client) +- pr_err("%s: UNLIKELY not found client!\n", ++ if (!rng) ++ pr_err("%s: UNLIKELY rng NULL!\n", + __func__); + + vbs_rng_stop(rng); +@@ -437,16 +322,16 @@ static int vbs_rng_release(struct inode *inode, struct file *f) + /* device specific release */ + vbs_rng_reset(rng); + +- pr_debug("vbs_rng_connection cnt is %d\n", vbs_rng_connection_cnt); ++ pr_debug("vbs_rng_connection cnt is %d\n", ++ vbs_rng_connection_cnt); + +- if (client && vbs_rng_connection_cnt--) +- vbs_rng_hash_del(client->vhm_client_id); ++ if (rng && vbs_rng_connection_cnt--) ++ vbs_rng_hash_del(virtio_dev_client_id(&rng->dev)); + if (!vbs_rng_connection_cnt) { + pr_debug("vbs_rng remove all hash entries\n"); + vbs_rng_hash_del_all(); + } + +- kfree(client); + kfree(rng); + + pr_debug("%s done\n", __func__); +@@ -488,7 +373,8 @@ static long vbs_rng_ioctl(struct file *f, unsigned int ioctl, + * return vhost_net_set_features(n, features); + */ + case VBS_SET_VQ: +- /* we handle this here because we want to register VHM client ++ /* ++ * we handle this here because we want to register VHM client + * after handling VBS_K_SET_VQ request + */ + pr_debug("VBS_K_SET_VQ ioctl:\n"); +@@ -498,10 +384,16 @@ static long vbs_rng_ioctl(struct file *f, unsigned int ioctl, + return -EFAULT; + } + /* Register VHM client */ +- if (register_vhm_client(&rng->dev) < 0) { ++ if (virtio_dev_register(&rng->dev) < 0) { + pr_err("failed to register VHM client!\n"); + return -EFAULT; + } ++ /* Added to local hash table */ ++ if (vbs_rng_hash_add(rng) < 0) { ++ pr_err("failed to add to hashtable!\n"); ++ return -EFAULT; ++ } ++ /* Increment counter */ + vbs_rng_connection_cnt++; + return r; + default: +@@ -544,6 +436,7 @@ static void vbs_rng_stop_vq(struct vbs_rng *rng, + /* device specific function */ + static void vbs_rng_stop(struct 
vbs_rng *rng) + { ++ virtio_dev_deregister(&rng->dev); + } + + /* device specific function */ +diff --git a/include/linux/vbs/vbs.h b/include/linux/vbs/vbs.h +index 715c49156a1a..b2e185e115c8 100644 +--- a/include/linux/vbs/vbs.h ++++ b/include/linux/vbs/vbs.h +@@ -63,6 +63,8 @@ + #define _VBS_H_ + + #include ++#include ++#include + + /* + * VBS-K device needs to handle frontend driver's kick in kernel. +@@ -78,6 +80,9 @@ enum IORangeType { + struct ctx { + /* VHM required info */ + int vmid; ++ int vhm_client_id; ++ int max_vcpu; ++ struct vhm_request *req_buf; + }; + + struct virtio_desc { /* AKA vring_desc */ +@@ -138,12 +143,16 @@ struct virtio_dev_info { + enum IORangeType io_range_type; /* IO range type, PIO or MMIO */ + + /* members created in kernel space VBS */ +- void (*dev_notify)(void *, struct virtio_vq_info *); +- /* device-wide notification */ ++ int (*dev_notify)(int, int); /* device-wide notification */ + struct virtio_vq_info *vqs; /* virtqueue(s) */ + int curq; /* current virtqueue index */ + }; + ++static inline int virtio_dev_client_id(struct virtio_dev_info *dev) ++{ ++ return dev->_ctx.vhm_client_id; ++} ++ + /* VBS Runtime Control APIs */ + long virtio_dev_init(struct virtio_dev_info *dev, struct virtio_vq_info *vqs, + int nvq); +@@ -151,5 +160,8 @@ long virtio_dev_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp); + long virtio_vqs_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp); ++long virtio_dev_register(struct virtio_dev_info *dev); ++long virtio_dev_deregister(struct virtio_dev_info *dev); ++int virtio_vq_index_get(struct virtio_dev_info *dev, int req_cnt); + + #endif +-- +2.17.1 + diff --git a/patches/0033-drm-panel-Add-missing-drm_panel_init-in-panel-drivers.drm b/patches/0033-drm-panel-Add-missing-drm_panel_init-in-panel-drivers.drm new file mode 100644 index 0000000000..f1a71bd647 --- /dev/null +++ b/patches/0033-drm-panel-Add-missing-drm_panel_init-in-panel-drivers.drm @@ -0,0 
+1,45 @@ +From d2cf7f523d04fe3e71f001c4b0103a09a5061d0c Mon Sep 17 00:00:00 2001 +From: Laurent Pinchart +Date: Fri, 23 Aug 2019 22:32:42 +0300 +Subject: [PATCH 033/690] drm/panel: Add missing drm_panel_init() in panel + drivers + +Panels must be initialised with drm_panel_init(). Add the missing +function call in the panel-raspberrypi-touchscreen.c and +panel-sitronix-st7789v.c drivers. + +Signed-off-by: Laurent Pinchart +Signed-off-by: Sam Ravnborg +Link: https://patchwork.freedesktop.org/patch/msgid/20190823193245.23876-2-laurent.pinchart@ideasonboard.com +--- + drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 1 + + drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 1 + + 2 files changed, 2 insertions(+) + +diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +index b5b14aa059ea..2aa89eaecf6f 100644 +--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c ++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +@@ -426,6 +426,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, + return PTR_ERR(ts->dsi); + } + ++ drm_panel_init(&ts->base); + ts->base.dev = dev; + ts->base.funcs = &rpi_touchscreen_funcs; + +diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +index 5e3e92ea9ea6..3b2612ae931e 100644 +--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c ++++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +@@ -381,6 +381,7 @@ static int st7789v_probe(struct spi_device *spi) + spi_set_drvdata(spi, ctx); + ctx->spi = spi; + ++ drm_panel_init(&ctx->panel); + ctx->panel.dev = &spi->dev; + ctx->panel.funcs = &st7789v_drm_funcs; + +-- +2.17.1 + diff --git a/patches/0033-mei-fix-modalias-documentation.security b/patches/0033-mei-fix-modalias-documentation.security new file mode 100644 index 0000000000..7cb8a36cd3 --- /dev/null +++ b/patches/0033-mei-fix-modalias-documentation.security @@ -0,0 +1,34 @@ +From 
3b38937e59a27a830fec0b0768ecef4102f90c29 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Tue, 8 Oct 2019 03:57:34 +0300 +Subject: [PATCH 33/65] mei: fix modalias documentation + +mei client bus added the client protocol version to the device alias, +but ABI documentation was not updated. + +Fixes: b26864cad1c9 (mei: bus: add client protocol version to the device alias) +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +Link: https://lore.kernel.org/r/20191008005735.12707-1-tomas.winkler@intel.com +Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit 73668309215285366c433489de70d31362987be9) +--- + Documentation/ABI/testing/sysfs-bus-mei | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei +index 6bd45346ac7e..3f8701e8fa24 100644 +--- a/Documentation/ABI/testing/sysfs-bus-mei ++++ b/Documentation/ABI/testing/sysfs-bus-mei +@@ -4,7 +4,7 @@ KernelVersion: 3.10 + Contact: Samuel Ortiz + linux-mei@linux.intel.com + Description: Stores the same MODALIAS value emitted by uevent +- Format: mei::: ++ Format: mei::: + + What: /sys/bus/mei/devices/.../name + Date: May 2015 +-- +2.17.1 + diff --git a/patches/0033-net-stmmac-Adding-tx_lpi_enable-configuration.connectivity b/patches/0033-net-stmmac-Adding-tx_lpi_enable-configuration.connectivity new file mode 100644 index 0000000000..a947394402 --- /dev/null +++ b/patches/0033-net-stmmac-Adding-tx_lpi_enable-configuration.connectivity @@ -0,0 +1,51 @@ +From 2b6f54a03a8ad203c2b9326ed46d65a92f4cf823 Mon Sep 17 00:00:00 2001 +From: Rusaimi Amira Ruslan +Date: Fri, 13 Sep 2019 18:25:50 +0800 +Subject: [PATCH 033/108] net: stmmac: Adding tx_lpi_enable configuration + +Adding tx_lpi_enable configuration to follow ethtool standard +that have tx_lpi_enable option for user to choose. 
+ +Signed-off-by: Rusaimi Amira Ruslan +Signed-off-by: Kharbanda,Kajol +Signed-off-by: Voon Weifeng +--- + drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + + drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2 ++ + 2 files changed, 3 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +index 7b089089d204..703d87d0c178 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -202,6 +202,7 @@ struct stmmac_priv { + int eee_enabled; + int eee_active; + int tx_lpi_timer; ++ int tx_lpi_enabled; + unsigned int mode; + unsigned int chain_mode; + int extend_desc; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index 4d9e85a3b30f..1a8faf158a93 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -652,6 +652,7 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev, + edata->eee_enabled = priv->eee_enabled; + edata->eee_active = priv->eee_active; + edata->tx_lpi_timer = priv->tx_lpi_timer; ++ edata->tx_lpi_enabled = priv->tx_lpi_enabled; + + return phylink_ethtool_get_eee(priv->phylink, edata); + } +@@ -676,6 +677,7 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, + return ret; + + priv->tx_lpi_timer = edata->tx_lpi_timer; ++ priv->tx_lpi_enabled = edata->tx_lpi_enabled; + return 0; + } + +-- +2.17.1 + diff --git a/patches/0033-serial-8250_dw-enable-runtime-PM.lpss b/patches/0033-serial-8250_dw-enable-runtime-PM.lpss new file mode 100644 index 0000000000..76614357c7 --- /dev/null +++ b/patches/0033-serial-8250_dw-enable-runtime-PM.lpss @@ -0,0 +1,58 @@ +From 6cf3c2c58c754962c732c4f6f8ae1e098feee646 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 12 Sep 2016 14:40:45 +0300 +Subject: [PATCH 33/40] serial: 8250_dw: enable runtime PM + +Enable runtime PM for all 
ports unconditionally. Set autosuspend time to +infinity (-1) to avoid port being shutdown when using, for example, as a +serial console since there is no means to resume it back. + +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_dw.c | 16 +++------------- + 1 file changed, 3 insertions(+), 13 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index 1c72fdc2dd37..ea835dd6b506 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -260,18 +260,6 @@ static int dw8250_handle_irq(struct uart_port *p) + return 0; + } + +-static void +-dw8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) +-{ +- if (!state) +- pm_runtime_get_sync(port->dev); +- +- serial8250_do_pm(port, state, old); +- +- if (state) +- pm_runtime_put_sync_suspend(port->dev); +-} +- + static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, + struct ktermios *old) + { +@@ -408,7 +396,6 @@ static int dw8250_probe(struct platform_device *pdev) + p->mapbase = regs->start; + p->irq = irq; + p->handle_irq = dw8250_handle_irq; +- p->pm = dw8250_do_pm; + p->type = PORT_8250; + p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT; + p->dev = dev; +@@ -537,6 +524,9 @@ static int dw8250_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, data); + ++ pm_runtime_use_autosuspend(dev); ++ pm_runtime_set_autosuspend_delay(dev, -1); ++ + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + +-- +2.17.1 + diff --git a/patches/0034-ASoC-Intel-Skylake-Flip-SST-initialization-order.audio b/patches/0034-ASoC-Intel-Skylake-Flip-SST-initialization-order.audio new file mode 100644 index 0000000000..62ad4d6068 --- /dev/null +++ b/patches/0034-ASoC-Intel-Skylake-Flip-SST-initialization-order.audio @@ -0,0 +1,207 @@ +From a7c45d3d5bbe5ac6319b638db39cec7683127c7e Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 20:54:01 +0200 +Subject: [PATCH 034/193] ASoC: 
Intel: Skylake: Flip SST initialization order + +To this date Skylake SST were following ill flow of initialization by +bypassing sst_dsp_new -> sst_ops::init order. Fix that by flipping +invocation order of handlers engaged in Skylake initialization. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 15 ++++----------- + sound/soc/intel/skylake/cnl-sst-dsp.h | 2 +- + sound/soc/intel/skylake/cnl-sst.c | 15 ++++----------- + sound/soc/intel/skylake/skl-messages.c | 4 ++-- + sound/soc/intel/skylake/skl-sst-dsp.h | 4 ++-- + sound/soc/intel/skylake/skl-sst.c | 15 ++++----------- + sound/soc/intel/skylake/skl.c | 2 +- + sound/soc/intel/skylake/skl.h | 4 ++-- + 8 files changed, 20 insertions(+), 41 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 22223bc01899..08b834a4f7ac 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -538,24 +538,17 @@ struct sst_ops apl_sst_ops = { + .read = sst_shim32_read, + .ram_read = sst_memcpy_fromio_32, + .ram_write = sst_memcpy_toio_32, ++ .init = bxt_sst_dsp_init, + .free = skl_dsp_free, + }; + +-static struct sst_pdata skl_dev = { +- .ops = &apl_sst_ops, +-}; +- +-int bxt_sst_dsp_init(struct skl_dev *skl, const char *fw_name) ++int bxt_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata) + { +- struct sst_dsp *sst; ++ struct skl_dev *skl = sst->thread_context; + void __iomem *mmio; + int ret; + +- ret = skl_sst_ctx_init(skl, fw_name, &skl_dev); +- if (ret) +- return ret; +- +- sst = skl->dsp; ++ skl->dsp = sst; + sst->fw_ops = bxt_fw_ops; + mmio = pci_ioremap_bar(skl->pci, 4); + if (!mmio) +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.h b/sound/soc/intel/skylake/cnl-sst-dsp.h +index 02e070fae2ce..7810ae11954a 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.h ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.h +@@ -87,6 +87,6 @@ void cnl_ipc_op_int_enable(struct sst_dsp *ctx); + void 
cnl_ipc_op_int_disable(struct sst_dsp *ctx); + bool cnl_ipc_int_status(struct sst_dsp *ctx); + +-int cnl_sst_dsp_init(struct skl_dev *skl, const char *fw_name); ++int cnl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata); + + #endif /*__CNL_SST_DSP_H__*/ +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index c977a6f08a8f..164fb5f92797 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -414,24 +414,17 @@ struct sst_ops cnl_sst_ops = { + .read = sst_shim32_read, + .ram_read = sst_memcpy_fromio_32, + .ram_write = sst_memcpy_toio_32, ++ .init = cnl_sst_dsp_init, + .free = cnl_dsp_free, + }; + +-static struct sst_pdata cnl_dev = { +- .ops = &cnl_sst_ops, +-}; +- +-int cnl_sst_dsp_init(struct skl_dev *cnl, const char *fw_name) ++int cnl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata) + { +- struct sst_dsp *sst; ++ struct skl_dev *cnl = sst->thread_context; + void __iomem *mmio; + int ret; + +- ret = skl_sst_ctx_init(cnl, fw_name, &cnl_dev); +- if (ret < 0) +- return ret; +- +- sst = cnl->dsp; ++ cnl->dsp = sst; + sst->fw_ops = cnl_fw_ops; + mmio = pci_ioremap_bar(cnl->pci, 4); + if (!mmio) +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index 5f0fb124c0fb..27ab344344fa 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -187,7 +187,7 @@ const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id) + return NULL; + } + +-int skl_init_dsp(struct skl_dev *skl) ++int skl_init_dsp(struct skl_dev *skl, struct sst_pdata *pdata) + { + struct hdac_bus *bus = skl_to_bus(skl); + const struct skl_dsp_ops *ops; +@@ -201,7 +201,7 @@ int skl_init_dsp(struct skl_dev *skl) + if (!ops) + return -EIO; + +- ret = ops->init(skl, skl->fw_name); ++ ret = skl_sst_ctx_init(skl, skl->fw_name, pdata); + if (ret < 0) + return ret; + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h 
b/sound/soc/intel/skylake/skl-sst-dsp.h +index cfb31b736274..c4833d468860 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -219,8 +219,8 @@ int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id); + int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id); + + int skl_dsp_boot(struct sst_dsp *ctx); +-int skl_sst_dsp_init(struct skl_dev *skl, const char *fw_name); +-int bxt_sst_dsp_init(struct skl_dev *skl, const char *fw_name); ++int skl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata); ++int bxt_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata); + int bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, + int lib_count); + +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index 490df6dfb7b8..f7b06e1d3d5a 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -509,24 +509,17 @@ struct sst_ops skl_sst_ops = { + .read = sst_shim32_read, + .ram_read = sst_memcpy_fromio_32, + .ram_write = sst_memcpy_toio_32, ++ .init = skl_sst_dsp_init, + .free = skl_dsp_free, + }; + +-static struct sst_pdata skl_dev = { +- .ops = &skl_sst_ops, +-}; +- +-int skl_sst_dsp_init(struct skl_dev *skl, const char *fw_name) ++int skl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata) + { +- struct sst_dsp *sst; ++ struct skl_dev *skl = sst->thread_context; + void __iomem *mmio; + int ret; + +- ret = skl_sst_ctx_init(skl, fw_name, &skl_dev); +- if (ret < 0) +- return ret; +- +- sst = skl->dsp; ++ skl->dsp = sst; + sst->fw_ops = skl_fw_ops; + mmio = pci_ioremap_bar(skl->pci, 4); + if (!mmio) +diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c +index 53a6befd5d68..39442c80a179 100644 +--- a/sound/soc/intel/skylake/skl.c ++++ b/sound/soc/intel/skylake/skl.c +@@ -1072,7 +1072,7 @@ static int skl_probe(struct pci_dev *pci, + goto out_nhlt_free; + } + +- err = skl_init_dsp(skl); ++ err = 
skl_init_dsp(skl, desc); + if (err < 0) { + dev_dbg(bus->dev, "error failed to register dsp\n"); + goto out_nhlt_free; +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index 88f15859a8ad..fe9e3f2fad76 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -158,7 +158,7 @@ struct skl_machine_pdata { + + struct skl_dsp_ops { + int id; +- int (*init)(struct skl_dev *skl, const char *fw_name); ++ int (*init)(struct sst_dsp *dsp, struct sst_pdata *pdata); + }; + + int skl_platform_unregister(struct device *dev); +@@ -169,7 +169,7 @@ struct nhlt_specific_cfg *skl_get_ep_blob(struct skl_dev *skl, u32 instance, + u32 s_rate, u8 dirn, u8 dev_type); + + int skl_nhlt_update_topology_bin(struct skl_dev *skl); +-int skl_init_dsp(struct skl_dev *skl); ++int skl_init_dsp(struct skl_dev *skl, struct sst_pdata *pdata); + int skl_free_dsp(struct skl_dev *skl); + int skl_sst_init_fw(struct skl_dev *skl); + void skl_sst_dsp_cleanup(struct skl_dev *skl); +-- +2.17.1 + diff --git a/patches/0034-api-doc-add-ACRN-VBS-API-docs.acrn b/patches/0034-api-doc-add-ACRN-VBS-API-docs.acrn new file mode 100644 index 0000000000..c059923242 --- /dev/null +++ b/patches/0034-api-doc-add-ACRN-VBS-API-docs.acrn @@ -0,0 +1,514 @@ +From f2f645176469c882463eb8ee9bc131728104bff2 Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Fri, 31 Aug 2018 10:58:58 +0800 +Subject: [PATCH 034/150] api doc: add ACRN VBS API docs + +V2: Fixing doc build caused by linux/vbs/vbs.h + +Change-Id: I634c0117392ca529d7bd4b89a02fec43a0f70d63 +Tracked-On: 220254 +Signed-off-by: Hao Li +--- + Documentation/virtual/acrn/00-INDEX | 8 ++ + Documentation/virtual/acrn/conf.py | 5 + + Documentation/virtual/acrn/index.rst | 17 +++ + Documentation/virtual/acrn/vbs.rst | 20 +++ + Documentation/virtual/acrn/vhm.rst | 5 + + drivers/vbs/vbs_rng.c | 9 ++ + include/linux/vbs/vbs.h | 177 ++++++++++++++++++++++----- + include/linux/vbs/vq.h | 106 +++++++++++++++- + 8 files changed, 308 
insertions(+), 39 deletions(-) + create mode 100644 Documentation/virtual/acrn/00-INDEX + create mode 100644 Documentation/virtual/acrn/conf.py + create mode 100644 Documentation/virtual/acrn/index.rst + create mode 100644 Documentation/virtual/acrn/vbs.rst + create mode 100644 Documentation/virtual/acrn/vhm.rst + +diff --git a/Documentation/virtual/acrn/00-INDEX b/Documentation/virtual/acrn/00-INDEX +new file mode 100644 +index 000000000000..5beb50eef9e1 +--- /dev/null ++++ b/Documentation/virtual/acrn/00-INDEX +@@ -0,0 +1,8 @@ ++00-INDEX ++ - this file. ++index.rst ++ - Index. ++vhm.rst ++ - virtio and hypervisor service module (VHM) APIs. ++vbs.rst ++ - virtio and backend service (VBS) APIs. +diff --git a/Documentation/virtual/acrn/conf.py b/Documentation/virtual/acrn/conf.py +new file mode 100644 +index 000000000000..ed247df22700 +--- /dev/null ++++ b/Documentation/virtual/acrn/conf.py +@@ -0,0 +1,5 @@ ++# -*- coding: utf-8; mode: python -*- ++ ++project = "ACRN Project" ++ ++tags.add("subproject") +diff --git a/Documentation/virtual/acrn/index.rst b/Documentation/virtual/acrn/index.rst +new file mode 100644 +index 000000000000..3630d4fe3207 +--- /dev/null ++++ b/Documentation/virtual/acrn/index.rst +@@ -0,0 +1,17 @@ ++.. -*- coding: utf-8; mode: rst -*- ++ ++============================= ++ACRN Project ++============================= ++ ++.. toctree:: ++ ++ vbs.rst ++ vhm.rst ++ ++.. only:: subproject ++ ++ Indices ++ ======= ++ ++ * :ref:`genindex` +diff --git a/Documentation/virtual/acrn/vbs.rst b/Documentation/virtual/acrn/vbs.rst +new file mode 100644 +index 000000000000..40a0683a1c0b +--- /dev/null ++++ b/Documentation/virtual/acrn/vbs.rst +@@ -0,0 +1,20 @@ ++================================ ++Virtio and Backend Service (VBS) ++================================ ++ ++The Virtio and Backend Service (VBS) in part of ACRN Project. ++ ++The VBS can be further divided into two parts: VBS in user space (VBS-U) ++and VBS in kernel space (VBS-K). 
++ ++Example: ++-------- ++A reference driver for VBS-K can be found at :c:type:`struct vbs_rng`. ++ ++.. kernel-doc:: drivers/vbs/vbs_rng.c ++ ++APIs: ++----- ++ ++.. kernel-doc:: include/linux/vbs/vbs.h ++.. kernel-doc:: include/linux/vbs/vq.h +diff --git a/Documentation/virtual/acrn/vhm.rst b/Documentation/virtual/acrn/vhm.rst +new file mode 100644 +index 000000000000..56d498a016b0 +--- /dev/null ++++ b/Documentation/virtual/acrn/vhm.rst +@@ -0,0 +1,5 @@ ++================================== ++Virtio and Hypervisor Module (VHM) ++================================== ++ ++The Virtio and Hypervisor service Module (VHM) in part of ACRN Project. +diff --git a/drivers/vbs/vbs_rng.c b/drivers/vbs/vbs_rng.c +index 87965bafbbb3..2c71186801e7 100644 +--- a/drivers/vbs/vbs_rng.c ++++ b/drivers/vbs/vbs_rng.c +@@ -91,6 +91,15 @@ enum { + *}; + */ + ++/** ++ * struct vbs_rng - Backend of virtio-rng based on VBS-K ++ * ++ * @dev : instance of struct virtio_dev_info ++ * @vqs : instances of struct virtio_vq_info ++ * @hwrng : device specific member ++ * @node : hashtable maintaining multiple connections ++ * from multiple guests/devices ++ */ + struct vbs_rng { + struct virtio_dev_info dev; + struct virtio_vq_info vqs[VBS_K_RNG_VQ_MAX]; +diff --git a/include/linux/vbs/vbs.h b/include/linux/vbs/vbs.h +index b2e185e115c8..ef54960e59c4 100644 +--- a/include/linux/vbs/vbs.h ++++ b/include/linux/vbs/vbs.h +@@ -66,19 +66,26 @@ + #include + #include + +-/* +- * VBS-K device needs to handle frontend driver's kick in kernel. +- * For virtio 0.9.5, the kick register is a PIO register, +- * for virtio 1.0+, the kick register could be a MMIO register. 
++/** ++ * enum IORangeType - type of registers to be handled in VBS-K ++ * ++ * @PIO_RANGE : Port I/O registers, for virtio 0.9.5 ++ * @MMIO_RANGE : Memory-Mapped I/O registers, for virtio 1.0+ + */ + enum IORangeType { + PIO_RANGE = 0x0, /* default */ + MMIO_RANGE = 0x1, + }; + +-/* device context */ ++/** ++ * struct ctx - VM context this device belongs to ++ * ++ * @vmid : ID of VM this device belongs to ++ * @vhm_client_id : ID of VHM client this device registers ++ * @max_vcpu : number of VCPU in this VM ++ * @req_buf : request buffers ++ */ + struct ctx { +- /* VHM required info */ + int vmid; + int vhm_client_id; + int max_vcpu; +@@ -109,59 +116,163 @@ struct vring_used { + struct virtio_used ring[]; /* size N */ + } __attribute__((packed)); + +-/* struct used to maintain virtqueue info from userspace VBS */ ++/** ++ * struct virtio_vq_info - virtqueue data structure ++ */ + struct virtio_vq_info { + /* virtqueue info from VBS-U */ +- uint16_t qsize; /* size of this queue (a power of 2) */ +- uint32_t pfn; /* PFN of virt queue (not shifted!) */ +- uint16_t msix_idx; /* MSI-X index/VIRTIO_MSI_NO_VECTOR */ +- uint64_t msix_addr; /* MSI-X address specified by index */ +- uint32_t msix_data; /* MSI-X data specified by index */ ++ /** @qsize: size of this queue (a power of 2) */ ++ uint16_t qsize; ++ /** @pfn: PFN of virt queue (not shifted!) 
*/ ++ uint32_t pfn; ++ /** @msix_idx: MSI-X index/VIRTIO_MSI_NO_VECTOR */ ++ uint16_t msix_idx; ++ /** @msix_addr: MSI-X address specified by index */ ++ uint64_t msix_addr; ++ /** @msix_data: MSI-X data specified by index */ ++ uint32_t msix_data; + + /* members created in kernel space VBS */ +- int (*vq_notify)(int); /* vq-wide notification */ +- struct virtio_dev_info *dev; /* backpointer to virtio_dev_info */ +- uint16_t num; /* we're the num'th virtqueue */ +- uint16_t flags; /* virtqueue flags */ +- uint16_t last_avail; /* a recent value of vq_avail->va_idx */ +- uint16_t save_used; /* saved vq_used->vu_idx */ +- +- volatile struct virtio_desc *desc; /* descriptor array */ +- volatile struct vring_avail *avail; /* the "avail" ring */ +- volatile struct vring_used *used; /* the "used" ring */ ++ /** @vq_notify: vq-wide notification */ ++ int (*vq_notify)(int); ++ /** @dev: backpointer to virtio_dev_info */ ++ struct virtio_dev_info *dev; ++ /** @num: we're the num'th virtqueue */ ++ uint16_t num; ++ /** @flags: virtqueue flags */ ++ uint16_t flags; ++ /* private: a recent value of vq_avail->va_idx */ ++ uint16_t last_avail; ++ /* private: saved vq_used->vu_idx */ ++ uint16_t save_used; ++ ++ /* private: descriptor array */ ++ volatile struct virtio_desc *desc; ++ /* private: the "avail" ring */ ++ volatile struct vring_avail *avail; ++ /* private: the "used" ring */ ++ volatile struct vring_used *used; + }; + +-/* struct used to maintain virtio device info from userspace VBS */ ++/** ++ * struct virtio_dev_info - VBS-K device data structure ++ */ + struct virtio_dev_info { + /* dev info from VBS */ +- char name[VBS_NAME_LEN]; /* VBS device name */ +- struct ctx _ctx; /* device context */ +- int nvq; /* number of virtqueues */ +- uint32_t negotiated_features; /* features after guest loads driver */ +- uint64_t io_range_start; /* IO range start of VBS device */ +- uint64_t io_range_len; /* IO range len of VBS device */ +- enum IORangeType io_range_type; /* IO 
range type, PIO or MMIO */ ++ /** @name: VBS device name */ ++ char name[VBS_NAME_LEN]; ++ /** @_ctx: VM context this device belongs to */ ++ struct ctx _ctx; ++ /** @nvq: number of virtqueues */ ++ int nvq; ++ /** @negotiated_features: features after guest loads driver */ ++ uint32_t negotiated_features; ++ /** @io_range_start: start of an IO range VBS needs to handle */ ++ uint64_t io_range_start; ++ /** @io_range_len: len of an IO range VBS needs to handle */ ++ uint64_t io_range_len; ++ /** @io_range_type: IO range type, PIO or MMIO */ ++ enum IORangeType io_range_type; + + /* members created in kernel space VBS */ +- int (*dev_notify)(int, int); /* device-wide notification */ +- struct virtio_vq_info *vqs; /* virtqueue(s) */ +- int curq; /* current virtqueue index */ ++ /** ++ * @dev_notify: device-wide notification ++ * ++ * This is the callback function to be registered to VHM, ++ * so that VBS gets notified when frontend accessed the register. ++ */ ++ int (*dev_notify)(int, int); ++ /** @vqs: virtqueue(s) of this device */ ++ struct virtio_vq_info *vqs; ++ /** @curq: current virtqueue index */ ++ int curq; + }; + ++/** ++ * virtio_dev_client_id - get device's VHM client ID ++ * ++ * @dev: VBS-K device data struct ++ * ++ * Return: device's VHM client ID ++ */ + static inline int virtio_dev_client_id(struct virtio_dev_info *dev) + { + return dev->_ctx.vhm_client_id; + } + + /* VBS Runtime Control APIs */ ++ ++/** ++ * virtio_dev_init - Initialize VBS-K device data structures ++ * ++ * @dev: Pointer to VBS-K device data struct ++ * @vqs: Pointer to VBS-K virtqueue data struct, normally in an array ++ * @nvq: Number of virtqueues this device has ++ * ++ * Return: 0 on success, <0 on error ++ */ + long virtio_dev_init(struct virtio_dev_info *dev, struct virtio_vq_info *vqs, + int nvq); ++ ++/** ++ * virtio_dev_ioctl - VBS-K device's common ioctl routine ++ * ++ * @dev: Pointer to VBS-K device data struct ++ * @ioctl: Command of ioctl to device ++ * @argp: Data 
from user space ++ * ++ * Return: 0 on success, <0 on error ++ */ + long virtio_dev_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp); ++ ++/** ++ * virtio_vqs_ioctl - VBS-K vq's common ioctl routine ++ * ++ * @dev: Pointer to VBS-K device data struct ++ * @ioctl: Command of ioctl to virtqueue ++ * @argp: Data from user space ++ * ++ * Return: 0 on success, <0 on error ++ */ + long virtio_vqs_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp); ++ ++/** ++ * virtio_dev_register - register a VBS-K device to VHM ++ * ++ * Each VBS-K device will be registered as a VHM client, with the ++ * information including "kick" register location, callback, etc. ++ * ++ * @dev: Pointer to VBS-K device data struct ++ * ++ * Return: 0 on success, <0 on error ++ */ + long virtio_dev_register(struct virtio_dev_info *dev); ++ ++/** ++ * virtio_dev_register - unregister a VBS-K device from VHM ++ * ++ * Destroy the client corresponding to the VBS-K device specified. ++ * ++ * @dev: Pointer to VBS-K device data struct ++ * ++ * Return: 0 on success, <0 on error ++ */ + long virtio_dev_deregister(struct virtio_dev_info *dev); ++ ++/** ++ * virtio_vq_index_get - get virtqueue index that frontend kicks ++ * ++ * This API is normally called in the VBS-K device's callback ++ * function, to get value write to the "kick" register from ++ * frontend. 
++ * ++ * @dev: Pointer to VBS-K device data struct ++ * @req_cnt: Number of requests need to handle, provided by VHM ++ * ++ * Return: >=0 on virtqueue index, <0 on error ++ */ + int virtio_vq_index_get(struct virtio_dev_info *dev, int req_cnt); + + #endif +diff --git a/include/linux/vbs/vq.h b/include/linux/vbs/vq.h +index 9ebde05e4663..9e865b8dff05 100644 +--- a/include/linux/vbs/vq.h ++++ b/include/linux/vbs/vq.h +@@ -101,7 +101,13 @@ + /* Functions for dealing with generalized "virtual devices" */ + #define VQ_USED_EVENT_IDX(vq) ((vq)->avail->ring[(vq)->qsize]) + +-/* get virtqueue size according to virtio specification */ ++/** ++ * virtio_vq_ring_size - Calculate size of a virtqueue ++ * ++ * @qsz: size of raw data in a certain virtqueue ++ * ++ * Return: size of a certain virtqueue ++ */ + static inline size_t virtio_vq_ring_size(unsigned int qsz) + { + size_t size; +@@ -117,15 +123,26 @@ static inline size_t virtio_vq_ring_size(unsigned int qsz) + return size; + } + +-/* Is this ring ready for I/O? */ ++/** ++ * virtio_vq_ring_ready - Is this ring ready for I/O? ++ * ++ * @vq: Pointer to struct virtio_vq_info ++ * ++ * Return: 0 on not ready, and 1 on ready ++ */ + static inline int virtio_vq_ring_ready(struct virtio_vq_info *vq) + { + return (vq->flags & VQ_ALLOC); + } + +-/* +- * Are there "available" descriptors? (This does not count +- * how many, just returns True if there are some). ++/** ++ * virtio_vq_has_descs - Are there "available" descriptors? ++ * ++ * This does not count how many, just returns True if there is any. 
++ * ++ * @vq: Pointer to struct virtio_vq_info ++ * ++ * Return: 0 on no available, and non-zero on available + */ + static inline int virtio_vq_has_descs(struct virtio_vq_info *vq) + { +@@ -133,7 +150,16 @@ static inline int virtio_vq_has_descs(struct virtio_vq_info *vq) + vq->last_avail != vq->avail->idx); + } + +-/* Deliver an interrupt to guest on the given virtual queue */ ++/** ++ * virtio_vq_interrupt - Deliver an interrupt to guest on the given ++ * virtqueue. ++ * MSI-x or a generic MSI interrupt. ++ * ++ * @dev: Pointer to struct virtio_dev_info ++ * @vq: Pointer to struct virtio_vq_info ++ * ++ * Return: NULL ++ */ + static inline void virtio_vq_interrupt(struct virtio_dev_info *dev, + struct virtio_vq_info *vq) + { +@@ -158,15 +184,83 @@ static inline void virtio_vq_interrupt(struct virtio_dev_info *dev, + + + /* virtqueue initialization APIs */ ++ ++/** ++ * virtio_vq_init - Initialize the currently-selected virtqueue ++ * ++ * The guest just gave us a page frame number, from which we can ++ * calculate the addresses of the queue. After calculation, the ++ * addresses are updated in vq's members. 
++ * ++ * @vq: Pointer to struct virtio_vq_info ++ * @pfn: page frame number in guest physical address space ++ * ++ * Return: NULL ++ */ + void virtio_vq_init(struct virtio_vq_info *vq, uint32_t pfn); ++ ++/** ++ * virtio_vq_reset - reset one virtqueue, make it invalid ++ * ++ * @vq: Pointer to struct virtio_vq_info ++ * ++ * Return: NULL ++ */ + void virtio_vq_reset(struct virtio_vq_info *vq); + + /* virtqueue runtime APIs */ ++ ++/** ++ * virtio_vq_getchain - Walk through the chain of descriptors ++ * involved in a request and put them into ++ * a given iov[] array ++ * ++ * @vq: Pointer to struct virtio_vq_info ++ * @pidx: Pointer to available ring position ++ * @iov: Pointer to iov[] array prepared by caller ++ * @n_iov: Size of iov[] array ++ * @flags: Pointer to a uint16_t array which will contain flag of ++ * each descriptor ++ * ++ * Return: number of descriptors ++ */ + int virtio_vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx, + struct iovec *iov, int n_iov, uint16_t *flags); ++ ++/** ++ * virtio_vq_retchain - Return the currently-first request chain ++ * back to the available ring ++ * ++ * @vq: Pointer to struct virtio_vq_info ++ * ++ * Return: NULL ++ */ + void virtio_vq_retchain(struct virtio_vq_info *vq); ++ ++/** ++ * virtio_vq_relchain - Return specified request chain to the guest, ++ * setting its I/O length to the provided value ++ * ++ * @vq: Pointer to struct virtio_vq_info ++ * @idx: Pointer to available ring position, returned by vq_getchain() ++ * @iolen: Number of data bytes to be returned to frontend ++ * ++ * Return: NULL ++ */ + void virtio_vq_relchain(struct virtio_vq_info *vq, uint16_t idx, + uint32_t iolen); ++ ++/** ++ * virtio_vq_endchains - Driver has finished processing "available" ++ * chains and calling vq_relchain on each one ++ * ++ * If driver used all the available chains, used_all should be set. 
++ * ++ * @vq: Pointer to struct virtio_vq_info ++ * @used_all_avail: Flag indicating if driver used all available chains ++ * ++ * Return: NULL ++ */ + void virtio_vq_endchains(struct virtio_vq_info *vq, int used_all_avail); + + #endif +-- +2.17.1 + diff --git a/patches/0034-drm-panel-Initialise-panel-dev-and-funcs-through-drm_p.drm b/patches/0034-drm-panel-Initialise-panel-dev-and-funcs-through-drm_p.drm new file mode 100644 index 0000000000..1d20865f37 --- /dev/null +++ b/patches/0034-drm-panel-Initialise-panel-dev-and-funcs-through-drm_p.drm @@ -0,0 +1,719 @@ +From 5fdbdab2cc819c9e45b233c08f22946a71081bb2 Mon Sep 17 00:00:00 2001 +From: Laurent Pinchart +Date: Fri, 23 Aug 2019 22:32:43 +0300 +Subject: [PATCH 034/690] drm/panel: Initialise panel dev and funcs through + drm_panel_init() + +Instead of requiring all drivers to set the dev and funcs fields of +drm_panel manually after calling drm_panel_init(), pass the data as +arguments to the function. This simplifies the panel drivers, and will +help future refactoring when adding new arguments to drm_panel_init(). + +The panel drivers have been updated with the following Coccinelle +semantic patch, with manual inspection to verify that no call to +drm_panel_init() with a single argument still exists. + +@@ +expression panel; +expression device; +identifier ops; +@@ + drm_panel_init(&panel ++ , device, &ops + ); + ... 
+( +-panel.dev = device; +-panel.funcs = &ops; +| +-panel.funcs = &ops; +-panel.dev = device; +) + +Suggested-by: Sam Ravnborg +Signed-off-by: Laurent Pinchart +Signed-off-by: Sam Ravnborg +Link: https://patchwork.freedesktop.org/patch/msgid/20190823193245.23876-3-laurent.pinchart@ideasonboard.com +--- + drivers/gpu/drm/drm_panel.c | 11 ++++++++--- + drivers/gpu/drm/panel/panel-arm-versatile.c | 4 +--- + drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c | 4 +--- + drivers/gpu/drm/panel/panel-ilitek-ili9322.c | 4 +--- + drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 4 +--- + drivers/gpu/drm/panel/panel-innolux-p079zca.c | 4 +--- + drivers/gpu/drm/panel/panel-jdi-lt070me05000.c | 4 +--- + drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c | 5 ++--- + drivers/gpu/drm/panel/panel-lg-lb035q02.c | 4 +--- + drivers/gpu/drm/panel/panel-lg-lg4573.c | 4 +--- + drivers/gpu/drm/panel/panel-lvds.c | 4 +--- + drivers/gpu/drm/panel/panel-nec-nl8048hl11.c | 4 +--- + drivers/gpu/drm/panel/panel-novatek-nt39016.c | 4 +--- + drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c | 4 +--- + drivers/gpu/drm/panel/panel-orisetech-otm8009a.c | 4 +--- + drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c | 5 ++--- + drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c | 5 ++--- + drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 4 +--- + drivers/gpu/drm/panel/panel-raydium-rm67191.c | 4 +--- + drivers/gpu/drm/panel/panel-raydium-rm68200.c | 4 +--- + drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c | 4 +--- + drivers/gpu/drm/panel/panel-ronbo-rb070d30.c | 4 +--- + drivers/gpu/drm/panel/panel-samsung-ld9040.c | 4 +--- + drivers/gpu/drm/panel/panel-samsung-s6d16d0.c | 4 +--- + drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c | 4 +--- + drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c | 4 +--- + drivers/gpu/drm/panel/panel-samsung-s6e63m0.c | 4 +--- + drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c | 4 +--- + drivers/gpu/drm/panel/panel-seiko-43wvf1g.c | 4 +--- + 
drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c | 4 +--- + drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c | 4 +--- + drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c | 5 ++--- + drivers/gpu/drm/panel/panel-simple.c | 4 +--- + drivers/gpu/drm/panel/panel-sitronix-st7701.c | 4 +--- + drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 4 +--- + drivers/gpu/drm/panel/panel-sony-acx565akm.c | 4 +--- + drivers/gpu/drm/panel/panel-tpo-td028ttec1.c | 4 +--- + drivers/gpu/drm/panel/panel-tpo-td043mtea1.c | 4 +--- + drivers/gpu/drm/panel/panel-tpo-tpg110.c | 4 +--- + drivers/gpu/drm/panel/panel-truly-nt35597.c | 4 +--- + include/drm/drm_panel.h | 3 ++- + 41 files changed, 53 insertions(+), 121 deletions(-) + +diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c +index 6b0bf42039cf..ba2fad4c9648 100644 +--- a/drivers/gpu/drm/drm_panel.c ++++ b/drivers/gpu/drm/drm_panel.c +@@ -44,13 +44,18 @@ static LIST_HEAD(panel_list); + /** + * drm_panel_init - initialize a panel + * @panel: DRM panel ++ * @dev: parent device of the panel ++ * @funcs: panel operations + * +- * Sets up internal fields of the panel so that it can subsequently be added +- * to the registry. ++ * Initialize the panel structure for subsequent registration with ++ * drm_panel_add(). 
+ */ +-void drm_panel_init(struct drm_panel *panel) ++void drm_panel_init(struct drm_panel *panel, struct device *dev, ++ const struct drm_panel_funcs *funcs) + { + INIT_LIST_HEAD(&panel->list); ++ panel->dev = dev; ++ panel->funcs = funcs; + } + EXPORT_SYMBOL(drm_panel_init); + +diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c +index 5f72c922a04b..a4333ed0f20c 100644 +--- a/drivers/gpu/drm/panel/panel-arm-versatile.c ++++ b/drivers/gpu/drm/panel/panel-arm-versatile.c +@@ -350,9 +350,7 @@ static int versatile_panel_probe(struct platform_device *pdev) + dev_info(dev, "panel mounted on IB2 daughterboard\n"); + } + +- drm_panel_init(&vpanel->panel); +- vpanel->panel.dev = dev; +- vpanel->panel.funcs = &versatile_panel_drm_funcs; ++ drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs); + + return drm_panel_add(&vpanel->panel); + } +diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c +index dabf59e0f56f..7d5d7455bc01 100644 +--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c ++++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c +@@ -204,9 +204,7 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi) + mipi_dsi_set_drvdata(dsi, ctx); + ctx->dsi = dsi; + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = &dsi->dev; +- ctx->panel.funcs = &feiyang_funcs; ++ drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs); + + ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd"); + if (IS_ERR(ctx->dvdd)) { +diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +index 3c58f63adbf7..ad2405baa0ac 100644 +--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c ++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +@@ -895,9 +895,7 @@ static int ili9322_probe(struct spi_device *spi) + ili->input = ili->conf->input; + } + +- drm_panel_init(&ili->panel); +- ili->panel.dev = dev; +- 
ili->panel.funcs = &ili9322_drm_funcs; ++ drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs); + + return drm_panel_add(&ili->panel); + } +diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +index 3ad4a46c4e94..1d714f961c00 100644 +--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c ++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +@@ -433,9 +433,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) + mipi_dsi_set_drvdata(dsi, ctx); + ctx->dsi = dsi; + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = &dsi->dev; +- ctx->panel.funcs = &ili9881c_funcs; ++ drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs); + + ctx->power = devm_regulator_get(&dsi->dev, "power"); + if (IS_ERR(ctx->power)) { +diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c +index d92d1c98878c..2054afc31f20 100644 +--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c ++++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c +@@ -487,9 +487,7 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi, + if (IS_ERR(innolux->backlight)) + return PTR_ERR(innolux->backlight); + +- drm_panel_init(&innolux->base); +- innolux->base.funcs = &innolux_panel_funcs; +- innolux->base.dev = dev; ++ drm_panel_init(&innolux->base, dev, &innolux_panel_funcs); + + err = drm_panel_add(&innolux->base); + if (err < 0) +diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +index ff3e89e61e3f..7bfdbfbc868e 100644 +--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c ++++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +@@ -437,9 +437,7 @@ static int jdi_panel_add(struct jdi_panel *jdi) + return ret; + } + +- drm_panel_init(&jdi->base); +- jdi->base.funcs = &jdi_panel_funcs; +- jdi->base.dev = &jdi->dsi->dev; ++ drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs); + + ret = drm_panel_add(&jdi->base); + +diff --git 
a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +index 3ac04eb8d0fe..a47885a1a8aa 100644 +--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c ++++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +@@ -391,9 +391,8 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay) + if (IS_ERR(kingdisplay->backlight)) + return PTR_ERR(kingdisplay->backlight); + +- drm_panel_init(&kingdisplay->base); +- kingdisplay->base.funcs = &kingdisplay_panel_funcs; +- kingdisplay->base.dev = &kingdisplay->link->dev; ++ drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev, ++ &kingdisplay_panel_funcs); + + return drm_panel_add(&kingdisplay->base); + } +diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c +index ee4379729a5b..c7b9b47849bb 100644 +--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c ++++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c +@@ -196,9 +196,7 @@ static int lb035q02_probe(struct spi_device *spi) + if (ret < 0) + return ret; + +- drm_panel_init(&lcd->panel); +- lcd->panel.dev = &lcd->spi->dev; +- lcd->panel.funcs = &lb035q02_funcs; ++ drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs); + + return drm_panel_add(&lcd->panel); + } +diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c +index 41bf02d122a1..608f2de91662 100644 +--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c ++++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c +@@ -259,9 +259,7 @@ static int lg4573_probe(struct spi_device *spi) + return ret; + } + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = &spi->dev; +- ctx->panel.funcs = &lg4573_drm_funcs; ++ drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs); + + return drm_panel_add(&ctx->panel); + } +diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c +index ad47cc95459e..3dc1ac2a6b03 100644 +--- a/drivers/gpu/drm/panel/panel-lvds.c ++++ 
b/drivers/gpu/drm/panel/panel-lvds.c +@@ -260,9 +260,7 @@ static int panel_lvds_probe(struct platform_device *pdev) + */ + + /* Register the panel. */ +- drm_panel_init(&lvds->panel); +- lvds->panel.dev = lvds->dev; +- lvds->panel.funcs = &panel_lvds_funcs; ++ drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs); + + ret = drm_panel_add(&lvds->panel); + if (ret < 0) +diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c +index 20f17e46e65d..272a1434e155 100644 +--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c ++++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c +@@ -205,9 +205,7 @@ static int nl8048_probe(struct spi_device *spi) + if (ret < 0) + return ret; + +- drm_panel_init(&lcd->panel); +- lcd->panel.dev = &lcd->spi->dev; +- lcd->panel.funcs = &nl8048_funcs; ++ drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs); + + return drm_panel_add(&lcd->panel); + } +diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c +index 2ad1063b068d..64cfe111aaad 100644 +--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c ++++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c +@@ -292,9 +292,7 @@ static int nt39016_probe(struct spi_device *spi) + return err; + } + +- drm_panel_init(&panel->drm_panel); +- panel->drm_panel.dev = dev; +- panel->drm_panel.funcs = &nt39016_funcs; ++ drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs); + + err = drm_panel_add(&panel->drm_panel); + if (err < 0) { +diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c +index 2bae1db3ff34..f2d6a4ec0046 100644 +--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c ++++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c +@@ -288,9 +288,7 @@ static int lcd_olinuxino_probe(struct i2c_client *client, + if (IS_ERR(lcd->backlight)) + return PTR_ERR(lcd->backlight); + +- drm_panel_init(&lcd->panel); +- lcd->panel.dev = dev; +- 
lcd->panel.funcs = &lcd_olinuxino_funcs; ++ drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs); + + return drm_panel_add(&lcd->panel); + } +diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c +index c7b48df8869a..8b60d5e4d775 100644 +--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c ++++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c +@@ -455,9 +455,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi) + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM; + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = dev; +- ctx->panel.funcs = &otm8009a_drm_funcs; ++ drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs); + + ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev), + dsi->host->dev, ctx, +diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c +index e0e20ecff916..38f114b03b89 100644 +--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c ++++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c +@@ -166,9 +166,8 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587) + if (IS_ERR(osd101t2587->backlight)) + return PTR_ERR(osd101t2587->backlight); + +- drm_panel_init(&osd101t2587->base); +- osd101t2587->base.funcs = &osd101t2587_panel_funcs; +- osd101t2587->base.dev = &osd101t2587->dsi->dev; ++ drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev, ++ &osd101t2587_panel_funcs); + + return drm_panel_add(&osd101t2587->base); + } +diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c +index 3dff0b3f73c2..6035bf458074 100644 +--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c ++++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c +@@ -223,9 +223,8 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt) + return -EPROBE_DEFER; + } + +- drm_panel_init(&wuxga_nt->base); 
+- wuxga_nt->base.funcs = &wuxga_nt_panel_funcs; +- wuxga_nt->base.dev = &wuxga_nt->dsi->dev; ++ drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev, ++ &wuxga_nt_panel_funcs); + + ret = drm_panel_add(&wuxga_nt->base); + if (ret < 0) +diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +index 2aa89eaecf6f..23a801427e42 100644 +--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c ++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +@@ -426,9 +426,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, + return PTR_ERR(ts->dsi); + } + +- drm_panel_init(&ts->base); +- ts->base.dev = dev; +- ts->base.funcs = &rpi_touchscreen_funcs; ++ drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs); + + /* This appears last, as it's what will unblock the DSI host + * driver's component bind function. +diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c +index 6a5d37006103..f82a1f69f13b 100644 +--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c ++++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c +@@ -606,9 +606,7 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi) + if (ret) + return ret; + +- drm_panel_init(&panel->panel); +- panel->panel.funcs = &rad_panel_funcs; +- panel->panel.dev = dev; ++ drm_panel_init(&panel->panel, dev, &rad_panel_funcs); + dev_set_drvdata(dev, panel); + + ret = drm_panel_add(&panel->panel); +diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c +index ba889625ad43..f004b78fb8bc 100644 +--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c ++++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c +@@ -404,9 +404,7 @@ static int rm68200_probe(struct mipi_dsi_device *dsi) + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM; + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = dev; +- ctx->panel.funcs = 
&rm68200_drm_funcs; ++ drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs); + + drm_panel_add(&ctx->panel); + +diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c +index b9109922397f..d7f56374f2f1 100644 +--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c ++++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c +@@ -343,9 +343,7 @@ static int jh057n_probe(struct mipi_dsi_device *dsi) + return ret; + } + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = dev; +- ctx->panel.funcs = &jh057n_drm_funcs; ++ drm_panel_init(&ctx->panel, dev, &jh057n_drm_funcs); + + drm_panel_add(&ctx->panel); + +diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +index 3c15764f0c03..8708fbbe7637 100644 +--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c ++++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +@@ -173,9 +173,7 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi) + mipi_dsi_set_drvdata(dsi, ctx); + ctx->dsi = dsi; + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = &dsi->dev; +- ctx->panel.funcs = &rb070d30_panel_funcs; ++ drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs); + + ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ctx->gpios.reset)) { +diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c +index 3be902dcedc0..71a292dbec47 100644 +--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c ++++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c +@@ -351,9 +351,7 @@ static int ld9040_probe(struct spi_device *spi) + return ret; + } + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = dev; +- ctx->panel.funcs = &ld9040_drm_funcs; ++ drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs); + + return drm_panel_add(&ctx->panel); + } +diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c +index 
f75bef24e050..4d25c96e842c 100644 +--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c ++++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c +@@ -215,9 +215,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi) + return ret; + } + +- drm_panel_init(&s6->panel); +- s6->panel.dev = dev; +- s6->panel.funcs = &s6d16d0_drm_funcs; ++ drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs); + + ret = drm_panel_add(&s6->panel); + if (ret < 0) +diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c +index b923de23ed65..42a3aaab49eb 100644 +--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c ++++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c +@@ -732,9 +732,7 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi) + ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS; + ctx->bl_dev->props.power = FB_BLANK_POWERDOWN; + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = dev; +- ctx->panel.funcs = &s6e3ha2_drm_funcs; ++ drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs); + + ret = drm_panel_add(&ctx->panel); + if (ret < 0) +diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c +index cd90fa700c49..b4d879bf4d03 100644 +--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c ++++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c +@@ -466,9 +466,7 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi) + return PTR_ERR(ctx->reset_gpio); + } + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = dev; +- ctx->panel.funcs = &s6e63j0x03_funcs; ++ drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs); + + ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx, + &s6e63j0x03_bl_ops, NULL); +diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +index 142d395ea512..61259c2833ab 100644 +--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c ++++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +@@ -473,9 
+473,7 @@ static int s6e63m0_probe(struct spi_device *spi) + return ret; + } + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = dev; +- ctx->panel.funcs = &s6e63m0_drm_funcs; ++ drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs); + + ret = s6e63m0_backlight_register(ctx); + if (ret < 0) +diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c +index 81858267723a..35dbffabd526 100644 +--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c ++++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c +@@ -1017,9 +1017,7 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi) + + ctx->brightness = GAMMA_LEVEL_NUM - 1; + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = dev; +- ctx->panel.funcs = &s6e8aa0_drm_funcs; ++ drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs); + + ret = drm_panel_add(&ctx->panel); + if (ret < 0) +diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c +index 18b22b1294fb..0833d0c03adc 100644 +--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c ++++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c +@@ -274,9 +274,7 @@ static int seiko_panel_probe(struct device *dev, + return -EPROBE_DEFER; + } + +- drm_panel_init(&panel->base); +- panel->base.dev = dev; +- panel->base.funcs = &seiko_panel_funcs; ++ drm_panel_init(&panel->base, dev, &seiko_panel_funcs); + + err = drm_panel_add(&panel->base); + if (err < 0) +diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c +index e910b4ad1310..87a58cb4d945 100644 +--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c ++++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c +@@ -329,9 +329,7 @@ static int sharp_panel_add(struct sharp_panel *sharp) + if (IS_ERR(sharp->backlight)) + return PTR_ERR(sharp->backlight); + +- drm_panel_init(&sharp->base); +- sharp->base.funcs = &sharp_panel_funcs; +- sharp->base.dev = &sharp->link1->dev; ++ drm_panel_init(&sharp->base, 
&sharp->link1->dev, &sharp_panel_funcs); + + return drm_panel_add(&sharp->base); + } +diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c +index 46cd9a250129..96e3deb0e305 100644 +--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c ++++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c +@@ -185,9 +185,7 @@ static int ls037v7dw01_probe(struct platform_device *pdev) + return PTR_ERR(lcd->ud_gpio); + } + +- drm_panel_init(&lcd->panel); +- lcd->panel.dev = &pdev->dev; +- lcd->panel.funcs = &ls037v7dw01_funcs; ++ drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs); + + return drm_panel_add(&lcd->panel); + } +diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c +index c39abde9f9f1..ffa844ee82ad 100644 +--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c ++++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c +@@ -264,9 +264,8 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt) + if (IS_ERR(sharp_nt->backlight)) + return PTR_ERR(sharp_nt->backlight); + +- drm_panel_init(&sharp_nt->base); +- sharp_nt->base.funcs = &sharp_nt_panel_funcs; +- sharp_nt->base.dev = &sharp_nt->dsi->dev; ++ drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev, ++ &sharp_nt_panel_funcs); + + return drm_panel_add(&sharp_nt->base); + } +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c +index 28fa6ba7b767..b36c130d5cf0 100644 +--- a/drivers/gpu/drm/panel/panel-simple.c ++++ b/drivers/gpu/drm/panel/panel-simple.c +@@ -464,9 +464,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) + if (!of_get_display_timing(dev->of_node, "panel-timing", &dt)) + panel_simple_parse_panel_timing_node(dev, panel, &dt); + +- drm_panel_init(&panel->base); +- panel->base.dev = dev; +- panel->base.funcs = &panel_simple_funcs; ++ drm_panel_init(&panel->base, dev, &panel_simple_funcs); + + err = 
drm_panel_add(&panel->base); + if (err < 0) +diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c +index 09c5d9a6f9fa..3f7bcd24aa81 100644 +--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c ++++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c +@@ -369,7 +369,7 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi) + if (IS_ERR(st7701->backlight)) + return PTR_ERR(st7701->backlight); + +- drm_panel_init(&st7701->panel); ++ drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs); + + /** + * Once sleep out has been issued, ST7701 IC required to wait 120ms +@@ -381,8 +381,6 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi) + * ts8550b and there is no valid documentation for that. + */ + st7701->sleep_delay = 120 + desc->panel_sleep_delay; +- st7701->panel.funcs = &st7701_funcs; +- st7701->panel.dev = &dsi->dev; + + ret = drm_panel_add(&st7701->panel); + if (ret < 0) +diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +index 3b2612ae931e..2eeaeee0dd7f 100644 +--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c ++++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +@@ -381,9 +381,7 @@ static int st7789v_probe(struct spi_device *spi) + spi_set_drvdata(spi, ctx); + ctx->spi = spi; + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = &spi->dev; +- ctx->panel.funcs = &st7789v_drm_funcs; ++ drm_panel_init(&ctx->panel, &spi->dev, &st7789v_drm_funcs); + + ctx->power = devm_regulator_get(&spi->dev, "power"); + if (IS_ERR(ctx->power)) +diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c +index 3d5b9c4f68d9..1e39067387a6 100644 +--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c ++++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c +@@ -648,9 +648,7 @@ static int acx565akm_probe(struct spi_device *spi) + return ret; + } + +- drm_panel_init(&lcd->panel); +- lcd->panel.dev = &lcd->spi->dev; +- 
lcd->panel.funcs = &acx565akm_funcs; ++ drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs); + + ret = drm_panel_add(&lcd->panel); + if (ret < 0) { +diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +index f2baff827f50..76cfca89c3c7 100644 +--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c ++++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +@@ -347,9 +347,7 @@ static int td028ttec1_probe(struct spi_device *spi) + return ret; + } + +- drm_panel_init(&lcd->panel); +- lcd->panel.dev = &lcd->spi->dev; +- lcd->panel.funcs = &td028ttec1_funcs; ++ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs); + + return drm_panel_add(&lcd->panel); + } +diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +index ba163c779084..afd7c5ed53c4 100644 +--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c ++++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +@@ -458,9 +458,7 @@ static int td043mtea1_probe(struct spi_device *spi) + return ret; + } + +- drm_panel_init(&lcd->panel); +- lcd->panel.dev = &lcd->spi->dev; +- lcd->panel.funcs = &td043mtea1_funcs; ++ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs); + + ret = drm_panel_add(&lcd->panel); + if (ret < 0) { +diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c +index 71591e5f5938..25524c26b241 100644 +--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c ++++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c +@@ -457,9 +457,7 @@ static int tpg110_probe(struct spi_device *spi) + if (ret) + return ret; + +- drm_panel_init(&tpg->panel); +- tpg->panel.dev = dev; +- tpg->panel.funcs = &tpg110_drm_funcs; ++ drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs); + spi_set_drvdata(spi, tpg); + + return drm_panel_add(&tpg->panel); +diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c +index 77e1311b7c69..c3714be78837 100644 +--- 
a/drivers/gpu/drm/panel/panel-truly-nt35597.c ++++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c +@@ -518,9 +518,7 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx) + /* dual port */ + gpiod_set_value(ctx->mode_gpio, 0); + +- drm_panel_init(&ctx->panel); +- ctx->panel.dev = dev; +- ctx->panel.funcs = &truly_nt35597_drm_funcs; ++ drm_panel_init(&ctx->panel, dev, &truly_nt35597_drm_funcs); + drm_panel_add(&ctx->panel); + + return 0; +diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h +index 624bd15ecfab..4b9c656dc15e 100644 +--- a/include/drm/drm_panel.h ++++ b/include/drm/drm_panel.h +@@ -147,7 +147,8 @@ struct drm_panel { + struct list_head list; + }; + +-void drm_panel_init(struct drm_panel *panel); ++void drm_panel_init(struct drm_panel *panel, struct device *dev, ++ const struct drm_panel_funcs *funcs); + + int drm_panel_add(struct drm_panel *panel); + void drm_panel_remove(struct drm_panel *panel); +-- +2.17.1 + diff --git a/patches/0034-net-stmmac-Adding-ref-clock-1us-tic-for-LPI-c.connectivity b/patches/0034-net-stmmac-Adding-ref-clock-1us-tic-for-LPI-c.connectivity new file mode 100644 index 0000000000..e057fdc1f2 --- /dev/null +++ b/patches/0034-net-stmmac-Adding-ref-clock-1us-tic-for-LPI-c.connectivity @@ -0,0 +1,99 @@ +From 1dd3cf736dd09b7132e76c66744c151393452ddc Mon Sep 17 00:00:00 2001 +From: Rusaimi Amira Ruslan +Date: Fri, 9 Aug 2019 01:19:19 +0800 +Subject: [PATCH 034/108] net: stmmac: Adding ref clock 1us tic for LPI cntr + +Adding reference clock (1us tic) for all LPI timer. +This also enables all LPI counter. 
+ +Signed-off-by: Rusaimi Amira Ruslan +--- + drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 9 +++++++++ + drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 10 +++++++++- + include/linux/stmmac.h | 1 + + 3 files changed, 19 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +index fad503820e04..620e4169ed50 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +@@ -15,6 +15,7 @@ + + #include "stmmac.h" + #include "stmmac_platform.h" ++#include "dwmac4.h" + + static int dwmac_generic_probe(struct platform_device *pdev) + { +@@ -44,6 +45,7 @@ static int dwmac_generic_probe(struct platform_device *pdev) + + /* Set default value for unicast filter entries */ + plat_dat->unicast_filter_entries = 1; ++ plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate; + } + + /* Custom initialisation (if needed) */ +@@ -53,6 +55,13 @@ static int dwmac_generic_probe(struct platform_device *pdev) + goto err_remove_config_dt; + } + ++ if (plat_dat->eee_usecs_rate > 0) { ++ u32 tx_lpi_usec; ++ ++ tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1; ++ writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER); ++ } ++ + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_exit; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 64a6861c7176..f6493766c572 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -15,6 +15,7 @@ + #include + #include + #include "stmmac.h" ++#include "dwmac4.h" + + /* + * This struct is used to associate PCI Function of MAC controller on a board, +@@ -216,7 +217,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + plat->axi->axi_blen[2] = 16; + + plat->ptp_max_adj = plat->clk_ptp_rate; +- ++ plat->eee_usecs_rate 
= plat->clk_ptp_rate; + /* Set system clock */ + plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev, + "stmmac-clk", NULL, 0, +@@ -718,6 +719,13 @@ static int stmmac_pci_probe(struct pci_dev *pdev, + memset(&res, 0, sizeof(res)); + res.addr = pcim_iomap_table(pdev)[i]; + ++ if (plat->eee_usecs_rate > 0) { ++ u32 tx_lpi_usec; ++ ++ tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1; ++ writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER); ++ } ++ + ret = stmmac_config_multi_msi(pdev, plat, &res); + if (!ret) + goto msi_done; +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 76b63aaa3c58..fa740da70078 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -173,6 +173,7 @@ struct plat_stmmacenet_data { + struct clk *stmmac_clk; + struct clk *pclk; + struct clk *clk_ptp_ref; ++ unsigned int eee_usecs_rate; + unsigned int clk_ptp_rate; + unsigned int clk_ref_rate; + s32 ptp_max_adj; +-- +2.17.1 + diff --git a/patches/0034-samples-mei-use-hostprogs-kbuild-constructs.security b/patches/0034-samples-mei-use-hostprogs-kbuild-constructs.security new file mode 100644 index 0000000000..f518ea7ca0 --- /dev/null +++ b/patches/0034-samples-mei-use-hostprogs-kbuild-constructs.security @@ -0,0 +1,68 @@ +From 438172ae71bf7132b080bfc02ecb6dbfad532c94 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Thu, 19 Sep 2019 16:38:26 +0300 +Subject: [PATCH 34/65] samples: mei: use hostprogs kbuild constructs + +Use hostprogs kbuild constructs to compile +mei sample program mei-amt-version + +Add CONFIG_SAMPLE_INTEL_MEI option to enable/disable +the feature. 
+ +Change-Id: I0305934cd7222555413dc73c88a050e97896407e +Signed-off-by: Tomas Winkler +--- + samples/Kconfig | 7 +++++++ + samples/Makefile | 1 + + samples/mei/Makefile | 12 ++++++------ + 3 files changed, 14 insertions(+), 6 deletions(-) + +diff --git a/samples/Kconfig b/samples/Kconfig +index c8dacb4dda80..b663d9d24114 100644 +--- a/samples/Kconfig ++++ b/samples/Kconfig +@@ -169,4 +169,11 @@ config SAMPLE_VFS + as mount API and statx(). Note that this is restricted to the x86 + arch whilst it accesses system calls that aren't yet in all arches. + ++config SAMPLE_INTEL_MEI ++ bool "Build example program working with intel mei driver" ++ depends on INTEL_MEI ++ help ++ Build a sample program to work with mei device. ++ ++ + endif # SAMPLES +diff --git a/samples/Makefile b/samples/Makefile +index 7d6e4ca28d69..d6062ab25347 100644 +--- a/samples/Makefile ++++ b/samples/Makefile +@@ -20,3 +20,4 @@ obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace_printk/ + obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/ + obj-y += vfio-mdev/ + subdir-$(CONFIG_SAMPLE_VFS) += vfs ++obj-$(CONFIG_SAMPLE_INTEL_MEI) += mei/ +diff --git a/samples/mei/Makefile b/samples/mei/Makefile +index c7e52e9e92ca..27f37efdadb4 100644 +--- a/samples/mei/Makefile ++++ b/samples/mei/Makefile +@@ -1,10 +1,10 @@ + # SPDX-License-Identifier: GPL-2.0 +-CC := $(CROSS_COMPILE)gcc +-CFLAGS := -I../../usr/include ++# Copyright (c) 2012-2019, Intel Corporation. All rights reserved. 
+ +-PROGS := mei-amt-version ++hostprogs-y := mei-amt-version + +-all: $(PROGS) ++HOSTCFLAGS_mei-amt-version.o += -I$(objtree)/usr/include + +-clean: +- rm -fr $(PROGS) ++always := $(hostprogs-y) ++ ++all: mei-amt-version +-- +2.17.1 + diff --git a/patches/0034-serial-remove-legacy-pm-hook.lpss b/patches/0034-serial-remove-legacy-pm-hook.lpss new file mode 100644 index 0000000000..2e38640ca4 --- /dev/null +++ b/patches/0034-serial-remove-legacy-pm-hook.lpss @@ -0,0 +1,1263 @@ +From e4a2dfa16bea9cbee216ff9349666e79c57c454a Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Wed, 12 Oct 2016 19:23:29 +0300 +Subject: [PATCH 34/40] serial: *: remove legacy ->pm() hook + +Since we switch to runtime PM the old PM hook is not needed anymore. + +TODO: Use of serial8250_set_sleep() ? +TODO: serial: *: enable runtime PM + +Enable runtime PM for all ports unconditionally. Set autosuspend time to +infinity (-1) to avoid port being shutdown when using, for example, as a +serial console since there is no means to resume it back. 
+ +Signed-off-by: Andy Shevchenko +--- + Documentation/driver-api/serial/driver.rst | 15 --------- + arch/arm/mach-omap1/board-ams-delta.c | 28 ----------------- + arch/arm/mach-sa1100/assabet.c | 18 ----------- + arch/arm/mach-sa1100/badge4.c | 13 -------- + arch/arm/mach-sa1100/h3xxx.c | 14 --------- + arch/arm/mach-sa1100/hackkit.c | 19 ------------ + arch/arm/mach-sa1100/simpad.c | 22 ------------- + arch/mips/alchemy/common/platform.c | 21 ------------- + drivers/tty/serial/8250/8250_core.c | 1 - + drivers/tty/serial/8250/8250_exar.c | 12 -------- + drivers/tty/serial/8250/8250_mtk.c | 13 -------- + drivers/tty/serial/8250/8250_omap.c | 23 -------------- + drivers/tty/serial/8250/8250_port.c | 11 ------- + drivers/tty/serial/atmel_serial.c | 36 ---------------------- + drivers/tty/serial/dz.c | 25 +-------------- + drivers/tty/serial/mpc52xx_uart.c | 1 - + drivers/tty/serial/msm_serial.c | 20 ------------ + drivers/tty/serial/omap-serial.c | 25 --------------- + drivers/tty/serial/pch_uart.c | 1 - + drivers/tty/serial/pxa.c | 13 -------- + drivers/tty/serial/qcom_geni_serial.c | 18 ----------- + drivers/tty/serial/sa1100.c | 1 - + drivers/tty/serial/samsung.c | 34 -------------------- + drivers/tty/serial/sc16is7xx.c | 7 ----- + drivers/tty/serial/serial_txx9.c | 17 ---------- + drivers/tty/serial/sh-sci.c | 16 ---------- + drivers/tty/serial/sirfsoc_uart.c | 11 ------- + drivers/tty/serial/sprd_serial.c | 17 ---------- + drivers/tty/serial/st-asc.c | 27 ---------------- + drivers/tty/serial/stm32-usart.c | 23 -------------- + drivers/tty/serial/uartlite.c | 12 -------- + drivers/tty/serial/vr41xx_siu.c | 27 ---------------- + drivers/tty/serial/xilinx_uartps.c | 15 --------- + drivers/tty/serial/zs.c | 20 ------------ + include/linux/serial_8250.h | 2 -- + 35 files changed, 1 insertion(+), 577 deletions(-) + +diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst +index 31bd4e16fb1f..b6269c4f89aa 100644 +--- 
a/Documentation/driver-api/serial/driver.rst ++++ b/Documentation/driver-api/serial/driver.rst +@@ -315,21 +315,6 @@ hardware. + + Locking: caller holds tty_port->mutex + +- pm(port,state,oldstate) +- Perform any power management related activities on the specified +- port. State indicates the new state (defined by +- enum uart_pm_state), oldstate indicates the previous state. +- +- This function should not be used to grab any resources. +- +- This will be called when the port is initially opened and finally +- closed, except when the port is also the system console. This +- will occur even if CONFIG_PM is not set. +- +- Locking: none. +- +- Interrupts: caller dependent. +- + type(port) + Return a pointer to a string constant describing the specified + port, or return NULL, in which case the string 'unknown' is +diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c +index a2aa7a12b374..934d534b03b6 100644 +--- a/arch/arm/mach-omap1/board-ams-delta.c ++++ b/arch/arm/mach-omap1/board-ams-delta.c +@@ -759,33 +759,6 @@ static void __init ams_delta_init(void) + omapfb_set_lcd_config(&ams_delta_lcd_config); + } + +-static void modem_pm(struct uart_port *port, unsigned int state, unsigned old) +-{ +- struct modem_private_data *priv = port->private_data; +- int ret; +- +- if (!priv) +- return; +- +- if (IS_ERR(priv->regulator)) +- return; +- +- if (state == old) +- return; +- +- if (state == 0) +- ret = regulator_enable(priv->regulator); +- else if (old == 0) +- ret = regulator_disable(priv->regulator); +- else +- ret = 0; +- +- if (ret) +- dev_warn(port->dev, +- "ams_delta modem_pm: failed to %sable regulator: %d\n", +- state ? 
"dis" : "en", ret); +-} +- + static struct plat_serial8250_port ams_delta_modem_ports[] = { + { + .membase = IOMEM(MODEM_VIRT), +@@ -796,7 +769,6 @@ static struct plat_serial8250_port ams_delta_modem_ports[] = { + .iotype = UPIO_MEM, + .regshift = 1, + .uartclk = BASE_BAUD * 16, +- .pm = modem_pm, + .private_data = &modem_priv, + }, + { }, +diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c +index d96a101e5504..b9da8052fef8 100644 +--- a/arch/arm/mach-sa1100/assabet.c ++++ b/arch/arm/mach-sa1100/assabet.c +@@ -676,21 +676,6 @@ fixup_assabet(struct tag *tags, char **cmdline) + printk("Neponset expansion board detected\n"); + } + +- +-static void assabet_uart_pm(struct uart_port *port, u_int state, u_int oldstate) +-{ +- if (port->mapbase == _Ser1UTCR0) { +- if (state) +- ASSABET_BCR_clear(ASSABET_BCR_RS232EN); +- else +- ASSABET_BCR_set(ASSABET_BCR_RS232EN); +- } +-} +- +-static struct sa1100_port_fns assabet_port_fns __initdata = { +- .pm = assabet_uart_pm, +-}; +- + static struct map_desc assabet_io_desc[] __initdata = { + { /* Board Control Register */ + .virtual = 0xf1000000, +@@ -719,9 +704,6 @@ static void __init assabet_map_io(void) + MSC_NonBrst | MSC_32BitStMem | + MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0); + +- if (!machine_has_neponset()) +- sa1100_register_uart_fns(&assabet_port_fns); +- + /* + * When Neponset is attached, the first UART should be + * UART3. 
That's what Angel is doing and many documents +diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c +index de79f3502045..1c42b439da76 100644 +--- a/arch/arm/mach-sa1100/badge4.c ++++ b/arch/arm/mach-sa1100/badge4.c +@@ -302,24 +302,11 @@ static struct map_desc badge4_io_desc[] __initdata = { + } + }; + +-static void +-badge4_uart_pm(struct uart_port *port, u_int state, u_int oldstate) +-{ +- if (!state) { +- Ser1SDCR0 |= SDCR0_UART; +- } +-} +- +-static struct sa1100_port_fns badge4_port_fns __initdata = { +- .pm = badge4_uart_pm, +-}; +- + static void __init badge4_map_io(void) + { + sa1100_map_io(); + iotable_init(badge4_io_desc, ARRAY_SIZE(badge4_io_desc)); + +- sa1100_register_uart_fns(&badge4_port_fns); + sa1100_register_uart(0, 3); + sa1100_register_uart(1, 1); + } +diff --git a/arch/arm/mach-sa1100/h3xxx.c b/arch/arm/mach-sa1100/h3xxx.c +index d685f03f51f3..aaf2262288df 100644 +--- a/arch/arm/mach-sa1100/h3xxx.c ++++ b/arch/arm/mach-sa1100/h3xxx.c +@@ -83,19 +83,6 @@ static struct resource h3xxx_flash_resource = + /* + * H3xxx uart support + */ +-static void h3xxx_uart_pm(struct uart_port *port, u_int state, u_int oldstate) +-{ +- if (port->mapbase == _Ser3UTCR0) { +- if (!gpio_request(H3XXX_EGPIO_RS232_ON, "RS232 transceiver")) { +- gpio_direction_output(H3XXX_EGPIO_RS232_ON, !state); +- gpio_free(H3XXX_EGPIO_RS232_ON); +- } else { +- pr_err("%s: can't request H3XXX_EGPIO_RS232_ON\n", +- __func__); +- } +- } +-} +- + /* + * Enable/Disable wake up events for this serial port. + * Obviously, we only support this on the normal COM port. 
+@@ -115,7 +102,6 @@ static int h3xxx_uart_set_wake(struct uart_port *port, u_int enable) + } + + static struct sa1100_port_fns h3xxx_port_fns __initdata = { +- .pm = h3xxx_uart_pm, + .set_wake = h3xxx_uart_set_wake, + }; + +diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c +index 6d37d263e0d2..586db474cfe5 100644 +--- a/arch/arm/mach-sa1100/hackkit.c ++++ b/arch/arm/mach-sa1100/hackkit.c +@@ -45,8 +45,6 @@ + /* init funcs */ + static void __init hackkit_map_io(void); + +-static void hackkit_uart_pm(struct uart_port *port, u_int state, u_int oldstate); +- + /********************************************************************** + * global data + */ +@@ -64,10 +62,6 @@ static struct map_desc hackkit_io_desc[] __initdata = { + }, + }; + +-static struct sa1100_port_fns hackkit_port_fns __initdata = { +- .pm = hackkit_uart_pm, +-}; +- + /********************************************************************** + * Static functions + */ +@@ -77,7 +71,6 @@ static void __init hackkit_map_io(void) + sa1100_map_io(); + iotable_init(hackkit_io_desc, ARRAY_SIZE(hackkit_io_desc)); + +- sa1100_register_uart_fns(&hackkit_port_fns); + sa1100_register_uart(0, 1); /* com port */ + sa1100_register_uart(1, 2); + sa1100_register_uart(2, 3); /* radio module */ +@@ -85,18 +78,6 @@ static void __init hackkit_map_io(void) + Ser1SDCR0 |= SDCR0_SUS; + } + +-/** +- * hackkit_uart_pm - powermgmt callback function for system 3 UART +- * @port: uart port structure +- * @state: pm state +- * @oldstate: old pm state +- * +- */ +-static void hackkit_uart_pm(struct uart_port *port, u_int state, u_int oldstate) +-{ +- /* TODO: switch on/off uart in powersave mode */ +-} +- + static struct mtd_partition hackkit_partitions[] = { + { + .name = "BLOB", +diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c +index c7fb9a73e4c5..ea3998f92a83 100644 +--- a/arch/arm/mach-sa1100/simpad.c ++++ b/arch/arm/mach-sa1100/simpad.c +@@ -134,27 +134,6 @@ static struct 
map_desc simpad_io_desc[] __initdata = { + }, + }; + +- +-static void simpad_uart_pm(struct uart_port *port, u_int state, u_int oldstate) +-{ +- if (port->mapbase == (u_int)&Ser1UTCR0) { +- if (state) +- { +- simpad_clear_cs3_bit(RS232_ON); +- simpad_clear_cs3_bit(DECT_POWER_ON); +- }else +- { +- simpad_set_cs3_bit(RS232_ON); +- simpad_set_cs3_bit(DECT_POWER_ON); +- } +- } +-} +- +-static struct sa1100_port_fns simpad_port_fns __initdata = { +- .pm = simpad_uart_pm, +-}; +- +- + static struct mtd_partition simpad_partitions[] = { + { + .name = "SIMpad boot firmware", +@@ -207,7 +186,6 @@ static void __init simpad_map_io(void) + RS232_ON | ENABLE_5V | RESET_SIMCARD | DECT_POWER_ON); + __simpad_write_cs3(); /* Spinlocks not yet initialized */ + +- sa1100_register_uart_fns(&simpad_port_fns); + sa1100_register_uart(0, 3); /* serial interface */ + sa1100_register_uart(1, 1); /* DECT */ + +diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c +index b8f3397c59c9..f82a6007fc3b 100644 +--- a/arch/mips/alchemy/common/platform.c ++++ b/arch/mips/alchemy/common/platform.c +@@ -28,26 +28,6 @@ + + #include + +-static void alchemy_8250_pm(struct uart_port *port, unsigned int state, +- unsigned int old_state) +-{ +-#ifdef CONFIG_SERIAL_8250 +- switch (state) { +- case 0: +- alchemy_uart_enable(CPHYSADDR(port->membase)); +- serial8250_do_pm(port, state, old_state); +- break; +- case 3: /* power off */ +- serial8250_do_pm(port, state, old_state); +- alchemy_uart_disable(CPHYSADDR(port->membase)); +- break; +- default: +- serial8250_do_pm(port, state, old_state); +- break; +- } +-#endif +-} +- + #define PORT(_base, _irq) \ + { \ + .mapbase = _base, \ +@@ -57,7 +37,6 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, + .flags = UPF_SKIP_TEST | UPF_IOREMAP | \ + UPF_FIXED_TYPE, \ + .type = PORT_16550A, \ +- .pm = alchemy_8250_pm, \ + } + + static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = { +diff --git 
a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c +index 374ad3947b4f..ff0bed933718 100644 +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -834,7 +834,6 @@ static int serial8250_probe(struct platform_device *dev) + uart.port.set_termios = p->set_termios; + uart.port.set_ldisc = p->set_ldisc; + uart.port.get_mctrl = p->get_mctrl; +- uart.port.pm = p->pm; + uart.port.dev = &dev->dev; + uart.port.irqflags |= irqflag; + ret = serial8250_register_8250_port(&uart); +diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c +index 597eb9d16f21..341ad72ad64f 100644 +--- a/drivers/tty/serial/8250/8250_exar.c ++++ b/drivers/tty/serial/8250/8250_exar.c +@@ -130,17 +130,6 @@ struct exar8250 { + int line[0]; + }; + +-static void exar_pm(struct uart_port *port, unsigned int state, unsigned int old) +-{ +- /* +- * Exar UARTs have a SLEEP register that enables or disables each UART +- * to enter sleep mode separately. On the XR17V35x the register +- * is accessible to each UART at the UART_EXAR_SLEEP offset, but +- * the UART channel may only write to the corresponding bit. +- */ +- serial_port_out(port, UART_EXAR_SLEEP, state ? 
0xff : 0); +-} +- + /* + * XR17V35x UARTs have an extra fractional divisor register (DLD) + * Calculate divisor with extra 4-bit fractional portion +@@ -216,7 +205,6 @@ static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev, + port->port.type = PORT_XR17D15X; + } + +- port->port.pm = exar_pm; + port->port.shutdown = exar_shutdown; + + return 0; +diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c +index b411ba4eb5e9..2ae0401bf15c 100644 +--- a/drivers/tty/serial/8250/8250_mtk.c ++++ b/drivers/tty/serial/8250/8250_mtk.c +@@ -415,18 +415,6 @@ static int __maybe_unused mtk8250_runtime_resume(struct device *dev) + return 0; + } + +-static void +-mtk8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) +-{ +- if (!state) +- pm_runtime_get_sync(port->dev); +- +- serial8250_do_pm(port, state, old); +- +- if (state) +- pm_runtime_put_sync_suspend(port->dev); +-} +- + #ifdef CONFIG_SERIAL_8250_DMA + static bool mtk8250_dma_filter(struct dma_chan *chan, void *param) + { +@@ -511,7 +499,6 @@ static int mtk8250_probe(struct platform_device *pdev) + spin_lock_init(&uart.port.lock); + uart.port.mapbase = regs->start; + uart.port.irq = irq->start; +- uart.port.pm = mtk8250_do_pm; + uart.port.type = PORT_16550; + uart.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT; + uart.port.dev = &pdev->dev; +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c +index 836e736ae188..07dcbf5f6db7 100644 +--- a/drivers/tty/serial/8250/8250_omap.c ++++ b/drivers/tty/serial/8250/8250_omap.c +@@ -494,28 +494,6 @@ static void omap_8250_set_termios(struct uart_port *port, + tty_termios_encode_baud_rate(termios, baud, baud); + } + +-/* same as 8250 except that we may have extra flow bits set in EFR */ +-static void omap_8250_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct uart_8250_port *up = up_to_u8250p(port); +- u8 efr; +- +- pm_runtime_get_sync(port->dev); +- 
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); +- efr = serial_in(up, UART_EFR); +- serial_out(up, UART_EFR, efr | UART_EFR_ECB); +- serial_out(up, UART_LCR, 0); +- +- serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0); +- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); +- serial_out(up, UART_EFR, efr); +- serial_out(up, UART_LCR, 0); +- +- pm_runtime_mark_last_busy(port->dev); +- pm_runtime_put_autosuspend(port->dev); +-} +- + static void omap_serial_fill_features_erratas(struct uart_8250_port *up, + struct omap8250_priv *priv) + { +@@ -1186,7 +1164,6 @@ static int omap8250_probe(struct platform_device *pdev) + #endif + up.port.set_termios = omap_8250_set_termios; + up.port.set_mctrl = omap8250_set_mctrl; +- up.port.pm = omap_8250_pm; + up.port.startup = omap_8250_startup; + up.port.shutdown = omap_8250_shutdown; + up.port.throttle = omap_8250_throttle; +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index 5be65a0e9961..a8f29eaa3012 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -2705,16 +2705,6 @@ void serial8250_do_pm(struct uart_port *port, unsigned int state, + } + EXPORT_SYMBOL(serial8250_do_pm); + +-static void +-serial8250_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- if (port->pm) +- port->pm(port, state, oldstate); +- else +- serial8250_do_pm(port, state, oldstate); +-} +- + static unsigned int serial8250_port_size(struct uart_8250_port *pt) + { + if (pt->port.mapsize) +@@ -3026,7 +3016,6 @@ static const struct uart_ops serial8250_pops = { + .shutdown = serial8250_shutdown, + .set_termios = serial8250_set_termios, + .set_ldisc = serial8250_set_ldisc, +- .pm = serial8250_pm, + .type = serial8250_type, + .release_port = serial8250_release_port, + .request_port = serial8250_request_port, +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index a8dc8af83f39..6a536c05872b 100644 +--- 
a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -2090,41 +2090,6 @@ static void atmel_shutdown(struct uart_port *port) + atmel_flush_buffer(port); + } + +-/* +- * Power / Clock management. +- */ +-static void atmel_serial_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); +- +- switch (state) { +- case 0: +- /* +- * Enable the peripheral clock for this serial port. +- * This is called on uart_open() or a resume event. +- */ +- clk_prepare_enable(atmel_port->clk); +- +- /* re-enable interrupts if we disabled some on suspend */ +- atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr); +- break; +- case 3: +- /* Back up the interrupt mask and disable all interrupts */ +- atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR); +- atmel_uart_writel(port, ATMEL_US_IDR, -1); +- +- /* +- * Disable the peripheral clock for this serial port. +- * This is called on uart_close() or a suspend event. +- */ +- clk_disable_unprepare(atmel_port->clk); +- break; +- default: +- dev_err(port->dev, "atmel_serial: unknown pm %d\n", state); +- } +-} +- + /* + * Change the port parameters + */ +@@ -2467,7 +2432,6 @@ static const struct uart_ops atmel_pops = { + .request_port = atmel_request_port, + .config_port = atmel_config_port, + .verify_port = atmel_verify_port, +- .pm = atmel_serial_pm, + #ifdef CONFIG_CONSOLE_POLL + .poll_get_char = atmel_poll_get_char, + .poll_put_char = atmel_poll_put_char, +diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c +index 7b57e840e255..395371114605 100644 +--- a/drivers/tty/serial/dz.c ++++ b/drivers/tty/serial/dz.c +@@ -636,26 +636,6 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios, + spin_unlock_irqrestore(&dport->port.lock, flags); + } + +-/* +- * Hack alert! +- * Required solely so that the initial PROM-based console +- * works undisturbed in parallel with this one. 
+- */ +-static void dz_pm(struct uart_port *uport, unsigned int state, +- unsigned int oldstate) +-{ +- struct dz_port *dport = to_dport(uport); +- unsigned long flags; +- +- spin_lock_irqsave(&dport->port.lock, flags); +- if (state < 3) +- dz_start_tx(&dport->port); +- else +- dz_stop_tx(&dport->port); +- spin_unlock_irqrestore(&dport->port.lock, flags); +-} +- +- + static const char *dz_type(struct uart_port *uport) + { + return "DZ"; +@@ -751,7 +731,6 @@ static const struct uart_ops dz_ops = { + .startup = dz_startup, + .shutdown = dz_shutdown, + .set_termios = dz_set_termios, +- .pm = dz_pm, + .type = dz_type, + .release_port = dz_release_port, + .request_port = dz_request_port, +@@ -779,6 +758,7 @@ static void __init dz_init_ports(void) + struct uart_port *uport = &dport->port; + + dport->mux = &dz_mux; ++ spin_lock_init(&uport->lock); + + uport->irq = dec_interrupt[DEC_IRQ_DZ11]; + uport->fifosize = 1; +@@ -875,10 +855,7 @@ static int __init dz_console_setup(struct console *co, char *options) + if (ret) + return ret; + +- spin_lock_init(&dport->port.lock); /* For dz_pm(). 
*/ +- + dz_reset(dport); +- dz_pm(uport, 0, -1); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); +diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c +index 3a75ee08d619..264d066dfc0d 100644 +--- a/drivers/tty/serial/mpc52xx_uart.c ++++ b/drivers/tty/serial/mpc52xx_uart.c +@@ -1356,7 +1356,6 @@ static const struct uart_ops mpc52xx_uart_ops = { + .startup = mpc52xx_uart_startup, + .shutdown = mpc52xx_uart_shutdown, + .set_termios = mpc52xx_uart_set_termios, +-/* .pm = mpc52xx_uart_pm, Not supported yet */ + .type = mpc52xx_uart_type, + .release_port = mpc52xx_uart_release_port, + .request_port = mpc52xx_uart_request_port, +diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c +index 3657a24913fc..e9f3f4f946a8 100644 +--- a/drivers/tty/serial/msm_serial.c ++++ b/drivers/tty/serial/msm_serial.c +@@ -1386,25 +1386,6 @@ static int msm_verify_port(struct uart_port *port, struct serial_struct *ser) + return 0; + } + +-static void msm_power(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct msm_port *msm_port = UART_TO_MSM(port); +- +- switch (state) { +- case 0: +- clk_prepare_enable(msm_port->clk); +- clk_prepare_enable(msm_port->pclk); +- break; +- case 3: +- clk_disable_unprepare(msm_port->clk); +- clk_disable_unprepare(msm_port->pclk); +- break; +- default: +- pr_err("msm_serial: Unknown PM state %d\n", state); +- } +-} +- + #ifdef CONFIG_CONSOLE_POLL + static int msm_poll_get_char_single(struct uart_port *port) + { +@@ -1524,7 +1505,6 @@ static struct uart_ops msm_uart_pops = { + .request_port = msm_request_port, + .config_port = msm_config_port, + .verify_port = msm_verify_port, +- .pm = msm_power, + #ifdef CONFIG_CONSOLE_POLL + .poll_get_char = msm_poll_get_char, + .poll_put_char = msm_poll_put_char, +diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c +index 6420ae581a80..b5abeaa004bc 100644 +--- 
a/drivers/tty/serial/omap-serial.c ++++ b/drivers/tty/serial/omap-serial.c +@@ -1097,30 +1097,6 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, + dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line); + } + +-static void +-serial_omap_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct uart_omap_port *up = to_uart_omap_port(port); +- unsigned char efr; +- +- dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line); +- +- pm_runtime_get_sync(up->dev); +- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); +- efr = serial_in(up, UART_EFR); +- serial_out(up, UART_EFR, efr | UART_EFR_ECB); +- serial_out(up, UART_LCR, 0); +- +- serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0); +- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); +- serial_out(up, UART_EFR, efr); +- serial_out(up, UART_LCR, 0); +- +- pm_runtime_mark_last_busy(up->dev); +- pm_runtime_put_autosuspend(up->dev); +-} +- + static void serial_omap_release_port(struct uart_port *port) + { + dev_dbg(port->dev, "serial_omap_release_port+\n"); +@@ -1459,7 +1435,6 @@ static const struct uart_ops serial_omap_pops = { + .startup = serial_omap_startup, + .shutdown = serial_omap_shutdown, + .set_termios = serial_omap_set_termios, +- .pm = serial_omap_pm, + .type = serial_omap_type, + .release_port = serial_omap_release_port, + .request_port = serial_omap_request_port, +diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c +index 6157213a8359..f231eaf5b47c 100644 +--- a/drivers/tty/serial/pch_uart.c ++++ b/drivers/tty/serial/pch_uart.c +@@ -1594,7 +1594,6 @@ static const struct uart_ops pch_uart_ops = { + .startup = pch_uart_startup, + .shutdown = pch_uart_shutdown, + .set_termios = pch_uart_set_termios, +-/* .pm = pch_uart_pm, Not supported yet */ + .type = pch_uart_type, + .release_port = pch_uart_release_port, + .request_port = pch_uart_request_port, +diff --git a/drivers/tty/serial/pxa.c 
b/drivers/tty/serial/pxa.c +index 4932b674f7ef..a63ea2ae0147 100644 +--- a/drivers/tty/serial/pxa.c ++++ b/drivers/tty/serial/pxa.c +@@ -547,18 +547,6 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios, + spin_unlock_irqrestore(&up->port.lock, flags); + } + +-static void +-serial_pxa_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct uart_pxa_port *up = (struct uart_pxa_port *)port; +- +- if (!state) +- clk_prepare_enable(up->clk); +- else +- clk_disable_unprepare(up->clk); +-} +- + static void serial_pxa_release_port(struct uart_port *port) + { + } +@@ -770,7 +758,6 @@ static const struct uart_ops serial_pxa_pops = { + .startup = serial_pxa_startup, + .shutdown = serial_pxa_shutdown, + .set_termios = serial_pxa_set_termios, +- .pm = serial_pxa_pm, + .type = serial_pxa_type, + .release_port = serial_pxa_release_port, + .request_port = serial_pxa_request_port, +diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c +index 14c6306bc462..612cb9588857 100644 +--- a/drivers/tty/serial/qcom_geni_serial.c ++++ b/drivers/tty/serial/qcom_geni_serial.c +@@ -1193,22 +1193,6 @@ static struct uart_driver qcom_geni_uart_driver = { + .nr = GENI_UART_PORTS, + }; + +-static void qcom_geni_serial_pm(struct uart_port *uport, +- unsigned int new_state, unsigned int old_state) +-{ +- struct qcom_geni_serial_port *port = to_dev_port(uport, uport); +- +- /* If we've never been called, treat it as off */ +- if (old_state == UART_PM_STATE_UNDEFINED) +- old_state = UART_PM_STATE_OFF; +- +- if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF) +- geni_se_resources_on(&port->se); +- else if (new_state == UART_PM_STATE_OFF && +- old_state == UART_PM_STATE_ON) +- geni_se_resources_off(&port->se); +-} +- + static const struct uart_ops qcom_geni_console_pops = { + .tx_empty = qcom_geni_serial_tx_empty, + .stop_tx = qcom_geni_serial_stop_tx, +@@ -1226,7 +1210,6 @@ static const struct 
uart_ops qcom_geni_console_pops = { + .poll_get_char = qcom_geni_serial_get_char, + .poll_put_char = qcom_geni_serial_poll_put_char, + #endif +- .pm = qcom_geni_serial_pm, + }; + + static const struct uart_ops qcom_geni_uart_pops = { +@@ -1242,7 +1225,6 @@ static const struct uart_ops qcom_geni_uart_pops = { + .type = qcom_geni_serial_get_type, + .set_mctrl = qcom_geni_serial_set_mctrl, + .get_mctrl = qcom_geni_serial_get_mctrl, +- .pm = qcom_geni_serial_pm, + }; + + static int qcom_geni_serial_probe(struct platform_device *pdev) +diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c +index 8e618129e65c..6466af783f90 100644 +--- a/drivers/tty/serial/sa1100.c ++++ b/drivers/tty/serial/sa1100.c +@@ -660,7 +660,6 @@ void sa1100_register_uart_fns(struct sa1100_port_fns *fns) + if (fns->set_mctrl) + sa1100_pops.set_mctrl = fns->set_mctrl; + +- sa1100_pops.pm = fns->pm; + /* + * FIXME: fns->set_wake is unused - this should be called from + * the suspend() callback if device_may_wakeup(dev)) is set. 
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c +index 83fd51607741..0527759fc274 100644 +--- a/drivers/tty/serial/samsung.c ++++ b/drivers/tty/serial/samsung.c +@@ -1101,39 +1101,6 @@ static int s3c64xx_serial_startup(struct uart_port *port) + return ret; + } + +-/* power power management control */ +- +-static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level, +- unsigned int old) +-{ +- struct s3c24xx_uart_port *ourport = to_ourport(port); +- int timeout = 10000; +- +- ourport->pm_level = level; +- +- switch (level) { +- case 3: +- while (--timeout && !s3c24xx_serial_txempty_nofifo(port)) +- udelay(100); +- +- if (!IS_ERR(ourport->baudclk)) +- clk_disable_unprepare(ourport->baudclk); +- +- clk_disable_unprepare(ourport->clk); +- break; +- +- case 0: +- clk_prepare_enable(ourport->clk); +- +- if (!IS_ERR(ourport->baudclk)) +- clk_prepare_enable(ourport->baudclk); +- +- break; +- default: +- dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level); +- } +-} +- + /* baud rate calculation + * + * The UARTs on the S3C2410/S3C2440 can take their clocks from a number +@@ -1492,7 +1459,6 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port, + #endif + + static struct uart_ops s3c24xx_serial_ops = { +- .pm = s3c24xx_serial_pm, + .tx_empty = s3c24xx_serial_tx_empty, + .get_mctrl = s3c24xx_serial_get_mctrl, + .set_mctrl = s3c24xx_serial_set_mctrl, +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c +index 7d3ae31cc720..4278ce1f4a1f 100644 +--- a/drivers/tty/serial/sc16is7xx.c ++++ b/drivers/tty/serial/sc16is7xx.c +@@ -1093,12 +1093,6 @@ static int sc16is7xx_verify_port(struct uart_port *port, + return 0; + } + +-static void sc16is7xx_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- sc16is7xx_power(port, (state == UART_PM_STATE_ON) ? 
1 : 0); +-} +- + static void sc16is7xx_null_void(struct uart_port *port) + { + /* Do nothing */ +@@ -1120,7 +1114,6 @@ static const struct uart_ops sc16is7xx_ops = { + .release_port = sc16is7xx_null_void, + .config_port = sc16is7xx_config_port, + .verify_port = sc16is7xx_verify_port, +- .pm = sc16is7xx_pm, + }; + + #ifdef CONFIG_GPIOLIB +diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c +index d22ccb32aa9b..6e13fd4a5097 100644 +--- a/drivers/tty/serial/serial_txx9.c ++++ b/drivers/tty/serial/serial_txx9.c +@@ -730,22 +730,6 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, + spin_unlock_irqrestore(&up->port.lock, flags); + } + +-static void +-serial_txx9_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- /* +- * If oldstate was -1 this is called from +- * uart_configure_port(). In this case do not initialize the +- * port now, because the port was already initialized (for +- * non-console port) or should not be initialized here (for +- * console port). If we initialized the port here we lose +- * serial console settings. 
+- */ +- if (state == 0 && oldstate != -1) +- serial_txx9_initialize(port); +-} +- + static int serial_txx9_request_resource(struct uart_txx9_port *up) + { + unsigned int size = TXX9_REGION_SIZE; +@@ -852,7 +836,6 @@ static const struct uart_ops serial_txx9_pops = { + .startup = serial_txx9_startup, + .shutdown = serial_txx9_shutdown, + .set_termios = serial_txx9_set_termios, +- .pm = serial_txx9_pm, + .type = serial_txx9_type, + .release_port = serial_txx9_release_port, + .request_port = serial_txx9_request_port, +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c +index 22e5d4e13714..df213ab51514 100644 +--- a/drivers/tty/serial/sh-sci.c ++++ b/drivers/tty/serial/sh-sci.c +@@ -2633,21 +2633,6 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, + sci_enable_ms(port); + } + +-static void sci_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct sci_port *sci_port = to_sci_port(port); +- +- switch (state) { +- case UART_PM_STATE_OFF: +- sci_port_disable(sci_port); +- break; +- default: +- sci_port_enable(sci_port); +- break; +- } +-} +- + static const char *sci_type(struct uart_port *port) + { + switch (port->type) { +@@ -2762,7 +2747,6 @@ static const struct uart_ops sci_uart_ops = { + .shutdown = sci_shutdown, + .flush_buffer = sci_flush_buffer, + .set_termios = sci_set_termios, +- .pm = sci_pm, + .type = sci_type, + .release_port = sci_release_port, + .request_port = sci_request_port, +diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c +index 38622f2a30a9..d3827063969a 100644 +--- a/drivers/tty/serial/sirfsoc_uart.c ++++ b/drivers/tty/serial/sirfsoc_uart.c +@@ -881,16 +881,6 @@ static void sirfsoc_uart_set_termios(struct uart_port *port, + spin_unlock_irqrestore(&port->lock, flags); + } + +-static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct sirfsoc_uart_port *sirfport = to_sirfport(port); +- 
if (!state) +- clk_prepare_enable(sirfport->clk); +- else +- clk_disable_unprepare(sirfport->clk); +-} +- + static int sirfsoc_uart_startup(struct uart_port *port) + { + struct sirfsoc_uart_port *sirfport = to_sirfport(port); +@@ -1072,7 +1062,6 @@ static const struct uart_ops sirfsoc_uart_ops = { + .startup = sirfsoc_uart_startup, + .shutdown = sirfsoc_uart_shutdown, + .set_termios = sirfsoc_uart_set_termios, +- .pm = sirfsoc_uart_pm, + .type = sirfsoc_uart_type, + .release_port = sirfsoc_uart_release_port, + .request_port = sirfsoc_uart_request_port, +diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c +index 771d11196523..4e2a9fe1cf32 100644 +--- a/drivers/tty/serial/sprd_serial.c ++++ b/drivers/tty/serial/sprd_serial.c +@@ -903,22 +903,6 @@ static int sprd_verify_port(struct uart_port *port, struct serial_struct *ser) + return 0; + } + +-static void sprd_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct sprd_uart_port *sup = +- container_of(port, struct sprd_uart_port, port); +- +- switch (state) { +- case UART_PM_STATE_ON: +- clk_prepare_enable(sup->clk); +- break; +- case UART_PM_STATE_OFF: +- clk_disable_unprepare(sup->clk); +- break; +- } +-} +- + static const struct uart_ops serial_sprd_ops = { + .tx_empty = sprd_tx_empty, + .get_mctrl = sprd_get_mctrl, +@@ -935,7 +919,6 @@ static const struct uart_ops serial_sprd_ops = { + .request_port = sprd_request_port, + .config_port = sprd_config_port, + .verify_port = sprd_verify_port, +- .pm = sprd_pm, + }; + + #ifdef CONFIG_SERIAL_SPRD_CONSOLE +diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c +index 7971997cdead..940197802076 100644 +--- a/drivers/tty/serial/st-asc.c ++++ b/drivers/tty/serial/st-asc.c +@@ -478,32 +478,6 @@ static void asc_shutdown(struct uart_port *port) + free_irq(port->irq, port); + } + +-static void asc_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct asc_port *ascport 
= to_asc_port(port); +- unsigned long flags = 0; +- u32 ctl; +- +- switch (state) { +- case UART_PM_STATE_ON: +- clk_prepare_enable(ascport->clk); +- break; +- case UART_PM_STATE_OFF: +- /* +- * Disable the ASC baud rate generator, which is as close as +- * we can come to turning it off. Note this is not called with +- * the port spinlock held. +- */ +- spin_lock_irqsave(&port->lock, flags); +- ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN; +- asc_out(port, ASC_CTL, ctl); +- spin_unlock_irqrestore(&port->lock, flags); +- clk_disable_unprepare(ascport->clk); +- break; +- } +-} +- + static void asc_set_termios(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) + { +@@ -710,7 +684,6 @@ static const struct uart_ops asc_uart_ops = { + .request_port = asc_request_port, + .config_port = asc_config_port, + .verify_port = asc_verify_port, +- .pm = asc_pm, + #ifdef CONFIG_CONSOLE_POLL + .poll_get_char = asc_get_poll_char, + .poll_put_char = asc_put_poll_char, +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c +index df90747ee3a8..92772a0eb674 100644 +--- a/drivers/tty/serial/stm32-usart.c ++++ b/drivers/tty/serial/stm32-usart.c +@@ -871,28 +871,6 @@ stm32_verify_port(struct uart_port *port, struct serial_struct *ser) + return -EINVAL; + } + +-static void stm32_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- struct stm32_port *stm32port = container_of(port, +- struct stm32_port, port); +- struct stm32_usart_offsets *ofs = &stm32port->info->ofs; +- struct stm32_usart_config *cfg = &stm32port->info->cfg; +- unsigned long flags = 0; +- +- switch (state) { +- case UART_PM_STATE_ON: +- pm_runtime_get_sync(port->dev); +- break; +- case UART_PM_STATE_OFF: +- spin_lock_irqsave(&port->lock, flags); +- stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); +- spin_unlock_irqrestore(&port->lock, flags); +- pm_runtime_put_sync(port->dev); +- break; +- } +-} +- + static const struct uart_ops 
stm32_uart_ops = { + .tx_empty = stm32_tx_empty, + .set_mctrl = stm32_set_mctrl, +@@ -906,7 +884,6 @@ static const struct uart_ops stm32_uart_ops = { + .startup = stm32_startup, + .shutdown = stm32_shutdown, + .set_termios = stm32_set_termios, +- .pm = stm32_pm, + .type = stm32_type, + .release_port = stm32_release_port, + .request_port = stm32_request_port, +diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c +index 06e79c11141d..6713906e5eed 100644 +--- a/drivers/tty/serial/uartlite.c ++++ b/drivers/tty/serial/uartlite.c +@@ -390,17 +390,6 @@ static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser) + return -EINVAL; + } + +-static void ulite_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- if (!state) { +- pm_runtime_get_sync(port->dev); +- } else { +- pm_runtime_mark_last_busy(port->dev); +- pm_runtime_put_autosuspend(port->dev); +- } +-} +- + #ifdef CONFIG_CONSOLE_POLL + static int ulite_get_poll_char(struct uart_port *port) + { +@@ -436,7 +425,6 @@ static const struct uart_ops ulite_ops = { + .request_port = ulite_request_port, + .config_port = ulite_config_port, + .verify_port = ulite_verify_port, +- .pm = ulite_pm, + #ifdef CONFIG_CONSOLE_POLL + .poll_get_char = ulite_get_poll_char, + .poll_put_char = ulite_put_poll_char, +diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c +index 6d106e33f842..06f1ae8e8ff5 100644 +--- a/drivers/tty/serial/vr41xx_siu.c ++++ b/drivers/tty/serial/vr41xx_siu.c +@@ -581,32 +581,6 @@ static void siu_set_termios(struct uart_port *port, struct ktermios *new, + spin_unlock_irqrestore(&port->lock, flags); + } + +-static void siu_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) +-{ +- switch (state) { +- case 0: +- switch (port->type) { +- case PORT_VR41XX_SIU: +- vr41xx_supply_clock(SIU_CLOCK); +- break; +- case PORT_VR41XX_DSIU: +- vr41xx_supply_clock(DSIU_CLOCK); +- break; +- } +- break; +- case 3: +- switch 
(port->type) { +- case PORT_VR41XX_SIU: +- vr41xx_mask_clock(SIU_CLOCK); +- break; +- case PORT_VR41XX_DSIU: +- vr41xx_mask_clock(DSIU_CLOCK); +- break; +- } +- break; +- } +-} +- + static const char *siu_type(struct uart_port *port) + { + return siu_type_name(port); +@@ -680,7 +654,6 @@ static const struct uart_ops siu_uart_ops = { + .startup = siu_startup, + .shutdown = siu_shutdown, + .set_termios = siu_set_termios, +- .pm = siu_pm, + .type = siu_type, + .release_port = siu_release_port, + .request_port = siu_request_port, +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c +index 4e55bc327a54..e041619f5b75 100644 +--- a/drivers/tty/serial/xilinx_uartps.c ++++ b/drivers/tty/serial/xilinx_uartps.c +@@ -1082,20 +1082,6 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c) + } + #endif + +-static void cdns_uart_pm(struct uart_port *port, unsigned int state, +- unsigned int oldstate) +-{ +- switch (state) { +- case UART_PM_STATE_OFF: +- pm_runtime_mark_last_busy(port->dev); +- pm_runtime_put_autosuspend(port->dev); +- break; +- default: +- pm_runtime_get_sync(port->dev); +- break; +- } +-} +- + static const struct uart_ops cdns_uart_ops = { + .set_mctrl = cdns_uart_set_mctrl, + .get_mctrl = cdns_uart_get_mctrl, +@@ -1107,7 +1093,6 @@ static const struct uart_ops cdns_uart_ops = { + .set_termios = cdns_uart_set_termios, + .startup = cdns_uart_startup, + .shutdown = cdns_uart_shutdown, +- .pm = cdns_uart_pm, + .type = cdns_uart_type, + .verify_port = cdns_uart_verify_port, + .request_port = cdns_uart_request_port, +diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c +index b03d3e458ea2..120b46bfdba4 100644 +--- a/drivers/tty/serial/zs.c ++++ b/drivers/tty/serial/zs.c +@@ -959,24 +959,6 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios, + spin_unlock_irqrestore(&scc->zlock, flags); + } + +-/* +- * Hack alert! 
+- * Required solely so that the initial PROM-based console +- * works undisturbed in parallel with this one. +- */ +-static void zs_pm(struct uart_port *uport, unsigned int state, +- unsigned int oldstate) +-{ +- struct zs_port *zport = to_zport(uport); +- +- if (state < 3) +- zport->regs[5] |= TxENAB; +- else +- zport->regs[5] &= ~TxENAB; +- write_zsreg(zport, R5, zport->regs[5]); +-} +- +- + static const char *zs_type(struct uart_port *uport) + { + return "Z85C30 SCC"; +@@ -1058,7 +1040,6 @@ static const struct uart_ops zs_ops = { + .startup = zs_startup, + .shutdown = zs_shutdown, + .set_termios = zs_set_termios, +- .pm = zs_pm, + .type = zs_type, + .release_port = zs_release_port, + .request_port = zs_request_port, +@@ -1212,7 +1193,6 @@ static int __init zs_console_setup(struct console *co, char *options) + return ret; + + zs_reset(zport); +- zs_pm(uport, 0, -1); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); +diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h +index a6db669860f9..b1e6cea83789 100644 +--- a/include/linux/serial_8250.h ++++ b/include/linux/serial_8250.h +@@ -36,8 +36,6 @@ struct plat_serial8250_port { + struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + int (*handle_irq)(struct uart_port *); +- void (*pm)(struct uart_port *, unsigned int state, +- unsigned old); + void (*handle_break)(struct uart_port *); + }; + +-- +2.17.1 + diff --git a/patches/0034-trusty-Fix-the-warnings-for-eywa-building.trusty b/patches/0034-trusty-Fix-the-warnings-for-eywa-building.trusty new file mode 100644 index 0000000000..cfc1a9a8bd --- /dev/null +++ b/patches/0034-trusty-Fix-the-warnings-for-eywa-building.trusty @@ -0,0 +1,95 @@ +From 8157dde27bca91d073699b8a6b87248743a015d6 Mon Sep 17 00:00:00 2001 +From: yingbinx +Date: Tue, 9 May 2017 13:45:06 +0800 +Subject: [PATCH 34/63] trusty: Fix the warnings for eywa building + +Several warnings are generated while we build for eywa with ARCH i386. 
+The patch is to fix the warnings. +Tested by tipc test cases and CTS, all are pass. + +Change-Id: I2710dd94dfb635f12f5b482a894891bcf725f6be +Tracked-On: OAM-45413 +Signed-off-by: yingbinx +Signed-off-by: weideng +Reviewed-on: #581978 +--- + drivers/trusty/trusty-log.c | 10 +++++++--- + drivers/trusty/trusty-virtio.c | 2 +- + include/linux/trusty/trusty.h | 7 +++++++ + 3 files changed, 15 insertions(+), 4 deletions(-) + +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +index c977d33ccde5..0f00d0074fc9 100644 +--- a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -26,7 +26,11 @@ + #define TRUSTY_LOG_SIZE (PAGE_SIZE * 2) + #define TRUSTY_LINE_BUFFER_SIZE 256 + ++#ifdef CONFIG_64BIT + static uint64_t g_vmm_debug_buf; ++#else ++static uint32_t g_vmm_debug_buf; ++#endif + + struct trusty_log_state { + struct device *dev; +@@ -286,7 +290,7 @@ static int trusty_log_probe(struct platform_device *pdev) + pa = page_to_phys(s->log_pages); + result = trusty_std_call32(s->trusty_dev, + SMC_SC_SHARED_LOG_ADD, +- (u32)(pa), (u32)(pa >> 32), ++ (u32)(pa), (u32)HIULINT(pa), + TRUSTY_LOG_SIZE); + if (result < 0) { + pr_err("trusty std call (SMC_SC_SHARED_LOG_ADD) failed: %d %pa\n", +@@ -354,7 +358,7 @@ static int trusty_log_probe(struct platform_device *pdev) + trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); + error_call_notifier: + trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, +- (u32)pa, (u32)(pa >> 32), 0); ++ (u32)pa, (u32)HIULINT(pa), 0); + error_std_call: + __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE)); + error_alloc_log: +@@ -378,7 +382,7 @@ static int trusty_log_remove(struct platform_device *pdev) + trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); + + result = trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, +- (u32)pa, (u32)(pa >> 32), 0); ++ (u32)pa, (u32)HIULINT(pa), 0); + if (result) { + pr_err("trusty std call (SMC_SC_SHARED_LOG_RM) failed: %d\n", + result); +diff 
--git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index 2368c10f1b7b..6cb1ec762efe 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -322,7 +322,7 @@ static struct virtqueue *_find_vq(struct virtio_device *vdev, + /* da field is only 32 bit wide. Use previously unused 'reserved' field + * to store top 32 bits of 64-bit address + */ +- tvr->vr_descr->pa = (u32)(pa >> 32); ++ tvr->vr_descr->pa = (u32)HIULINT(pa); + + dev_info(&vdev->dev, "vring%d: va(pa) %p(%llx) qsz %d notifyid %d\n", + id, tvr->vaddr, (u64)tvr->paddr, tvr->elem_num, tvr->notifyid); +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index 7dc2dad40daa..f7b0a14c9a1d 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -85,4 +85,11 @@ static inline int trusty_check_cpuid(void) + + return 0; + } ++ ++/* High 32 bits of unsigned 64-bit integer*/ ++#ifdef CONFIG_64BIT ++#define HIULINT(x) ((x) >> 32) ++#else ++#define HIULINT(x) 0 ++#endif + #endif +-- +2.17.1 + diff --git a/patches/0035-ASoC-Intel-Reuse-sst_pdata-fw_name-field.audio b/patches/0035-ASoC-Intel-Reuse-sst_pdata-fw_name-field.audio new file mode 100644 index 0000000000..0876d436ca --- /dev/null +++ b/patches/0035-ASoC-Intel-Reuse-sst_pdata-fw_name-field.audio @@ -0,0 +1,134 @@ +From bbf8c562102839ee5dc3a0e21ca89388a15bd71e Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 18:02:24 +0200 +Subject: [PATCH 035/193] ASoC: Intel: Reuse sst_pdata::fw_name field + +struct sst_pdata is equipped with fw_name field - a platform specific +filename for basefw module. Usage of such allows for suther +simplification of declaration of handlers directly involved with Skylake +initialization procedure. + +This change invalidates mach::fw_filename field and skl::fw_name. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/common/sst-acpi.c | 5 ++--- + sound/soc/intel/common/sst-firmware.c | 1 + + sound/soc/intel/skylake/skl-messages.c | 2 +- + sound/soc/intel/skylake/skl-sst-dsp.h | 3 +-- + sound/soc/intel/skylake/skl-sst-utils.c | 4 +--- + sound/soc/intel/skylake/skl.c | 4 ---- + 6 files changed, 6 insertions(+), 13 deletions(-) + +diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c +index 6f0526b33429..a44e8fd54c5d 100644 +--- a/sound/soc/intel/common/sst-acpi.c ++++ b/sound/soc/intel/common/sst-acpi.c +@@ -28,11 +28,10 @@ static void sst_acpi_fw_cb(const struct firmware *fw, void *context) + struct sst_acpi_priv *sst_acpi = platform_get_drvdata(pdev); + struct sst_acpi_desc *desc = sst_acpi->desc; + struct sst_pdata *sst_pdata = desc->pdata; +- struct snd_soc_acpi_mach *mach = sst_acpi->mach; + + sst_pdata->fw = fw; + if (!fw) { +- dev_err(dev, "Cannot load firmware %s\n", mach->fw_filename); ++ dev_err(dev, "Cannot load firmware %s\n", sst_pdata->fw_name); + return; + } + +@@ -120,7 +119,7 @@ int sst_dsp_acpi_probe(struct platform_device *pdev) + return PTR_ERR(sst_acpi->pdev_mach); + + /* continue SST probing after firmware is loaded */ +- ret = request_firmware_nowait(THIS_MODULE, true, mach->fw_filename, ++ ret = request_firmware_nowait(THIS_MODULE, true, sst_pdata->fw_name, + dev, GFP_KERNEL, pdev, sst_acpi_fw_cb); + if (ret) + platform_device_unregister(sst_acpi->pdev_mach); +diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c +index 61d3e6e46b98..cc88849eb10f 100644 +--- a/sound/soc/intel/common/sst-firmware.c ++++ b/sound/soc/intel/common/sst-firmware.c +@@ -1218,6 +1218,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev, struct sst_pdata *pdata) + sst->thread_context = pdata->dsp; + sst->id = pdata->id; + sst->irq = pdata->irq; ++ sst->fw_name = pdata->fw_name; + sst->ops = pdata->ops; + sst->pdata = pdata; + 
INIT_LIST_HEAD(&sst->used_block_list); +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index 27ab344344fa..4e4d9ded9354 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -201,7 +201,7 @@ int skl_init_dsp(struct skl_dev *skl, struct sst_pdata *pdata) + if (!ops) + return -EIO; + +- ret = skl_sst_ctx_init(skl, skl->fw_name, pdata); ++ ret = skl_sst_ctx_init(skl, pdata); + if (ret < 0) + return ret; + +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index c4833d468860..e6f25f37c369 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -238,8 +238,7 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw); + + void skl_dsp_set_astate_cfg(struct skl_dev *skl, u32 cnt, void *data); + +-int skl_sst_ctx_init(struct skl_dev *skl, const char *fw_name, +- struct sst_pdata *pdata); ++int skl_sst_ctx_init(struct skl_dev *skl, struct sst_pdata *pdata); + int skl_prepare_lib_load(struct skl_dev *skl, struct skl_lib_info *linfo, + struct firmware *stripped_fw, + unsigned int hdr_offset, int index); +diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index 880c7f75d717..2d333ecf1016 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -394,8 +394,7 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw) + return 0; + } + +-int skl_sst_ctx_init(struct skl_dev *skl, const char *fw_name, +- struct sst_pdata *pdata) ++int skl_sst_ctx_init(struct skl_dev *skl, struct sst_pdata *pdata) + { + struct sst_dsp *sst; + struct device *dev = skl->dev; +@@ -412,7 +411,6 @@ int skl_sst_ctx_init(struct skl_dev *skl, const char *fw_name, + } + + skl->dsp = sst; +- sst->fw_name = fw_name; + init_waitqueue_head(&skl->mod_load_wait); + skl->is_first_boot = true; + +diff --git a/sound/soc/intel/skylake/skl.c 
b/sound/soc/intel/skylake/skl.c +index 39442c80a179..3225f4f8793e 100644 +--- a/sound/soc/intel/skylake/skl.c ++++ b/sound/soc/intel/skylake/skl.c +@@ -491,9 +491,6 @@ static struct snd_soc_acpi_mach *skl_find_hda_machine(struct skl_dev *skl, + /* point to common table */ + mach = snd_soc_acpi_intel_hda_machines; + +- /* all entries in the machine table use the same firmware */ +- mach->fw_filename = machines->fw_filename; +- + return mach; + } + +@@ -514,7 +511,6 @@ static int skl_find_machine(struct skl_dev *skl, void *driver_data) + } + + skl->mach = mach; +- skl->fw_name = mach->fw_filename; + pdata = mach->pdata; + + if (pdata) { +-- +2.17.1 + diff --git a/patches/0035-HVLog-reserve-memory-for-ACRN-HVLog.acrn b/patches/0035-HVLog-reserve-memory-for-ACRN-HVLog.acrn new file mode 100644 index 0000000000..8c292892e2 --- /dev/null +++ b/patches/0035-HVLog-reserve-memory-for-ACRN-HVLog.acrn @@ -0,0 +1,132 @@ +From a88ded96005718462f5d287f6471dbfefc7fff54 Mon Sep 17 00:00:00 2001 +From: "Li, Fei1" +Date: Fri, 31 Aug 2018 10:58:58 +0800 +Subject: [PATCH 035/150] HVLog: reserve memory for ACRN HVLog + +Change-Id: Ic87c83510d1405c791ce9c47872b960f801d45c2 +Tracked-On: 220304 +Signed-off-by: Li, Fei1 +--- + drivers/acrn/Kconfig | 7 ++++ + drivers/acrn/Makefile | 3 +- + drivers/acrn/acrn_hvlog.c | 83 +++++++++++++++++++++++++++++++++++++++ + 3 files changed, 92 insertions(+), 1 deletion(-) + create mode 100644 drivers/acrn/acrn_hvlog.c + +diff --git a/drivers/acrn/Kconfig b/drivers/acrn/Kconfig +index 08b24a168167..9056a4f1f20a 100644 +--- a/drivers/acrn/Kconfig ++++ b/drivers/acrn/Kconfig +@@ -11,3 +11,10 @@ config ACRN_TRACE + This is the Trace driver for the Intel ACRN hypervisor. + You can say y to build it into the kernel, or m to build + it as a module. ++ ++config ACRN_HVLOG ++ bool "Intel ACRN Hypervisor Logmsg support" ++ select ACRN_SHARED_BUFFER ++ ---help--- ++ This is the Trace driver for the Intel ACRN hypervisor log. 
++ You can say y to build it into the kernel. +diff --git a/drivers/acrn/Makefile b/drivers/acrn/Makefile +index 5430f4fa06fd..05dd698e8171 100644 +--- a/drivers/acrn/Makefile ++++ b/drivers/acrn/Makefile +@@ -1,2 +1,3 @@ + obj-$(CONFIG_ACRN_SHARED_BUFFER) += sbuf.o +-obj-$(CONFIG_ACRN_TRACE) += acrn_trace.o +\ No newline at end of file ++obj-$(CONFIG_ACRN_TRACE) += acrn_trace.o ++obj-$(CONFIG_ACRN_HVLOG) += acrn_hvlog.o +diff --git a/drivers/acrn/acrn_hvlog.c b/drivers/acrn/acrn_hvlog.c +new file mode 100644 +index 000000000000..9c30fba58faf +--- /dev/null ++++ b/drivers/acrn/acrn_hvlog.c +@@ -0,0 +1,83 @@ ++/* ++ * ACRN Hypervisor logmsg ++ * ++ * This file is provided under a dual BSD/GPLv2 license.  When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU ++ * General Public License for more details. ++ * ++ * Contact Information: Li Fei ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2017 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ * ++ * Li Fei ++ * ++ */ ++#include ++#include ++ ++static unsigned long long hvlog_buf_size; ++static unsigned long long hvlog_buf_base; ++ ++static int __init early_hvlog(char *p) ++{ ++ int ret; ++ ++ pr_debug("%s(%s)\n", __func__, p); ++ hvlog_buf_size = memparse(p, &p); ++ if (*p != '@') ++ return 0; ++ hvlog_buf_base = memparse(p + 1, &p); ++ ++ if (!!hvlog_buf_base && !!hvlog_buf_size) { ++ ret = memblock_reserve(hvlog_buf_base, hvlog_buf_size); ++ if (ret) { ++ pr_err("%s: Error reserving hvlog memblock\n", ++ __func__); ++ hvlog_buf_base = 0; ++ hvlog_buf_size = 0; ++ return ret; ++ } ++ } ++ return 0; ++} ++early_param("hvlog", early_hvlog); +-- +2.17.1 + diff --git a/patches/0035-drm-mipi-dbi-fix-a-loop-in-debugfs-code.drm b/patches/0035-drm-mipi-dbi-fix-a-loop-in-debugfs-code.drm new file mode 100644 index 0000000000..00744478c1 --- /dev/null +++ b/patches/0035-drm-mipi-dbi-fix-a-loop-in-debugfs-code.drm @@ -0,0 +1,51 @@ +From f45fcefd20d6067f6d60e12dc157f200060fc7e8 Mon Sep 17 00:00:00 2001 +From: Dan Carpenter +Date: Wed, 21 Aug 2019 10:24:56 +0300 +Subject: [PATCH 035/690] drm/mipi-dbi: fix a loop in debugfs code +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This code will likely crash if we try to do a zero byte write. The code +looks like this: + + /* strip trailing whitespace */ + for (i = count - 1; i > 0; i--) + if (isspace(buf[i])) + ... + +We're writing zero bytes so count = 0. You would think that "count - 1" +would be negative one, but because "i" is unsigned it is a large +positive numer instead. The "i > 0" condition is true and the "buf[i]" +access will be out of bounds. + +The fix is to make "i" signed and now everything works as expected. The +upper bound of "count" is capped in __kernel_write() at MAX_RW_COUNT so +we don't have to worry about it being higher than INT_MAX. 
+ +Fixes: 02dd95fe3169 ("drm/tinydrm: Add MIPI DBI support") +Signed-off-by: Dan Carpenter +[noralf: Adjust title] +Signed-off-by: Noralf Trønnes +Link: https://patchwork.freedesktop.org/patch/msgid/20190821072456.GJ26957@mwanda +--- + drivers/gpu/drm/drm_mipi_dbi.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c +index 1961f713aaab..c4ee2709a6f3 100644 +--- a/drivers/gpu/drm/drm_mipi_dbi.c ++++ b/drivers/gpu/drm/drm_mipi_dbi.c +@@ -1187,8 +1187,7 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file, + struct mipi_dbi_dev *dbidev = m->private; + u8 val, cmd = 0, parameters[64]; + char *buf, *pos, *token; +- unsigned int i; +- int ret, idx; ++ int i, ret, idx; + + if (!drm_dev_enter(&dbidev->drm, &idx)) + return -ENODEV; +-- +2.17.1 + diff --git a/patches/0035-mei-me-mei_me_dev_init-use-struct-device-instead-.security b/patches/0035-mei-me-mei_me_dev_init-use-struct-device-instead-.security new file mode 100644 index 0000000000..9878da8bd2 --- /dev/null +++ b/patches/0035-mei-me-mei_me_dev_init-use-struct-device-instead-.security @@ -0,0 +1,84 @@ +From 876d438d3b9f6499b3006201387470d3c695db4a Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Wed, 13 Mar 2019 20:07:41 +0200 +Subject: [PATCH 35/65] mei: me: mei_me_dev_init() use struct device instead of + struct pci_dev. + +It's enough to bind mei_device with associated 'struct device' instead +of actual 'struct pci_dev'. This is to allow working with mei devices +embedded within another pci devices, where mei device is represented +as a platform child device. 
+ +Change-Id: Ic79607d1c296c07637a12189ac5f842d501166eb +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/hw-me.c | 8 ++++---- + drivers/misc/mei/hw-me.h | 2 +- + drivers/misc/mei/pci-me.c | 2 +- + 3 files changed, 6 insertions(+), 6 deletions(-) + +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c +index 5ef30c7c92b3..640d3528124a 100644 +--- a/drivers/misc/mei/hw-me.c ++++ b/drivers/misc/mei/hw-me.c +@@ -1461,19 +1461,19 @@ const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx) + /** + * mei_me_dev_init - allocates and initializes the mei device structure + * +- * @pdev: The pci device structure ++ * @parent: device associated with physical device (pci/platform) + * @cfg: per device generation config + * + * Return: The mei_device pointer on success, NULL on failure. + */ +-struct mei_device *mei_me_dev_init(struct pci_dev *pdev, ++struct mei_device *mei_me_dev_init(struct device *parent, + const struct mei_cfg *cfg) + { + struct mei_device *dev; + struct mei_me_hw *hw; + int i; + +- dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) + ++ dev = devm_kzalloc(parent, sizeof(struct mei_device) + + sizeof(struct mei_me_hw), GFP_KERNEL); + if (!dev) + return NULL; +@@ -1483,7 +1483,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev, + for (i = 0; i < DMA_DSCR_NUM; i++) + dev->dr_dscr[i].size = cfg->dma_size[i]; + +- mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); ++ mei_device_init(dev, parent, &mei_me_hw_ops); + hw->cfg = cfg; + + dev->fw_f_fw_ver_supported = cfg->fw_ver_supported; +diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h +index 1d8794828cbc..2d30a26bdf02 100644 +--- a/drivers/misc/mei/hw-me.h ++++ b/drivers/misc/mei/hw-me.h +@@ -91,7 +91,7 @@ enum mei_cfg_idx { + + const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx); + +-struct mei_device *mei_me_dev_init(struct pci_dev *pdev, ++struct mei_device *mei_me_dev_init(struct device *parent, + const struct mei_cfg *cfg); + + int 
mei_me_pg_enter_sync(struct mei_device *dev); +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 3dca63eddaa0..81a85c38d5a9 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -191,7 +191,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + } + + /* allocates and initializes the mei dev structure */ +- dev = mei_me_dev_init(pdev, cfg); ++ dev = mei_me_dev_init(&pdev->dev, cfg); + if (!dev) { + err = -ENOMEM; + goto end; +-- +2.17.1 + diff --git a/patches/0035-net-stmmac-introduce-IEEE-802.1Qbv-configurat.connectivity b/patches/0035-net-stmmac-introduce-IEEE-802.1Qbv-configurat.connectivity new file mode 100644 index 0000000000..59b1a2ed29 --- /dev/null +++ b/patches/0035-net-stmmac-introduce-IEEE-802.1Qbv-configurat.connectivity @@ -0,0 +1,1346 @@ +From 7a100304e2219ef86415e48209b3e1bc4361635c Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Fri, 6 Jul 2018 06:16:28 +0800 +Subject: [PATCH 035/108] net: stmmac: introduce IEEE 802.1Qbv configuration + functionalities + +IEEE 802.1Qbv Enhancements for Scheduled Traffics (EST) is available in +EQoS ver5.xx. The change adds basic EST functionalities: + +a) EST initialization with hardware capabilities detection. +b) Setting Gate Control List (GCL), i.e. gate open/close & time intervals, + and all GC Related Registers (GCRR), e.g., base time (BTR), cycle time + (CTR), time extension (TER) and GC List Length (LLR). +c) Enable/disable EST. +d) Getting TSN hardware capabilities. +e) Getting Gate Control configuration either from driver data store or + hardware. + +We extend the main driver logic to include basic TSN capability discovery, +and setup. We also add EST feature enable/disable control. 
+ +The TSN driver framework is split into IP-specific and IP-agnostic and +the interface calls are all defined inside hwif.h:- + +a) The set of tsnif_xxx APIs are for IP-specific and dwmac5_tsn.c contains + all TSN implementation specific to IP v5.xx. +b) The set of stmmac_xxx_tsn_yyy APIs are for IP-agnostic logics that are + called by stmmac_main.c and other high-level interface such as tc. + +Reviewed-by: Voon Weifeng +Reviewed-by: Kweh Hock Leong +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/Makefile | 2 +- + drivers/net/ethernet/stmicro/stmmac/common.h | 3 + + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 11 + + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 56 +- + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 249 ++++++++ + drivers/net/ethernet/stmicro/stmmac/hwif.h | 131 ++++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 15 + + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 574 ++++++++++++++++++ + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 106 ++++ + include/linux/stmmac.h | 1 + + 10 files changed, 1146 insertions(+), 2 deletions(-) + create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c + create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c + create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h + +diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile +index 3230d2673cb5..d50c29afe70e 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/Makefile ++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile +@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ + mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ + dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ + stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \ +- intel_serdes.o $(stmmac-y) ++ intel_serdes.o stmmac_tsn.o dwmac5_tsn.o $(stmmac-y) + + stmmac-$(CONFIG_STMMAC_SELFTESTS) += 
stmmac_selftests.o + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index f564c48c651a..842219fa7931 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -21,6 +21,7 @@ + #include + #endif + ++#include "stmmac_tsn.h" + #include "descs.h" + #include "hwif.h" + #include "mmc.h" +@@ -479,6 +480,8 @@ struct mac_device_info { + unsigned int promisc; + bool vlan_fail_q_en; + u8 vlan_fail_q; ++ const struct tsnif_ops *tsnif; ++ struct tsnif_info tsn_info; + }; + + struct stmmac_rx_routing { +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 8af95b024dcc..a8852e75b91a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -1176,6 +1176,17 @@ const struct stmmac_ops dwmac510_ops = { + .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, + .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, + .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, ++ .tsnif_setup = dwmac510_tsnif_setup, ++ .init_tsn = tsn_init, ++ .set_tsn_feat = tsn_feat_set, ++ .has_tsn_feat = tsn_has_feat, ++ .set_est_enable = tsn_est_enable_set, ++ .get_est_bank = tsn_est_bank_get, ++ .set_est_gce = tsn_est_gce_set, ++ .get_est_gcl_len = tsn_est_gcl_len_get, ++ .set_est_gcl_len = tsn_est_gcl_len_set, ++ .set_est_gcrr_times = tsn_est_gcrr_times_set, ++ .get_est_gcc = tsn_est_gcc_get, + }; + + static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index e62181bf50ec..0b71ae6f592d 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -1,5 +1,6 @@ + // SPDX-License-Identifier: (GPL-2.0 OR MIT) + // Copyright (c) 2017 Synopsys, Inc. and/or its affiliates. 
++// Copyright (c) 2019, Intel Corporation. + // stmmac Support for 5.xx Ethernet QoS cores + + #ifndef __DWMAC5_H__ +@@ -81,6 +82,59 @@ + #define GMAC_RXQCTRL_VFFQ_SHIFT 17 + #define GMAC_RXQCTRL_VFFQE BIT(16) + ++/* DWMAC v5.xx supports the following Time Sensitive Networking protocols: ++ * 1) IEEE 802.1Qbv Enhancements for Scheduled Traffic (EST) ++ */ ++ ++/* MAC HW features3 bitmap */ ++#define GMAC_HW_FEAT_ESTWID GENMASK(21, 20) ++#define GMAC_HW_FEAT_ESTWID_SHIFT 20 ++#define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17) ++#define GMAC_HW_FEAT_ESTDEP_SHIFT 17 ++#define GMAC_HW_FEAT_ESTSEL BIT(16) ++ ++/* MTL EST control register */ ++#define MTL_EST_CTRL 0x00000c50 ++#define MTL_EST_CTRL_SSWL BIT(1) /* Switch to SWOL */ ++#define MTL_EST_CTRL_EEST BIT(0) /* Enable EST */ ++ ++/* MTL EST status register */ ++#define MTL_EST_STATUS 0x00000c58 ++#define MTL_EST_STATUS_BTRL GENMASK(11, 8) /* BTR ERR loop cnt */ ++#define MTL_EST_STATUS_BTRL_SHIFT 8 ++#define MTL_EST_STATUS_BTRL_MAX (0xF << 8) ++#define MTL_EST_STATUS_SWOL BIT(7) /* SW owned list */ ++#define MTL_EST_STATUS_SWOL_SHIFT 7 ++#define MTL_EST_STATUS_BTRE BIT(1) /* BTR Error */ ++#define MTL_EST_STATUS_SWLC BIT(0) /* Switch to SWOL complete */ ++ ++/* MTL EST GCL control register */ ++#define MTL_EST_GCL_CTRL 0x00000c80 ++#define MTL_EST_GCL_CTRL_ADDR(dep) GENMASK(8 + (dep) - 1, 8) /* GCL Addr */ ++#define MTL_EST_GCL_CTRL_ADDR_VAL(addr) ((addr) << 8) ++#define GCL_CTRL_ADDR_BTR_LO 0x0 ++#define GCL_CTRL_ADDR_BTR_HI 0x1 ++#define GCL_CTRL_ADDR_CTR_LO 0x2 ++#define GCL_CTRL_ADDR_CTR_HI 0x3 ++#define GCL_CTRL_ADDR_TER 0x4 ++#define GCL_CTRL_ADDR_LLR 0x5 ++#define MTL_EST_GCL_CTRL_DBGB1 BIT(5) /* Debug Mode Bank Select */ ++#define MTL_EST_GCL_CTRL_DBGM BIT(4) /* Debug Mode */ ++#define MTL_EST_GCL_CTRL_GCRR BIT(2) /* GC Related Registers */ ++#define MTL_EST_GCL_CTRL_R1W0 BIT(1) /* Read / Write Operation */ ++#define GCL_OPS_R BIT(1) ++#define GCL_OPS_W 0 ++#define MTL_EST_GCL_CTRL_SRWO BIT(0) /* Start R/W 
Operation */ ++ ++/* MTL EST GCL data register */ ++#define MTL_EST_GCL_DATA 0x00000c84 ++ ++/* EST Global defines */ ++#define EST_CTR_HI_MAX 0xff /* CTR Hi is 8-bit only */ ++ ++/* MAC Core Version */ ++#define TSN_VER_MASK 0xFF ++ + int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); + int dwmac5_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, +@@ -92,5 +146,5 @@ int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); +- ++void dwmac510_tsnif_setup(struct mac_device_info *mac); + #endif /* __DWMAC5_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +new file mode 100644 +index 000000000000..64b263336b87 +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -0,0 +1,249 @@ ++// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause ++/* Copyright (c) 2019, Intel Corporation. ++ * DW EQoS v5.00 TSN IP Implementation ++ */ ++#include ++#include "dwmac4.h" ++#include "dwmac5.h" ++#include "hwif.h" ++#include "stmmac_tsn.h" ++ ++static int est_set_gcl_addr(void __iomem *ioaddr, u32 addr, ++ bool is_gcrr, u32 rwops, u32 dep, ++ u32 dbgb, bool is_dbgm) ++{ ++ u32 value; ++ ++ value = MTL_EST_GCL_CTRL_ADDR_VAL(addr) & MTL_EST_GCL_CTRL_ADDR(dep); ++ ++ if (is_dbgm) { ++ if (dbgb) ++ value |= MTL_EST_GCL_CTRL_DBGB1; ++ ++ value |= MTL_EST_GCL_CTRL_DBGM; ++ } ++ ++ if (is_gcrr) ++ value |= MTL_EST_GCL_CTRL_GCRR; ++ ++ /* This is the only place SRWO is set and driver polls SRWO ++ * for self-cleared before exit. Therefore, caller should ++ * check return status for possible time out error. 
++ */ ++ value |= (rwops | MTL_EST_GCL_CTRL_SRWO); ++ ++ writel(value, ioaddr + MTL_EST_GCL_CTRL); ++ ++ return readl_poll_timeout(ioaddr + MTL_EST_GCL_CTRL, value, ++ !(value & MTL_EST_GCL_CTRL_SRWO), ++ 50000, 600000); ++} ++ ++static u32 dwmac5_read_hwid(void __iomem *ioaddr) ++{ ++ return (readl(ioaddr + GMAC4_VERSION) & TSN_VER_MASK); ++} ++ ++static bool dwmac5_has_tsn_cap(void __iomem *ioaddr, enum tsn_feat_id featid) ++{ ++ u32 hw_cap3 = readl(ioaddr + GMAC_HW_FEATURE3); ++ ++ switch (featid) { ++ case TSN_FEAT_ID_EST: ++ return (hw_cap3 & GMAC_HW_FEAT_ESTSEL); ++ default: ++ return false; ++ }; ++} ++ ++static u32 dwmac5_est_get_gcl_depth(void __iomem *ioaddr) ++{ ++ u32 hw_cap3; ++ u32 estdep; ++ u32 depth; ++ ++ hw_cap3 = readl(ioaddr + GMAC_HW_FEATURE3); ++ estdep = (hw_cap3 & GMAC_HW_FEAT_ESTDEP) >> GMAC_HW_FEAT_ESTDEP_SHIFT; ++ ++ switch (estdep) { ++ case 1: ++ depth = 64; ++ break; ++ case 2: ++ depth = 128; ++ break; ++ case 3: ++ depth = 256; ++ break; ++ case 4: ++ depth = 512; ++ break; ++ case 5: ++ depth = 1024; ++ break; ++ default: ++ depth = 0; ++ } ++ ++ return depth; ++} ++ ++static u32 dwmac5_est_get_ti_width(void __iomem *ioaddr) ++{ ++ u32 hw_cap3; ++ u32 estwid; ++ u32 width; ++ ++ hw_cap3 = readl(ioaddr + GMAC_HW_FEATURE3); ++ estwid = (hw_cap3 & GMAC_HW_FEAT_ESTWID) >> GMAC_HW_FEAT_ESTWID_SHIFT; ++ ++ switch (estwid) { ++ case 1: ++ width = 16; ++ break; ++ case 2: ++ width = 20; ++ break; ++ case 3: ++ width = 24; ++ break; ++ default: ++ width = 0; ++ } ++ ++ return width; ++} ++ ++static u32 dwmac5_est_get_txqcnt(void __iomem *ioaddr) ++{ ++ u32 hw_cap2 = readl(ioaddr + GMAC_HW_FEATURE2); ++ ++ return ((hw_cap2 & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; ++} ++ ++static void dwmac5_est_get_max(u32 *cycle_max) ++{ ++ *cycle_max = EST_CTR_HI_MAX; ++} ++ ++static int dwmac5_est_write_gcl_config(void __iomem *ioaddr, u32 data, u32 addr, ++ bool is_gcrr, ++ u32 dbgb, bool is_dbgm) ++{ ++ u32 dep = dwmac5_est_get_gcl_depth(ioaddr); ++ ++ dep = 
ilog2(dep); ++ writel(data, ioaddr + MTL_EST_GCL_DATA); ++ ++ return est_set_gcl_addr(ioaddr, addr, is_gcrr, GCL_OPS_W, dep, ++ dbgb, is_dbgm); ++} ++ ++static int dwmac5_est_read_gcl_config(void __iomem *ioaddr, u32 *data, u32 addr, ++ bool is_gcrr, ++ u32 dbgb, bool is_dbgm) ++{ ++ u32 dep = dwmac5_est_get_gcl_depth(ioaddr); ++ int ret; ++ ++ dep = ilog2(dep); ++ ret = est_set_gcl_addr(ioaddr, addr, is_gcrr, GCL_OPS_R, dep, ++ dbgb, is_dbgm); ++ if (ret) ++ return ret; ++ ++ *data = readl(ioaddr + MTL_EST_GCL_DATA); ++ ++ return ret; ++} ++ ++static int dwmac5_est_read_gce(void __iomem *ioaddr, u32 row, ++ u32 *gates, u32 *ti_nsec, ++ u32 ti_wid, u32 txqcnt, ++ u32 dbgb, bool is_dbgm) ++{ ++ u32 gates_mask; ++ u32 ti_mask; ++ u32 value; ++ int ret; ++ ++ gates_mask = (1 << txqcnt) - 1; ++ ti_mask = (1 << ti_wid) - 1; ++ ++ ret = dwmac5_est_read_gcl_config(ioaddr, &value, row, 0, dbgb, is_dbgm); ++ if (ret) ++ return ret; ++ ++ *ti_nsec = value & ti_mask; ++ *gates = (value >> ti_wid) & gates_mask; ++ ++ return ret; ++} ++ ++static int dwmac5_est_set_enable(void __iomem *ioaddr, bool enable) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_EST_CTRL); ++ value &= ~(MTL_EST_CTRL_SSWL | MTL_EST_CTRL_EEST); ++ value |= (enable & MTL_EST_CTRL_EEST); ++ ++ writel(value, ioaddr + MTL_EST_CTRL); ++ ++ return 0; ++} ++ ++static bool dwmac5_est_get_enable(void __iomem *ioaddr) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_EST_CTRL); ++ ++ return (value & MTL_EST_CTRL_EEST); ++} ++ ++static u32 dwmac5_est_get_bank(void __iomem *ioaddr, bool is_own) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_EST_STATUS); ++ ++ value = ((value & MTL_EST_STATUS_SWOL) >> ++ MTL_EST_STATUS_SWOL_SHIFT); ++ ++ if (is_own) ++ return value; ++ else ++ return (~value & 0x1); ++} ++ ++static void dwmac5_est_switch_swol(void __iomem *ioaddr) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_EST_CTRL); ++ value |= MTL_EST_CTRL_SSWL; ++ ++ writel(value, ioaddr + MTL_EST_CTRL); ++} ++ 
++const struct tsnif_ops dwmac510_tsnif_ops = { ++ .read_hwid = dwmac5_read_hwid, ++ .has_tsn_cap = dwmac5_has_tsn_cap, ++ .est_get_gcl_depth = dwmac5_est_get_gcl_depth, ++ .est_get_ti_width = dwmac5_est_get_ti_width, ++ .est_get_txqcnt = dwmac5_est_get_txqcnt, ++ .est_get_max = dwmac5_est_get_max, ++ .est_write_gcl_config = dwmac5_est_write_gcl_config, ++ .est_read_gcl_config = dwmac5_est_read_gcl_config, ++ .est_read_gce = dwmac5_est_read_gce, ++ .est_set_enable = dwmac5_est_set_enable, ++ .est_get_enable = dwmac5_est_get_enable, ++ .est_get_bank = dwmac5_est_get_bank, ++ .est_switch_swol = dwmac5_est_switch_swol, ++}; ++ ++void dwmac510_tsnif_setup(struct mac_device_info *mac) ++{ ++ mac->tsnif = &dwmac510_tsnif_ops; ++} +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 4a3c25f45de1..e9e5d0dd1556 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -282,6 +282,10 @@ struct stmmac_safety_stats; + struct stmmac_tc_entry; + struct stmmac_pps_cfg; + struct stmmac_rss; ++enum tsn_feat_id; ++struct est_gc_entry; ++struct est_gcrr; ++struct est_gc_config; + + /* Helpers to program the MAC core */ + struct stmmac_ops { +@@ -391,6 +395,33 @@ struct stmmac_ops { + bool en, bool udp, bool sa, bool inv, + u32 match); + void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr); ++ /* TSN APIs */ ++ void (*tsnif_setup)(struct mac_device_info *mac); ++ int (*init_tsn)(struct mac_device_info *hw, struct net_device *dev); ++ int (*set_tsn_feat)(struct mac_device_info *hw, ++ struct net_device *dev, ++ enum tsn_feat_id featid, bool enable); ++ bool (*has_tsn_feat)(struct mac_device_info *hw, struct net_device *dev, ++ enum tsn_feat_id featid); ++ int (*set_est_enable)(struct mac_device_info *hw, ++ struct net_device *dev, bool enable); ++ int (*get_est_bank)(struct mac_device_info *hw, struct net_device *dev, ++ bool is_own, u32 *bank); ++ int 
(*set_est_gce)(struct mac_device_info *hw, struct net_device *dev, ++ struct est_gc_entry *gce, u32 row, ++ u32 dbgb, bool is_dbgm); ++ int (*get_est_gcl_len)(struct mac_device_info *hw, ++ struct net_device *dev, u32 *gcl_len, ++ u32 dbgb, bool is_dbgm); ++ int (*set_est_gcl_len)(struct mac_device_info *hw, ++ struct net_device *dev, u32 gcl_len, ++ u32 dbgb, bool is_dbgm); ++ int (*set_est_gcrr_times)(struct mac_device_info *hw, ++ struct net_device *dev, ++ struct est_gcrr *gcrr, ++ u32 dbgb, bool is_dbgm); ++ int (*get_est_gcc)(struct mac_device_info *hw, struct net_device *dev, ++ struct est_gc_config **gcc); + }; + + #define stmmac_core_init(__priv, __args...) \ +@@ -487,6 +518,32 @@ struct stmmac_ops { + stmmac_do_callback(__priv, mac, config_l4_filter, __args) + #define stmmac_set_arp_offload(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_arp_offload, __args) ++#define stmmac_tsnif_setup(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, tsnif_setup, __args) ++#define stmmac_tsn_init(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, init_tsn, __args) ++#define stmmac_set_tsn_feat(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, set_tsn_feat, __args) ++#define stmmac_has_tsn_feat(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, has_tsn_feat, __args) ++#define stmmac_set_tsn_hwtunable(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, set_tsn_hwtunable, __args) ++#define stmmac_get_tsn_hwtunable(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, get_tsn_hwtunable, __args) ++#define stmmac_set_est_enable(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, set_est_enable, __args) ++#define stmmac_get_est_bank(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, get_est_bank, __args) ++#define stmmac_set_est_gce(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, set_est_gce, __args) ++#define stmmac_set_est_gcl_len(__priv, __args...) 
\ ++ stmmac_do_callback(__priv, mac, set_est_gcl_len, __args) ++#define stmmac_get_est_gcl_len(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, get_est_gcl_len, __args) ++#define stmmac_set_est_gcrr_times(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, set_est_gcrr_times, __args) ++#define stmmac_get_est_gcc(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, get_est_gcc, __args) + + /* Helpers for serdes */ + struct stmmac_serdes_ops { +@@ -620,4 +677,78 @@ extern const struct stmmac_mmc_ops dwxgmac_mmc_ops; + + int stmmac_hwif_init(struct stmmac_priv *priv); + ++/* TSN Interface HW IP Specific Functions ++ * Note: ++ * These functions implement IP specifics logics and are callable by TSN APIs ++ * defined in struct stmmac_ops. To differentiate them from high level TSN ++ * APIs, we use tsnif_xxx here. ++ */ ++#define tsnif_do_void_callback(__hw, __cname, __arg0, __args...) \ ++({ \ ++ int __result = -EINVAL; \ ++ if ((__hw)->tsnif && (__hw)->tsnif->__cname) { \ ++ (__hw)->tsnif->__cname((__arg0), ##__args); \ ++ __result = 0; \ ++ } \ ++ __result; \ ++}) ++#define tsnif_do_callback(__hw, __cname, __arg0, __args...) 
\ ++({ \ ++ int __result = -EINVAL; \ ++ if ((__hw)->tsnif && (__hw)->tsnif->__cname) \ ++ __result = (__hw)->tsnif->__cname((__arg0), ##__args); \ ++ __result; \ ++}) ++ ++struct tsnif_ops { ++ u32 (*read_hwid)(void __iomem *ioaddr); ++ bool (*has_tsn_cap)(void __iomem *ioaddr, enum tsn_feat_id featid); ++ /* IEEE 802.1Qbv Enhanced Scheduled Traffics (EST) */ ++ u32 (*est_get_gcl_depth)(void __iomem *ioaddr); ++ u32 (*est_get_ti_width)(void __iomem *ioaddr); ++ u32 (*est_get_txqcnt)(void __iomem *ioaddr); ++ void (*est_get_max)(u32 *ct_max); ++ int (*est_write_gcl_config)(void __iomem *ioaddr, u32 data, u32 addr, ++ bool is_gcrr, ++ u32 dbgb, bool is_dbgm); ++ int (*est_read_gcl_config)(void __iomem *ioaddr, u32 *data, u32 addr, ++ bool is_gcrr, ++ u32 dbgb, bool is_dbgm); ++ int (*est_read_gce)(void __iomem *ioaddr, u32 row, ++ u32 *gates, u32 *ti_nsec, ++ u32 ti_wid, u32 txqcnt, ++ u32 dbgb, bool is_dbgm); ++ int (*est_set_enable)(void __iomem *ioaddr, bool enable); ++ bool (*est_get_enable)(void __iomem *ioaddr); ++ u32 (*est_get_bank)(void __iomem *ioaddr, bool is_own); ++ void (*est_switch_swol)(void __iomem *ioaddr); ++}; ++ ++#define tsnif_read_hwid(__hw, __args...) \ ++ tsnif_do_callback(__hw, read_hwid, __args) ++#define tsnif_has_tsn_cap(__hw, __args...) \ ++ tsnif_do_callback(__hw, has_tsn_cap, __args) ++#define tsnif_est_get_gcl_depth(__hw, __args...) \ ++ tsnif_do_callback(__hw, est_get_gcl_depth, __args) ++#define tsnif_est_get_ti_width(__hw, __args...) \ ++ tsnif_do_callback(__hw, est_get_ti_width, __args) ++#define tsnif_est_get_txqcnt(__hw, __args...) \ ++ tsnif_do_callback(__hw, est_get_txqcnt, __args) ++#define tsnif_est_get_max(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, est_get_max, __args) ++#define tsnif_est_write_gcl_config(__hw, __args...) \ ++ tsnif_do_callback(__hw, est_write_gcl_config, __args) ++#define tsnif_est_read_gcl_config(__hw, __args...) 
\ ++ tsnif_do_callback(__hw, est_read_gcl_config, __args) ++#define tsnif_est_read_gce(__hw, __args...) \ ++ tsnif_do_callback(__hw, est_read_gce, __args) ++#define tsnif_est_set_enable(__hw, __args...) \ ++ tsnif_do_callback(__hw, est_set_enable, __args) ++#define tsnif_est_get_enable(__hw, __args...) \ ++ tsnif_do_callback(__hw, est_get_enable, __args) ++#define tsnif_est_get_bank(__hw, __args...) \ ++ tsnif_do_callback(__hw, est_get_bank, __args) ++#define tsnif_est_switch_swol(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, est_switch_swol, __args) ++ + #endif /* __STMMAC_HWIF_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 1443d4b6e878..8ed6013477c6 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -45,6 +45,7 @@ + #include "dwxgmac2.h" + #include "hwif.h" + #include "intel_serdes.h" ++#include "stmmac_tsn.h" + + #define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) + #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) +@@ -4742,6 +4743,12 @@ static int stmmac_hw_init(struct stmmac_priv *priv) + if (ret) + return ret; + ++ /* Initialize TSN capability */ ++ stmmac_tsnif_setup(priv, priv->hw); ++ ret = stmmac_tsn_init(priv, priv->hw, priv->dev); ++ if (ret) ++ return ret; ++ + /* Get the HW capability (new GMAC newer than 3.50a) */ + priv->hw_cap_support = stmmac_get_hw_features(priv); + if (priv->hw_cap_support) { +@@ -4950,6 +4957,14 @@ int stmmac_dvr_probe(struct device *device, + + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ++ ++ /* TSN HW feature setup */ ++ if (priv->hw->tsn_info.cap.est_support && priv->plat->tsn_est_en) { ++ stmmac_set_tsn_feat(priv, priv->hw, ndev, TSN_FEAT_ID_EST, ++ true); ++ dev_info(priv->device, "EST feature enabled\n"); ++ } ++ + #ifdef STMMAC_VLAN_TAG_USED + /* Both mac100 and gmac support receive VLAN tag detection 
*/ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +new file mode 100644 +index 000000000000..582552d5506b +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -0,0 +1,574 @@ ++// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause ++/* Copyright (c) 2019, Intel Corporation. ++ * TSN General APIs ++ */ ++#include ++#include ++#include ++#include "stmmac_ptp.h" ++#include "common.h" ++ ++static u32 est_get_gcl_total_intervals_nsec(struct est_gc_config *gcc, ++ u32 bank, u32 gcl_len) ++{ ++ struct est_gc_entry *gcl = gcc->gcb[bank].gcl; ++ u32 nsec = 0; ++ u32 row; ++ ++ for (row = 0; row < gcl_len; row++) { ++ nsec += gcl->ti_nsec; ++ gcl++; ++ } ++ ++ return nsec; ++} ++ ++int tsn_init(struct mac_device_info *hw, struct net_device *dev) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ struct device *pdev = dev->dev.parent; ++ void __iomem *ioaddr = hw->pcsr; ++ struct est_gc_entry *gcl; ++ struct tsn_hw_cap *cap; ++ u32 gcl_depth; ++ u32 ti_wid; ++ u32 bank; ++ u32 hwid; ++ int ret; ++ ++ /* Init TSN HW Cap */ ++ cap = &hw->tsn_info.cap; ++ memset(cap, 0, sizeof(*cap)); ++ ++ hwid = tsnif_read_hwid(hw, ioaddr); ++ if (hwid < MIN_TSN_CORE_VER) { ++ dev_info(pdev, "IP v5.00 does not support TSN\n"); ++ cap->est_support = 0; ++ return 0; ++ } ++ ++ if (!tsnif_has_tsn_cap(hw, ioaddr, TSN_FEAT_ID_EST)) { ++ dev_info(pdev, "EST NOT supported\n"); ++ cap->est_support = 0; ++ return 0; ++ } ++ ++ gcl_depth = tsnif_est_get_gcl_depth(hw, ioaddr); ++ if (gcl_depth < 0) { ++ dev_err(pdev, "EST GCL depth(%d) < 0\n", gcl_depth); ++ cap->est_support = 0; ++ return -EINVAL; ++ } ++ ++ for (bank = 0; bank < EST_GCL_BANK_MAX; bank++) { ++ gcl = devm_kzalloc(pdev, ++ (sizeof(*gcl) * gcl_depth), ++ GFP_KERNEL); ++ if (!gcl) { ++ ret = -ENOMEM; ++ break; ++ } ++ info->est_gcc.gcb[bank].gcl = gcl; ++ ret = 0; ++ } ++ if (ret) { ++ 
int i; ++ ++ for (i = bank - 1; i >= 0; i--) { ++ gcl = info->est_gcc.gcb[bank].gcl; ++ devm_kfree(pdev, gcl); ++ info->est_gcc.gcb[bank].gcl = NULL; ++ } ++ dev_warn(pdev, "EST: GCL -ENOMEM\n"); ++ ++ return ret; ++ } ++ ++ ti_wid = tsnif_est_get_ti_width(hw, ioaddr); ++ cap->ti_wid = ti_wid; ++ cap->gcl_depth = gcl_depth; ++ ++ cap->ext_max = EST_TIWID_TO_EXTMAX(ti_wid); ++ cap->txqcnt = tsnif_est_get_txqcnt(hw, ioaddr); ++ tsnif_est_get_max(hw, &cap->cycle_max); ++ cap->est_support = 1; ++ ++ dev_info(pdev, "EST: depth=%u, ti_wid=%u, ter_max=%uns, tqcnt=%u\n", ++ gcl_depth, ti_wid, cap->ext_max, cap->txqcnt); ++ ++ return 0; ++} ++ ++int tsn_feat_set(struct mac_device_info *hw, struct net_device *dev, ++ enum tsn_feat_id featid, bool enable) ++{ ++ if (featid >= TSN_FEAT_ID_MAX) { ++ netdev_warn(dev, "TSN: invalid feature id(%u)\n", featid); ++ return -EINVAL; ++ } ++ ++ hw->tsn_info.feat_en[featid] = enable; ++ ++ return 0; ++} ++ ++bool tsn_has_feat(struct mac_device_info *hw, struct net_device *dev, ++ enum tsn_feat_id featid) ++{ ++ if (featid >= TSN_FEAT_ID_MAX) { ++ netdev_warn(dev, "TSN: invalid feature id(%u)\n", featid); ++ return -EINVAL; ++ } ++ ++ return hw->tsn_info.feat_en[featid]; ++} ++ ++int tsn_est_enable_set(struct mac_device_info *hw, struct net_device *dev, ++ bool enable) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ void __iomem *ioaddr = hw->pcsr; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_info(dev, "EST: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ tsnif_est_set_enable(hw, ioaddr, enable); ++ ++ info->est_gcc.enable = enable; ++ ++ return 0; ++} ++ ++int tsn_est_bank_get(struct mac_device_info *hw, struct net_device *dev, ++ bool is_own, u32 *bank) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_info(dev, "EST: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ *bank = tsnif_est_get_bank(hw, ioaddr, is_own); ++ ++ return 0; ++} ++ ++int 
tsn_est_gce_set(struct mac_device_info *hw, struct net_device *dev, ++ struct est_gc_entry *gce, u32 row, ++ u32 dbgb, bool is_dbgm) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ struct tsn_hw_cap *cap = &info->cap; ++ void __iomem *ioaddr = hw->pcsr; ++ u32 ti_nsec = gce->ti_nsec; ++ u32 gates = gce->gates; ++ struct est_gc_entry *gcl; ++ u32 gates_mask; ++ u32 ti_wid; ++ u32 ti_max; ++ u32 value; ++ u32 bank; ++ int ret; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_info(dev, "EST: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ if (dbgb >= EST_GCL_BANK_MAX) ++ return -EINVAL; ++ ++ bank = is_dbgm ? dbgb : tsnif_est_get_bank(hw, ioaddr, 1); ++ ++ if (!cap->gcl_depth || row > cap->gcl_depth) { ++ netdev_warn(dev, "EST: row(%u) > GCL depth(%u)\n", ++ row, cap->gcl_depth); ++ ++ return -EINVAL; ++ } ++ ++ ti_wid = cap->ti_wid; ++ ti_max = (1 << ti_wid) - 1; ++ if (ti_nsec > ti_max) { ++ netdev_warn(dev, "EST: ti_nsec(%u) > upper limit(%u)\n", ++ ti_nsec, ti_max); ++ ++ return -EINVAL; ++ } ++ ++ gates_mask = (1 << cap->txqcnt) - 1; ++ if (gates > gates_mask) { ++ netdev_warn(dev, "EST: gates 0x%x is out of boundary 0x%x", ++ gates, gates_mask); ++ ++ return -EINVAL; ++ } ++ ++ value = ((gates & gates_mask) << ti_wid) | ti_nsec; ++ ++ ret = tsnif_est_write_gcl_config(hw, ioaddr, value, row, I_GCE, ++ dbgb, is_dbgm); ++ if (ret) { ++ netdev_err(dev, "EST: GCE write failed: bank=%u row=%u.\n", ++ bank, row); ++ ++ return ret; ++ } ++ ++ netdev_info(dev, "EST: GCE write: dbgm=%u bank=%u row=%u, gc=0x%x.\n", ++ is_dbgm, bank, row, value); ++ ++ /* Since GC write is successful, update GCL copy of the driver */ ++ gcl = info->est_gcc.gcb[bank].gcl + row; ++ gcl->gates = gates; ++ gcl->ti_nsec = ti_nsec; ++ ++ return ret; ++} ++ ++int tsn_est_gcl_len_get(struct mac_device_info *hw, struct net_device *dev, ++ u32 *gcl_len, ++ u32 dbgb, bool is_dbgm) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 value; ++ u32 bank; ++ int ret; ++ ++ if 
(!tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_info(dev, "EST: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ if (dbgb >= EST_GCL_BANK_MAX) ++ return -EINVAL; ++ ++ bank = is_dbgm ? dbgb : tsnif_est_get_bank(hw, ioaddr, 1); ++ ++ ret = tsnif_est_read_gcl_config(hw, ioaddr, &value, ++ GCL_PARAM_IDX_LLR, I_PRM, ++ dbgb, is_dbgm); ++ if (ret) { ++ netdev_err(dev, "read LLR fail at bank=%u\n", bank); ++ ++ return ret; ++ } ++ ++ *gcl_len = value; ++ ++ return 0; ++} ++ ++int tsn_est_gcl_len_set(struct mac_device_info *hw, struct net_device *dev, ++ u32 gcl_len, ++ u32 dbgb, bool is_dbgm) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ struct tsn_hw_cap *cap = &info->cap; ++ void __iomem *ioaddr = hw->pcsr; ++ int ret = 0; ++ struct est_gcrr *bgcrr; ++ u32 bank; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_info(dev, "EST: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ if (dbgb >= EST_GCL_BANK_MAX) ++ return -EINVAL; ++ ++ bank = is_dbgm ? dbgb : tsnif_est_get_bank(hw, ioaddr, 1); ++ ++ if (gcl_len > cap->gcl_depth) { ++ netdev_warn(dev, "EST: GCL length(%u) > depth(%u)\n", ++ gcl_len, cap->gcl_depth); ++ ++ return -EINVAL; ++ } ++ ++ bgcrr = &info->est_gcc.gcb[bank].gcrr; ++ ++ if (gcl_len != bgcrr->llr) { ++ ret = tsnif_est_write_gcl_config(hw, ioaddr, gcl_len, ++ GCL_PARAM_IDX_LLR, I_PRM, ++ dbgb, is_dbgm); ++ if (ret) { ++ netdev_err(dev, "EST: GCRR programming failure!\n"); ++ ++ return ret; ++ } ++ bgcrr->llr = gcl_len; ++ } ++ ++ return 0; ++} ++ ++int tsn_est_gcrr_times_set(struct mac_device_info *hw, ++ struct net_device *dev, ++ struct est_gcrr *gcrr, ++ u32 dbgb, bool is_dbgm) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ struct tsn_hw_cap *cap = &info->cap; ++ u32 cycle_nsec = gcrr->cycle_nsec; ++ u32 cycle_sec = gcrr->cycle_sec; ++ u32 base_nsec = gcrr->base_nsec; ++ void __iomem *ioaddr = hw->pcsr; ++ u32 base_sec = gcrr->base_sec; ++ u32 ext_nsec = gcrr->ter_nsec; ++ int ret = 0; ++ u64 val_ns, sys_ns, 
tti_ns; ++ struct est_gcrr *bgcrr; ++ u32 gcl_len, bank; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_info(dev, "EST: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ if (dbgb >= EST_GCL_BANK_MAX) ++ return -EINVAL; ++ ++ bank = is_dbgm ? dbgb : tsnif_est_get_bank(hw, ioaddr, 1); ++ ++ if (base_nsec > NSEC_PER_SEC || cycle_nsec > NSEC_PER_SEC) { ++ netdev_warn(dev, "EST: base(%u) or cycle(%u) nsec > 1s !\n", ++ base_nsec, cycle_nsec); ++ ++ return -EINVAL; ++ } ++ ++ /* Ensure base time is later than MAC system time */ ++ val_ns = (u64)base_nsec; ++ val_ns += (u64)(base_sec * NSEC_PER_SEC); ++ ++ /* Get the MAC system time */ ++ sys_ns = readl(ioaddr + PTP_STNSR); ++ sys_ns += readl(ioaddr + PTP_STSR) * NSEC_PER_SEC; ++ ++ if (val_ns <= sys_ns) { ++ netdev_warn(dev, "EST: base time(%llu) <= system time(%llu)\n", ++ val_ns, sys_ns); ++ ++ return -EINVAL; ++ } ++ ++ if (cycle_sec > cap->cycle_max) { ++ netdev_warn(dev, "EST: cycle time(%u) > %u seconds\n", ++ cycle_sec, cap->cycle_max); ++ ++ return -EINVAL; ++ } ++ ++ if (ext_nsec > cap->ext_max) { ++ netdev_warn(dev, "EST: invalid time extension(%u), max=%u\n", ++ ext_nsec, cap->ext_max); ++ ++ return -EINVAL; ++ } ++ ++ bgcrr = &info->est_gcc.gcb[bank].gcrr; ++ gcl_len = bgcrr->llr; ++ ++ /* Sanity test on GCL total time intervals against cycle time. ++ * a) For GC length = 1, if its time interval is equal or greater ++ * than cycle time, it is a constant gate error. ++ * b) If total time interval > cycle time, irregardless of GC ++ * length, it is not considered an error that GC list is ++ * truncated. In this case, giving a warning message is ++ * sufficient. ++ * c) If total time interval < cycle time, irregardless of GC ++ * length, all GATES are OPEN after the last GC is processed ++ * until cycle time lapses. This is potentially due to poor ++ * GCL configuration but is not an error, so we inform user ++ * about it. 
++ */ ++ tti_ns = (u64)est_get_gcl_total_intervals_nsec(&info->est_gcc, bank, ++ gcl_len); ++ val_ns = (u64)cycle_nsec; ++ val_ns += (u64)(cycle_sec * NSEC_PER_SEC); ++ if (gcl_len == 1 && tti_ns >= val_ns) { ++ netdev_warn(dev, "EST: Constant gate error!\n"); ++ ++ return -EINVAL; ++ } ++ ++ if (tti_ns > val_ns) ++ netdev_warn(dev, "EST: GCL is truncated!\n"); ++ ++ if (tti_ns < val_ns) { ++ netdev_info(dev, ++ "EST: All GCs OPEN at %llu-ns of %llu-ns cycle\n", ++ tti_ns, val_ns); ++ } ++ ++ /* Finally, start programming GCL related registers if the value ++ * differs from the driver copy for efficiency. ++ */ ++ ++ if (base_nsec != bgcrr->base_nsec) ++ ret |= tsnif_est_write_gcl_config(hw, ioaddr, base_nsec, ++ GCL_PARAM_IDX_BTR_LO, I_PRM, ++ dbgb, is_dbgm); ++ ++ if (base_sec != bgcrr->base_sec) ++ ret |= tsnif_est_write_gcl_config(hw, ioaddr, base_sec, ++ GCL_PARAM_IDX_BTR_HI, I_PRM, ++ dbgb, is_dbgm); ++ ++ if (cycle_nsec != bgcrr->cycle_nsec) ++ ret |= tsnif_est_write_gcl_config(hw, ioaddr, cycle_nsec, ++ GCL_PARAM_IDX_CTR_LO, I_PRM, ++ dbgb, is_dbgm); ++ ++ if (cycle_sec != bgcrr->cycle_sec) ++ ret |= tsnif_est_write_gcl_config(hw, ioaddr, cycle_sec, ++ GCL_PARAM_IDX_CTR_HI, I_PRM, ++ dbgb, is_dbgm); ++ ++ if (ext_nsec != bgcrr->ter_nsec) ++ ret |= tsnif_est_write_gcl_config(hw, ioaddr, ext_nsec, ++ GCL_PARAM_IDX_TER, I_PRM, ++ dbgb, is_dbgm); ++ ++ if (ret) { ++ netdev_err(dev, "EST: GCRR programming failure!\n"); ++ ++ return ret; ++ } ++ ++ /* Finally, we are ready to switch SWOL now. 
*/ ++ tsnif_est_switch_swol(hw, ioaddr); ++ ++ /* Update driver copy */ ++ bgcrr->base_sec = base_sec; ++ bgcrr->base_nsec = base_nsec; ++ bgcrr->cycle_sec = cycle_sec; ++ bgcrr->cycle_nsec = cycle_nsec; ++ bgcrr->ter_nsec = ext_nsec; ++ ++ netdev_info(dev, "EST: gcrr set successful\n"); ++ ++ return 0; ++} ++ ++int tsn_est_gcc_get(struct mac_device_info *hw, struct net_device *dev, ++ struct est_gc_config **gcc) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ void __iomem *ioaddr = hw->pcsr; ++ struct est_gc_config *pgcc; ++ u32 ti_wid; ++ u32 txqcnt; ++ u32 value; ++ u32 bank; ++ int ret; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_info(dev, "EST: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ /* Get GC config from HW */ ++ pgcc = &info->est_gcc; ++ pgcc->enable = tsnif_est_get_enable(hw, ioaddr); ++ ++ for (bank = 0; bank < EST_GCL_BANK_MAX; bank++) { ++ u32 llr, row; ++ struct est_gc_bank *gcbc = &pgcc->gcb[bank]; ++ ++ ret = tsnif_est_read_gcl_config(hw, ioaddr, &value, ++ GCL_PARAM_IDX_BTR_LO, I_PRM, ++ bank, 1); ++ if (ret) { ++ netdev_err(dev, "read BTR(low) fail at bank=%u\n", ++ bank); ++ ++ return ret; ++ } ++ gcbc->gcrr.base_nsec = value; ++ ++ ret = tsnif_est_read_gcl_config(hw, ioaddr, &value, ++ GCL_PARAM_IDX_BTR_HI, I_PRM, ++ bank, 1); ++ if (ret) { ++ netdev_err(dev, "read BTR(high) fail at bank=%u\n", ++ bank); ++ ++ return ret; ++ } ++ gcbc->gcrr.base_sec = value; ++ ++ ret = tsnif_est_read_gcl_config(hw, ioaddr, &value, ++ GCL_PARAM_IDX_CTR_LO, I_PRM, ++ bank, 1); ++ if (ret) { ++ netdev_err(dev, "read CTR(low) fail at bank=%u\n", ++ bank); ++ ++ return ret; ++ } ++ gcbc->gcrr.cycle_nsec = value; ++ ++ ret = tsnif_est_read_gcl_config(hw, ioaddr, &value, ++ GCL_PARAM_IDX_CTR_HI, I_PRM, ++ bank, 1); ++ if (ret) { ++ netdev_err(dev, "read CTR(high) fail at bank=%u\n", ++ bank); ++ ++ return ret; ++ } ++ gcbc->gcrr.cycle_sec = value; ++ ++ ret = tsnif_est_read_gcl_config(hw, ioaddr, &value, ++ GCL_PARAM_IDX_TER, 
I_PRM, ++ bank, 1); ++ if (ret) { ++ netdev_err(dev, "read TER fail at bank=%u\n", bank); ++ ++ return ret; ++ } ++ gcbc->gcrr.ter_nsec = value; ++ ++ ret = tsnif_est_read_gcl_config(hw, ioaddr, &value, ++ GCL_PARAM_IDX_LLR, I_PRM, ++ bank, 1); ++ if (ret) { ++ netdev_err(dev, "read LLR fail at bank=%u\n", bank); ++ ++ return ret; ++ } ++ gcbc->gcrr.llr = value; ++ llr = value; ++ ++ ti_wid = info->cap.ti_wid; ++ txqcnt = info->cap.txqcnt; ++ for (row = 0; row < llr; row++) { ++ struct est_gc_entry *gce = gcbc->gcl + row; ++ u32 gates, ti_nsec; ++ ++ ret = tsnif_est_read_gce(hw, ioaddr, row, ++ &gates, &ti_nsec, ++ ti_wid, txqcnt, bank, 1); ++ if (ret) { ++ netdev_err(dev, ++ "read GCE fail at bank=%u row=%u\n", ++ bank, row); ++ ++ return ret; ++ } ++ gce->gates = gates; ++ gce->ti_nsec = ti_nsec; ++ } ++ } ++ ++ *gcc = pgcc; ++ netdev_info(dev, "EST: read GCL from HW done.\n"); ++ ++ return 0; ++} +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +new file mode 100644 +index 000000000000..e70ee6eea0d0 +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -0,0 +1,106 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (c) 2019, Intel Corporation. 
++ * Time-Sensitive Networking (TSN) Header ++ */ ++ ++#ifndef __STMMAC_TSN_H__ ++#define __STMMAC_TSN_H__ ++ ++#define MIN_TSN_CORE_VER 0x50 ++#define EST_GCL_BANK_MAX (2) ++#define EST_TIWID_TO_EXTMAX(ti_wid) ((1 << ((ti_wid) + 7)) - 1) ++ ++/* TSN Feature Enabled List */ ++enum tsn_feat_id { ++ TSN_FEAT_ID_EST = 0, ++ TSN_FEAT_ID_MAX, ++}; ++ ++/* TSN GCL Parameter Index */ ++#define I_GCE 0 /* Is GCE */ ++#define I_PRM 1 /* Is GCL Parameters */ ++/* Currently, the order of Param Index matches the GCL addr ++ * order defined in IPv5.xx MTL EST GCL control register ++ */ ++enum tsn_gcl_param_idx { ++ GCL_PARAM_IDX_BTR_LO = 0, ++ GCL_PARAM_IDX_BTR_HI = 1, ++ GCL_PARAM_IDX_CTR_LO = 2, ++ GCL_PARAM_IDX_CTR_HI = 3, ++ GCL_PARAM_IDX_TER = 4, ++ GCL_PARAM_IDX_LLR = 5, ++}; ++ ++/* TSN HW Capabilities */ ++struct tsn_hw_cap { ++ bool est_support; /* 1: supported */ ++ u32 txqcnt; /* Number of TxQ (control gate) */ ++ u32 gcl_depth; /* GCL depth. */ ++ u32 ti_wid; /* time interval width */ ++ u32 ext_max; /* Max time extension */ ++ u32 cycle_max; /* Max Cycle Time */ ++}; ++ ++/* EST Gate Control Entry */ ++struct est_gc_entry { ++ u32 gates; /* gate control: 0: closed, ++ * 1: open. 
++ */ ++ u32 ti_nsec; /* time interval in nsec */ ++}; ++ ++/* EST GCL Related Registers */ ++struct est_gcrr { ++ u32 base_nsec; /* base time denominator (nsec) */ ++ u32 base_sec; /* base time numerator (sec) */ ++ u32 cycle_nsec; /* cycle time denominator (nsec) */ ++ u32 cycle_sec; /* cycle time numerator sec)*/ ++ u32 ter_nsec; /* time extension (nsec) */ ++ u32 llr; /* GC list length */ ++}; ++ ++/* EST Gate Control bank */ ++struct est_gc_bank { ++ struct est_gc_entry *gcl; /* Gate Control List */ ++ struct est_gcrr gcrr; /* GCL Related Registers */ ++}; ++ ++/* EST Gate Control Configuration */ ++struct est_gc_config { ++ struct est_gc_bank gcb[EST_GCL_BANK_MAX]; ++ bool enable; /* 1: enabled */ ++}; ++ ++struct tsnif_info { ++ struct tsn_hw_cap cap; ++ bool feat_en[TSN_FEAT_ID_MAX]; ++ struct est_gc_config est_gcc; ++}; ++ ++struct mac_device_info; ++ ++/* TSN functions */ ++int tsn_init(struct mac_device_info *hw, struct net_device *dev); ++int tsn_feat_set(struct mac_device_info *hw, struct net_device *dev, ++ enum tsn_feat_id featid, bool enable); ++bool tsn_has_feat(struct mac_device_info *hw, struct net_device *dev, ++ enum tsn_feat_id featid); ++int tsn_est_enable_set(struct mac_device_info *hw, struct net_device *dev, ++ bool enable); ++int tsn_est_bank_get(struct mac_device_info *hw, struct net_device *dev, ++ bool is_own, u32 *bank); ++int tsn_est_gce_set(struct mac_device_info *hw, struct net_device *dev, ++ struct est_gc_entry *gce, u32 row, ++ u32 dbgb, bool is_dbgm); ++int tsn_est_gcl_len_set(struct mac_device_info *hw, struct net_device *dev, ++ u32 gcl_len, u32 dbgb, bool is_dbgm); ++int tsn_est_gcl_len_get(struct mac_device_info *hw, struct net_device *dev, ++ u32 *gcl_len, u32 dbgb, bool is_dbgm); ++int tsn_est_gcrr_times_set(struct mac_device_info *hw, ++ struct net_device *dev, ++ struct est_gcrr *gcrr, ++ u32 dbgb, bool is_dbgm); ++int tsn_est_gcc_get(struct mac_device_info *hw, struct net_device *dev, ++ struct est_gc_config **gcc); ++ 
++#endif /* __STMMAC_TSN_H__ */ +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index fa740da70078..f0eca7253577 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -184,6 +184,7 @@ struct plat_stmmacenet_data { + bool has_sun8i; + bool tso_en; + int rss_en; ++ bool tsn_est_en; + int mac_port_sel_speed; + bool en_tx_lpi_clockgating; + int has_xgmac; +-- +2.17.1 + diff --git a/patches/0035-serial-core-remove-legacy-PM-code.lpss b/patches/0035-serial-core-remove-legacy-PM-code.lpss new file mode 100644 index 0000000000..a315ca771d --- /dev/null +++ b/patches/0035-serial-core-remove-legacy-PM-code.lpss @@ -0,0 +1,212 @@ +From 3af05d5894b75b0858fe5b320c24a1d4f4627ff4 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Tue, 13 Sep 2016 00:21:01 +0300 +Subject: [PATCH 35/40] serial: core: remove legacy PM code + +The drivers in serial subsystem has been switched to support runtime PM, remove +legacy PM code from serial_core.c. + +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/serial_core.c | 55 -------------------------------- + include/linux/serial_core.h | 15 --------- + 2 files changed, 70 deletions(-) + +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index 119488d7f2d4..56aa2330ab16 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -44,8 +44,6 @@ static struct lock_class_key port_lock_key; + static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, + struct ktermios *old_termios); + static void uart_wait_until_sent(struct tty_struct *tty, int timeout); +-static void uart_change_pm(struct uart_state *state, +- enum uart_pm_state pm_state); + + static void uart_port_shutdown(struct tty_port *port); + +@@ -210,11 +208,6 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, + if (uport->type == PORT_UNKNOWN) + return 1; + +- /* +- * Make sure the device is in D0 state. 
+- */ +- uart_change_pm(state, UART_PM_STATE_ON); +- + /* + * Initialise and allocate the transmit and temporary + * buffer. +@@ -1651,9 +1644,6 @@ static void uart_tty_port_shutdown(struct tty_port *port) + * we don't try to resume a port that has been shutdown. + */ + tty_port_set_suspended(port, 0); +- +- uart_change_pm(state, UART_PM_STATE_OFF); +- + } + + static void uart_wait_until_sent(struct tty_struct *tty, int timeout) +@@ -1744,8 +1734,6 @@ static void uart_hangup(struct tty_struct *tty) + spin_unlock_irqrestore(&port->lock, flags); + tty_port_set_active(port, 0); + tty_port_tty_set(port, NULL); +- if (uport && !uart_console(uport)) +- uart_change_pm(state, UART_PM_STATE_OFF); + wake_up_interruptible(&port->open_wait); + wake_up_interruptible(&port->delta_msr_wait); + } +@@ -1894,7 +1882,6 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) + { + struct uart_state *state = drv->state + i; + struct tty_port *port = &state->port; +- enum uart_pm_state pm_state; + struct uart_port *uport; + char stat_buf[32]; + unsigned int status; +@@ -1919,17 +1906,12 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) + } + + if (capable(CAP_SYS_ADMIN)) { +- pm_state = state->pm_state; +- if (pm_state != UART_PM_STATE_ON) +- uart_change_pm(state, UART_PM_STATE_ON); + pm_runtime_get_sync(uport->dev); + spin_lock_irq(&uport->lock); + status = uport->ops->get_mctrl(uport); + spin_unlock_irq(&uport->lock); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); +- if (pm_state != UART_PM_STATE_ON) +- uart_change_pm(state, pm_state); + + seq_printf(m, " tx:%d rx:%d", + uport->icount.tx, uport->icount.rx); +@@ -2198,26 +2180,6 @@ uart_set_options(struct uart_port *port, struct console *co, + EXPORT_SYMBOL_GPL(uart_set_options); + #endif /* CONFIG_SERIAL_CORE_CONSOLE */ + +-/** +- * uart_change_pm - set power state of the port +- * +- * @state: port descriptor +- * @pm_state: new state +- * 
+- * Locking: port->mutex has to be held +- */ +-static void uart_change_pm(struct uart_state *state, +- enum uart_pm_state pm_state) +-{ +- struct uart_port *port = uart_port_check(state); +- +- if (state->pm_state != pm_state) { +- if (port && port->ops->pm) +- port->ops->pm(port, pm_state, state->pm_state); +- state->pm_state = pm_state; +- } +-} +- + struct uart_match { + struct uart_port *port; + struct uart_driver *driver; +@@ -2293,8 +2255,6 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) + */ + if (uart_console(uport)) + console_stop(uport->cons); +- +- uart_change_pm(state, UART_PM_STATE_OFF); + unlock: + mutex_unlock(&port->mutex); + +@@ -2338,9 +2298,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) + if (port->tty && termios.c_cflag == 0) + termios = port->tty->termios; + +- if (console_suspend_enabled) +- uart_change_pm(state, UART_PM_STATE_ON); +- + pm_runtime_get_sync(uport->dev); + uport->ops->set_termios(uport, &termios, NULL); + pm_runtime_mark_last_busy(uport->dev); +@@ -2354,7 +2311,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) + const struct uart_ops *ops = uport->ops; + int ret; + +- uart_change_pm(state, UART_PM_STATE_ON); + pm_runtime_get_sync(uport->dev); + spin_lock_irq(&uport->lock); + ops->set_mctrl(uport, 0); +@@ -2465,9 +2421,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, + + uart_report_port(drv, port); + +- /* Power up port for set_mctrl() */ +- uart_change_pm(state, UART_PM_STATE_ON); +- + /* + * Ensure that the modem control lines are de-activated. + * keep the DTR setting that is set in uart_set_options() +@@ -2487,13 +2440,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, + */ + if (port->cons && !(port->cons->flags & CON_ENABLED)) + register_console(port->cons); +- +- /* +- * Power down all ports by default, except the +- * console if we have one. 
+- */ +- if (!uart_console(port)) +- uart_change_pm(state, UART_PM_STATE_OFF); + } + } + +@@ -2924,7 +2870,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) + state->uart_port = uport; + uport->state = state; + +- state->pm_state = UART_PM_STATE_UNDEFINED; + uport->cons = drv->cons; + uport->minor = drv->tty_driver->minor_start + uport->line; + uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name, +diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h +index 2b78cc734719..48f690bd818d 100644 +--- a/include/linux/serial_core.h ++++ b/include/linux/serial_core.h +@@ -52,8 +52,6 @@ struct uart_ops { + void (*set_termios)(struct uart_port *, struct ktermios *new, + struct ktermios *old); + void (*set_ldisc)(struct uart_port *, struct ktermios *); +- void (*pm)(struct uart_port *, unsigned int state, +- unsigned int oldstate); + + /* + * Return a string describing the type of the port +@@ -265,25 +263,12 @@ static inline void serial_port_out(struct uart_port *up, int offset, int value) + up->serial_out(up, offset, value); + } + +-/** +- * enum uart_pm_state - power states for UARTs +- * @UART_PM_STATE_ON: UART is powered, up and operational +- * @UART_PM_STATE_OFF: UART is powered off +- * @UART_PM_STATE_UNDEFINED: sentinel +- */ +-enum uart_pm_state { +- UART_PM_STATE_ON = 0, +- UART_PM_STATE_OFF = 3, /* number taken from ACPI */ +- UART_PM_STATE_UNDEFINED, +-}; +- + /* + * This is the state information which is persistent across opens. 
+ */ + struct uart_state { + struct tty_port port; + +- enum uart_pm_state pm_state; + struct circ_buf xmit; + + atomic_t refcount; +-- +2.17.1 + diff --git a/patches/0035-trusty-Enable-dynamic-timer.trusty b/patches/0035-trusty-Enable-dynamic-timer.trusty new file mode 100644 index 0000000000..bcb038b848 --- /dev/null +++ b/patches/0035-trusty-Enable-dynamic-timer.trusty @@ -0,0 +1,210 @@ +From e13ed7fa66ab680c0a887bc2ff7b0813ce786dd5 Mon Sep 17 00:00:00 2001 +From: "Zhong,Fangjian" +Date: Tue, 6 Jun 2017 00:15:01 +0000 +Subject: [PATCH 35/63] trusty: Enable dynamic timer + +Enable the dynamic timer support for Trusty scheduling. +Besides periodic timer, Trusty now supports both dynamic timer +and periodic timer. Proxy timer drives the Trusty OS scheduling +in fixed periodical intervals. Dynamic timer is similar to +tickless mode which will not schedule if Trusty OS is idle. +This patch will consult Trusty OS for the timer mode to use +and enable the specified timer to drive Trusty scheduling. 
+ +Change-Id: Ic972c40d768cb59a8326842c698fafbe45af906c +Signed-off-by: Zhong,Fangjian +--- + drivers/trusty/trusty.c | 91 ++++++++++++++++++++++++++++++++--- + include/linux/trusty/smcall.h | 17 +++++++ + 2 files changed, 100 insertions(+), 8 deletions(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 93c73882b00c..647031dacb4e 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -25,9 +25,10 @@ + #include + #include + +-#define TRUSTY_VMCALL_SMC 0x74727500 ++#define TRUSTY_VMCALL_SMC 0x74727500 + #define TRUSTY_LKTIMER_INTERVAL 10 /* 10 ms */ + #define TRUSTY_LKTIMER_VECTOR 0x31 /* INT_PIT */ ++#define TRUSTY_STOP_TIMER 0xFFFFFFFF + + enum lktimer_mode { + ONESHOT_TIMER, +@@ -52,6 +53,12 @@ struct trusty_smc_interface { + ulong args[5]; + }; + ++static struct timer_list *lk_timer; ++ ++static ulong (*smc_func)(ulong r0, ulong r1, ulong r2, ulong r3); ++static ulong smc_dynamic_timer(ulong r0, ulong r1, ulong r2, ulong r3); ++static ulong smc_periodic_timer(ulong r0, ulong r1, ulong r2, ulong r3); ++ + static void trusty_lktimer_work_func(struct work_struct *work) + { + int ret; +@@ -72,7 +79,7 @@ static void trusty_lktimer_work_func(struct work_struct *work) + if (ret != SM_ERR_NOP_DONE) + dev_err(s->dev, "%s: SMC_SC_NOP failed %d", __func__, ret); + +- dev_notice_once(s->dev, "LK OS proxy timer works\n"); ++ dev_notice_once(s->dev, "LK OS timer works\n"); + } + + static void trusty_lktimer_func(unsigned long data) +@@ -92,6 +99,7 @@ static void trusty_init_lktimer(struct trusty_state *s) + { + INIT_WORK(&s->timer_work, trusty_lktimer_work_func); + setup_timer(&s->timer, trusty_lktimer_func, (unsigned long)s); ++ lk_timer = &s->timer; + } + + /* note that this function is not thread-safe */ +@@ -108,6 +116,39 @@ static void trusty_configure_lktimer(struct trusty_state *s, + mod_timer(&s->timer, jiffies + msecs_to_jiffies(s->timer_interval)); + } + ++static void trusty_init_smc_function(void) ++{ ++ smc_func = 
smc_periodic_timer; ++} ++ ++static void trusty_set_timer_mode(struct trusty_state *s, struct device *dev) ++{ ++ int ret; ++ ++ ret = trusty_fast_call32(dev, SMC_FC_TIMER_MODE, 0, 0, 0); ++ ++ if (ret == 0) { ++ smc_func = smc_dynamic_timer; ++ } else { ++ smc_func = smc_periodic_timer; ++ /* ++ * If bit 31 set indicates periodic timer is used ++ * bit 15:0 indicates interval ++ */ ++ if ((ret & 0x80000000) && (ret & 0x0FFFF)) { ++ trusty_configure_lktimer(s, ++ PERIODICAL_TIMER, ++ ret & 0x0FFFF); ++ } else { ++ /* set periodical timer with default interval */ ++ trusty_configure_lktimer(s, ++ PERIODICAL_TIMER, ++ TRUSTY_LKTIMER_INTERVAL); ++ } ++ } ++ ++} ++ + /* + * this should be called when removing trusty dev and + * when LK/Trusty crashes, to disable proxy timer. +@@ -119,12 +160,45 @@ static void trusty_del_lktimer(struct trusty_state *s) + } + + static inline ulong smc(ulong r0, ulong r1, ulong r2, ulong r3) ++{ ++ return smc_func(r0, r1, r2, r3); ++} ++ ++static ulong smc_dynamic_timer(ulong r0, ulong r1, ulong r2, ulong r3) + { + __asm__ __volatile__( + "vmcall; \n" +- :"=D"(r0) +- :"a"(TRUSTY_VMCALL_SMC), "D"(r0), "S"(r1), "d"(r2), "b"(r3) ++ : "=D"(r0), "=S"(r1), "=d"(r2), "=b"(r3) ++ : "a"(TRUSTY_VMCALL_SMC), "D"(r0), "S"(r1), "d"(r2), "b"(r3) + ); ++ ++ if (((r0 == SM_ERR_NOP_INTERRUPTED) || ++ (r0 == SM_ERR_INTERRUPTED)) && ++ (r1 != 0)) { ++ struct trusty_state *s; ++ ++ if (lk_timer != NULL) { ++ s = container_of(lk_timer, struct trusty_state, timer); ++ if (r1 != TRUSTY_STOP_TIMER) ++ trusty_configure_lktimer(s, ONESHOT_TIMER, r1); ++ else ++ trusty_configure_lktimer(s, ONESHOT_TIMER, 0); ++ } else { ++ pr_err("Trusty timer has not been initialized yet!\n"); ++ } ++ } ++ ++ return r0; ++} ++ ++static inline ulong smc_periodic_timer(ulong r0, ulong r1, ulong r2, ulong r3) ++{ ++ __asm__ __volatile__( ++ "vmcall; \n" ++ : "=D"(r0), "=S"(r1), "=d"(r2), "=b"(r3) ++ : "a"(TRUSTY_VMCALL_SMC), "D"(r0), "S"(r1), "d"(r2), "b"(r3) ++ ); ++ + return r0; + 
} + +@@ -472,19 +546,20 @@ static int trusty_probe(struct platform_device *pdev) + platform_set_drvdata(pdev, s); + s->dev = &pdev->dev; + ++ trusty_init_smc_function(); ++ trusty_init_lktimer(s); ++ trusty_set_timer_mode(s, &pdev->dev); ++ + trusty_init_version(s, &pdev->dev); + + ret = trusty_init_api_version(s, &pdev->dev); + if (ret < 0) + goto err_api_version; + +- trusty_init_lktimer(s); +- trusty_configure_lktimer(s, +- PERIODICAL_TIMER, TRUSTY_LKTIMER_INTERVAL); +- + return 0; + + err_api_version: ++ trusty_del_lktimer(s); + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); + kfree(s->version_str); +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index 1160890a3d90..974b7b3e753d 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -123,6 +123,23 @@ + #define TRUSTY_API_VERSION_CURRENT (2) + #define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11) + ++/** ++ * SMC_FC_TIMER_MODE - Find and set timer mode ++ * Returns timer mode from trusty. ++ * ++ * Return value stands for: ++ * Bit 31 : ++ * If this bit is set, trusty uses periodic timer, Android trusty driver ++ * injects timer interrupt to trusty with specified interval. ++ * If this bit is clear, trusty uses dynamic timer, Android trusty ++ * driver injects timer interrupt to trusty on demand. ++ * Bit 15:0 : ++ * If bit 31 is set, Android trusty driver injects timer interrupt to ++ * trusty with interval specified by this field in milliseconds. ++ * If bit 31 is clear, this field is ignored. 
++ */ ++#define SMC_FC_TIMER_MODE SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 12) ++ + /* TRUSTED_OS entity calls */ + #define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20) + #define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21) +-- +2.17.1 + diff --git a/patches/0036-ASoC-Intel-Reuse-sst_pdata-fw-field.audio b/patches/0036-ASoC-Intel-Reuse-sst_pdata-fw-field.audio new file mode 100644 index 0000000000..b6821e06eb --- /dev/null +++ b/patches/0036-ASoC-Intel-Reuse-sst_pdata-fw-field.audio @@ -0,0 +1,185 @@ +From 21a254f8a850ef74baf21075dd260dc652b92ec1 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Sun, 11 Aug 2019 20:33:03 +0200 +Subject: [PATCH 036/193] ASoC: Intel: Reuse sst_pdata::fw field + +Similarly to fw_name, sst_pdata::fw can be used to unify usage of +firmware objects for all SSTs. + +This change invalidates sst_dsp::fw field. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 15 ++++++++------- + sound/soc/intel/skylake/cnl-sst.c | 15 ++++++++------- + sound/soc/intel/skylake/skl-sst.c | 22 ++++++++++++---------- + 3 files changed, 28 insertions(+), 24 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index 08b834a4f7ac..a77e31c6f34e 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -185,10 +185,11 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx) + { + struct firmware stripped_fw; + struct skl_dev *skl = ctx->thread_context; ++ struct sst_pdata *pdata = ctx->pdata; + int ret, i; + +- if (ctx->fw == NULL) { +- ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev); ++ if (!pdata->fw) { ++ ret = request_firmware(&pdata->fw, ctx->fw_name, ctx->dev); + if (ret < 0) { + dev_err(ctx->dev, "Request firmware failed %d\n", ret); + return ret; +@@ -196,14 +197,14 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx) + } + + if (skl->is_first_boot) { +- ret = snd_skl_parse_manifest(ctx, 
ctx->fw, ++ ret = snd_skl_parse_manifest(ctx, pdata->fw, + BXT_ADSP_FW_BIN_HDR_OFFSET, 0); + if (ret < 0) + goto sst_load_base_firmware_failed; + } + +- stripped_fw.data = ctx->fw->data; +- stripped_fw.size = ctx->fw->size; ++ stripped_fw.data = pdata->fw->data; ++ stripped_fw.size = pdata->fw->size; + skl_dsp_strip_extended_manifest(&stripped_fw); + + for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) { +@@ -246,8 +247,8 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx) + return ret; + + sst_load_base_firmware_failed: +- release_firmware(ctx->fw); +- ctx->fw = NULL; ++ release_firmware(pdata->fw); ++ pdata->fw = NULL; + return ret; + } + +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 164fb5f92797..28d469cbe09e 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -108,10 +108,11 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx) + { + struct firmware stripped_fw; + struct skl_dev *cnl = ctx->thread_context; ++ struct sst_pdata *pdata = ctx->pdata; + int ret; + +- if (!ctx->fw) { +- ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev); ++ if (!pdata->fw) { ++ ret = request_firmware(&pdata->fw, ctx->fw_name, ctx->dev); + if (ret < 0) { + dev_err(ctx->dev, "request firmware failed: %d\n", ret); + goto cnl_load_base_firmware_failed; +@@ -119,14 +120,14 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx) + } + + if (cnl->is_first_boot) { +- ret = snd_skl_parse_manifest(ctx, ctx->fw, ++ ret = snd_skl_parse_manifest(ctx, pdata->fw, + CNL_ADSP_FW_HDR_OFFSET, 0); + if (ret < 0) + goto cnl_load_base_firmware_failed; + } + +- stripped_fw.data = ctx->fw->data; +- stripped_fw.size = ctx->fw->size; ++ stripped_fw.data = pdata->fw->data; ++ stripped_fw.size = pdata->fw->size; + skl_dsp_strip_extended_manifest(&stripped_fw); + + ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size); +@@ -156,8 +157,8 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx) + return 0; + + 
cnl_load_base_firmware_failed: +- release_firmware(ctx->fw); +- ctx->fw = NULL; ++ release_firmware(pdata->fw); ++ pdata->fw = NULL; + + return ret; + } +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index f7b06e1d3d5a..f84c1191fe0d 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -67,14 +67,15 @@ static int skl_load_base_firmware(struct sst_dsp *ctx) + { + int ret = 0, i; + struct skl_dev *skl = ctx->thread_context; ++ struct sst_pdata *pdata = ctx->pdata; + struct firmware stripped_fw; + u32 reg; + + skl->boot_complete = false; + init_waitqueue_head(&skl->boot_wait); + +- if (ctx->fw == NULL) { +- ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev); ++ if (!pdata->fw) { ++ ret = request_firmware(&pdata->fw, ctx->fw_name, ctx->dev); + if (ret < 0) { + dev_err(ctx->dev, "Request firmware failed %d\n", ret); + return -EIO; +@@ -82,19 +83,19 @@ static int skl_load_base_firmware(struct sst_dsp *ctx) + } + + if (skl->is_first_boot) { +- ret = snd_skl_parse_manifest(ctx, ctx->fw, ++ ret = snd_skl_parse_manifest(ctx, pdata->fw, + SKL_ADSP_FW_BIN_HDR_OFFSET, 0); + if (ret < 0) { + dev_err(ctx->dev, "Manifest parsing err: %d\n", ret); +- release_firmware(ctx->fw); ++ release_firmware(pdata->fw); + skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK); + return ret; + } + } + + /* check for extended manifest */ +- stripped_fw.data = ctx->fw->data; +- stripped_fw.size = ctx->fw->size; ++ stripped_fw.data = pdata->fw->data; ++ stripped_fw.size = pdata->fw->size; + + skl_dsp_strip_extended_manifest(&stripped_fw); + +@@ -152,8 +153,8 @@ static int skl_load_base_firmware(struct sst_dsp *ctx) + ctx->cl_dev.ops.cl_cleanup_controller(ctx); + skl_load_base_firmware_failed: + skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK); +- release_firmware(ctx->fw); +- ctx->fw = NULL; ++ release_firmware(pdata->fw); ++ pdata->fw = NULL; + return ret; + } + +@@ -602,10 +603,11 @@ EXPORT_SYMBOL_GPL(skl_sst_init_fw); + void 
skl_sst_dsp_cleanup(struct skl_dev *skl) + { + struct sst_dsp *dsp = skl->dsp; ++ struct sst_pdata *pdata = dsp->pdata; + + skl_release_library(skl->lib_info, skl->lib_count); +- if (dsp->fw) +- release_firmware(dsp->fw); ++ if (pdata->fw) ++ release_firmware(pdata->fw); + skl_clear_module_table(dsp); + + list_del_init(&skl->module_list); +-- +2.17.1 + diff --git a/patches/0036-HVLog-add-HVLog-module.acrn b/patches/0036-HVLog-add-HVLog-module.acrn new file mode 100644 index 0000000000..c563c10b31 --- /dev/null +++ b/patches/0036-HVLog-add-HVLog-module.acrn @@ -0,0 +1,501 @@ +From 61108645191769d22e3585119499159024918f46 Mon Sep 17 00:00:00 2001 +From: "Li, Fei1" +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 036/150] HVLog: add HVLog module + +Change-Id: I328bee769ea93dacf1642e4ffc142adb66356d2a +Tracked-On:220304 +Signed-off-by: Li, Fei1 +--- + drivers/acrn/acrn_hvlog.c | 349 ++++++++++++++++++++++++++++++++++++++ + drivers/acrn/acrn_trace.c | 6 +- + drivers/acrn/sbuf.c | 34 +++- + drivers/acrn/sbuf.h | 11 +- + 4 files changed, 393 insertions(+), 7 deletions(-) + +diff --git a/drivers/acrn/acrn_hvlog.c b/drivers/acrn/acrn_hvlog.c +index 9c30fba58faf..84e04ee32819 100644 +--- a/drivers/acrn/acrn_hvlog.c ++++ b/drivers/acrn/acrn_hvlog.c +@@ -52,8 +52,39 @@ + * Li Fei + * + */ ++#define pr_fmt(fmt) "ACRN HVLog: " fmt ++ + #include + #include ++#include ++#include ++#include ++#include ++#include ++ ++#include "sbuf.h" ++ ++#define LOG_ENTRY_SIZE 80 ++#define PCPU_NRS 4 ++ ++#define foreach_cpu(cpu, cpu_num) \ ++ for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) ++ ++#define foreach_hvlog_type(idx, hvlog_type) \ ++ for ((idx) = 0; (idx) < (hvlog_type); (idx)++) ++ ++enum sbuf_hvlog_index { ++ SBUF_CUR_HVLOG = 0, ++ SBUF_LAST_HVLOG, ++ SBUF_HVLOG_TYPES ++}; ++ ++struct acrn_hvlog { ++ struct miscdevice miscdev; ++ shared_buf_t *sbuf; ++ atomic_t open_cnt; ++ int pcpu_num; ++}; + + static unsigned long long hvlog_buf_size; + static unsigned long long hvlog_buf_base; 
+@@ -78,6 +109,324 @@ static int __init early_hvlog(char *p) + return ret; + } + } ++ + return 0; + } + early_param("hvlog", early_hvlog); ++ ++ ++static inline shared_buf_t *hvlog_mark_unread(shared_buf_t *sbuf) ++{ ++ /* sbuf must point to valid data. ++ * clear the lowest bit in the magic to indicate that ++ * the sbuf point to the last boot valid data, we should ++ * read it later. ++ */ ++ if (sbuf != NULL) ++ sbuf->magic &= ~1; ++ ++ return sbuf; ++} ++ ++static int acrn_hvlog_open(struct inode *inode, struct file *filp) ++{ ++ struct acrn_hvlog *acrn_hvlog; ++ ++ acrn_hvlog = container_of(filp->private_data, ++ struct acrn_hvlog, miscdev); ++ pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); ++ ++ if (acrn_hvlog->pcpu_num >= PCPU_NRS) { ++ pr_err("%s, invalid pcpu_num: %d\n", ++ __func__, acrn_hvlog->pcpu_num); ++ return -EIO; ++ } ++ ++ /* More than one reader at the same time could get data messed up */ ++ if (atomic_cmpxchg(&acrn_hvlog->open_cnt, 0, 1) != 0) ++ return -EBUSY; ++ ++ filp->private_data = acrn_hvlog; ++ ++ return 0; ++} ++ ++static int acrn_hvlog_release(struct inode *inode, struct file *filp) ++{ ++ struct acrn_hvlog *acrn_hvlog; ++ ++ acrn_hvlog = filp->private_data; ++ ++ pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); ++ ++ if (acrn_hvlog->pcpu_num >= PCPU_NRS) { ++ pr_err("%s, invalid pcpu_num: %d\n", ++ __func__, acrn_hvlog->pcpu_num); ++ return -EIO; ++ } ++ ++ atomic_dec(&acrn_hvlog->open_cnt); ++ filp->private_data = NULL; ++ ++ return 0; ++} ++ ++static ssize_t acrn_hvlog_read(struct file *filp, char __user *buf, ++ size_t count, loff_t *offset) ++{ ++ char data[LOG_ENTRY_SIZE]; ++ struct acrn_hvlog *acrn_hvlog; ++ int ret; ++ ++ acrn_hvlog = (struct acrn_hvlog *)filp->private_data; ++ ++ pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); ++ ++ if (acrn_hvlog->pcpu_num >= PCPU_NRS) { ++ pr_err("%s, invalid pcpu_num: %d\n", ++ __func__, acrn_hvlog->pcpu_num); ++ return -EIO; ++ } ++ ++ if (acrn_hvlog->sbuf 
!= NULL) { ++ ret = sbuf_get(acrn_hvlog->sbuf, (uint8_t *)&data); ++ if (ret > 0) { ++ if (copy_to_user(buf, &data, ret)) ++ return -EFAULT; ++ } ++ ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static const struct file_operations acrn_hvlog_fops = { ++ .owner = THIS_MODULE, ++ .open = acrn_hvlog_open, ++ .release = acrn_hvlog_release, ++ .read = acrn_hvlog_read, ++}; ++ ++static struct acrn_hvlog acrn_hvlog_devs[SBUF_HVLOG_TYPES][PCPU_NRS] = { ++ [SBUF_CUR_HVLOG] = { ++ { ++ .miscdev = { ++ .name = "acrn_hvlog_cur_0", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_hvlog_fops, ++ }, ++ .pcpu_num = 0, ++ }, ++ { ++ .miscdev = { ++ .name = "acrn_hvlog_cur_1", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_hvlog_fops, ++ }, ++ .pcpu_num = 1, ++ }, ++ { ++ .miscdev = { ++ .name = "acrn_hvlog_cur_2", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_hvlog_fops, ++ }, ++ .pcpu_num = 2, ++ }, ++ { ++ .miscdev = { ++ .name = "acrn_hvlog_cur_3", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_hvlog_fops, ++ }, ++ .pcpu_num = 3, ++ }, ++ }, ++ [SBUF_LAST_HVLOG] = { ++ { ++ .miscdev = { ++ .name = "acrn_hvlog_last_0", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_hvlog_fops, ++ }, ++ .pcpu_num = 0, ++ }, ++ { ++ .miscdev = { ++ .name = "acrn_hvlog_last_1", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_hvlog_fops, ++ }, ++ .pcpu_num = 1, ++ }, ++ { ++ .miscdev = { ++ .name = "acrn_hvlog_last_2", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_hvlog_fops, ++ }, ++ .pcpu_num = 2, ++ }, ++ { ++ .miscdev = { ++ .name = "acrn_hvlog_last_3", ++ .minor = MISC_DYNAMIC_MINOR, ++ .fops = &acrn_hvlog_fops, ++ }, ++ .pcpu_num = 3, ++ }, ++ } ++}; ++ ++static int __init acrn_hvlog_init(void) ++{ ++ int ret = 0; ++ int i, j, idx; ++ uint32_t pcpu_id; ++ uint64_t logbuf_base0; ++ uint64_t logbuf_base1; ++ uint64_t logbuf_size; ++ uint32_t ele_size; ++ uint32_t ele_num; ++ uint32_t size; ++ bool sbuf_constructed = false; ++ ++ shared_buf_t *sbuf0[PCPU_NRS]; ++ shared_buf_t 
*sbuf1[PCPU_NRS]; ++ ++ pr_info("%s\n", __func__); ++ if (!hvlog_buf_base || !hvlog_buf_size) { ++ pr_warn("no fixed memory reserve for hvlog.\n"); ++ return 0; ++ } ++ ++ logbuf_base0 = hvlog_buf_base; ++ logbuf_size = (hvlog_buf_size >> 1); ++ logbuf_base1 = hvlog_buf_base + logbuf_size; ++ ++ size = (logbuf_size / PCPU_NRS); ++ ele_size = LOG_ENTRY_SIZE; ++ ele_num = (size - SBUF_HEAD_SIZE) / ele_size; ++ ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ sbuf0[pcpu_id] = sbuf_check_valid(ele_num, ele_size, ++ logbuf_base0 + size * pcpu_id); ++ sbuf1[pcpu_id] = sbuf_check_valid(ele_num, ele_size, ++ logbuf_base1 + size * pcpu_id); ++ } ++ ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ if (sbuf0[pcpu_id] == NULL) ++ continue; ++ ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ acrn_hvlog_devs[SBUF_LAST_HVLOG][pcpu_id].sbuf = ++ hvlog_mark_unread(sbuf0[pcpu_id]); ++ acrn_hvlog_devs[SBUF_CUR_HVLOG][pcpu_id].sbuf = ++ sbuf_construct(ele_num, ele_size, ++ logbuf_base1 + size * pcpu_id); ++ } ++ sbuf_constructed = true; ++ } ++ ++ if (sbuf_constructed == false) { ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ if (sbuf1[pcpu_id] == NULL) ++ continue; ++ ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ acrn_hvlog_devs[SBUF_LAST_HVLOG][pcpu_id].sbuf = ++ hvlog_mark_unread(sbuf1[pcpu_id]); ++ } ++ } ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ acrn_hvlog_devs[SBUF_CUR_HVLOG][pcpu_id].sbuf = ++ sbuf_construct(ele_num, ele_size, ++ logbuf_base0 + size * pcpu_id); ++ } ++ sbuf_constructed = true; ++ } ++ ++ idx = SBUF_CUR_HVLOG; ++ { ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ ret = sbuf_share_setup(pcpu_id, ACRN_HVLOG, ++ acrn_hvlog_devs[idx][pcpu_id].sbuf); ++ if (ret < 0) { ++ pr_err("Failed to setup %s, errno %d\n", ++ acrn_hvlog_devs[idx][pcpu_id].miscdev.name, ret); ++ goto setup_err; ++ } ++ } ++ } ++ ++ foreach_hvlog_type(idx, SBUF_HVLOG_TYPES) { ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ atomic_set(&acrn_hvlog_devs[idx][pcpu_id].open_cnt, 0); ++ ++ ret = misc_register( ++ &acrn_hvlog_devs[idx][pcpu_id].miscdev); ++ if (ret < 0) 
{ ++ pr_err("Failed to register %s, errno %d\n", ++ acrn_hvlog_devs[idx][pcpu_id].miscdev.name, ret); ++ goto reg_err; ++ } ++ } ++ } ++ ++ return 0; ++ ++reg_err: ++ foreach_hvlog_type(i, idx) { ++ foreach_cpu(j, PCPU_NRS) { ++ misc_deregister(&acrn_hvlog_devs[i][j].miscdev); ++ } ++ } ++ ++ foreach_cpu(j, pcpu_id) { ++ misc_deregister(&acrn_hvlog_devs[idx][j].miscdev); ++ } ++ ++ pcpu_id = PCPU_NRS; ++setup_err: ++ idx = SBUF_CUR_HVLOG; ++ { ++ foreach_cpu(j, pcpu_id) { ++ sbuf_share_setup(j, ACRN_HVLOG, 0); ++ sbuf_deconstruct(acrn_hvlog_devs[idx][j].sbuf); ++ } ++ } ++ ++ return ret; ++} ++ ++static void __exit acrn_hvlog_exit(void) ++{ ++ int idx; ++ uint32_t pcpu_id; ++ ++ pr_info("%s\n", __func__); ++ ++ foreach_hvlog_type(idx, SBUF_HVLOG_TYPES) { ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ misc_deregister(&acrn_hvlog_devs[idx][pcpu_id].miscdev); ++ } ++ } ++ ++ idx = SBUF_CUR_HVLOG; ++ { ++ foreach_cpu(pcpu_id, PCPU_NRS) { ++ sbuf_share_setup(pcpu_id, ACRN_HVLOG, 0); ++ sbuf_deconstruct(acrn_hvlog_devs[idx][pcpu_id].sbuf); ++ } ++ } ++} ++ ++module_init(acrn_hvlog_init); ++module_exit(acrn_hvlog_exit); ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Intel Corp., http://www.intel.com"); ++MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Logmsg"); ++MODULE_VERSION("0.1"); +diff --git a/drivers/acrn/acrn_trace.c b/drivers/acrn/acrn_trace.c +index 856ab650acfd..d48b03625223 100644 +--- a/drivers/acrn/acrn_trace.c ++++ b/drivers/acrn/acrn_trace.c +@@ -239,7 +239,7 @@ static int __init acrn_trace_init(void) + } + + foreach_cpu(cpu, pcpu_num) { +- ret = sbuf_share_setup(cpu, 0, sbuf_per_cpu[cpu]); ++ ret = sbuf_share_setup(cpu, ACRN_TRACE, sbuf_per_cpu[cpu]); + if (ret < 0) { + pr_err("Failed to setup SBuf, cpuid %d\n", cpu); + goto out_sbuf; +@@ -264,7 +264,7 @@ static int __init acrn_trace_init(void) + + out_sbuf: + for (i = --cpu; i >= 0; i--) +- sbuf_share_setup(i, 0, NULL); ++ sbuf_share_setup(i, ACRN_TRACE, NULL); + cpu = pcpu_num; + + out_free: +@@ 
-288,7 +288,7 @@ static void __exit acrn_trace_exit(void) + misc_deregister(acrn_trace_devs[cpu]); + + /* set sbuf pointer to NULL in HV */ +- sbuf_share_setup(cpu, 0, NULL); ++ sbuf_share_setup(cpu, ACRN_TRACE, NULL); + + /* free sbuf, sbuf_per_cpu[cpu] should be set NULL */ + sbuf_free(sbuf_per_cpu[cpu]); +diff --git a/drivers/acrn/sbuf.c b/drivers/acrn/sbuf.c +index 8849ce28a06c..a3582325d9b9 100644 +--- a/drivers/acrn/sbuf.c ++++ b/drivers/acrn/sbuf.c +@@ -185,7 +185,7 @@ int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf) + } + EXPORT_SYMBOL(sbuf_share_setup); + +-shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, ++shared_buf_t *sbuf_check_valid(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) + { + shared_buf_t *sbuf; +@@ -199,11 +199,39 @@ shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, + if ((sbuf->magic == SBUF_MAGIC) && + (sbuf->ele_num == ele_num) && + (sbuf->ele_size == ele_size)) { +- pr_info("construct sbuf at 0x%llx.\n", paddr); +- /* return sbuf for dump */ + return sbuf; + } + + return NULL; + } ++EXPORT_SYMBOL(sbuf_check_valid); ++ ++shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, ++ uint64_t paddr) ++{ ++ shared_buf_t *sbuf; ++ ++ if (!ele_num || !ele_size || !paddr) ++ return NULL; ++ ++ sbuf = (shared_buf_t *)phys_to_virt(paddr); ++ BUG_ON(!virt_addr_valid(sbuf)); ++ ++ memset(sbuf, 0, SBUF_HEAD_SIZE); ++ sbuf->magic = SBUF_MAGIC; ++ sbuf->ele_num = ele_num; ++ sbuf->ele_size = ele_size; ++ sbuf->size = ele_num * ele_size; ++ pr_info("construct sbuf at 0x%llx.\n", paddr); ++ return sbuf; ++} + EXPORT_SYMBOL(sbuf_construct); ++ ++void sbuf_deconstruct(shared_buf_t *sbuf) ++{ ++ if (sbuf == NULL) ++ return; ++ ++ sbuf->magic = 0; ++} ++EXPORT_SYMBOL(sbuf_deconstruct); +diff --git a/drivers/acrn/sbuf.h b/drivers/acrn/sbuf.h +index 73608c35046c..4fae7a258bce 100644 +--- a/drivers/acrn/sbuf.h ++++ b/drivers/acrn/sbuf.h +@@ -67,6 +67,11 @@ + #define OVERRUN_CNT_EN 
(1ULL << 0) /* whether overrun counting is enabled */ + #define OVERWRITE_EN (1ULL << 1) /* whether overwrite is enabled */ + ++enum sbuf_type { ++ ACRN_TRACE, ++ ACRN_HVLOG, ++ ACRN_SBUF_TYPE_MAX, ++}; + /** + * (sbuf) head + buf (store (ele_num - 1) elements at most) + * buffer empty: tail == head +@@ -115,6 +120,10 @@ shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size); + void sbuf_free(shared_buf_t *sbuf); + int sbuf_get(shared_buf_t *sbuf, uint8_t *data); + int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf); +-shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, uint64_t gpa); ++shared_buf_t *sbuf_check_valid(uint32_t ele_num, uint32_t ele_size, ++ uint64_t gpa); ++shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, ++ uint64_t gpa); ++void sbuf_deconstruct(shared_buf_t *sbuf); + + #endif /* SHARED_BUF_H */ +-- +2.17.1 + diff --git a/patches/0036-check-vmm-signature-for-vmm-dump.trusty b/patches/0036-check-vmm-signature-for-vmm-dump.trusty new file mode 100644 index 0000000000..b97fa6913a --- /dev/null +++ b/patches/0036-check-vmm-signature-for-vmm-dump.trusty @@ -0,0 +1,182 @@ +From 63e681203b8a513680725f0db4c9e1c47cb070ff Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Wed, 28 Jun 2017 06:26:15 +0000 +Subject: [PATCH 36/63] check vmm signature for vmm dump + +Change-Id: Ibc0e1ebf561b0b4278bb5f2d92d173685810aa22 +Signed-off-by: Zhang, Qi +--- + drivers/trusty/trusty-ipc.c | 2 +- + drivers/trusty/trusty-irq.c | 2 +- + drivers/trusty/trusty-log.c | 55 ++++++++++++++++++---------------- + drivers/trusty/trusty-virtio.c | 2 +- + drivers/trusty/trusty.c | 2 +- + include/linux/trusty/trusty.h | 14 +++++---- + 6 files changed, 42 insertions(+), 35 deletions(-) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index 68f677f91c21..93003b45eb32 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -1525,7 +1525,7 @@ static int 
tipc_virtio_probe(struct virtio_device *vdev) + vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb}; + const char *vq_names[] = { "rx", "tx" }; + +- err = trusty_check_cpuid(); ++ err = trusty_check_cpuid(NULL); + if (err < 0) { + dev_err(&vdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); + return -EINVAL; +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index afdea66c23c2..d17162c6a85e 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -587,7 +587,7 @@ static int trusty_irq_probe(struct platform_device *pdev) + struct trusty_irq_state *is; + work_func_t work_func; + +- ret = trusty_check_cpuid(); ++ ret = trusty_check_cpuid(NULL); + if (ret < 0) { + dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); + return -EINVAL; +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +index 0f00d0074fc9..c5a85ccaf222 100644 +--- a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -255,10 +255,11 @@ static int trusty_log_probe(struct platform_device *pdev) + { + struct trusty_log_state *s; + int result; ++ u32 vmm_signature; + phys_addr_t pa; + struct deadloop_dump *dump; + +- result = trusty_check_cpuid(); ++ result = trusty_check_cpuid(&vmm_signature); + if (result < 0) { + dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); + return -EINVAL; +@@ -316,33 +317,35 @@ static int trusty_log_probe(struct platform_device *pdev) + goto error_panic_notifier; + } + +- /* allocate debug buffer for vmm panic dump */ +- g_vmm_debug_buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); +- if (!g_vmm_debug_buf) { +- result = -ENOMEM; +- goto error_alloc_vmm; +- } +- +- dump = (struct deadloop_dump *)g_vmm_debug_buf; +- dump->version_of_this_struct = VMM_DUMP_VERSION; +- dump->size_of_this_struct = sizeof(struct deadloop_dump); +- dump->is_valid = false; ++ if(vmm_signature == EVMM_SIGNATURE_VMM) { ++ /* allocate 
debug buffer for vmm panic dump */ ++ g_vmm_debug_buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); ++ if (!g_vmm_debug_buf) { ++ result = -ENOMEM; ++ goto error_alloc_vmm; ++ } + +- /* shared the buffer to vmm by VMCALL */ +- result = trusty_vmm_dump_init(dump); +- if (result < 0) { +- dev_err(&pdev->dev, +- "failed to share the dump buffer to VMM\n"); +- goto error_vmm_panic_notifier; +- } ++ dump = (struct deadloop_dump *)g_vmm_debug_buf; ++ dump->version_of_this_struct = VMM_DUMP_VERSION; ++ dump->size_of_this_struct = sizeof(struct deadloop_dump); ++ dump->is_valid = false; ++ ++ /* shared the buffer to vmm by VMCALL */ ++ result = trusty_vmm_dump_init(dump); ++ if (result < 0) { ++ dev_err(&pdev->dev, ++ "failed to share the dump buffer to VMM\n"); ++ goto error_vmm_panic_notifier; ++ } + +- /* register the panic notifier for vmm */ +- result = atomic_notifier_chain_register(&panic_notifier_list, +- &trusty_vmm_panic_nb); +- if (result < 0) { +- dev_err(&pdev->dev, +- "failed to register vmm panic notifier\n"); +- goto error_vmm_panic_notifier; ++ /* register the panic notifier for vmm */ ++ result = atomic_notifier_chain_register(&panic_notifier_list, ++ &trusty_vmm_panic_nb); ++ if (result < 0) { ++ dev_err(&pdev->dev, ++ "failed to register vmm panic notifier\n"); ++ goto error_vmm_panic_notifier; ++ } + } + + platform_set_drvdata(pdev, s); +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index 6cb1ec762efe..6bbf80ce7d7f 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -629,7 +629,7 @@ static int trusty_virtio_probe(struct platform_device *pdev) + int ret; + struct trusty_ctx *tctx; + +- ret = trusty_check_cpuid(); ++ ret = trusty_check_cpuid(NULL); + if (ret < 0) { + dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); + return -EINVAL; +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 647031dacb4e..8e7e715d7018 100644 +--- 
a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -524,7 +524,7 @@ static int trusty_probe(struct platform_device *pdev) + struct trusty_state *s; + struct device_node *node = pdev->dev.of_node; + +- ret = trusty_check_cpuid(); ++ ret = trusty_check_cpuid(NULL); + if (ret < 0) { + dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); + return -EINVAL; +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index f7b0a14c9a1d..aba204b9ff3a 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -70,19 +70,23 @@ int trusty_call32_mem_buf(struct device *dev, u32 smcnr, + pgprot_t pgprot); + + /* CPUID leaf 0x3 is used because eVMM will trap this leaf.*/ +-#define EVMM_RUNNING_SIGNATURE_CORP 0x43544E49 /* "INTC", edx */ +-#define EVMM_RUNNING_SIGNATURE_MON 0x4D4D5645 /* "XMON", ecx */ ++#define EVMM_SIGNATURE_CORP 0x43544E49 /* "INTC", edx */ ++#define EVMM_SIGNATURE_VMM 0x4D4D5645 /* "EVMM", ecx */ + +-static inline int trusty_check_cpuid(void) ++static inline int trusty_check_cpuid(u32 *vmm_signature) + { + u32 eax, ebx, ecx, edx; + + cpuid(3, &eax, &ebx, &ecx, &edx); +- if ((ecx != EVMM_RUNNING_SIGNATURE_MON) || +- (edx != EVMM_RUNNING_SIGNATURE_CORP)) { ++ if ((ecx != EVMM_SIGNATURE_VMM) || ++ (edx != EVMM_SIGNATURE_CORP)) { + return -EINVAL; + } + ++ if(vmm_signature) { ++ *vmm_signature = ecx; ++ } ++ + return 0; + } + +-- +2.17.1 + diff --git a/patches/0036-drm-mcde-Fix-an-error-handling-path-in-mcde_probe.drm b/patches/0036-drm-mcde-Fix-an-error-handling-path-in-mcde_probe.drm new file mode 100644 index 0000000000..cc2d5672be --- /dev/null +++ b/patches/0036-drm-mcde-Fix-an-error-handling-path-in-mcde_probe.drm @@ -0,0 +1,34 @@ +From c6eba44d1c589d9e96f9e7e7f64bdb81eae73c43 Mon Sep 17 00:00:00 2001 +From: Christophe JAILLET +Date: Thu, 22 Aug 2019 23:15:18 +0200 +Subject: [PATCH 036/690] drm/mcde: Fix an error handling path in + 'mcde_probe()' + +If we don't find any 
matching components, we should go through the error +handling path, in order to free some resources. + +Fixes: ca5be902a87d ("drm/mcde: Fix uninitialized variable") +Signed-off-by: Christophe JAILLET +Signed-off-by: Linus Walleij +Link: https://patchwork.freedesktop.org/patch/msgid/20190822211518.5578-1-christophe.jaillet@wanadoo.fr +--- + drivers/gpu/drm/mcde/mcde_drv.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c +index 9a09eba53182..5649887d2b90 100644 +--- a/drivers/gpu/drm/mcde/mcde_drv.c ++++ b/drivers/gpu/drm/mcde/mcde_drv.c +@@ -484,7 +484,8 @@ static int mcde_probe(struct platform_device *pdev) + } + if (!match) { + dev_err(dev, "no matching components\n"); +- return -ENODEV; ++ ret = -ENODEV; ++ goto clk_disable; + } + if (IS_ERR(match)) { + dev_err(dev, "could not create component match\n"); +-- +2.17.1 + diff --git a/patches/0036-mei-add-trc-detection-register-to-sysfs.security b/patches/0036-mei-add-trc-detection-register-to-sysfs.security new file mode 100644 index 0000000000..96c1d23376 --- /dev/null +++ b/patches/0036-mei-add-trc-detection-register-to-sysfs.security @@ -0,0 +1,259 @@ +From 8662a20b41bd5bd77e2b731b454b07c07d409ac5 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Mon, 24 Jun 2019 13:52:45 +0300 +Subject: [PATCH 36/65] mei: add trc detection register to sysfs + +The glitch detection HW (TRC) save it status information into +TRC status register. +Make it available to user-space via read-only sysfs file. +The TRC register is availab for PCH15 gen and newer, for older +platforms reading the sysfs file will fail with EOPNOTSUPP. 
+ +Change-Id: I5a655fb0f321f43d8e9037c512b5d47e99057fa3 +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + Documentation/ABI/testing/sysfs-class-mei | 10 +++++++ + drivers/misc/mei/hw-me-regs.h | 3 ++- + drivers/misc/mei/hw-me.c | 33 +++++++++++++++++++++++ + drivers/misc/mei/hw-me.h | 4 +++ + drivers/misc/mei/main.c | 24 +++++++++++++++++ + drivers/misc/mei/mei_dev.h | 10 +++++++ + drivers/misc/mei/pci-me.c | 4 +-- + 7 files changed, 85 insertions(+), 3 deletions(-) + +diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei +index a92d844f806e..2b60a0fca62e 100644 +--- a/Documentation/ABI/testing/sysfs-class-mei ++++ b/Documentation/ABI/testing/sysfs-class-mei +@@ -80,3 +80,13 @@ Description: Display the ME device state. + DISABLED + POWER_DOWN + POWER_UP ++ ++What: /sys/class/mei/meiN/trc ++Date: Jul 2019 ++KernelVersion: 5.3 ++Contact: Tomas Winkler ++Description: Display trc status register content ++ ++ The ME FW writes Glitch Detection HW (TRC) ++ status information into trc status register ++ for BIOS and OS to monitor fw health. 
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index c09f8bb49495..d7934d1b2b6f 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -162,7 +162,8 @@ access to ME_CBD */ + #define ME_IS_HRA 0x00000002 + /* ME Interrupt Enable HRA - host read only access to ME_IE */ + #define ME_IE_HRA 0x00000001 +- ++/* TRC control shadow register */ ++#define ME_TRC 0x00000030 + + /* H_HPG_CSR register bits */ + #define H_HPG_CSR_PGIHEXR 0x00000001 +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c +index 640d3528124a..cc29e695df33 100644 +--- a/drivers/misc/mei/hw-me.c ++++ b/drivers/misc/mei/hw-me.c +@@ -172,6 +172,27 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg) + mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg); + } + ++/** ++ * mei_me_trc_status - read trc status register ++ * ++ * @dev: mei device ++ * @trc: trc status register value ++ * ++ * Return: 0 on success, error otherwise ++ */ ++static int mei_me_trc_status(struct mei_device *dev, u32 *trc) ++{ ++ struct mei_me_hw *hw = to_me_hw(dev); ++ ++ if (!hw->cfg->hw_trc_supported) ++ return -EOPNOTSUPP; ++ ++ *trc = mei_me_reg_read(hw, ME_TRC); ++ trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc); ++ ++ return 0; ++} ++ + /** + * mei_me_fw_status - read fw status register from pci config space + * +@@ -1294,6 +1315,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) + + static const struct mei_hw_ops mei_me_hw_ops = { + ++ .trc_status = mei_me_trc_status, + .fw_status = mei_me_fw_status, + .pg_state = mei_me_pg_state, + +@@ -1384,6 +1406,9 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) + .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \ + .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE + ++#define MEI_CFG_TRC \ ++ .hw_trc_supported = 1 ++ + /* ICH Legacy devices */ + static const struct mei_cfg mei_me_ich_cfg = { + MEI_CFG_ICH_HFS, +@@ -1432,6 +1457,13 @@ static const struct mei_cfg mei_me_pch12_cfg = { + 
MEI_CFG_DMA_128, + }; + ++/* Tiger Lake and newer devices */ ++static const struct mei_cfg mei_me_pch15_cfg = { ++ MEI_CFG_PCH8_HFS, ++ MEI_CFG_DMA_128, ++ MEI_CFG_TRC, ++}; ++ + /* + * mei_cfg_list - A list of platform platform specific configurations. + * Note: has to be synchronized with enum mei_cfg_idx. +@@ -1446,6 +1478,7 @@ static const struct mei_cfg *const mei_cfg_list[] = { + [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, + [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, + [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, ++ [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg, + }; + + const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx) +diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h +index 2d30a26bdf02..fabc23de7565 100644 +--- a/drivers/misc/mei/hw-me.h ++++ b/drivers/misc/mei/hw-me.h +@@ -21,12 +21,14 @@ + * @quirk_probe: device exclusion quirk + * @dma_size: device DMA buffers size + * @fw_ver_supported: is fw version retrievable from FW ++ * @hw_trc_supported: does the hw support trc register + */ + struct mei_cfg { + const struct mei_fw_status fw_status; + bool (*quirk_probe)(struct pci_dev *pdev); + size_t dma_size[DMA_DSCR_NUM]; + u32 fw_ver_supported:1; ++ u32 hw_trc_supported:1; + }; + + +@@ -74,6 +76,7 @@ struct mei_me_hw { + * servers platforms with quirk for + * SPS firmware exclusion. + * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer ++ * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer + * @MEI_ME_NUM_CFG: Upper Sentinel. 
+ */ + enum mei_cfg_idx { +@@ -86,6 +89,7 @@ enum mei_cfg_idx { + MEI_ME_PCH8_CFG, + MEI_ME_PCH8_SPS_CFG, + MEI_ME_PCH12_CFG, ++ MEI_ME_PCH15_CFG, + MEI_ME_NUM_CFG, + }; + +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index 7310b476323c..4ef6e37caafc 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -700,6 +700,29 @@ static int mei_fasync(int fd, struct file *file, int band) + return fasync_helper(fd, file, band, &cl->ev_async); + } + ++/** ++ * trc_show - mei device trc attribute show method ++ * ++ * @device: device pointer ++ * @attr: attribute pointer ++ * @buf: char out buffer ++ * ++ * Return: number of the bytes printed into buf or error ++ */ ++static ssize_t trc_show(struct device *device, ++ struct device_attribute *attr, char *buf) ++{ ++ struct mei_device *dev = dev_get_drvdata(device); ++ u32 trc; ++ int ret; ++ ++ ret = mei_trc_status(dev, &trc); ++ if (ret) ++ return ret; ++ return sprintf(buf, "%08X\n", trc); ++} ++static DEVICE_ATTR_RO(trc); ++ + /** + * fw_status_show - mei device fw_status attribute show method + * +@@ -887,6 +910,7 @@ static struct attribute *mei_attrs[] = { + &dev_attr_tx_queue_limit.attr, + &dev_attr_fw_ver.attr, + &dev_attr_dev_state.attr, ++ &dev_attr_trc.attr, + NULL + }; + ATTRIBUTE_GROUPS(mei); +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h +index 0f2141178299..3ec625e76c28 100644 +--- a/drivers/misc/mei/mei_dev.h ++++ b/drivers/misc/mei/mei_dev.h +@@ -260,6 +260,7 @@ struct mei_cl { + * @hw_config : configure hw + * + * @fw_status : get fw status registers ++ * @trc_status : get trc status register + * @pg_state : power gating state of the device + * @pg_in_transition : is device now in pg transition + * @pg_is_enabled : is power gating enabled +@@ -290,6 +291,8 @@ struct mei_hw_ops { + void (*hw_config)(struct mei_device *dev); + + int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts); ++ int (*trc_status)(struct mei_device *dev, u32 
*trc); ++ + enum mei_pg_state (*pg_state)(struct mei_device *dev); + bool (*pg_in_transition)(struct mei_device *dev); + bool (*pg_is_enabled)(struct mei_device *dev); +@@ -711,6 +714,13 @@ static inline int mei_count_full_read_slots(struct mei_device *dev) + return dev->ops->rdbuf_full_slots(dev); + } + ++static inline int mei_trc_status(struct mei_device *dev, u32 *trc) ++{ ++ if (dev->ops->trc_status) ++ return dev->ops->trc_status(dev, trc); ++ return -EOPNOTSUPP; ++} ++ + static inline int mei_fw_status(struct mei_device *dev, + struct mei_fw_status *fw_status) + { +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 81a85c38d5a9..401848838fbd 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -101,9 +101,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + +- {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)}, + +- {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, + + /* required last entry */ +-- +2.17.1 + diff --git a/patches/0036-serial-8250_dw-support-serial-console-wakeup.lpss b/patches/0036-serial-8250_dw-support-serial-console-wakeup.lpss new file mode 100644 index 0000000000..4c2de85889 --- /dev/null +++ b/patches/0036-serial-8250_dw-support-serial-console-wakeup.lpss @@ -0,0 +1,131 @@ +From b040538d6cb1e28bbf323c7b10efd60a48c2496e Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Thu, 17 Nov 2016 12:36:27 +0200 +Subject: [PATCH 36/40] serial: 8250_dw: support serial console wakeup + +Set up RxD or CTS pin as wake source and switch the driver to use it. + +TODO: Check PMC HAS what it does provide to have OOB wakeup for LPSS_UART. 
+ +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_dw.c | 53 +++++++++++++++++++++++++++++++ + 1 file changed, 53 insertions(+) + +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index ea835dd6b506..caf04cd25c56 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -11,6 +11,7 @@ + */ + #include + #include ++#include + #include + #include + #include +@@ -18,7 +19,9 @@ + #include + #include + #include ++#include + #include ++#include + #include + #include + #include +@@ -370,6 +373,41 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) + } + } + ++static int dw8250_init_wakeup(struct device *dev) ++{ ++ struct gpio_desc *wake; ++ int irq, err; ++ ++ /* Set up RxD or CTS pin as wake source */ ++ wake = gpiod_get(dev, "rx", GPIOD_IN); ++ if (IS_ERR(wake)) ++ wake = gpiod_get(dev, "cts", GPIOD_IN); ++ if (IS_ERR(wake)) ++ return PTR_ERR(wake); ++ ++ irq = gpiod_to_irq(wake); ++ if (irq < 0) { ++ err = irq; ++ } else { ++ device_init_wakeup(dev, true); ++ err = dev_pm_set_dedicated_wake_irq(dev, irq); ++ if (err) { ++ dev_warn(dev, "Can't set dedicated wake IRQ: %d\n", err); ++ device_init_wakeup(dev, false); ++ } else { ++ irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); ++ } ++ } ++ gpiod_put(wake); ++ return err; ++} ++ ++static void dw8250_clear_wakeup(struct device *dev) ++{ ++ dev_pm_clear_wake_irq(dev); ++ device_init_wakeup(dev, false); ++} ++ + static int dw8250_probe(struct platform_device *pdev) + { + struct uart_8250_port uart = {}, *up = &uart; +@@ -522,6 +560,10 @@ static int dw8250_probe(struct platform_device *pdev) + goto err_reset; + } + ++ err = dw8250_init_wakeup(dev); ++ if (err) ++ dev_dbg(dev, "Can't init wakeup: %d\n", err); ++ + platform_set_drvdata(pdev, data); + + pm_runtime_use_autosuspend(dev); +@@ -551,6 +593,8 @@ static int dw8250_remove(struct platform_device *pdev) + struct dw8250_data *data = platform_get_drvdata(pdev); + 
struct device *dev = &pdev->dev; + ++ dw8250_clear_wakeup(dev); ++ + pm_runtime_get_sync(dev); + + serial8250_unregister_port(data->data.line); +@@ -594,6 +638,8 @@ static int dw8250_runtime_suspend(struct device *dev) + { + struct dw8250_data *data = dev_get_drvdata(dev); + ++ pinctrl_pm_select_sleep_state(dev); ++ + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + +@@ -606,6 +652,7 @@ static int dw8250_runtime_suspend(struct device *dev) + static int dw8250_runtime_resume(struct device *dev) + { + struct dw8250_data *data = dev_get_drvdata(dev); ++ struct uart_8250_port *up = serial8250_get_port(data->line); + + if (!IS_ERR(data->pclk)) + clk_prepare_enable(data->pclk); +@@ -613,6 +660,12 @@ static int dw8250_runtime_resume(struct device *dev) + if (!IS_ERR(data->clk)) + clk_prepare_enable(data->clk); + ++ pinctrl_pm_select_default_state(dev); ++ ++ /* Restore context */ ++ serial8250_do_restore_context(&up->port); ++ ++ /* TODO: Check if it needs more than it's done in serial8250_console_restore() */ + return 0; + } + #endif +-- +2.17.1 + diff --git a/patches/0036-taprio-Allow-users-not-to-specify-flags-when-.connectivity b/patches/0036-taprio-Allow-users-not-to-specify-flags-when-.connectivity new file mode 100644 index 0000000000..6ecb28eef5 --- /dev/null +++ b/patches/0036-taprio-Allow-users-not-to-specify-flags-when-.connectivity @@ -0,0 +1,34 @@ +From 30b34554eb6484b30808fcfb0b16e8fd57ba2669 Mon Sep 17 00:00:00 2001 +From: Vinicius Costa Gomes +Date: Thu, 1 Aug 2019 15:14:28 -0700 +Subject: [PATCH 036/108] taprio: Allow users not to specify "flags" when + changing schedules + +When "txtime-assisted" mode is enabled, users had to specify the +"flags" parameter when adding a new "admin" schedule. + +This fix allows that parameter to be omitted when adding a new +schedule. 
+ +Fixes: 4cfd577 ("taprio: Add support for txtime-assist mode") +Signed-off-by: Vinicius Costa Gomes +--- + net/sched/sch_taprio.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c +index 6719a65169d4..65563e6acdae 100644 +--- a/net/sched/sch_taprio.c ++++ b/net/sched/sch_taprio.c +@@ -1479,7 +1479,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, + goto unlock; + } + +- if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) { ++ if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { + setup_txtime(q, new_admin, start); + + if (!oper) { +-- +2.17.1 + diff --git a/patches/0037-ASoC-Intel-Skylake-Remove-skl_dsp_ops.audio b/patches/0037-ASoC-Intel-Skylake-Remove-skl_dsp_ops.audio new file mode 100644 index 0000000000..b34b9f0098 --- /dev/null +++ b/patches/0037-ASoC-Intel-Skylake-Remove-skl_dsp_ops.audio @@ -0,0 +1,129 @@ +From 391094e5c92204d544f1080269a8686611b8eeef Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 21:33:48 +0200 +Subject: [PATCH 037/193] ASoC: Intel: Skylake: Remove skl_dsp_ops + +All declaration simplication and addition of FIRMWARE_CONFIG and +HARDWARE_CONFIG led to ultimate goal of removal of struct skl_dsp_ops. +Strike it down, struct sst_pdata is more than enough is providing +description for SST platforms. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-messages.c | 53 -------------------------- + sound/soc/intel/skylake/skl.h | 8 ---- + 2 files changed, 61 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index 4e4d9ded9354..4680352d473a 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -140,72 +140,19 @@ int skl_dsp_cleanup(struct device *dev, + return 0; + } + +-static const struct skl_dsp_ops dsp_ops[] = { +- { +- .id = 0x9d70, +- .init = skl_sst_dsp_init, +- }, +- { +- .id = 0x9d71, +- .init = skl_sst_dsp_init, +- }, +- { +- .id = 0x5a98, +- .init = bxt_sst_dsp_init, +- }, +- { +- .id = 0x3198, +- .init = bxt_sst_dsp_init, +- }, +- { +- .id = 0x9dc8, +- .init = cnl_sst_dsp_init, +- }, +- { +- .id = 0xa348, +- .init = cnl_sst_dsp_init, +- }, +- { +- .id = 0x02c8, +- .init = cnl_sst_dsp_init, +- }, +- { +- .id = 0x06c8, +- .init = cnl_sst_dsp_init, +- }, +-}; +- +-const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id) +-{ +- int i; +- +- for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) { +- if (dsp_ops[i].id == pci_id) +- return &dsp_ops[i]; +- } +- +- return NULL; +-} +- + int skl_init_dsp(struct skl_dev *skl, struct sst_pdata *pdata) + { + struct hdac_bus *bus = skl_to_bus(skl); +- const struct skl_dsp_ops *ops; + int ret; + + /* enable ppcap interrupt */ + snd_hdac_ext_bus_ppcap_enable(bus, true); + snd_hdac_ext_bus_ppcap_int_enable(bus, true); + +- ops = skl_get_dsp_ops(skl->pci->device); +- if (!ops) +- return -EIO; +- + ret = skl_sst_ctx_init(skl, pdata); + if (ret < 0) + return ret; + +- skl->dsp_ops = ops; + dev_dbg(bus->dev, "dsp registration status=%d\n", ret); + + return 0; +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index fe9e3f2fad76..19e0dbb8f9d1 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -134,8 +134,6 @@ struct skl_dev { + + struct skl_d0i3_data d0i3; 
+ +- const struct skl_dsp_ops *dsp_ops; +- + /* Callback to update dynamic clock and power gating registers */ + void (*clock_power_gating)(struct device *dev, bool enable); + }; +@@ -156,11 +154,6 @@ struct skl_machine_pdata { + bool use_tplg_pcm; /* use dais and dai links from topology */ + }; + +-struct skl_dsp_ops { +- int id; +- int (*init)(struct sst_dsp *dsp, struct sst_pdata *pdata); +-}; +- + int skl_platform_unregister(struct device *dev); + int skl_platform_register(struct device *dev); + +@@ -177,7 +170,6 @@ int skl_suspend_late_dsp(struct skl_dev *skl); + int skl_suspend_dsp(struct skl_dev *skl); + int skl_resume_dsp(struct skl_dev *skl); + void skl_cleanup_resources(struct skl_dev *skl); +-const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id); + void skl_update_d0i3c(struct device *dev, bool enable); + int skl_nhlt_create_sysfs(struct skl_dev *skl); + void skl_nhlt_remove_sysfs(struct skl_dev *skl); +-- +2.17.1 + diff --git a/patches/0037-Revert-BXT-DYNAMIC-TIMER-Enable-dynamic-timer.trusty b/patches/0037-Revert-BXT-DYNAMIC-TIMER-Enable-dynamic-timer.trusty new file mode 100644 index 0000000000..1334b50d25 --- /dev/null +++ b/patches/0037-Revert-BXT-DYNAMIC-TIMER-Enable-dynamic-timer.trusty @@ -0,0 +1,199 @@ +From 85b0439798ebec6847742cc6dc10f506d33ad5a9 Mon Sep 17 00:00:00 2001 +From: "Zhong,Fangjian" +Date: Tue, 11 Jul 2017 04:15:01 +0000 +Subject: [PATCH 37/63] Revert "[BXT][DYNAMIC TIMER] Enable dynamic timer" + +This reverts commit 56dae7b0c686eeaf2ff604497ab940328124f611. 
+ +Change-Id: I3603787890e6de43acc5f895034237e7b0c5f954 +Signed-off-by: Zhong,Fangjian +--- + drivers/trusty/trusty.c | 88 +++-------------------------------- + include/linux/trusty/smcall.h | 17 ------- + 2 files changed, 7 insertions(+), 98 deletions(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 8e7e715d7018..2fc1b232fee3 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -25,10 +25,9 @@ + #include + #include + +-#define TRUSTY_VMCALL_SMC 0x74727500 ++#define TRUSTY_VMCALL_SMC 0x74727500 + #define TRUSTY_LKTIMER_INTERVAL 10 /* 10 ms */ + #define TRUSTY_LKTIMER_VECTOR 0x31 /* INT_PIT */ +-#define TRUSTY_STOP_TIMER 0xFFFFFFFF + + enum lktimer_mode { + ONESHOT_TIMER, +@@ -53,12 +52,6 @@ struct trusty_smc_interface { + ulong args[5]; + }; + +-static struct timer_list *lk_timer; +- +-static ulong (*smc_func)(ulong r0, ulong r1, ulong r2, ulong r3); +-static ulong smc_dynamic_timer(ulong r0, ulong r1, ulong r2, ulong r3); +-static ulong smc_periodic_timer(ulong r0, ulong r1, ulong r2, ulong r3); +- + static void trusty_lktimer_work_func(struct work_struct *work) + { + int ret; +@@ -79,7 +72,7 @@ static void trusty_lktimer_work_func(struct work_struct *work) + if (ret != SM_ERR_NOP_DONE) + dev_err(s->dev, "%s: SMC_SC_NOP failed %d", __func__, ret); + +- dev_notice_once(s->dev, "LK OS timer works\n"); ++ dev_notice_once(s->dev, "LK OS proxy timer works\n"); + } + + static void trusty_lktimer_func(unsigned long data) +@@ -99,7 +92,6 @@ static void trusty_init_lktimer(struct trusty_state *s) + { + INIT_WORK(&s->timer_work, trusty_lktimer_work_func); + setup_timer(&s->timer, trusty_lktimer_func, (unsigned long)s); +- lk_timer = &s->timer; + } + + /* note that this function is not thread-safe */ +@@ -116,39 +108,6 @@ static void trusty_configure_lktimer(struct trusty_state *s, + mod_timer(&s->timer, jiffies + msecs_to_jiffies(s->timer_interval)); + } + +-static void trusty_init_smc_function(void) +-{ +- smc_func = 
smc_periodic_timer; +-} +- +-static void trusty_set_timer_mode(struct trusty_state *s, struct device *dev) +-{ +- int ret; +- +- ret = trusty_fast_call32(dev, SMC_FC_TIMER_MODE, 0, 0, 0); +- +- if (ret == 0) { +- smc_func = smc_dynamic_timer; +- } else { +- smc_func = smc_periodic_timer; +- /* +- * If bit 31 set indicates periodic timer is used +- * bit 15:0 indicates interval +- */ +- if ((ret & 0x80000000) && (ret & 0x0FFFF)) { +- trusty_configure_lktimer(s, +- PERIODICAL_TIMER, +- ret & 0x0FFFF); +- } else { +- /* set periodical timer with default interval */ +- trusty_configure_lktimer(s, +- PERIODICAL_TIMER, +- TRUSTY_LKTIMER_INTERVAL); +- } +- } +- +-} +- + /* + * this should be called when removing trusty dev and + * when LK/Trusty crashes, to disable proxy timer. +@@ -160,45 +119,12 @@ static void trusty_del_lktimer(struct trusty_state *s) + } + + static inline ulong smc(ulong r0, ulong r1, ulong r2, ulong r3) +-{ +- return smc_func(r0, r1, r2, r3); +-} +- +-static ulong smc_dynamic_timer(ulong r0, ulong r1, ulong r2, ulong r3) + { + __asm__ __volatile__( + "vmcall; \n" +- : "=D"(r0), "=S"(r1), "=d"(r2), "=b"(r3) ++ : "=D"(r0) + : "a"(TRUSTY_VMCALL_SMC), "D"(r0), "S"(r1), "d"(r2), "b"(r3) + ); +- +- if (((r0 == SM_ERR_NOP_INTERRUPTED) || +- (r0 == SM_ERR_INTERRUPTED)) && +- (r1 != 0)) { +- struct trusty_state *s; +- +- if (lk_timer != NULL) { +- s = container_of(lk_timer, struct trusty_state, timer); +- if (r1 != TRUSTY_STOP_TIMER) +- trusty_configure_lktimer(s, ONESHOT_TIMER, r1); +- else +- trusty_configure_lktimer(s, ONESHOT_TIMER, 0); +- } else { +- pr_err("Trusty timer has not been initialized yet!\n"); +- } +- } +- +- return r0; +-} +- +-static inline ulong smc_periodic_timer(ulong r0, ulong r1, ulong r2, ulong r3) +-{ +- __asm__ __volatile__( +- "vmcall; \n" +- : "=D"(r0), "=S"(r1), "=d"(r2), "=b"(r3) +- : "a"(TRUSTY_VMCALL_SMC), "D"(r0), "S"(r1), "d"(r2), "b"(r3) +- ); +- + return r0; + } + +@@ -546,16 +472,16 @@ static int trusty_probe(struct 
platform_device *pdev) + platform_set_drvdata(pdev, s); + s->dev = &pdev->dev; + +- trusty_init_smc_function(); +- trusty_init_lktimer(s); +- trusty_set_timer_mode(s, &pdev->dev); +- + trusty_init_version(s, &pdev->dev); + + ret = trusty_init_api_version(s, &pdev->dev); + if (ret < 0) + goto err_api_version; + ++ trusty_init_lktimer(s); ++ trusty_configure_lktimer(s, ++ PERIODICAL_TIMER, TRUSTY_LKTIMER_INTERVAL); ++ + return 0; + + err_api_version: +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index 974b7b3e753d..1160890a3d90 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -123,23 +123,6 @@ + #define TRUSTY_API_VERSION_CURRENT (2) + #define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11) + +-/** +- * SMC_FC_TIMER_MODE - Find and set timer mode +- * Returns timer mode from trusty. +- * +- * Return value stands for: +- * Bit 31 : +- * If this bit is set, trusty uses periodic timer, Android trusty driver +- * injects timer interrupt to trusty with specified interval. +- * If this bit is clear, trusty uses dynamic timer, Android trusty +- * driver injects timer interrupt to trusty on demand. +- * Bit 15:0 : +- * If bit 31 is set, Android trusty driver injects timer interrupt to +- * trusty with interval specified by this field in milliseconds. +- * If bit 31 is clear, this field is ignored. 
+- */ +-#define SMC_FC_TIMER_MODE SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 12) +- + /* TRUSTED_OS entity calls */ + #define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20) + #define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21) +-- +2.17.1 + diff --git a/patches/0037-Revert-serial-8250_dw-support-serial-console-wakeup.lpss b/patches/0037-Revert-serial-8250_dw-support-serial-console-wakeup.lpss new file mode 100644 index 0000000000..5703c89651 --- /dev/null +++ b/patches/0037-Revert-serial-8250_dw-support-serial-console-wakeup.lpss @@ -0,0 +1,129 @@ +From 4d949bf525d37fbd91167f34b8c99c71e3b9ffff Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 29 Jul 2019 17:08:22 +0300 +Subject: [PATCH 37/40] Revert "serial: 8250_dw: support serial console wakeup" + +This reverts commit 210dec91eb8f1b463edee14f2180525bcc623b1c. + +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_dw.c | 53 ------------------------------- + 1 file changed, 53 deletions(-) + +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index caf04cd25c56..ea835dd6b506 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -11,7 +11,6 @@ + */ + #include + #include +-#include + #include + #include + #include +@@ -19,9 +18,7 @@ + #include + #include + #include +-#include + #include +-#include + #include + #include + #include +@@ -373,41 +370,6 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) + } + } + +-static int dw8250_init_wakeup(struct device *dev) +-{ +- struct gpio_desc *wake; +- int irq, err; +- +- /* Set up RxD or CTS pin as wake source */ +- wake = gpiod_get(dev, "rx", GPIOD_IN); +- if (IS_ERR(wake)) +- wake = gpiod_get(dev, "cts", GPIOD_IN); +- if (IS_ERR(wake)) +- return PTR_ERR(wake); +- +- irq = gpiod_to_irq(wake); +- if (irq < 0) { +- err = irq; +- } else { +- device_init_wakeup(dev, true); +- err = dev_pm_set_dedicated_wake_irq(dev, irq); 
+- if (err) { +- dev_warn(dev, "Can't set dedicated wake IRQ: %d\n", err); +- device_init_wakeup(dev, false); +- } else { +- irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); +- } +- } +- gpiod_put(wake); +- return err; +-} +- +-static void dw8250_clear_wakeup(struct device *dev) +-{ +- dev_pm_clear_wake_irq(dev); +- device_init_wakeup(dev, false); +-} +- + static int dw8250_probe(struct platform_device *pdev) + { + struct uart_8250_port uart = {}, *up = &uart; +@@ -560,10 +522,6 @@ static int dw8250_probe(struct platform_device *pdev) + goto err_reset; + } + +- err = dw8250_init_wakeup(dev); +- if (err) +- dev_dbg(dev, "Can't init wakeup: %d\n", err); +- + platform_set_drvdata(pdev, data); + + pm_runtime_use_autosuspend(dev); +@@ -593,8 +551,6 @@ static int dw8250_remove(struct platform_device *pdev) + struct dw8250_data *data = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + +- dw8250_clear_wakeup(dev); +- + pm_runtime_get_sync(dev); + + serial8250_unregister_port(data->data.line); +@@ -638,8 +594,6 @@ static int dw8250_runtime_suspend(struct device *dev) + { + struct dw8250_data *data = dev_get_drvdata(dev); + +- pinctrl_pm_select_sleep_state(dev); +- + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + +@@ -652,7 +606,6 @@ static int dw8250_runtime_suspend(struct device *dev) + static int dw8250_runtime_resume(struct device *dev) + { + struct dw8250_data *data = dev_get_drvdata(dev); +- struct uart_8250_port *up = serial8250_get_port(data->line); + + if (!IS_ERR(data->pclk)) + clk_prepare_enable(data->pclk); +@@ -660,12 +613,6 @@ static int dw8250_runtime_resume(struct device *dev) + if (!IS_ERR(data->clk)) + clk_prepare_enable(data->clk); + +- pinctrl_pm_select_default_state(dev); +- +- /* Restore context */ +- serial8250_do_restore_context(&up->port); +- +- /* TODO: Check if it needs more than it's done in serial8250_console_restore() */ + return 0; + } + #endif +-- +2.17.1 + diff --git 
a/patches/0037-drm-virtio-make-resource-id-workaround-runtime-switcha.drm b/patches/0037-drm-virtio-make-resource-id-workaround-runtime-switcha.drm new file mode 100644 index 0000000000..e2fd3e60d3 --- /dev/null +++ b/patches/0037-drm-virtio-make-resource-id-workaround-runtime-switcha.drm @@ -0,0 +1,81 @@ +From 6d587fa08a6494a8ff3514bc2691f08588fc52e8 Mon Sep 17 00:00:00 2001 +From: Gerd Hoffmann +Date: Thu, 22 Aug 2019 12:26:14 +0200 +Subject: [PATCH 037/690] drm/virtio: make resource id workaround runtime + switchable. + +Also update the comment with a reference to the virglrenderer fix. + +Signed-off-by: Gerd Hoffmann +Reviewed-by: Chia-I Wu +Link: http://patchwork.freedesktop.org/patch/msgid/20190822102614.18164-1-kraxel@redhat.com +--- + drivers/gpu/drm/virtio/virtgpu_object.c | 44 ++++++++++++++----------- + 1 file changed, 24 insertions(+), 20 deletions(-) + +diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c +index 09b526518f5a..aab5534056ec 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_object.c ++++ b/drivers/gpu/drm/virtio/virtgpu_object.c +@@ -27,34 +27,38 @@ + + #include "virtgpu_drv.h" + ++static int virtio_gpu_virglrenderer_workaround = 1; ++module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400); ++ + static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, + uint32_t *resid) + { +-#if 0 +- int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); +- +- if (handle < 0) +- return handle; +-#else +- static int handle; +- +- /* +- * FIXME: dirty hack to avoid re-using IDs, virglrenderer +- * can't deal with that. Needs fixing in virglrenderer, also +- * should figure a better way to handle that in the guest. +- */ +- handle++; +-#endif +- +- *resid = handle + 1; ++ if (virtio_gpu_virglrenderer_workaround) { ++ /* ++ * Hack to avoid re-using resource IDs. ++ * ++ * virglrenderer versions up to (and including) 0.7.0 ++ * can't deal with that. 
virglrenderer commit ++ * "f91a9dd35715 Fix unlinking resources from hash ++ * table." (Feb 2019) fixes the bug. ++ */ ++ static int handle; ++ handle++; ++ *resid = handle + 1; ++ } else { ++ int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); ++ if (handle < 0) ++ return handle; ++ *resid = handle + 1; ++ } + return 0; + } + + static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) + { +-#if 0 +- ida_free(&vgdev->resource_ida, id - 1); +-#endif ++ if (!virtio_gpu_virglrenderer_workaround) { ++ ida_free(&vgdev->resource_ida, id - 1); ++ } + } + + static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) +-- +2.17.1 + diff --git a/patches/0037-mei-bus-prefix-device-names-on-bus-with-parent-na.security b/patches/0037-mei-bus-prefix-device-names-on-bus-with-parent-na.security new file mode 100644 index 0000000000..54db5f6028 --- /dev/null +++ b/patches/0037-mei-bus-prefix-device-names-on-bus-with-parent-na.security @@ -0,0 +1,33 @@ +From 005faf4ff20c3a4df9098666bc81af4a282ec8b6 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Wed, 2 Oct 2019 15:16:21 +0300 +Subject: [PATCH 37/65] mei: bus: prefix device names on bus with parent name + +Add parent device name to name of devices on bus to avoid +device names collisions for same client UUID available +from different MEI heads. 
+ +Change-Id: Id337816382febdf33e3b9c3c66c93d6d157d8c01 +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/bus.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index 985bd4fd3328..d5ba41177392 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -878,7 +878,8 @@ static const struct device_type mei_cl_device_type = { + */ + static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev) + { +- dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X", ++ dev_set_name(&cldev->dev, "%s-mei:%s:%pUl:%02X", ++ dev_name(cldev->bus->dev), + cldev->name, + mei_me_cl_uuid(cldev->me_cl), + mei_me_cl_ver(cldev->me_cl)); +-- +2.17.1 + diff --git a/patches/0037-net-stmmac-enable-HW-offloading-for-tc-taprio.connectivity b/patches/0037-net-stmmac-enable-HW-offloading-for-tc-taprio.connectivity new file mode 100644 index 0000000000..a91a45533a --- /dev/null +++ b/patches/0037-net-stmmac-enable-HW-offloading-for-tc-taprio.connectivity @@ -0,0 +1,212 @@ +From ac117ed7dce7462df511a4b3ea3e1318e6eeac65 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Thu, 6 Jun 2019 23:03:42 +0800 +Subject: [PATCH 037/108] net: stmmac: enable HW offloading for tc taprio + +This patch enables iproute2's tc taprio to run IEEE 802.1Qbv +using HW offload. tc taprio manual can refer to: +http://man7.org/linux/man-pages/man8/tc-taprio.8.html + +To enable HW offloading, we set 'flags 0x2'. 
+ +An example configuration is: +$ tc qdisc add dev IFACE parent root handle 100 taprio \ + num_tc 3 \ + map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \ + queues 1@0 1@1 2@2 \ + base-time 10000000 \ + cycle-time 1000000 \ + cycle-time-extension 200000 \ + sched-entry S 03 300000 \ + sched-entry S 02 300000 \ + sched-entry S 04 400000 \ + flags 0x2 \ + clockid CLOCK_TAI + +For DWMAC IPv5.1, the limit of hardware capabilities are as +follow and it is subject to IP configuration:- +a) up-to 512 sched-entries +b) up-to 8 Tx Queues +c) up to 16ms (24-bit interval width) + +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 5 + + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 2 + + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 1 + + .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 100 ++++++++++++++++++ + 4 files changed, 108 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index e9e5d0dd1556..2f55efbbdc6c 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -615,6 +615,7 @@ struct stmmac_priv; + struct tc_cls_u32_offload; + struct tc_cbs_qopt_offload; + struct flow_cls_offload; ++struct tc_taprio_qopt_offload; + + struct stmmac_tc_ops { + int (*init)(struct stmmac_priv *priv); +@@ -624,6 +625,8 @@ struct stmmac_tc_ops { + struct tc_cbs_qopt_offload *qopt); + int (*setup_cls)(struct stmmac_priv *priv, + struct flow_cls_offload *cls); ++ int (*setup_taprio)(struct stmmac_priv *priv, ++ struct tc_taprio_qopt_offload *qopt); + }; + + #define stmmac_tc_init(__priv, __args...) \ +@@ -634,6 +637,8 @@ struct stmmac_tc_ops { + stmmac_do_callback(__priv, tc, setup_cbs, __args) + #define stmmac_tc_setup_cls(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cls, __args) ++#define stmmac_tc_setup_taprio(__priv, __args...) 
\ ++ stmmac_do_callback(__priv, tc, setup_taprio, __args) + + struct stmmac_counters; + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 8ed6013477c6..c451c3e84d6f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -4388,6 +4388,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, + priv, priv, true); + case TC_SETUP_QDISC_CBS: + return stmmac_tc_setup_cbs(priv, priv, type_data); ++ case TC_SETUP_QDISC_TAPRIO: ++ return stmmac_tc_setup_taprio(priv, priv, type_data); + default: + return -EOPNOTSUPP; + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index f6493766c572..e18b87b85544 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -160,6 +160,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + plat->has_gmac4 = 1; + plat->force_sf_dma_mode = 0; + plat->tso_en = 1; ++ plat->tsn_est_en = 1; + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +index f9a9a9d82233..2a04bb9ae919 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +@@ -593,9 +593,109 @@ static int tc_setup_cls(struct stmmac_priv *priv, + return ret; + } + ++static int tc_setup_taprio(struct stmmac_priv *priv, ++ struct tc_taprio_qopt_offload *qopt) ++{ ++ u64 time_extension = qopt->cycle_time_extension; ++ u64 base_time = ktime_to_ns(qopt->base_time); ++ u64 cycle_time = qopt->cycle_time; ++ struct est_gcrr egcrr; ++ u32 extension_ns; ++ u32 extension_s; ++ u32 cycle_ns; ++ u32 cycle_s; ++ u32 base_ns; ++ u32 base_s; ++ int ret; ++ int i; ++ ++ if (qopt->enable) { ++ stmmac_set_est_enable(priv, priv->hw, 
priv->dev, true); ++ dev_info(priv->device, "taprio: EST enabled\n"); ++ } else { ++ stmmac_set_est_enable(priv, priv->hw, priv->dev, false); ++ dev_info(priv->device, "taprio: EST disabled\n"); ++ return 0; ++ } ++ ++ dev_dbg(priv->device, ++ "EST: base_time %llu, cycle_time %llu, cycle_extension %llu\n", ++ qopt->base_time, qopt->cycle_time, ++ qopt->cycle_time_extension); ++ ++ for (i = 0; i < qopt->num_entries; i++) { ++ struct est_gc_entry sgce; ++ ++ sgce.gates = qopt->entries[i].gate_mask; ++ sgce.ti_nsec = qopt->entries[i].interval; ++ ++ /* cycle_time will be sum of all time interval ++ * of the entries in the schedule if the ++ * cycle_time is not provided ++ */ ++ if (!qopt->cycle_time) ++ cycle_time += qopt->entries[i].interval; ++ ++ dev_dbg(priv->device, ++ "EST: gates 0x%x, ti_ns %u, cycle_ns %llu\n", ++ sgce.gates, sgce.ti_nsec, cycle_time); ++ ++ ret = stmmac_set_est_gce(priv, priv->hw, priv->dev, ++ &sgce, i, 0, 0); ++ if (ret) { ++ dev_err(priv->device, ++ "EST: fail to program GC entry(%d).\n", i); ++ ++ return ret; ++ } ++ } ++ ++ ret = stmmac_set_est_gcl_len(priv, priv->hw, priv->dev, ++ qopt->num_entries, ++ 0, 0); ++ if (ret) { ++ dev_err(priv->device, ++ "EST: fail to program GC length into HW\n"); ++ return ret; ++ } ++ ++ /* set est_info */ ++ base_ns = do_div(base_time, NSEC_PER_SEC); ++ base_s = base_time; ++ dev_info(priv->device, "EST: base_s %u, base_ns %u\n", ++ base_s, base_ns); ++ ++ cycle_ns = do_div(cycle_time, NSEC_PER_SEC); ++ cycle_s = cycle_time; ++ dev_info(priv->device, "EST: cycle_s %u, cycle_ns %u\n", ++ cycle_s, cycle_ns); ++ ++ extension_ns = do_div(time_extension, NSEC_PER_SEC); ++ extension_s = time_extension; ++ dev_info(priv->device, ++ "EST: cycle extension_s %u, cycle_extension_ns %u\n", ++ extension_s, extension_ns); ++ ++ if (extension_s) { ++ dev_err(priv->device, ++ "EST: extension in seconds not supported.\n"); ++ return -EINVAL; ++ } ++ ++ egcrr.cycle_sec = cycle_s; ++ egcrr.cycle_nsec = cycle_ns; ++ 
egcrr.base_sec = base_s; ++ egcrr.base_nsec = base_ns; ++ egcrr.ter_nsec = extension_ns; ++ ++ return stmmac_set_est_gcrr_times(priv, priv->hw, priv->dev, ++ &egcrr, 0, 0); ++} ++ + const struct stmmac_tc_ops dwmac510_tc_ops = { + .init = tc_init, + .setup_cls_u32 = tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, + .setup_cls = tc_setup_cls, ++ .setup_taprio = tc_setup_taprio, + }; +-- +2.17.1 + diff --git a/patches/0037-update-MEM_ATTR_WRITE_PROT-with-WB-policy.acrn b/patches/0037-update-MEM_ATTR_WRITE_PROT-with-WB-policy.acrn new file mode 100644 index 0000000000..cecc5ec17c --- /dev/null +++ b/patches/0037-update-MEM_ATTR_WRITE_PROT-with-WB-policy.acrn @@ -0,0 +1,28 @@ +From 11ee049ea57e0680a455af77c62549ed93d5dee4 Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 037/150] update MEM_ATTR_WRITE_PROT with WB policy + +Change-Id: Icfc16c58148f8329528e27346dc2db047b7f37e2 +Tracked-On: +Signed-off-by: Jason Chen CJ +--- + include/linux/vhm/acrn_hv_defs.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 688d69b6f5b0..12dc3c954526 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -122,7 +122,7 @@ + #define MEM_ATTR_WP 0x00000400 + + #define MEM_ATTR_ALL 0x00000007 +-#define MEM_ATTR_WRITE_PROT 0x00000005 ++#define MEM_ATTR_WRITE_PROT 0x00000045 + #define MEM_ATTR_ALL_WB 0x00000047 + #define MEM_ATTR_ALL_WC 0x00000207 + +-- +2.17.1 + diff --git a/patches/0038-ASoC-Intel-Skylake-Privatize-SST-init-handlers.audio b/patches/0038-ASoC-Intel-Skylake-Privatize-SST-init-handlers.audio new file mode 100644 index 0000000000..7693ca5e7f --- /dev/null +++ b/patches/0038-ASoC-Intel-Skylake-Privatize-SST-init-handlers.audio @@ -0,0 +1,173 @@ +From 546be36b1d2369f664c0cedd4cdb97645d176c4a Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Mon, 12 Aug 2019 21:30:46 +0200 +Subject: [PATCH 
038/193] ASoC: Intel: Skylake: Privatize SST init handlers + +With initialization order repaired, sst_ops::init overloads can be +privatized for Skylake platofmrs. Let's do so. + +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/bxt-sst.c | 25 ++++++++++++------------- + sound/soc/intel/skylake/cnl-sst-dsp.h | 2 -- + sound/soc/intel/skylake/cnl-sst.c | 25 ++++++++++++------------- + sound/soc/intel/skylake/skl-sst-dsp.h | 2 -- + sound/soc/intel/skylake/skl-sst.c | 25 ++++++++++++------------- + 5 files changed, 36 insertions(+), 43 deletions(-) + +diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c +index a77e31c6f34e..66c787005d20 100644 +--- a/sound/soc/intel/skylake/bxt-sst.c ++++ b/sound/soc/intel/skylake/bxt-sst.c +@@ -532,18 +532,7 @@ static const struct skl_dsp_fw_ops bxt_fw_ops = { + .load_library = bxt_load_library, + }; + +-struct sst_ops apl_sst_ops = { +- .irq_handler = skl_dsp_sst_interrupt, +- .thread_fn = skl_dsp_irq_thread_handler, +- .write = sst_shim32_write, +- .read = sst_shim32_read, +- .ram_read = sst_memcpy_fromio_32, +- .ram_write = sst_memcpy_toio_32, +- .init = bxt_sst_dsp_init, +- .free = skl_dsp_free, +-}; +- +-int bxt_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata) ++static int bxt_sst_init(struct sst_dsp *sst, struct sst_pdata *pdata) + { + struct skl_dev *skl = sst->thread_context; + void __iomem *mmio; +@@ -577,7 +566,17 @@ int bxt_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata) + + return 0; + } +-EXPORT_SYMBOL_GPL(bxt_sst_dsp_init); ++ ++struct sst_ops apl_sst_ops = { ++ .irq_handler = skl_dsp_sst_interrupt, ++ .thread_fn = skl_dsp_irq_thread_handler, ++ .write = sst_shim32_write, ++ .read = sst_shim32_read, ++ .ram_read = sst_memcpy_fromio_32, ++ .ram_write = sst_memcpy_toio_32, ++ .init = bxt_sst_init, ++ .free = skl_dsp_free, ++}; + + MODULE_LICENSE("GPL v2"); + MODULE_DESCRIPTION("Intel Broxton IPC driver"); +diff --git a/sound/soc/intel/skylake/cnl-sst-dsp.h 
b/sound/soc/intel/skylake/cnl-sst-dsp.h +index 7810ae11954a..a1ea242e9539 100644 +--- a/sound/soc/intel/skylake/cnl-sst-dsp.h ++++ b/sound/soc/intel/skylake/cnl-sst-dsp.h +@@ -87,6 +87,4 @@ void cnl_ipc_op_int_enable(struct sst_dsp *ctx); + void cnl_ipc_op_int_disable(struct sst_dsp *ctx); + bool cnl_ipc_int_status(struct sst_dsp *ctx); + +-int cnl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata); +- + #endif /*__CNL_SST_DSP_H__*/ +diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c +index 28d469cbe09e..840bc09a0309 100644 +--- a/sound/soc/intel/skylake/cnl-sst.c ++++ b/sound/soc/intel/skylake/cnl-sst.c +@@ -408,18 +408,7 @@ static int cnl_ipc_init(struct device *dev, struct skl_dev *cnl) + return 0; + } + +-struct sst_ops cnl_sst_ops = { +- .irq_handler = cnl_dsp_sst_interrupt, +- .thread_fn = cnl_dsp_irq_thread_handler, +- .write = sst_shim32_write, +- .read = sst_shim32_read, +- .ram_read = sst_memcpy_fromio_32, +- .ram_write = sst_memcpy_toio_32, +- .init = cnl_sst_dsp_init, +- .free = cnl_dsp_free, +-}; +- +-int cnl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata) ++static int cnl_sst_init(struct sst_dsp *sst, struct sst_pdata *pdata) + { + struct skl_dev *cnl = sst->thread_context; + void __iomem *mmio; +@@ -448,7 +437,17 @@ int cnl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata) + + return 0; + } +-EXPORT_SYMBOL_GPL(cnl_sst_dsp_init); ++ ++struct sst_ops cnl_sst_ops = { ++ .irq_handler = cnl_dsp_sst_interrupt, ++ .thread_fn = cnl_dsp_irq_thread_handler, ++ .write = sst_shim32_write, ++ .read = sst_shim32_read, ++ .ram_read = sst_memcpy_fromio_32, ++ .ram_write = sst_memcpy_toio_32, ++ .init = cnl_sst_init, ++ .free = cnl_dsp_free, ++}; + + MODULE_LICENSE("GPL v2"); + MODULE_DESCRIPTION("Intel Cannonlake IPC driver"); +diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index e6f25f37c369..4bbf3ba3f788 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ 
b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -219,8 +219,6 @@ int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id); + int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id); + + int skl_dsp_boot(struct sst_dsp *ctx); +-int skl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata); +-int bxt_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata); + int bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, + int lib_count); + +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c +index f84c1191fe0d..4e5487a64d55 100644 +--- a/sound/soc/intel/skylake/skl-sst.c ++++ b/sound/soc/intel/skylake/skl-sst.c +@@ -503,18 +503,7 @@ static const struct skl_dsp_fw_ops skl_fw_ops = { + .unload_mod = skl_unload_module, + }; + +-struct sst_ops skl_sst_ops = { +- .irq_handler = skl_dsp_sst_interrupt, +- .thread_fn = skl_dsp_irq_thread_handler, +- .write = sst_shim32_write, +- .read = sst_shim32_read, +- .ram_read = sst_memcpy_fromio_32, +- .ram_write = sst_memcpy_toio_32, +- .init = skl_sst_dsp_init, +- .free = skl_dsp_free, +-}; +- +-int skl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata) ++static int skl_sst_init(struct sst_dsp *sst, struct sst_pdata *pdata) + { + struct skl_dev *skl = sst->thread_context; + void __iomem *mmio; +@@ -540,7 +529,17 @@ int skl_sst_dsp_init(struct sst_dsp *sst, struct sst_pdata *pdata) + + return 0; + } +-EXPORT_SYMBOL_GPL(skl_sst_dsp_init); ++ ++struct sst_ops skl_sst_ops = { ++ .irq_handler = skl_dsp_sst_interrupt, ++ .thread_fn = skl_dsp_irq_thread_handler, ++ .write = sst_shim32_write, ++ .read = sst_shim32_read, ++ .ram_read = sst_memcpy_fromio_32, ++ .ram_write = sst_memcpy_toio_32, ++ .init = skl_sst_init, ++ .free = skl_dsp_free, ++}; + + int skl_sst_init_fw(struct skl_dev *skl) + { +-- +2.17.1 + diff --git a/patches/0038-HACK-serial-8250_dw-skip-lpss-pci-UART-D3-when-used-w.lpss b/patches/0038-HACK-serial-8250_dw-skip-lpss-pci-UART-D3-when-used-w.lpss new file 
mode 100644 index 0000000000..9a2a9d1c83 --- /dev/null +++ b/patches/0038-HACK-serial-8250_dw-skip-lpss-pci-UART-D3-when-used-w.lpss @@ -0,0 +1,68 @@ +From 1ecd2a6dfe35f9f9ee59efe642f52e2d22ebef01 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Mon, 29 Jul 2019 17:11:39 +0300 +Subject: [PATCH 38/40] HACK: serial: 8250_dw: skip lpss pci UART D3 when used + with no_console_suspend + +For Platforms with lpss pci UARTs, the parent device should +be prevented from going into D3 for the no_console_suspend +flag to work as expected. + +This is tested with Intel SPT platforms with lpss pci UART devices. + +Signed-off-by: Archana Patni +Signed-off-by: Subramony Sesha + +Interim. This solution is not ideal. We should not be +concerned of the parent in this driver, and we really should +not do any PCI operations here. + +Ideally the issue can be fixed in the parent driver +(drivers/mfd/intel-lpss-pci.c). + +Signed-off-by: Heikki Krogerus +Signed-off-by: Andy Shevchenko +--- + drivers/tty/serial/8250/8250_dw.c | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index ea835dd6b506..ad43f3cb2a07 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -21,9 +21,11 @@ + #include + #include + #include ++#include + #include + #include + #include ++#include + + #include + +@@ -574,6 +576,20 @@ static int dw8250_suspend(struct device *dev) + { + struct dw8250_data *data = dev_get_drvdata(dev); + ++ /* ++ * FIXME: For Platforms with LPSS PCI UARTs, the parent device should ++ * be prevented from going into D3 for the no_console_suspend flag to ++ * work as expected. 
++ */ ++ if (platform_get_resource_byname(to_platform_device(dev), ++ IORESOURCE_MEM, "lpss_dev")) { ++ struct uart_8250_port *up = serial8250_get_port(data->data.line); ++ struct pci_dev *pdev = to_pci_dev(dev->parent); ++ ++ if (pdev && !console_suspend_enabled && uart_console(&up->port)) ++ pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; ++ } ++ + serial8250_suspend_port(data->data.line); + + return 0; +-- +2.17.1 + diff --git a/patches/0038-Revert-trusty-implement-trusty-OS-timer-proxy-for-p.trusty b/patches/0038-Revert-trusty-implement-trusty-OS-timer-proxy-for-p.trusty new file mode 100644 index 0000000000..d492644d24 --- /dev/null +++ b/patches/0038-Revert-trusty-implement-trusty-OS-timer-proxy-for-p.trusty @@ -0,0 +1,173 @@ +From ed12875908bd4327752b97d2455665dd385ea373 Mon Sep 17 00:00:00 2001 +From: "Zhong,Fangjian" +Date: Tue, 11 Jul 2017 04:19:21 +0000 +Subject: [PATCH 38/63] Revert "trusty: implement trusty OS timer proxy for + performance enhancement" + +This reverts commit 3e30c8c0a0b5928bc11fa44571563635a9b1e0a8. 
+ +Change-Id: I16e64b07a9ddfd50f44ab85ed0aa27925c8ac8a2 +Signed-off-by: Zhong,Fangjian +--- + drivers/trusty/trusty-irq.c | 2 + + drivers/trusty/trusty.c | 88 ------------------------------------- + 2 files changed, 2 insertions(+), 88 deletions(-) + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index d17162c6a85e..e60068b50e04 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -651,6 +651,8 @@ static int trusty_irq_probe(struct platform_device *pdev) + irq = trusty_irq_init_one(is, irq, false); + + ret = trusty_irq_cpu_notif_add(is); ++ irq_register_done(); ++ + if (ret) { + dev_err(&pdev->dev, "register_cpu_notifier failed %d\n", ret); + goto err_register_hotcpu_notifier; +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 2fc1b232fee3..7e55453ae5f5 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -26,23 +26,11 @@ + #include + + #define TRUSTY_VMCALL_SMC 0x74727500 +-#define TRUSTY_LKTIMER_INTERVAL 10 /* 10 ms */ +-#define TRUSTY_LKTIMER_VECTOR 0x31 /* INT_PIT */ +- +-enum lktimer_mode { +- ONESHOT_TIMER, +- PERIODICAL_TIMER, +-}; + + struct trusty_state { +- struct device *dev; + struct mutex smc_lock; + struct atomic_notifier_head notifier; + struct completion cpu_idle_completion; +- struct timer_list timer; +- struct work_struct timer_work; +- enum lktimer_mode timer_mode; +- unsigned long timer_interval; + char *version_str; + u32 api_version; + }; +@@ -52,72 +40,6 @@ struct trusty_smc_interface { + ulong args[5]; + }; + +-static void trusty_lktimer_work_func(struct work_struct *work) +-{ +- int ret; +- unsigned int vector; +- struct trusty_state *s = +- container_of(work, struct trusty_state, timer_work); +- +- dev_dbg(s->dev, "%s\n", __func__); +- +- /* need vector number only for the first time */ +- vector = TRUSTY_LKTIMER_VECTOR; +- +- do { +- ret = trusty_std_call32(s->dev, SMC_SC_NOP, vector, 0, 0); +- vector = 0; +- } while (ret == 
SM_ERR_NOP_INTERRUPTED); +- +- if (ret != SM_ERR_NOP_DONE) +- dev_err(s->dev, "%s: SMC_SC_NOP failed %d", __func__, ret); +- +- dev_notice_once(s->dev, "LK OS proxy timer works\n"); +-} +- +-static void trusty_lktimer_func(unsigned long data) +-{ +- struct trusty_state *s = (struct trusty_state *)data; +- +- /* binding it physical CPU0 only because trusty OS runs on it */ +- schedule_work_on(0, &s->timer_work); +- +- /* reactivate the timer again in periodic mode */ +- if (s->timer_mode == PERIODICAL_TIMER) +- mod_timer(&s->timer, +- jiffies + msecs_to_jiffies(s->timer_interval)); +-} +- +-static void trusty_init_lktimer(struct trusty_state *s) +-{ +- INIT_WORK(&s->timer_work, trusty_lktimer_work_func); +- setup_timer(&s->timer, trusty_lktimer_func, (unsigned long)s); +-} +- +-/* note that this function is not thread-safe */ +-static void trusty_configure_lktimer(struct trusty_state *s, +- enum lktimer_mode mode, unsigned long interval) +-{ +- if (mode != ONESHOT_TIMER && mode != PERIODICAL_TIMER) { +- pr_err("%s: invalid timer mode: %d\n", __func__, mode); +- return; +- } +- +- s->timer_mode = mode; +- s->timer_interval = interval; +- mod_timer(&s->timer, jiffies + msecs_to_jiffies(s->timer_interval)); +-} +- +-/* +- * this should be called when removing trusty dev and +- * when LK/Trusty crashes, to disable proxy timer. 
+- */ +-static void trusty_del_lktimer(struct trusty_state *s) +-{ +- del_timer_sync(&s->timer); +- flush_work(&s->timer_work); +-} +- + static inline ulong smc(ulong r0, ulong r1, ulong r2, ulong r3) + { + __asm__ __volatile__( +@@ -307,9 +229,6 @@ static long trusty_std_call32_work(void *args) + + WARN_ONCE(ret == SM_ERR_PANIC, "trusty crashed"); + +- if (ret == SM_ERR_PANIC) +- trusty_del_lktimer(s); +- + if (smcnr == SMC_SC_NOP) + complete(&s->cpu_idle_completion); + else +@@ -470,7 +389,6 @@ static int trusty_probe(struct platform_device *pdev) + ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); + init_completion(&s->cpu_idle_completion); + platform_set_drvdata(pdev, s); +- s->dev = &pdev->dev; + + trusty_init_version(s, &pdev->dev); + +@@ -478,14 +396,9 @@ static int trusty_probe(struct platform_device *pdev) + if (ret < 0) + goto err_api_version; + +- trusty_init_lktimer(s); +- trusty_configure_lktimer(s, +- PERIODICAL_TIMER, TRUSTY_LKTIMER_INTERVAL); +- + return 0; + + err_api_version: +- trusty_del_lktimer(s); + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); + kfree(s->version_str); +@@ -509,7 +422,6 @@ static int trusty_remove(struct platform_device *pdev) + device_remove_file(&pdev->dev, &dev_attr_trusty_version); + kfree(s->version_str); + } +- trusty_del_lktimer(s); + kfree(s); + return 0; + } +-- +2.17.1 + diff --git a/patches/0038-drm-i915-selftests-Add-the-usual-batch-vma-managements.drm b/patches/0038-drm-i915-selftests-Add-the-usual-batch-vma-managements.drm new file mode 100644 index 0000000000..3b29b18251 --- /dev/null +++ b/patches/0038-drm-i915-selftests-Add-the-usual-batch-vma-managements.drm @@ -0,0 +1,55 @@ +From 2e224f0dc80acabc5370daf7c8ae94e9941b75c3 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Mon, 26 Aug 2019 08:21:22 +0100 +Subject: [PATCH 038/690] drm/i915/selftests: Add the usual batch vma + managements to st_workarounds + +To properly handle asynchronous migration of batch objects, we need to 
+couple the fences on the incoming batch into the request and should not +assume that they always start idle. + +Signed-off-by: Chris Wilson +Cc: Matthew Auld +Reviewed-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190826072149.9447-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gt/selftest_workarounds.c | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c +index d06d68ac2a3b..999a98f00494 100644 +--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c ++++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c +@@ -565,6 +565,14 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, + goto err_request; + } + ++ i915_vma_lock(batch); ++ err = i915_request_await_object(rq, batch->obj, false); ++ if (err == 0) ++ err = i915_vma_move_to_active(batch, rq, 0); ++ i915_vma_unlock(batch); ++ if (err) ++ goto err_request; ++ + err = engine->emit_bb_start(rq, + batch->node.start, PAGE_SIZE, + 0); +@@ -850,6 +858,14 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx, + goto err_request; + } + ++ i915_vma_lock(batch); ++ err = i915_request_await_object(rq, batch->obj, false); ++ if (err == 0) ++ err = i915_vma_move_to_active(batch, rq, 0); ++ i915_vma_unlock(batch); ++ if (err) ++ goto err_request; ++ + /* Perform the writes from an unprivileged "user" batch */ + err = engine->emit_bb_start(rq, batch->node.start, 0, 0); + +-- +2.17.1 + diff --git a/patches/0038-mei-restrict-vtag-support-to-hbm-version-2.2.security b/patches/0038-mei-restrict-vtag-support-to-hbm-version-2.2.security new file mode 100644 index 0000000000..0aac406679 --- /dev/null +++ b/patches/0038-mei-restrict-vtag-support-to-hbm-version-2.2.security @@ -0,0 +1,89 @@ +From 0f32f0b3cb7e815ff8dfb69add74750936f58c1d Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Sun, 6 May 2018 15:36:13 +0300 +Subject: [PATCH 38/65] mei: restrict vtag 
support to hbm version 2.2 + +The vtag allows partitioning the mei messages into virtual groups/channels. +This change only define vtag restrictions to firmware with version 2.2 +and above in order to make the series bisectable. + +Change-Id: Ic1fbe9826a7cd02662e55786518a9a83e1f830cb +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/debugfs.c | 1 + + drivers/misc/mei/hbm.c | 7 +++++++ + drivers/misc/mei/hw.h | 6 ++++++ + drivers/misc/mei/mei_dev.h | 2 ++ + 4 files changed, 16 insertions(+) + +diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c +index a26c716c61a1..2f85f518117e 100644 +--- a/drivers/misc/mei/debugfs.c ++++ b/drivers/misc/mei/debugfs.c +@@ -103,6 +103,7 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused) + seq_printf(m, "\tFA: %01d\n", dev->hbm_f_fa_supported); + seq_printf(m, "\tOS: %01d\n", dev->hbm_f_os_supported); + seq_printf(m, "\tDR: %01d\n", dev->hbm_f_dr_supported); ++ seq_printf(m, "\tVT: %01d\n", dev->hbm_f_vt_supported); + } + + seq_printf(m, "pg: %s, %s\n", +diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c +index a44094cdbc36..86c91f8961bb 100644 +--- a/drivers/misc/mei/hbm.c ++++ b/drivers/misc/mei/hbm.c +@@ -1052,6 +1052,13 @@ static void mei_hbm_config_features(struct mei_device *dev) + (dev->version.major_version == HBM_MAJOR_VERSION_DR && + dev->version.minor_version >= HBM_MINOR_VERSION_DR)) + dev->hbm_f_dr_supported = 1; ++ ++ /* VTag Support */ ++ dev->hbm_f_vt_supported = 0; ++ if (dev->version.major_version > HBM_MAJOR_VERSION_VT || ++ (dev->version.major_version == HBM_MAJOR_VERSION_VT && ++ dev->version.minor_version >= HBM_MINOR_VERSION_VT)) ++ dev->hbm_f_vt_supported = 1; + } + + /** +diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h +index d025a5f8317e..6416b0fda02e 100644 +--- a/drivers/misc/mei/hw.h ++++ b/drivers/misc/mei/hw.h +@@ -76,6 +76,12 @@ + #define HBM_MINOR_VERSION_DR 1 + #define HBM_MAJOR_VERSION_DR 2 + ++/* ++ * 
MEI version with vm tag support ++ */ ++#define HBM_MINOR_VERSION_VT 2 ++#define HBM_MAJOR_VERSION_VT 2 ++ + /* Host bus message command opcode */ + #define MEI_HBM_CMD_OP_MSK 0x7f + /* Host bus message command RESPONSE */ +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h +index 3ec625e76c28..728c3596d917 100644 +--- a/drivers/misc/mei/mei_dev.h ++++ b/drivers/misc/mei/mei_dev.h +@@ -426,6 +426,7 @@ struct mei_fw_version { + * @hbm_f_ie_supported : hbm feature immediate reply to enum request + * @hbm_f_os_supported : hbm feature support OS ver message + * @hbm_f_dr_supported : hbm feature dma ring supported ++ * @hbm_f_vt_supported : hbm feature vtag supported + * + * @fw_ver : FW versions + * +@@ -508,6 +509,7 @@ struct mei_device { + unsigned int hbm_f_ie_supported:1; + unsigned int hbm_f_os_supported:1; + unsigned int hbm_f_dr_supported:1; ++ unsigned int hbm_f_vt_supported:1; + + struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; + +-- +2.17.1 + diff --git a/patches/0038-net-stmmac-introduce-IEEE-802.1Qbv-HW-tunable.connectivity b/patches/0038-net-stmmac-introduce-IEEE-802.1Qbv-HW-tunable.connectivity new file mode 100644 index 0000000000..77f5b989a3 --- /dev/null +++ b/patches/0038-net-stmmac-introduce-IEEE-802.1Qbv-HW-tunable.connectivity @@ -0,0 +1,383 @@ +From 63610fe9a2eead37e223ffe15b018fb2ad29c13a Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Fri, 6 Jul 2018 06:16:28 +0800 +Subject: [PATCH 038/108] net: stmmac: introduce IEEE 802.1Qbv HW tunables + functionalities + +We add hardware tunables setter and getter functions for: +a) time interval left shift (TILS) - multiply the gate interval through + left-shifting operation. +b) PTP Time Offset Value (PTOV) - PTP clock offset to avoid transmission + overrun at the installation of new Gate Control List. 
+c) Current Time Offset Value (CTOV) - Current time offset to compensate + for all pipeline delays to ensure the effect of gate controls is + visible on the line exactly at pre-determined schedule. + +Reviewed-by: Voon Weifeng +Reviewed-by: Kweh Hock Leong +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 2 + + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 8 ++ + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 42 ++++++- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 19 ++- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 110 +++++++++++++++++- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 16 +++ + 6 files changed, 191 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index a8852e75b91a..f6be8a35aae7 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -1180,6 +1180,8 @@ const struct stmmac_ops dwmac510_ops = { + .init_tsn = tsn_init, + .set_tsn_feat = tsn_feat_set, + .has_tsn_feat = tsn_has_feat, ++ .set_tsn_hwtunable = tsn_hwtunable_set, ++ .get_tsn_hwtunable = tsn_hwtunable_get, + .set_est_enable = tsn_est_enable_set, + .get_est_bank = tsn_est_bank_get, + .set_est_gce = tsn_est_gce_set, +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index 0b71ae6f592d..40ba7f86b521 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -95,6 +95,12 @@ + + /* MTL EST control register */ + #define MTL_EST_CTRL 0x00000c50 ++#define MTL_EST_CTRL_PTOV GENMASK(31, 24) ++#define MTL_EST_CTRL_PTOV_SHIFT 24 ++#define MTL_EST_CTRL_CTOV GENMASK(23, 12) ++#define MTL_EST_CTRL_CTOV_SHIFT 12 ++#define MTL_EST_CTRL_TILS GENMASK(10, 8) ++#define MTL_EST_CTRL_TILS_SHIFT 8 + #define MTL_EST_CTRL_SSWL BIT(1) /* Switch to SWOL */ + #define 
MTL_EST_CTRL_EEST BIT(0) /* Enable EST */ + +@@ -131,6 +137,8 @@ + + /* EST Global defines */ + #define EST_CTR_HI_MAX 0xff /* CTR Hi is 8-bit only */ ++#define EST_PTOV_MAX 0xff /* Max PTP time offset */ ++#define EST_CTOV_MAX 0xfff /* Max Current time offset */ + + /* MAC Core Version */ + #define TSN_VER_MASK 0xFF +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index 64b263336b87..85fc7c638d17 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -121,8 +121,12 @@ static u32 dwmac5_est_get_txqcnt(void __iomem *ioaddr) + return ((hw_cap2 & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; + } + +-static void dwmac5_est_get_max(u32 *cycle_max) ++static void dwmac5_est_get_max(u32 *ptov_max, ++ u32 *ctov_max, ++ u32 *cycle_max) + { ++ *ptov_max = EST_PTOV_MAX; ++ *ctov_max = EST_CTOV_MAX; + *cycle_max = EST_CTR_HI_MAX; + } + +@@ -180,6 +184,39 @@ static int dwmac5_est_read_gce(void __iomem *ioaddr, u32 row, + return ret; + } + ++static void dwmac5_est_set_tils(void __iomem *ioaddr, const u32 tils) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_EST_CTRL); ++ value &= ~(MTL_EST_CTRL_SSWL | MTL_EST_CTRL_TILS); ++ value |= (tils << MTL_EST_CTRL_TILS_SHIFT); ++ ++ writel(value, ioaddr + MTL_EST_CTRL); ++} ++ ++static void dwmac5_est_set_ptov(void __iomem *ioaddr, const u32 ptov) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_EST_CTRL); ++ value &= ~(MTL_EST_CTRL_SSWL | MTL_EST_CTRL_PTOV); ++ value |= (ptov << MTL_EST_CTRL_PTOV_SHIFT); ++ ++ writel(value, ioaddr + MTL_EST_CTRL); ++} ++ ++static void dwmac5_est_set_ctov(void __iomem *ioaddr, const u32 ctov) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_EST_CTRL); ++ value &= ~(MTL_EST_CTRL_SSWL | MTL_EST_CTRL_CTOV); ++ value |= (ctov << MTL_EST_CTRL_CTOV_SHIFT); ++ ++ writel(value, ioaddr + MTL_EST_CTRL); ++} ++ + static int dwmac5_est_set_enable(void __iomem *ioaddr, bool enable) + { + u32 
value; +@@ -237,6 +274,9 @@ const struct tsnif_ops dwmac510_tsnif_ops = { + .est_write_gcl_config = dwmac5_est_write_gcl_config, + .est_read_gcl_config = dwmac5_est_read_gcl_config, + .est_read_gce = dwmac5_est_read_gce, ++ .est_set_tils = dwmac5_est_set_tils, ++ .est_set_ptov = dwmac5_est_set_ptov, ++ .est_set_ctov = dwmac5_est_set_ctov, + .est_set_enable = dwmac5_est_set_enable, + .est_get_enable = dwmac5_est_get_enable, + .est_get_bank = dwmac5_est_get_bank, +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 2f55efbbdc6c..edaed05e7385 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -283,6 +283,7 @@ struct stmmac_tc_entry; + struct stmmac_pps_cfg; + struct stmmac_rss; + enum tsn_feat_id; ++enum tsn_hwtunable_id; + struct est_gc_entry; + struct est_gcrr; + struct est_gc_config; +@@ -403,6 +404,13 @@ struct stmmac_ops { + enum tsn_feat_id featid, bool enable); + bool (*has_tsn_feat)(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid); ++ int (*set_tsn_hwtunable)(struct mac_device_info *hw, ++ struct net_device *dev, ++ enum tsn_hwtunable_id id, ++ const u32 data); ++ int (*get_tsn_hwtunable)(struct mac_device_info *hw, ++ struct net_device *dev, ++ enum tsn_hwtunable_id id, u32 *data); + int (*set_est_enable)(struct mac_device_info *hw, + struct net_device *dev, bool enable); + int (*get_est_bank)(struct mac_device_info *hw, struct net_device *dev, +@@ -712,7 +720,7 @@ struct tsnif_ops { + u32 (*est_get_gcl_depth)(void __iomem *ioaddr); + u32 (*est_get_ti_width)(void __iomem *ioaddr); + u32 (*est_get_txqcnt)(void __iomem *ioaddr); +- void (*est_get_max)(u32 *ct_max); ++ void (*est_get_max)(u32 *ptov_max, u32 *ctov_max, u32 *ct_max); + int (*est_write_gcl_config)(void __iomem *ioaddr, u32 data, u32 addr, + bool is_gcrr, + u32 dbgb, bool is_dbgm); +@@ -723,6 +731,9 @@ struct tsnif_ops { + u32 *gates, u32 *ti_nsec, + u32 
ti_wid, u32 txqcnt, + u32 dbgb, bool is_dbgm); ++ void (*est_set_tils)(void __iomem *ioaddr, const u32 tils); ++ void (*est_set_ptov)(void __iomem *ioaddr, const u32 ptov); ++ void (*est_set_ctov)(void __iomem *ioaddr, const u32 ctov); + int (*est_set_enable)(void __iomem *ioaddr, bool enable); + bool (*est_get_enable)(void __iomem *ioaddr); + u32 (*est_get_bank)(void __iomem *ioaddr, bool is_own); +@@ -747,6 +758,12 @@ struct tsnif_ops { + tsnif_do_callback(__hw, est_read_gcl_config, __args) + #define tsnif_est_read_gce(__hw, __args...) \ + tsnif_do_callback(__hw, est_read_gce, __args) ++#define tsnif_est_set_tils(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, est_set_tils, __args) ++#define tsnif_est_set_ptov(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, est_set_ptov, __args) ++#define tsnif_est_set_ctov(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, est_set_ctov, __args) + #define tsnif_est_set_enable(__hw, __args...) \ + tsnif_do_callback(__hw, est_set_enable, __args) + #define tsnif_est_get_enable(__hw, __args...) \ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index 582552d5506b..d1f599138963 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -31,6 +31,7 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + struct est_gc_entry *gcl; + struct tsn_hw_cap *cap; + u32 gcl_depth; ++ u32 tils_max; + u32 ti_wid; + u32 bank; + u32 hwid; +@@ -87,14 +88,17 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + ti_wid = tsnif_est_get_ti_width(hw, ioaddr); + cap->ti_wid = ti_wid; + cap->gcl_depth = gcl_depth; +- + cap->ext_max = EST_TIWID_TO_EXTMAX(ti_wid); + cap->txqcnt = tsnif_est_get_txqcnt(hw, ioaddr); +- tsnif_est_get_max(hw, &cap->cycle_max); ++ ++ tils_max = (tsnif_has_tsn_cap(hw, ioaddr, TSN_FEAT_ID_EST) ? 
3 : 0); ++ tils_max = (1 << tils_max) - 1; ++ cap->tils_max = tils_max; ++ tsnif_est_get_max(hw, &cap->ptov_max, &cap->ctov_max, &cap->cycle_max); + cap->est_support = 1; + +- dev_info(pdev, "EST: depth=%u, ti_wid=%u, ter_max=%uns, tqcnt=%u\n", +- gcl_depth, ti_wid, cap->ext_max, cap->txqcnt); ++ dev_info(pdev, "EST: depth=%u, ti_wid=%u, ter_max=%uns, tils_max=%u, tqcnt=%u\n", ++ gcl_depth, ti_wid, cap->ext_max, tils_max, cap->txqcnt); + + return 0; + } +@@ -123,6 +127,104 @@ bool tsn_has_feat(struct mac_device_info *hw, struct net_device *dev, + return hw->tsn_info.feat_en[featid]; + } + ++int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, ++ enum tsn_hwtunable_id id, ++ const u32 data) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ struct tsn_hw_cap *cap = &info->cap; ++ void __iomem *ioaddr = hw->pcsr; ++ int ret = 0; ++ ++ switch (id) { ++ case TSN_HWTUNA_TX_EST_TILS: ++ case TSN_HWTUNA_TX_EST_PTOV: ++ case TSN_HWTUNA_TX_EST_CTOV: ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_info(dev, "EST: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ break; ++ default: ++ netdev_warn(dev, "TSN: invalid tunable id(%u)\n", id); ++ return -EINVAL; ++ }; ++ ++ switch (id) { ++ case TSN_HWTUNA_TX_EST_TILS: ++ if (data > cap->tils_max) { ++ netdev_warn(dev, "EST: invalid tils(%u), max=%u\n", ++ data, cap->tils_max); ++ ++ return -EINVAL; ++ } ++ if (data != info->hwtunable[TSN_HWTUNA_TX_EST_TILS]) { ++ tsnif_est_set_tils(hw, ioaddr, data); ++ info->hwtunable[TSN_HWTUNA_TX_EST_TILS] = data; ++ netdev_info(dev, "EST: Set TILS = %u\n", data); ++ } ++ break; ++ case TSN_HWTUNA_TX_EST_PTOV: ++ if (data > cap->ptov_max) { ++ netdev_warn(dev, ++ "EST: invalid PTOV(%u), max=%u\n", ++ data, cap->ptov_max); ++ ++ return -EINVAL; ++ } ++ if (data != info->hwtunable[TSN_HWTUNA_TX_EST_PTOV]) { ++ tsnif_est_set_ptov(hw, ioaddr, data); ++ info->hwtunable[TSN_HWTUNA_TX_EST_PTOV] = data; ++ netdev_info(dev, "EST: Set PTOV = %u\n", data); ++ } ++ 
break; ++ case TSN_HWTUNA_TX_EST_CTOV: ++ if (data > cap->ctov_max) { ++ netdev_warn(dev, ++ "EST: invalid CTOV(%u), max=%u\n", ++ data, cap->ctov_max); ++ ++ return -EINVAL; ++ } ++ if (data != info->hwtunable[TSN_HWTUNA_TX_EST_CTOV]) { ++ tsnif_est_set_ctov(hw, ioaddr, data); ++ info->hwtunable[TSN_HWTUNA_TX_EST_CTOV] = data; ++ netdev_info(dev, "EST: Set CTOV = %u\n", data); ++ } ++ break; ++ default: ++ netdev_warn(dev, "TSN: invalid tunable id(%u)\n", id); ++ ret = -EINVAL; ++ }; ++ ++ return ret; ++} ++ ++int tsn_hwtunable_get(struct mac_device_info *hw, struct net_device *dev, ++ enum tsn_hwtunable_id id, u32 *data) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ ++ switch (id) { ++ case TSN_HWTUNA_TX_EST_TILS: ++ case TSN_HWTUNA_TX_EST_PTOV: ++ case TSN_HWTUNA_TX_EST_CTOV: ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_info(dev, "EST: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ break; ++ default: ++ netdev_warn(dev, "TSN: invalid tunable id(%u)\n", id); ++ return -EINVAL; ++ }; ++ ++ *data = info->hwtunable[id]; ++ netdev_info(dev, "TSN: Get HW tunable[%d] = %u\n", id, *data); ++ ++ return 0; ++} ++ + int tsn_est_enable_set(struct mac_device_info *hw, struct net_device *dev, + bool enable) + { +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index e70ee6eea0d0..b51c2008f5e4 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -10,6 +10,14 @@ + #define EST_GCL_BANK_MAX (2) + #define EST_TIWID_TO_EXTMAX(ti_wid) ((1 << ((ti_wid) + 7)) - 1) + ++/* Hardware Tunable Enum */ ++enum tsn_hwtunable_id { ++ TSN_HWTUNA_TX_EST_TILS = 0, ++ TSN_HWTUNA_TX_EST_PTOV, ++ TSN_HWTUNA_TX_EST_CTOV, ++ TSN_HWTUNA_MAX, ++}; ++ + /* TSN Feature Enabled List */ + enum tsn_feat_id { + TSN_FEAT_ID_EST = 0, +@@ -39,6 +47,9 @@ struct tsn_hw_cap { + u32 ti_wid; /* time interval width */ + u32 ext_max; /* Max time extension */ + 
u32 cycle_max; /* Max Cycle Time */ ++ u32 tils_max; /* Max time interval left shift */ ++ u32 ptov_max; /* Max PTP Offset */ ++ u32 ctov_max; /* Max Current Time Offset */ + }; + + /* EST Gate Control Entry */ +@@ -74,6 +85,7 @@ struct est_gc_config { + struct tsnif_info { + struct tsn_hw_cap cap; + bool feat_en[TSN_FEAT_ID_MAX]; ++ u32 hwtunable[TSN_HWTUNA_MAX]; + struct est_gc_config est_gcc; + }; + +@@ -85,6 +97,10 @@ int tsn_feat_set(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid, bool enable); + bool tsn_has_feat(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid); ++int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, ++ enum tsn_hwtunable_id id, const u32 data); ++int tsn_hwtunable_get(struct mac_device_info *hw, struct net_device *dev, ++ enum tsn_hwtunable_id id, u32 *data); + int tsn_est_enable_set(struct mac_device_info *hw, struct net_device *dev, + bool enable); + int tsn_est_bank_get(struct mac_device_info *hw, struct net_device *dev, +-- +2.17.1 + diff --git a/patches/0038-vhm-modify-mmio-memory-map-unmap-api.acrn b/patches/0038-vhm-modify-mmio-memory-map-unmap-api.acrn new file mode 100644 index 0000000000..ed728eb2e1 --- /dev/null +++ b/patches/0038-vhm-modify-mmio-memory-map-unmap-api.acrn @@ -0,0 +1,177 @@ +From 8ba333a33bdde4404a5c9884b4381d803f09b989 Mon Sep 17 00:00:00 2001 +From: Mingqiang Chi +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 038/150] vhm: modify mmio/memory map/unmap api + +Split the parameter(prot) to two parameters(mem_type and +mem_access_right) +Remove the parameter(prot) in unset_mmio_map + +Change-Id: I9d8bf3401898d53ec2b765135601d1e4bed1e09d +Tracked-On: 222796 +Signed-off-by: Mingqiang Chi +Reviewed-on: +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + drivers/vhm/vhm_mm.c | 33 +++++++++++++++++++------------- + include/linux/vhm/acrn_hv_defs.h | 26 ++++++++++++------------- + include/linux/vhm/acrn_vhm_mm.h | 8 +++++--- + 3 
files changed, 37 insertions(+), 30 deletions(-) + +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index a9ba810a7fd7..be6a47afad9a 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -148,7 +148,8 @@ int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) + + static int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, +- unsigned int prot, unsigned int type) ++ unsigned int mem_type, unsigned int mem_access_right, ++ unsigned int type) + { + struct vm_set_memmap set_memmap; + +@@ -156,7 +157,8 @@ static int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + set_memmap.remote_gpa = guest_gpa; + set_memmap.vm0_gpa = host_gpa; + set_memmap.length = len; +- set_memmap.prot = prot; ++ set_memmap.prot = ((mem_type & MEM_TYPE_MASK) | ++ (mem_access_right & MEM_ACCESS_RIGHT_MASK)); + + /* hypercall to notify hv the guest EPT setting*/ + if (hcall_set_memmap(vmid, +@@ -167,36 +169,39 @@ static int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + + pr_debug("VHM: set ept for mem map[type=0x%x, host_gpa=0x%lx," + "guest_gpa=0x%lx,len=0x%lx, prot=0x%x]\n", +- type, host_gpa, guest_gpa, len, prot); ++ type, host_gpa, guest_gpa, len, set_memmap.prot); + + return 0; + } + + int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, unsigned int prot) ++ unsigned long host_gpa, unsigned long len, ++ unsigned int mem_type, unsigned mem_access_right) + { + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, +- prot, MAP_MMIO); ++ mem_type, mem_access_right, MAP_MMIO); + } + + int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, unsigned int prot) ++ unsigned long host_gpa, unsigned long len) + { + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, +- prot, MAP_UNMAP); ++ 0, 0, MAP_UNMAP); + } + + int update_memmap_attr(unsigned long vmid, unsigned long 
guest_gpa, +- unsigned long host_gpa, unsigned long len, unsigned int prot) ++ unsigned long host_gpa, unsigned long len, ++ unsigned int mem_type, unsigned int mem_access_right) + { + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, +- prot, MAP_MEM); ++ mem_type, mem_access_right, MAP_MEM); + } + + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + { + struct guest_memseg *seg = NULL; +- unsigned int type, prot; ++ unsigned int type; ++ unsigned int mem_type, mem_access_right; + unsigned long guest_gpa, host_gpa; + + mutex_lock(&vm->seg_lock); +@@ -213,17 +218,19 @@ int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + } + guest_gpa = seg->gpa; + host_gpa = seg->vm0_gpa; +- prot = memmap->prot | MEM_ATTR_WB_CACHE; ++ mem_type = MEM_TYPE_WB; ++ mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); + type = MAP_MEM; + } else { + guest_gpa = memmap->gpa; + host_gpa = acrn_hpa2gpa(memmap->hpa); +- prot = memmap->prot | MEM_ATTR_UNCACHED; ++ mem_type = MEM_TYPE_UC; ++ mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); + type = MAP_MMIO; + } + + if (_mem_set_memmap(vm->vmid, guest_gpa, host_gpa, memmap->len, +- prot, type) < 0) { ++ mem_type, mem_access_right, type) < 0) { + pr_err("vhm: failed to set memmap %ld!\n", vm->vmid); + mutex_unlock(&vm->seg_lock); + return -EFAULT; +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 12dc3c954526..411f197f7f3a 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -111,20 +111,18 @@ + #define ACRN_INVALID_HPA (-1UL) + + /* Generic memory attributes */ +-#define MEM_ATTR_READ 0x00000001 +-#define MEM_ATTR_WRITE 0x00000002 +-#define MEM_ATTR_EXECUTE 0x00000004 +-#define MEM_ATTR_USER 0x00000008 +-#define MEM_ATTR_WB_CACHE 0x00000040 +-#define MEM_ATTR_WT_CACHE 0x00000080 +-#define MEM_ATTR_UNCACHED 0x00000100 +-#define MEM_ATTR_WC 0x00000200 +-#define MEM_ATTR_WP 0x00000400 +- +-#define MEM_ATTR_ALL 
0x00000007 +-#define MEM_ATTR_WRITE_PROT 0x00000045 +-#define MEM_ATTR_ALL_WB 0x00000047 +-#define MEM_ATTR_ALL_WC 0x00000207 ++#define MEM_ACCESS_READ 0x00000001 ++#define MEM_ACCESS_WRITE 0x00000002 ++#define MEM_ACCESS_EXEC 0x00000004 ++#define MEM_ACCESS_RWX (MEM_ACCESS_READ | MEM_ACCESS_WRITE | \ ++ MEM_ACCESS_EXEC) ++#define MEM_ACCESS_RIGHT_MASK 0x00000007 ++#define MEM_TYPE_WB 0x00000040 ++#define MEM_TYPE_WT 0x00000080 ++#define MEM_TYPE_UC 0x00000100 ++#define MEM_TYPE_WC 0x00000200 ++#define MEM_TYPE_WP 0x00000400 ++#define MEM_TYPE_MASK 0x000007C0 + + struct vm_set_memmap { + #define MAP_MEM 0 +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index 2ff1e25b22ce..ba8558949e48 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -66,11 +66,13 @@ static inline unsigned long acrn_hpa2gpa(unsigned long hpa) + void *map_guest_phys(unsigned long vmid, u64 uos_phys, size_t size); + int unmap_guest_phys(unsigned long vmid, u64 uos_phys); + int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, unsigned int prot); ++ unsigned long host_gpa, unsigned long len, ++ unsigned int mem_type, unsigned int mem_access_right); + int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, unsigned int prot); ++ unsigned long host_gpa, unsigned long len); + int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, unsigned int prot); ++ unsigned long host_gpa, unsigned long len, ++ unsigned int mem_type, unsigned int mem_access_right); + + int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma); + +-- +2.17.1 + diff --git a/patches/0039-ASoC-Intel-Skylake-Merge-skl_sst_ctx_init-into-skl_i.audio b/patches/0039-ASoC-Intel-Skylake-Merge-skl_sst_ctx_init-into-skl_i.audio new file mode 100644 index 0000000000..73b60fa202 --- /dev/null +++ 
b/patches/0039-ASoC-Intel-Skylake-Merge-skl_sst_ctx_init-into-skl_i.audio @@ -0,0 +1,106 @@ +From c89bb24546a617e571277f1c54833a072a9c6447 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Tue, 13 Aug 2019 20:55:48 +0200 +Subject: [PATCH 039/193] ASoC: Intel: Skylake: Merge skl_sst_ctx_init into + skl_init_dsp + +skl_init_dsp and skl_sst_ctx_init share the exact same purpose: trigger +for sst_dsp creation. Merge them together. While adding code, change +reorders certain blocks, so skl_dev instance is always initialized +before sst_dsp_new cascade begins. + +Change-Id: I001b76a63047ae299341726c546c511bab8d41a6 +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-messages.c | 21 +++++++++++++++------ + sound/soc/intel/skylake/skl-sst-dsp.h | 1 - + sound/soc/intel/skylake/skl-sst-utils.c | 23 ----------------------- + 3 files changed, 15 insertions(+), 30 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index 4680352d473a..d4127fed12ad 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -142,18 +142,27 @@ int skl_dsp_cleanup(struct device *dev, + + int skl_init_dsp(struct skl_dev *skl, struct sst_pdata *pdata) + { ++ struct sst_dsp *dsp; + struct hdac_bus *bus = skl_to_bus(skl); +- int ret; ++ struct device *dev = skl->dev; + + /* enable ppcap interrupt */ + snd_hdac_ext_bus_ppcap_enable(bus, true); + snd_hdac_ext_bus_ppcap_int_enable(bus, true); + +- ret = skl_sst_ctx_init(skl, pdata); +- if (ret < 0) +- return ret; +- +- dev_dbg(bus->dev, "dsp registration status=%d\n", ret); ++ skl->is_first_boot = true; ++ INIT_LIST_HEAD(&skl->module_list); ++ init_waitqueue_head(&skl->mod_load_wait); ++ ++ pdata->id = skl->pci->device; ++ pdata->irq = skl->pci->irq; ++ pdata->dma_base = -1; ++ pdata->dsp = skl; ++ dsp = sst_dsp_new(dev, pdata); ++ if (!dsp) { ++ dev_err(dev, "%s: no device\n", __func__); ++ return -ENODEV; ++ } + + return 0; + } +diff 
--git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h +index 4bbf3ba3f788..bb2ccb0f449e 100644 +--- a/sound/soc/intel/skylake/skl-sst-dsp.h ++++ b/sound/soc/intel/skylake/skl-sst-dsp.h +@@ -236,7 +236,6 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw); + + void skl_dsp_set_astate_cfg(struct skl_dev *skl, u32 cnt, void *data); + +-int skl_sst_ctx_init(struct skl_dev *skl, struct sst_pdata *pdata); + int skl_prepare_lib_load(struct skl_dev *skl, struct skl_lib_info *linfo, + struct firmware *stripped_fw, + unsigned int hdr_offset, int index); +diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c +index 2d333ecf1016..7c3596f1f74b 100644 +--- a/sound/soc/intel/skylake/skl-sst-utils.c ++++ b/sound/soc/intel/skylake/skl-sst-utils.c +@@ -394,29 +394,6 @@ int skl_dsp_strip_extended_manifest(struct firmware *fw) + return 0; + } + +-int skl_sst_ctx_init(struct skl_dev *skl, struct sst_pdata *pdata) +-{ +- struct sst_dsp *sst; +- struct device *dev = skl->dev; +- +- pdata->id = skl->pci->device; +- pdata->irq = skl->pci->irq; +- pdata->dma_base = -1; +- pdata->dsp = skl; +- INIT_LIST_HEAD(&skl->module_list); +- sst = sst_dsp_new(dev, pdata); +- if (!sst) { +- dev_err(dev, "%s: no device\n", __func__); +- return -ENODEV; +- } +- +- skl->dsp = sst; +- init_waitqueue_head(&skl->mod_load_wait); +- skl->is_first_boot = true; +- +- return 0; +-} +- + int skl_prepare_lib_load(struct skl_dev *skl, struct skl_lib_info *linfo, + struct firmware *stripped_fw, + unsigned int hdr_offset, int index) +-- +2.17.1 + diff --git a/patches/0039-drm_dp_cec-add-connector-info-support.drm b/patches/0039-drm_dp_cec-add-connector-info-support.drm new file mode 100644 index 0000000000..6e57f680ee --- /dev/null +++ b/patches/0039-drm_dp_cec-add-connector-info-support.drm @@ -0,0 +1,200 @@ +From 332d49f292b6b14499bef55ec603feab7880bd63 Mon Sep 17 00:00:00 2001 +From: Dariusz Marcinkiewicz +Date: Wed, 14 Aug 2019 
12:44:59 +0200 +Subject: [PATCH 039/690] drm_dp_cec: add connector info support. + +Pass the connector info to the CEC adapter. This makes it possible +to associate the CEC adapter with the corresponding drm connector. + +Signed-off-by: Dariusz Marcinkiewicz +Signed-off-by: Hans Verkuil +Tested-by: Hans Verkuil +Reviewed-by: Lyude Paul +Reviewed-by: Ben Skeggs +Signed-off-by: Hans Verkuil +Link: https://patchwork.freedesktop.org/patch/msgid/20190814104520.6001-2-darekm@google.com +--- + .../display/amdgpu_dm/amdgpu_dm_mst_types.c | 2 +- + drivers/gpu/drm/drm_dp_cec.c | 25 ++++++++++++------- + drivers/gpu/drm/i915/display/intel_dp.c | 4 +-- + drivers/gpu/drm/nouveau/nouveau_connector.c | 3 +-- + include/drm/drm_dp_helper.h | 17 ++++++------- + 5 files changed, 27 insertions(+), 24 deletions(-) + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +index 16218a202b59..5ec14efd4d8c 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +@@ -416,7 +416,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, + + drm_dp_aux_register(&aconnector->dm_dp_aux.aux); + drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, +- aconnector->base.name, dm->adev->dev); ++ &aconnector->base); + aconnector->mst_mgr.cbs = &dm_mst_cbs; + drm_dp_mst_topology_mgr_init( + &aconnector->mst_mgr, +diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c +index b15cee85b702..b457c16c3a8b 100644 +--- a/drivers/gpu/drm/drm_dp_cec.c ++++ b/drivers/gpu/drm/drm_dp_cec.c +@@ -8,7 +8,9 @@ + #include + #include + #include ++#include + #include ++#include + #include + + /* +@@ -295,7 +297,10 @@ static void drm_dp_cec_unregister_work(struct work_struct *work) + */ + void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) + { +- u32 cec_caps = CEC_CAP_DEFAULTS | 
CEC_CAP_NEEDS_HPD; ++ struct drm_connector *connector = aux->cec.connector; ++ u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD | ++ CEC_CAP_CONNECTOR_INFO; ++ struct cec_connector_info conn_info; + unsigned int num_las = 1; + u8 cap; + +@@ -344,13 +349,17 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) + + /* Create a new adapter */ + aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops, +- aux, aux->cec.name, cec_caps, ++ aux, connector->name, cec_caps, + num_las); + if (IS_ERR(aux->cec.adap)) { + aux->cec.adap = NULL; + goto unlock; + } +- if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) { ++ ++ cec_fill_conn_info_from_drm(&conn_info, connector); ++ cec_s_conn_info(aux->cec.adap, &conn_info); ++ ++ if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) { + cec_delete_adapter(aux->cec.adap); + aux->cec.adap = NULL; + } else { +@@ -406,22 +415,20 @@ EXPORT_SYMBOL(drm_dp_cec_unset_edid); + /** + * drm_dp_cec_register_connector() - register a new connector + * @aux: DisplayPort AUX channel +- * @name: name of the CEC device +- * @parent: parent device ++ * @connector: drm connector + * + * A new connector was registered with associated CEC adapter name and + * CEC adapter parent device. After registering the name and parent + * drm_dp_cec_set_edid() is called to check if the connector supports + * CEC and to register a CEC adapter if that is the case. 
+ */ +-void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name, +- struct device *parent) ++void drm_dp_cec_register_connector(struct drm_dp_aux *aux, ++ struct drm_connector *connector) + { + WARN_ON(aux->cec.adap); + if (WARN_ON(!aux->transfer)) + return; +- aux->cec.name = name; +- aux->cec.parent = parent; ++ aux->cec.connector = connector; + INIT_DELAYED_WORK(&aux->cec.unregister_work, + drm_dp_cec_unregister_work); + } +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c +index 842d8b016638..0dee844ca08a 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp.c ++++ b/drivers/gpu/drm/i915/display/intel_dp.c +@@ -5500,7 +5500,6 @@ static int + intel_dp_connector_register(struct drm_connector *connector) + { + struct intel_dp *intel_dp = intel_attached_dp(connector); +- struct drm_device *dev = connector->dev; + int ret; + + ret = intel_connector_register(connector); +@@ -5515,8 +5514,7 @@ intel_dp_connector_register(struct drm_connector *connector) + intel_dp->aux.dev = connector->kdev; + ret = drm_dp_aux_register(&intel_dp->aux); + if (!ret) +- drm_dp_cec_register_connector(&intel_dp->aux, +- connector->name, dev->dev); ++ drm_dp_cec_register_connector(&intel_dp->aux, connector); + return ret; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index 94dfa2e5a9ab..56871d34e3fb 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -1415,8 +1415,7 @@ nouveau_connector_create(struct drm_device *dev, + switch (type) { + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: +- drm_dp_cec_register_connector(&nv_connector->aux, +- connector->name, dev->dev); ++ drm_dp_cec_register_connector(&nv_connector->aux, connector); + break; + } + +diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h +index 8364502f92cf..7972b925a952 100644 +--- 
a/include/drm/drm_dp_helper.h ++++ b/include/drm/drm_dp_helper.h +@@ -1230,20 +1230,19 @@ struct drm_dp_aux_msg { + + struct cec_adapter; + struct edid; ++struct drm_connector; + + /** + * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX + * @lock: mutex protecting this struct + * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. +- * @name: name of the CEC adapter +- * @parent: parent device of the CEC adapter ++ * @connector: the connector this CEC adapter is associated with + * @unregister_work: unregister the CEC adapter + */ + struct drm_dp_aux_cec { + struct mutex lock; + struct cec_adapter *adap; +- const char *name; +- struct device *parent; ++ struct drm_connector *connector; + struct delayed_work unregister_work; + }; + +@@ -1451,8 +1450,8 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) + + #ifdef CONFIG_DRM_DP_CEC + void drm_dp_cec_irq(struct drm_dp_aux *aux); +-void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name, +- struct device *parent); ++void drm_dp_cec_register_connector(struct drm_dp_aux *aux, ++ struct drm_connector *connector); + void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); + void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); + void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); +@@ -1461,9 +1460,9 @@ static inline void drm_dp_cec_irq(struct drm_dp_aux *aux) + { + } + +-static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux, +- const char *name, +- struct device *parent) ++static inline void ++drm_dp_cec_register_connector(struct drm_dp_aux *aux, ++ struct drm_connector *connector) + { + } + +-- +2.17.1 + diff --git a/patches/0039-mei-hbm-add-capabilities-message.security b/patches/0039-mei-hbm-add-capabilities-message.security new file mode 100644 index 0000000000..bff37ef699 --- /dev/null +++ b/patches/0039-mei-hbm-add-capabilities-message.security @@ -0,0 +1,239 @@ +From fdc4fe402f476fda680da5d83007e9b68bb4e8e5 Mon 
Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Tue, 8 May 2018 11:08:19 +0300 +Subject: [PATCH 39/65] mei: hbm: add capabilities message + +The HBM capabilities command allows performing +capabilities handshake between FW and a host driver. +The capabilities command is supported on the firmwares with +HBM version 2.2 and bigger. + +Change-Id: Ie4a85a52c51b722d18d3e94455086d80148c6719 +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/debugfs.c | 1 + + drivers/misc/mei/hbm.c | 80 ++++++++++++++++++++++++++++++++++++++ + drivers/misc/mei/hbm.h | 2 + + drivers/misc/mei/hw.h | 21 ++++++++++ + drivers/misc/mei/mei_dev.h | 2 + + 5 files changed, 106 insertions(+) + +diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c +index 2f85f518117e..b98f6f9a4896 100644 +--- a/drivers/misc/mei/debugfs.c ++++ b/drivers/misc/mei/debugfs.c +@@ -104,6 +104,7 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused) + seq_printf(m, "\tOS: %01d\n", dev->hbm_f_os_supported); + seq_printf(m, "\tDR: %01d\n", dev->hbm_f_dr_supported); + seq_printf(m, "\tVT: %01d\n", dev->hbm_f_vt_supported); ++ seq_printf(m, "\tCAP: %01d\n", dev->hbm_f_cap_supported); + } + + seq_printf(m, "pg: %s, %s\n", +diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c +index 86c91f8961bb..d71800a2f1d8 100644 +--- a/drivers/misc/mei/hbm.c ++++ b/drivers/misc/mei/hbm.c +@@ -327,6 +327,39 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev) + return 0; + } + ++/** ++ * mei_hbm_capabilities_req - request capabilities ++ * ++ * @dev: the device structure ++ * ++ * Return: 0 on success and < 0 on failure ++ */ ++static int mei_hbm_capabilities_req(struct mei_device *dev) ++{ ++ struct mei_msg_hdr mei_hdr; ++ struct hbm_capability_request req; ++ int ret; ++ ++ mei_hbm_hdr(&mei_hdr, sizeof(req)); ++ ++ memset(&req, 0, sizeof(req)); ++ req.hbm_cmd = MEI_HBM_CAPABILITIES_REQ_CMD; ++ if (dev->hbm_f_vt_supported) ++ req.capability_requested[0] 
= HBM_CAP_VM; ++ ++ ret = mei_hbm_write_message(dev, &mei_hdr, &req); ++ if (ret) { ++ dev_err(dev->dev, ++ "capabilities request write failed: ret = %d.\n", ret); ++ return ret; ++ } ++ ++ dev->hbm_state = MEI_HBM_CAP_SETUP; ++ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; ++ mei_schedule_stall_timer(dev); ++ return 0; ++} ++ + /** + * mei_hbm_enum_clients_req - sends enumeration client request message. + * +@@ -1059,6 +1092,13 @@ static void mei_hbm_config_features(struct mei_device *dev) + (dev->version.major_version == HBM_MAJOR_VERSION_VT && + dev->version.minor_version >= HBM_MINOR_VERSION_VT)) + dev->hbm_f_vt_supported = 1; ++ ++ /* Capability message Support */ ++ dev->hbm_f_cap_supported = 0; ++ if (dev->version.major_version > HBM_MAJOR_VERSION_CAP || ++ (dev->version.major_version == HBM_MAJOR_VERSION_CAP && ++ dev->version.minor_version >= HBM_MINOR_VERSION_CAP)) ++ dev->hbm_f_cap_supported = 1; + } + + /** +@@ -1092,6 +1132,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) + struct hbm_host_enum_response *enum_res; + struct hbm_dma_setup_response *dma_setup_res; + struct hbm_add_client_request *add_cl_req; ++ struct hbm_capability_response *capability_res; + int ret; + + struct mei_hbm_cl_cmd *cl_cmd; +@@ -1155,6 +1196,13 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) + return -EPROTO; + } + ++ if (dev->hbm_f_cap_supported) { ++ if (mei_hbm_capabilities_req(dev)) ++ return -EIO; ++ wake_up(&dev->wait_hbm_start); ++ break; ++ } ++ + if (dev->hbm_f_dr_supported) { + if (mei_dmam_ring_alloc(dev)) + dev_info(dev->dev, "running w/o dma ring\n"); +@@ -1176,6 +1224,38 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) + wake_up(&dev->wait_hbm_start); + break; + ++ case MEI_HBM_CAPABILITIES_RES_CMD: ++ dev_dbg(dev->dev, "hbm: capabilities response: message received.\n"); ++ ++ dev->init_clients_timer = 0; ++ ++ if (dev->hbm_state != MEI_HBM_CAP_SETUP) { ++ dev_err(dev->dev, "hbm: 
capabilities response: state mismatch, [%d, %d]\n", ++ dev->dev_state, dev->hbm_state); ++ return -EPROTO; ++ } ++ ++ capability_res = (struct hbm_capability_response *)mei_msg; ++ if (!(capability_res->capability_granted[0] & HBM_CAP_VM)) ++ dev->hbm_f_vt_supported = 0; ++ ++ if (dev->hbm_f_dr_supported) { ++ if (mei_dmam_ring_alloc(dev)) ++ dev_info(dev->dev, "running w/o dma ring\n"); ++ if (mei_dma_ring_is_allocated(dev)) { ++ if (mei_hbm_dma_setup_req(dev)) ++ return -EIO; ++ break; ++ } ++ } ++ ++ dev->hbm_f_dr_supported = 0; ++ mei_dmam_ring_free(dev); ++ ++ if (mei_hbm_enum_clients_req(dev)) ++ return -EIO; ++ break; ++ + case MEI_HBM_DMA_SETUP_RES_CMD: + dev_dbg(dev->dev, "hbm: dma setup response: message received.\n"); + +diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h +index 5aa58cffdd2e..4d95e38e4ddf 100644 +--- a/drivers/misc/mei/hbm.h ++++ b/drivers/misc/mei/hbm.h +@@ -16,6 +16,7 @@ struct mei_cl; + * + * @MEI_HBM_IDLE : protocol not started + * @MEI_HBM_STARTING : start request message was sent ++ * @MEI_HBM_CAP_SETUP : capabilities request message was sent + * @MEI_HBM_DR_SETUP : dma ring setup request message was sent + * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent + * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties +@@ -25,6 +26,7 @@ struct mei_cl; + enum mei_hbm_state { + MEI_HBM_IDLE = 0, + MEI_HBM_STARTING, ++ MEI_HBM_CAP_SETUP, + MEI_HBM_DR_SETUP, + MEI_HBM_ENUM_CLIENTS, + MEI_HBM_CLIENT_PROPERTIES, +diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h +index 6416b0fda02e..d9ea1b2cbd51 100644 +--- a/drivers/misc/mei/hw.h ++++ b/drivers/misc/mei/hw.h +@@ -82,6 +82,12 @@ + #define HBM_MINOR_VERSION_VT 2 + #define HBM_MAJOR_VERSION_VT 2 + ++/* ++ * MEI version with capabilities message support ++ */ ++#define HBM_MINOR_VERSION_CAP 2 ++#define HBM_MAJOR_VERSION_CAP 2 ++ + /* Host bus message command opcode */ + #define MEI_HBM_CMD_OP_MSK 0x7f + /* Host bus message command RESPONSE */ +@@ -127,6 +133,9 @@ + 
#define MEI_HBM_DMA_SETUP_REQ_CMD 0x12 + #define MEI_HBM_DMA_SETUP_RES_CMD 0x92 + ++#define MEI_HBM_CAPABILITIES_REQ_CMD 0x13 ++#define MEI_HBM_CAPABILITIES_RES_CMD 0x93 ++ + /* + * MEI Stop Reason + * used by hbm_host_stop_request.reason +@@ -536,4 +545,16 @@ struct hbm_dma_ring_ctrl { + u32 reserved4; + } __packed; + ++#define HBM_CAP_VM BIT(0) ++ ++struct hbm_capability_request { ++ u8 hbm_cmd; ++ u8 capability_requested[3]; ++} __packed; ++ ++struct hbm_capability_response { ++ u8 hbm_cmd; ++ u8 capability_granted[3]; ++} __packed; ++ + #endif +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h +index 728c3596d917..5c92b207810b 100644 +--- a/drivers/misc/mei/mei_dev.h ++++ b/drivers/misc/mei/mei_dev.h +@@ -427,6 +427,7 @@ struct mei_fw_version { + * @hbm_f_os_supported : hbm feature support OS ver message + * @hbm_f_dr_supported : hbm feature dma ring supported + * @hbm_f_vt_supported : hbm feature vtag supported ++ * @hbm_f_cap_supported : hbm feature capabilities message supported + * + * @fw_ver : FW versions + * +@@ -510,6 +511,7 @@ struct mei_device { + unsigned int hbm_f_os_supported:1; + unsigned int hbm_f_dr_supported:1; + unsigned int hbm_f_vt_supported:1; ++ unsigned int hbm_f_cap_supported:1; + + struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; + +-- +2.17.1 + diff --git a/patches/0039-net-stmmac-gcl-errors-reporting-and-its-inter.connectivity b/patches/0039-net-stmmac-gcl-errors-reporting-and-its-inter.connectivity new file mode 100644 index 0000000000..22ef39c168 --- /dev/null +++ b/patches/0039-net-stmmac-gcl-errors-reporting-and-its-inter.connectivity @@ -0,0 +1,559 @@ +From c77a70d2653510b8ebc4cc37919305f1063b0433 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Mon, 5 Aug 2019 22:43:09 +0800 +Subject: [PATCH 039/108] net: stmmac: gcl errors reporting and its interrupt + handling + +Enabled interrupt for Constant Gate Control Error (CGCE), Head-of-Line +Blocking due to scheduling error (HLBS) and Head-of-Line Blocking 
due to +frame size error (HLBF). + +CGCE should not happen as the driver has already implemented a check +before applying the settings. CGCE handling is added as a safety +check so that we can catch it if there is such error being fired. For +HLBS, the user will get the info of all the queues that shows this +error. For HLBF, the user will get the info of all the queue with the +latest frame size which causes the error. Frame size 0 indicates no +error. + +The ISR handling takes place when EST feature is enabled by user. + +This patch is also co-authored by Ong Boon Leong for the hook into +stmmac_ethtool.c to support EST error statistic showing. + +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 3 + + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 21 +++ + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 138 ++++++++++++++++++ + drivers/net/ethernet/stmicro/stmmac/hwif.h | 22 +++ + .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 33 ++++- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 5 + + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 42 ++++++ + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 17 +++ + 8 files changed, 279 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index f6be8a35aae7..85327b7fe77a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -1178,6 +1178,7 @@ const struct stmmac_ops dwmac510_ops = { + .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, + .tsnif_setup = dwmac510_tsnif_setup, + .init_tsn = tsn_init, ++ .setup_tsn_hw = tsn_hw_setup, + .set_tsn_feat = tsn_feat_set, + .has_tsn_feat = tsn_has_feat, + .set_tsn_hwtunable = tsn_hwtunable_set, +@@ -1189,6 +1190,8 @@ const struct stmmac_ops dwmac510_ops = { + .set_est_gcl_len = tsn_est_gcl_len_set, + .set_est_gcrr_times = tsn_est_gcrr_times_set, + 
.get_est_gcc = tsn_est_gcc_get, ++ .est_irq_status = tsn_est_irq_status, ++ .dump_tsn_mmc = tsn_mmc_dump, + }; + + static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index 40ba7f86b521..98ab4013d0d9 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -111,9 +111,30 @@ + #define MTL_EST_STATUS_BTRL_MAX (0xF << 8) + #define MTL_EST_STATUS_SWOL BIT(7) /* SW owned list */ + #define MTL_EST_STATUS_SWOL_SHIFT 7 ++#define MTL_EST_STATUS_CGCE BIT(4) /* Constant gate ctrl err */ ++#define MTL_EST_STATUS_HLBS BIT(3) /* HLB due to scheduling */ ++#define MTL_EST_STATUS_HLBF BIT(2) /* HLB due to frame size */ + #define MTL_EST_STATUS_BTRE BIT(1) /* BTR Error */ + #define MTL_EST_STATUS_SWLC BIT(0) /* Switch to SWOL complete */ + ++/* MTL EST Scheduling error */ ++#define MTL_EST_SCH_ERR 0x00000c60 ++#define MTL_EST_FRM_SZ_ERR 0x00000c64 ++#define MTL_EST_FRM_SZ_CAP 0x00000c68 ++#define MTL_EST_FRM_SZ_CAP_HBFS_MASK GENMASK(14, 0) ++#define MTL_EST_FRM_SZ_CAP_HBFQ_SHIFT 16 ++#define MTL_EST_FRM_SZ_CAP_HBFQ_MASK(x) ((x) > 4 ? GENMASK(18, 16) : \ ++ (x) > 2 ? 
GENMASK(17, 16) : \ ++ BIT(16)) ++ ++/* MTL EST interrupt enable */ ++#define MTL_EST_INT_EN 0x00000c70 ++#define MTL_EST_INT_EN_CGCE BIT(4) ++#define MTL_EST_INT_EN_IEHS BIT(3) ++#define MTL_EST_INT_EN_IEHF BIT(2) ++#define MTL_EST_INT_EN_IEBE BIT(1) ++#define MTL_EST_INT_EN_IECC BIT(0) ++ + /* MTL EST GCL control register */ + #define MTL_EST_GCL_CTRL 0x00000c80 + #define MTL_EST_GCL_CTRL_ADDR(dep) GENMASK(8 + (dep) - 1, 8) /* GCL Addr */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index 85fc7c638d17..41e67df455c9 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -8,6 +8,33 @@ + #include "hwif.h" + #include "stmmac_tsn.h" + ++enum tsn_mmc_idx { ++ EST_MMC_BTRE = 0, ++ EST_MMC_BTRLM = 1, ++ EST_MMC_HLBF = 2, ++ EST_MMC_HLBS = 3, ++ EST_MMC_CGCE = 4, ++}; ++ ++const struct tsn_mmc_desc dwmac5_tsn_mmc_desc[STMMAC_TSN_STAT_SIZE] = { ++ { true, "BTRE" }, /* BTR Error */ ++ { true, "BTRLM" }, /* BTR Maximum Loop Count Error */ ++ { true, "HLBF" }, /* Head-of-Line Blocking due to Frame Size */ ++ { true, "HLBS" }, /* Head-of-Line Blocking due to Scheduling */ ++ { true, "CGCE" }, /* Constant Gate Control Error */ ++ { false, "RESV" }, ++ { false, "RESV" }, ++ { false, "RESV" }, ++ { false, "RESV" }, ++ { false, "RESV" }, ++ { false, "RESV" }, ++ { false, "RESV" }, ++ { false, "RESV" }, ++ { false, "RESV" }, ++ { false, "RESV" }, ++ { false, "RESV" }, ++}; ++ + static int est_set_gcl_addr(void __iomem *ioaddr, u32 addr, + bool is_gcrr, u32 rwops, u32 dep, + u32 dbgb, bool is_dbgm) +@@ -56,6 +83,23 @@ static bool dwmac5_has_tsn_cap(void __iomem *ioaddr, enum tsn_feat_id featid) + }; + } + ++static void dwmac5_hw_setup(void __iomem *ioaddr, enum tsn_feat_id featid) ++{ ++ u32 value; ++ ++ switch (featid) { ++ case TSN_FEAT_ID_EST: ++ /* Enable EST interrupts */ ++ value = (MTL_EST_INT_EN_CGCE | MTL_EST_INT_EN_IEHS | ++ 
MTL_EST_INT_EN_IEHF | MTL_EST_INT_EN_IEBE | ++ MTL_EST_INT_EN_IECC); ++ writel(value, ioaddr + MTL_EST_INT_EN); ++ break; ++ default: ++ return; ++ }; ++} ++ + static u32 dwmac5_est_get_gcl_depth(void __iomem *ioaddr) + { + u32 hw_cap3; +@@ -264,9 +308,101 @@ static void dwmac5_est_switch_swol(void __iomem *ioaddr) + writel(value, ioaddr + MTL_EST_CTRL); + } + ++int dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, ++ struct tsn_mmc_stat *mmc_stat, ++ u32 txqcnt) ++{ ++ u32 txqcnt_mask; ++ u32 status; ++ u32 value; ++ u32 feqn; ++ u32 hbfq; ++ u32 hbfs; ++ u32 btrl; ++ ++ txqcnt_mask = (1 << txqcnt) - 1; ++ status = readl(ioaddr + MTL_EST_STATUS); ++ ++ value = (MTL_EST_STATUS_CGCE | MTL_EST_STATUS_HLBS | ++ MTL_EST_STATUS_HLBF | MTL_EST_STATUS_BTRE | ++ MTL_EST_STATUS_SWLC); ++ ++ /* Return if there is no error */ ++ if (!(status & value)) ++ return 0; ++ ++ if (status & MTL_EST_STATUS_CGCE) { ++ /* Clear Interrupt */ ++ writel(MTL_EST_STATUS_CGCE, ioaddr + MTL_EST_STATUS); ++ ++ mmc_stat->count[EST_MMC_CGCE]++; ++ } ++ ++ if (status & MTL_EST_STATUS_HLBS) { ++ value = readl(ioaddr + MTL_EST_SCH_ERR); ++ value &= txqcnt_mask; ++ ++ mmc_stat->count[EST_MMC_HLBS]++; ++ ++ /* Clear Interrupt */ ++ writel(value, ioaddr + MTL_EST_SCH_ERR); ++ ++ /* Collecting info to shows all the queues that has HLBS ++ * issue. 
The only way to clear this is to clear the ++ * statistic ++ */ ++ if (net_ratelimit()) ++ netdev_err(dev, "EST: HLB(sched) Queue %u\n", value); ++ } ++ ++ if (status & MTL_EST_STATUS_HLBF) { ++ value = readl(ioaddr + MTL_EST_FRM_SZ_ERR); ++ feqn = value & txqcnt_mask; ++ ++ value = readl(ioaddr + MTL_EST_FRM_SZ_CAP); ++ hbfq = (value & MTL_EST_FRM_SZ_CAP_HBFQ_MASK(txqcnt)) >> ++ MTL_EST_FRM_SZ_CAP_HBFQ_SHIFT; ++ hbfs = value & MTL_EST_FRM_SZ_CAP_HBFS_MASK; ++ ++ mmc_stat->count[EST_MMC_HLBF]++; ++ ++ /* Clear Interrupt */ ++ writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR); ++ ++ if (net_ratelimit()) ++ netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n", ++ hbfq, hbfs); ++ } ++ ++ if (status & MTL_EST_STATUS_BTRE) { ++ if ((status & MTL_EST_STATUS_BTRL) == ++ MTL_EST_STATUS_BTRL_MAX) ++ mmc_stat->count[EST_MMC_BTRLM]++; ++ else ++ mmc_stat->count[EST_MMC_BTRE]++; ++ ++ btrl = (status & MTL_EST_STATUS_BTRL) >> ++ MTL_EST_STATUS_BTRL_SHIFT; ++ ++ if (net_ratelimit()) ++ netdev_info(dev, "EST: BTR Error Loop Count %u\n", ++ btrl); ++ ++ writel(MTL_EST_STATUS_BTRE, ioaddr + MTL_EST_STATUS); ++ } ++ ++ if (status & MTL_EST_STATUS_SWLC) { ++ writel(MTL_EST_STATUS_SWLC, ioaddr + MTL_EST_STATUS); ++ netdev_info(dev, "SWOL has been switched\n"); ++ } ++ ++ return status; ++} ++ + const struct tsnif_ops dwmac510_tsnif_ops = { + .read_hwid = dwmac5_read_hwid, + .has_tsn_cap = dwmac5_has_tsn_cap, ++ .hw_setup = dwmac5_hw_setup, + .est_get_gcl_depth = dwmac5_est_get_gcl_depth, + .est_get_ti_width = dwmac5_est_get_ti_width, + .est_get_txqcnt = dwmac5_est_get_txqcnt, +@@ -281,9 +417,11 @@ const struct tsnif_ops dwmac510_tsnif_ops = { + .est_get_enable = dwmac5_est_get_enable, + .est_get_bank = dwmac5_est_get_bank, + .est_switch_swol = dwmac5_est_switch_swol, ++ .est_irq_status = dwmac5_est_irq_status, + }; + + void dwmac510_tsnif_setup(struct mac_device_info *mac) + { + mac->tsnif = &dwmac510_tsnif_ops; ++ mac->tsn_info.mmc_desc = &dwmac5_tsn_mmc_desc[0]; + } +diff --git 
a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index edaed05e7385..d511788fdb1c 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -404,6 +404,8 @@ struct stmmac_ops { + enum tsn_feat_id featid, bool enable); + bool (*has_tsn_feat)(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid); ++ void (*setup_tsn_hw)(struct mac_device_info *hw, ++ struct net_device *dev); + int (*set_tsn_hwtunable)(struct mac_device_info *hw, + struct net_device *dev, + enum tsn_hwtunable_id id, +@@ -430,6 +432,10 @@ struct stmmac_ops { + u32 dbgb, bool is_dbgm); + int (*get_est_gcc)(struct mac_device_info *hw, struct net_device *dev, + struct est_gc_config **gcc); ++ void (*est_irq_status)(struct mac_device_info *hw, ++ struct net_device *dev); ++ int (*dump_tsn_mmc)(struct mac_device_info *hw, int index, ++ unsigned long *count, const char **desc); + }; + + #define stmmac_core_init(__priv, __args...) \ +@@ -534,6 +540,8 @@ struct stmmac_ops { + stmmac_do_void_callback(__priv, mac, set_tsn_feat, __args) + #define stmmac_has_tsn_feat(__priv, __args...) \ + stmmac_do_callback(__priv, mac, has_tsn_feat, __args) ++#define stmmac_tsn_hw_setup(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, setup_tsn_hw, __args) + #define stmmac_set_tsn_hwtunable(__priv, __args...) \ + stmmac_do_callback(__priv, mac, set_tsn_hwtunable, __args) + #define stmmac_get_tsn_hwtunable(__priv, __args...) \ +@@ -552,6 +560,10 @@ struct stmmac_ops { + stmmac_do_callback(__priv, mac, set_est_gcrr_times, __args) + #define stmmac_get_est_gcc(__priv, __args...) \ + stmmac_do_callback(__priv, mac, get_est_gcc, __args) ++#define stmmac_est_irq_status(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, est_irq_status, __args) ++#define stmmac_dump_tsn_mmc(__priv, __args...) 
\ ++ stmmac_do_callback(__priv, mac, dump_tsn_mmc, __args) + + /* Helpers for serdes */ + struct stmmac_serdes_ops { +@@ -713,9 +725,12 @@ int stmmac_hwif_init(struct stmmac_priv *priv); + __result; \ + }) + ++struct tsn_mmc_stat; ++ + struct tsnif_ops { + u32 (*read_hwid)(void __iomem *ioaddr); + bool (*has_tsn_cap)(void __iomem *ioaddr, enum tsn_feat_id featid); ++ void (*hw_setup)(void __iomem *ioaddr, enum tsn_feat_id featid); + /* IEEE 802.1Qbv Enhanced Scheduled Traffics (EST) */ + u32 (*est_get_gcl_depth)(void __iomem *ioaddr); + u32 (*est_get_ti_width)(void __iomem *ioaddr); +@@ -738,12 +753,17 @@ struct tsnif_ops { + bool (*est_get_enable)(void __iomem *ioaddr); + u32 (*est_get_bank)(void __iomem *ioaddr, bool is_own); + void (*est_switch_swol)(void __iomem *ioaddr); ++ int (*est_irq_status)(void *ioaddr, struct net_device *dev, ++ struct tsn_mmc_stat *mmc_stat, ++ unsigned int txqcnt); + }; + + #define tsnif_read_hwid(__hw, __args...) \ + tsnif_do_callback(__hw, read_hwid, __args) + #define tsnif_has_tsn_cap(__hw, __args...) \ + tsnif_do_callback(__hw, has_tsn_cap, __args) ++#define tsnif_hw_setup(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, hw_setup, __args) + #define tsnif_est_get_gcl_depth(__hw, __args...) \ + tsnif_do_callback(__hw, est_get_gcl_depth, __args) + #define tsnif_est_get_ti_width(__hw, __args...) \ +@@ -772,5 +792,7 @@ struct tsnif_ops { + tsnif_do_callback(__hw, est_get_bank, __args) + #define tsnif_est_switch_swol(__hw, __args...) \ + tsnif_do_void_callback(__hw, est_switch_swol, __args) ++#define tsnif_est_irq_status(__hw, __args...) 
\ ++ tsnif_do_callback(__hw, est_irq_status, __args) + + #endif /* __STMMAC_HWIF_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index 1a8faf158a93..43f506543314 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -489,7 +489,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, + data[j++] = count; + } + } +- ++ if (priv->hw->tsn_info.cap.est_support) { ++ for (i = 0; i < STMMAC_TSN_STAT_SIZE; i++) { ++ if (!stmmac_dump_tsn_mmc(priv, ++ priv->hw, i, ++ &count, NULL)) ++ data[j++] = count; ++ } ++ } + /* Update the DMA HW counters for dwmac10/100 */ + ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats, + priv->ioaddr); +@@ -528,7 +535,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, + static int stmmac_get_sset_count(struct net_device *netdev, int sset) + { + struct stmmac_priv *priv = netdev_priv(netdev); +- int i, len, safety_len = 0; ++ int i, len, safety_len = 0, tsn_len = 0; + + switch (sset) { + case ETH_SS_STATS: +@@ -546,6 +553,16 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset) + + len += safety_len; + } ++ if (priv->hw->tsn_info.cap.est_support) { ++ for (i = 0; i < STMMAC_TSN_STAT_SIZE; i++) { ++ if (!stmmac_dump_tsn_mmc(priv, ++ priv->hw, i, ++ NULL, NULL)) ++ tsn_len++; ++ } ++ ++ len += tsn_len; ++ } + + return len; + case ETH_SS_TEST: +@@ -574,6 +591,18 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) + } + } + } ++ if (priv->hw->tsn_info.cap.est_support) { ++ for (i = 0; i < STMMAC_TSN_STAT_SIZE; i++) { ++ const char *desc; ++ ++ if (!stmmac_dump_tsn_mmc(priv, ++ priv->hw, i, ++ NULL, &desc)) { ++ memcpy(p, desc, ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } ++ } ++ } + if (priv->dma_cap.rmon) + for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { + memcpy(p, stmmac_mmc[i].stat_string, 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index c451c3e84d6f..310f62702ebd 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2672,6 +2672,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) + /* Set HW VLAN stripping mode */ + stmmac_set_hw_vlan_mode(priv, priv->ioaddr, dev->features); + ++ stmmac_tsn_hw_setup(priv, priv->hw, priv->dev); ++ + return 0; + } + +@@ -4103,6 +4105,9 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) + if (priv->irq_wake) + pm_wakeup_hard_event(priv->device); + ++ if (priv->hw->tsn_info.feat_en[TSN_FEAT_ID_EST]) ++ stmmac_est_irq_status(priv, priv->hw, priv->dev); ++ + /* To handle GMAC own interrupts */ + if ((priv->plat->has_gmac) || xmac) { + int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index d1f599138963..3288f1f54179 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -7,6 +7,7 @@ + #include + #include "stmmac_ptp.h" + #include "common.h" ++#include "stmmac.h" + + static u32 est_get_gcl_total_intervals_nsec(struct est_gc_config *gcc, + u32 bank, u32 gcl_len) +@@ -127,6 +128,18 @@ bool tsn_has_feat(struct mac_device_info *hw, struct net_device *dev, + return hw->tsn_info.feat_en[featid]; + } + ++/* tsn_hw_setup is called within stmmac_hw_setup() after ++ * stmmac_init_dma_engine() which resets MAC controller. ++ * This is so-that MAC registers are not cleared. 
++ */ ++void tsn_hw_setup(struct mac_device_info *hw, struct net_device *dev) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ ++ if (tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) ++ tsnif_hw_setup(hw, ioaddr, TSN_FEAT_ID_EST); ++} ++ + int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + enum tsn_hwtunable_id id, + const u32 data) +@@ -674,3 +687,32 @@ int tsn_est_gcc_get(struct mac_device_info *hw, struct net_device *dev, + + return 0; + } ++ ++void tsn_est_irq_status(struct mac_device_info *hw, struct net_device *dev) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ void __iomem *ioaddr = hw->pcsr; ++ unsigned int status; ++ ++ status = tsnif_est_irq_status(hw, ioaddr, dev, &info->mmc_stat, ++ info->cap.txqcnt); ++} ++ ++int tsn_mmc_dump(struct mac_device_info *hw, ++ int index, unsigned long *count, const char **desc) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ const struct tsn_mmc_desc *mmc_desc; ++ unsigned long *ptr; ++ ++ ptr = (unsigned long *)&info->mmc_stat; ++ mmc_desc = info->mmc_desc; ++ ++ if (!(mmc_desc + index)->valid) ++ return -EINVAL; ++ if (count) ++ *count = *(ptr + index); ++ if (desc) ++ *desc = (mmc_desc + index)->desc; ++ return 0; ++} +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index b51c2008f5e4..b855b33526e3 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -9,6 +9,7 @@ + #define MIN_TSN_CORE_VER 0x50 + #define EST_GCL_BANK_MAX (2) + #define EST_TIWID_TO_EXTMAX(ti_wid) ((1 << ((ti_wid) + 7)) - 1) ++#define STMMAC_TSN_STAT_SIZE (16) + + /* Hardware Tunable Enum */ + enum tsn_hwtunable_id { +@@ -82,11 +83,23 @@ struct est_gc_config { + bool enable; /* 1: enabled */ + }; + ++/* TSN MMC Statistics */ ++struct tsn_mmc_desc { ++ bool valid; ++ const char *desc; ++}; ++ ++struct tsn_mmc_stat { ++ unsigned long count[STMMAC_TSN_STAT_SIZE]; ++}; ++ + struct tsnif_info { + struct 
tsn_hw_cap cap; + bool feat_en[TSN_FEAT_ID_MAX]; + u32 hwtunable[TSN_HWTUNA_MAX]; + struct est_gc_config est_gcc; ++ struct tsn_mmc_stat mmc_stat; ++ const struct tsn_mmc_desc *mmc_desc; + }; + + struct mac_device_info; +@@ -97,6 +110,7 @@ int tsn_feat_set(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid, bool enable); + bool tsn_has_feat(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid); ++void tsn_hw_setup(struct mac_device_info *hw, struct net_device *dev); + int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + enum tsn_hwtunable_id id, const u32 data); + int tsn_hwtunable_get(struct mac_device_info *hw, struct net_device *dev, +@@ -118,5 +132,8 @@ int tsn_est_gcrr_times_set(struct mac_device_info *hw, + u32 dbgb, bool is_dbgm); + int tsn_est_gcc_get(struct mac_device_info *hw, struct net_device *dev, + struct est_gc_config **gcc); ++void tsn_est_irq_status(struct mac_device_info *hw, struct net_device *dev); ++int tsn_mmc_dump(struct mac_device_info *hw, ++ int index, unsigned long *count, const char **desc); + + #endif /* __STMMAC_TSN_H__ */ +-- +2.17.1 + diff --git a/patches/0039-spi-dw-pci-add-runtime-power-management-support.lpss b/patches/0039-spi-dw-pci-add-runtime-power-management-support.lpss new file mode 100644 index 0000000000..3af560aa34 --- /dev/null +++ b/patches/0039-spi-dw-pci-add-runtime-power-management-support.lpss @@ -0,0 +1,64 @@ +From 65cea5ed858bcafc1cf856ffa9aa919fb0dfd8ba Mon Sep 17 00:00:00 2001 +From: Raymond Tan +Date: Thu, 29 Aug 2019 16:29:21 +0300 +Subject: [PATCH 39/40] spi: dw-pci: add runtime power management support + +- implement pm_runtime hooks at pci driver +- turn on auto_runtime_pm flag on dw controller driver for SPI core + +Signed-off-by: Raymond Tan +Signed-off-by: Jarkko Nikula +--- + drivers/spi/spi-dw-pci.c | 9 +++++++++ + drivers/spi/spi-dw.c | 1 + + 2 files changed, 10 insertions(+) + +diff --git a/drivers/spi/spi-dw-pci.c 
b/drivers/spi/spi-dw-pci.c +index 140644913e6c..eebaea34e515 100644 +--- a/drivers/spi/spi-dw-pci.c ++++ b/drivers/spi/spi-dw-pci.c +@@ -7,6 +7,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -93,6 +94,11 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n", + pdev->vendor, pdev->device); + ++ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); ++ pm_runtime_use_autosuspend(&pdev->dev); ++ pm_runtime_put_autosuspend(&pdev->dev); ++ pm_runtime_allow(&pdev->dev); ++ + return 0; + } + +@@ -100,6 +106,9 @@ static void spi_pci_remove(struct pci_dev *pdev) + { + struct dw_spi *dws = pci_get_drvdata(pdev); + ++ pm_runtime_forbid(&pdev->dev); ++ pm_runtime_get_noresume(&pdev->dev); ++ + dw_spi_remove_host(dws); + } + +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index 9a49e073e8b7..466f5c67843b 100644 +--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -493,6 +493,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) + master->dev.of_node = dev->of_node; + master->dev.fwnode = dev->fwnode; + master->flags = SPI_MASTER_GPIO_SS; ++ master->auto_runtime_pm = true; + + if (dws->set_cs) + master->set_cs = dws->set_cs; +-- +2.17.1 + diff --git a/patches/0039-trusty-add-support-for-parameterized-NOP-ops.trusty b/patches/0039-trusty-add-support-for-parameterized-NOP-ops.trusty new file mode 100644 index 0000000000..b274dc484b --- /dev/null +++ b/patches/0039-trusty-add-support-for-parameterized-NOP-ops.trusty @@ -0,0 +1,480 @@ +From 1433f77de3deb40a68d34fbd3e326fd38b6669f4 Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Tue, 11 Jul 2017 04:42:49 +0000 +Subject: [PATCH 39/63] trusty: add support for parameterized NOP ops + +Parameterized NOPs are introduced by Trusty secure side to +facilitate better SMP concurrency. 
They are effectively NOP +calls with parameters that will be routed to appropriate +handlers on secure side which can be executed concurrently +on multiple CPUs. Parameterized NOPs are represented by +trusty_nop structure that has to be initialized by calling +trusty_nop_init call. This patch creates queue for such +items, +adds per CPU work queue to invoke them and adds API to +enqueue +and dequeue them. + +Change-Id: I7cf32bfdf07727e7d9b0d955ddfb3bf1b52e3a46 +Signed-off-by: Zhong,Fangjian +Author: Michael Ryleev +--- + drivers/trusty/trusty-irq.c | 96 +------------------ + drivers/trusty/trusty.c | 169 ++++++++++++++++++++++++++++++++++ + include/linux/trusty/smcall.h | 3 +- + include/linux/trusty/trusty.h | 17 ++++ + 4 files changed, 189 insertions(+), 96 deletions(-) + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index e60068b50e04..5b4686f4f85f 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -41,11 +41,6 @@ struct trusty_irq { + struct trusty_irq __percpu *percpu_ptr; + }; + +-struct trusty_irq_work { +- struct trusty_irq_state *is; +- struct work_struct work; +-}; +- + struct trusty_irq_irqset { + struct hlist_head pending; + struct hlist_head inactive; +@@ -54,14 +49,12 @@ struct trusty_irq_irqset { + struct trusty_irq_state { + struct device *dev; + struct device *trusty_dev; +- struct trusty_irq_work __percpu *irq_work; + struct trusty_irq_irqset normal_irqs; + spinlock_t normal_irqs_lock; + struct trusty_irq_irqset __percpu *percpu_irqs; + struct notifier_block trusty_call_notifier; + /* CPU hotplug instances for online */ + struct hlist_node node; +- struct workqueue_struct *wq; + }; + + static enum cpuhp_state trusty_irq_online; +@@ -183,46 +176,10 @@ static int trusty_irq_call_notify(struct notifier_block *nb, + return NOTIFY_OK; + } + +- +-static void trusty_irq_work_func_locked_nop(struct work_struct *work) +-{ +- int ret; +- struct trusty_irq_state *is = +- container_of(work, struct 
trusty_irq_work, work)->is; +- +- dev_dbg(is->dev, "%s\n", __func__); +- +- ret = trusty_std_call32(is->trusty_dev, SMC_SC_LOCKED_NOP, 0, 0, 0); +- if (ret != 0) +- dev_err(is->dev, "%s: SMC_SC_LOCKED_NOP failed %d", +- __func__, ret); +- +- dev_dbg(is->dev, "%s: done\n", __func__); +-} +- +-static void trusty_irq_work_func(struct work_struct *work) +-{ +- int ret; +- struct trusty_irq_state *is = +- container_of(work, struct trusty_irq_work, work)->is; +- +- dev_dbg(is->dev, "%s\n", __func__); +- +- do { +- ret = trusty_std_call32(is->trusty_dev, SMC_SC_NOP, 0, 0, 0); +- } while (ret == SM_ERR_NOP_INTERRUPTED); +- +- if (ret != SM_ERR_NOP_DONE) +- dev_err(is->dev, "%s: SMC_SC_NOP failed %d", __func__, ret); +- +- dev_dbg(is->dev, "%s: done\n", __func__); +-} +- + irqreturn_t trusty_irq_handler(int irq, void *data) + { + struct trusty_irq *trusty_irq = data; + struct trusty_irq_state *is = trusty_irq->is; +- struct trusty_irq_work *trusty_irq_work = this_cpu_ptr(is->irq_work); + struct trusty_irq_irqset *irqset; + + dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n", +@@ -248,7 +205,7 @@ irqreturn_t trusty_irq_handler(int irq, void *data) + } + spin_unlock(&is->normal_irqs_lock); + +- queue_work_on(raw_smp_processor_id(), is->wq, &trusty_irq_work->work); ++ trusty_enqueue_nop(is->trusty_dev, NULL); + + dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq); + +@@ -582,10 +539,8 @@ static int trusty_irq_probe(struct platform_device *pdev) + { + int ret; + int irq; +- unsigned int cpu; + unsigned long irq_flags; + struct trusty_irq_state *is; +- work_func_t work_func; + + ret = trusty_check_cpuid(NULL); + if (ret < 0) { +@@ -601,19 +556,8 @@ static int trusty_irq_probe(struct platform_device *pdev) + goto err_alloc_is; + } + +- is->wq = alloc_workqueue("trusty-irq-wq", WQ_CPU_INTENSIVE, 0); +- if (!is->wq) { +- ret = -ENOMEM; +- goto err_alloc_wq; +- } +- + is->dev = &pdev->dev; + is->trusty_dev = is->dev->parent; +- is->irq_work = alloc_percpu(struct 
trusty_irq_work); +- if (!is->irq_work) { +- ret = -ENOMEM; +- goto err_alloc_irq_work; +- } + spin_lock_init(&is->normal_irqs_lock); + is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset); + if (!is->percpu_irqs) { +@@ -632,21 +576,6 @@ static int trusty_irq_probe(struct platform_device *pdev) + goto err_trusty_call_notifier_register; + } + +- if (trusty_get_api_version(is->trusty_dev) < TRUSTY_API_VERSION_SMP) +- work_func = trusty_irq_work_func_locked_nop; +- else +- work_func = trusty_irq_work_func; +- +- for_each_possible_cpu(cpu) { +- struct trusty_irq_work *trusty_irq_work; +- +- if (cpu >= 32) +- return -EINVAL; +- trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); +- trusty_irq_work->is = is; +- INIT_WORK(&trusty_irq_work->work, work_func); +- } +- + for (irq = 0; irq >= 0;) + irq = trusty_irq_init_one(is, irq, false); + +@@ -670,18 +599,6 @@ static int trusty_irq_probe(struct platform_device *pdev) + err_trusty_call_notifier_register: + free_percpu(is->percpu_irqs); + err_alloc_pending_percpu_irqs: +- for_each_possible_cpu(cpu) { +- struct trusty_irq_work *trusty_irq_work; +- +- if (cpu >= 32) +- return -EINVAL; +- trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); +- flush_work(&trusty_irq_work->work); +- } +- free_percpu(is->irq_work); +-err_alloc_irq_work: +- destroy_workqueue(is->wq); +-err_alloc_wq: + kfree(is); + err_alloc_is: + return ret; +@@ -689,7 +606,6 @@ static int trusty_irq_probe(struct platform_device *pdev) + + static int trusty_irq_remove(struct platform_device *pdev) + { +- unsigned int cpu; + unsigned long irq_flags; + struct trusty_irq_state *is = platform_get_drvdata(pdev); + +@@ -705,16 +621,6 @@ static int trusty_irq_remove(struct platform_device *pdev) + trusty_call_notifier_unregister(is->trusty_dev, + &is->trusty_call_notifier); + free_percpu(is->percpu_irqs); +- for_each_possible_cpu(cpu) { +- struct trusty_irq_work *trusty_irq_work; +- +- if (cpu >= 32) +- return -EINVAL; +- trusty_irq_work = per_cpu_ptr(is->irq_work, cpu); 
+- flush_work(&trusty_irq_work->work); +- } +- free_percpu(is->irq_work); +- destroy_workqueue(is->wq); + kfree(is); + + return 0; +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 7e55453ae5f5..4aa4a89799dc 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -27,12 +27,24 @@ + + #define TRUSTY_VMCALL_SMC 0x74727500 + ++struct trusty_state; ++ ++struct trusty_work { ++ struct trusty_state *ts; ++ struct work_struct work; ++}; ++ + struct trusty_state { + struct mutex smc_lock; + struct atomic_notifier_head notifier; + struct completion cpu_idle_completion; + char *version_str; + u32 api_version; ++ struct device *dev; ++ struct workqueue_struct *nop_wq; ++ struct trusty_work __percpu *nop_works; ++ struct list_head nop_queue; ++ spinlock_t nop_lock; /* protects nop_queue */ + }; + + struct trusty_smc_interface { +@@ -363,9 +375,116 @@ static int trusty_init_api_version(struct trusty_state *s, struct device *dev) + return 0; + } + ++static bool dequeue_nop(struct trusty_state *s, u32 *args) ++{ ++ unsigned long flags; ++ struct trusty_nop *nop = NULL; ++ ++ spin_lock_irqsave(&s->nop_lock, flags); ++ if (!list_empty(&s->nop_queue)) { ++ nop = list_first_entry(&s->nop_queue, ++ struct trusty_nop, node); ++ list_del_init(&nop->node); ++ args[0] = nop->args[0]; ++ args[1] = nop->args[1]; ++ args[2] = nop->args[2]; ++ } else { ++ args[0] = 0; ++ args[1] = 0; ++ args[2] = 0; ++ } ++ spin_unlock_irqrestore(&s->nop_lock, flags); ++ return nop; ++} ++ ++static void locked_nop_work_func(struct work_struct *work) ++{ ++ int ret; ++ struct trusty_work *tw = container_of(work, struct trusty_work, work); ++ struct trusty_state *s = tw->ts; ++ ++ dev_dbg(s->dev, "%s\n", __func__); ++ ++ ret = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0); ++ if (ret != 0) ++ dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d", ++ __func__, ret); ++ dev_dbg(s->dev, "%s: done\n", __func__); ++} ++ ++static void nop_work_func(struct work_struct *work) 
++{ ++ int ret; ++ bool next; ++ u32 args[3]; ++ struct trusty_work *tw = container_of(work, struct trusty_work, work); ++ struct trusty_state *s = tw->ts; ++ ++ dev_dbg(s->dev, "%s:\n", __func__); ++ ++ dequeue_nop(s, args); ++ do { ++ dev_dbg(s->dev, "%s: %x %x %x\n", ++ __func__, args[0], args[1], args[2]); ++ ++ ret = trusty_std_call32(s->dev, SMC_SC_NOP, ++ args[0], args[1], args[2]); ++ ++ next = dequeue_nop(s, args); ++ ++ if (ret == SM_ERR_NOP_INTERRUPTED) ++ next = true; ++ else if (ret != SM_ERR_NOP_DONE) ++ dev_err(s->dev, "%s: SMC_SC_NOP failed %d", ++ __func__, ret); ++ } while (next); ++ ++ dev_dbg(s->dev, "%s: done\n", __func__); ++} ++ ++void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop) ++{ ++ unsigned long flags; ++ struct trusty_work *tw; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ preempt_disable(); ++ tw = this_cpu_ptr(s->nop_works); ++ if (nop) { ++ WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP); ++ ++ spin_lock_irqsave(&s->nop_lock, flags); ++ if (list_empty(&nop->node)) ++ list_add_tail(&nop->node, &s->nop_queue); ++ spin_unlock_irqrestore(&s->nop_lock, flags); ++ } ++ queue_work(s->nop_wq, &tw->work); ++ preempt_enable(); ++} ++EXPORT_SYMBOL(trusty_enqueue_nop); ++ ++void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop) ++{ ++ unsigned long flags; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (WARN_ON(!nop)) ++ return; ++ ++ spin_lock_irqsave(&s->nop_lock, flags); ++ if (!list_empty(&nop->node)) ++ list_del_init(&nop->node); ++ spin_unlock_irqrestore(&s->nop_lock, flags); ++} ++EXPORT_SYMBOL(trusty_dequeue_nop); ++ ++ ++ + static int trusty_probe(struct platform_device *pdev) + { + int ret; ++ unsigned int cpu; ++ work_func_t work_func; + struct trusty_state *s; + struct device_node *node = pdev->dev.of_node; + +@@ -385,6 +504,11 @@ static int trusty_probe(struct platform_device *pdev) + ret = -ENOMEM; + goto err_allocate_state; + 
} ++ ++ s->dev = &pdev->dev; ++ spin_lock_init(&s->nop_lock); ++ INIT_LIST_HEAD(&s->nop_queue); ++ + mutex_init(&s->smc_lock); + ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); + init_completion(&s->cpu_idle_completion); +@@ -396,8 +520,43 @@ static int trusty_probe(struct platform_device *pdev) + if (ret < 0) + goto err_api_version; + ++ s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0); ++ if (!s->nop_wq) { ++ ret = -ENODEV; ++ dev_err(&pdev->dev, "Failed create trusty-nop-wq\n"); ++ goto err_create_nop_wq; ++ } ++ ++ s->nop_works = alloc_percpu(struct trusty_work); ++ if (!s->nop_works) { ++ ret = -ENOMEM; ++ dev_err(&pdev->dev, "Failed to allocate works\n"); ++ goto err_alloc_works; ++ } ++ ++ if (s->api_version < TRUSTY_API_VERSION_SMP) ++ work_func = locked_nop_work_func; ++ else ++ work_func = nop_work_func; ++ ++ for_each_possible_cpu(cpu) { ++ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); ++ ++ tw->ts = s; ++ INIT_WORK(&tw->work, work_func); ++ } ++ + return 0; + ++err_alloc_works: ++ for_each_possible_cpu(cpu) { ++ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); ++ ++ flush_work(&tw->work); ++ } ++ free_percpu(s->nop_works); ++ destroy_workqueue(s->nop_wq); ++err_create_nop_wq: + err_api_version: + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); +@@ -412,11 +571,21 @@ static int trusty_probe(struct platform_device *pdev) + + static int trusty_remove(struct platform_device *pdev) + { ++ unsigned int cpu; + struct trusty_state *s = platform_get_drvdata(pdev); + + dev_dbg(&(pdev->dev), "%s() is called\n", __func__); + + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); ++ ++ for_each_possible_cpu(cpu) { ++ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); ++ ++ flush_work(&tw->work); ++ } ++ free_percpu(s->nop_works); ++ destroy_workqueue(s->nop_wq); ++ + mutex_destroy(&s->smc_lock); + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); +diff --git 
a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index 1160890a3d90..fc98b3e5b2e7 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -120,7 +120,8 @@ + */ + #define TRUSTY_API_VERSION_RESTART_FIQ (1) + #define TRUSTY_API_VERSION_SMP (2) +-#define TRUSTY_API_VERSION_CURRENT (2) ++#define TRUSTY_API_VERSION_SMP_NOP (3) ++#define TRUSTY_API_VERSION_CURRENT (3) + #define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11) + + /* TRUSTED_OS entity calls */ +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index aba204b9ff3a..eaa833bdea73 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -69,6 +69,23 @@ int trusty_call32_mem_buf(struct device *dev, u32 smcnr, + struct page *page, u32 size, + pgprot_t pgprot); + ++struct trusty_nop { ++ struct list_head node; ++ u32 args[3]; ++}; ++ ++static inline void trusty_nop_init(struct trusty_nop *nop, ++ u32 arg0, u32 arg1, u32 arg2) { ++ INIT_LIST_HEAD(&nop->node); ++ nop->args[0] = arg0; ++ nop->args[1] = arg1; ++ nop->args[2] = arg2; ++} ++ ++void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop); ++void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop); ++ ++ + /* CPUID leaf 0x3 is used because eVMM will trap this leaf.*/ + #define EVMM_SIGNATURE_CORP 0x43544E49 /* "INTC", edx */ + #define EVMM_SIGNATURE_VMM 0x4D4D5645 /* "EVMM", ecx */ +-- +2.17.1 + diff --git a/patches/0039-vhm-cleanup-update-one-field-name-in-vhm.acrn b/patches/0039-vhm-cleanup-update-one-field-name-in-vhm.acrn new file mode 100644 index 0000000000..96b356044a --- /dev/null +++ b/patches/0039-vhm-cleanup-update-one-field-name-in-vhm.acrn @@ -0,0 +1,31 @@ +From f19729dbc95e0c0d1584a2eb47410173c5f1ed6d Mon Sep 17 00:00:00 2001 +From: Yin Fengwei +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 039/150] vhm cleanup: update one field name in vhm + +Change-Id: Ib125147ff72b566b183d20496251fa74244d7970 
+Tracked-On: 212688 +Signed-off-by: Yin Fengwei +Reviewed-on: +Reviewed-by: Dong, Eddie +Tested-by: Dong, Eddie +--- + include/linux/vhm/vhm_ioctl_defs.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 494213a9f9f0..9f2f21acbbe3 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -123,7 +123,7 @@ struct ic_ptdev_irq { + struct { + uint32_t virt_pin; /* IN: virtual IOAPIC pin */ + uint32_t phys_pin; /* IN: physical IOAPIC pin */ +- uint32_t pic_pin; /* IN: pin from PIC? */ ++ uint32_t is_pic_pin; /* IN: pin from PIC? */ + } intx; + struct { + /* IN: vector count of MSI/MSIX, +-- +2.17.1 + diff --git a/patches/0040-ASoC-Intel-Remove-obsolete-firmware-fields.audio b/patches/0040-ASoC-Intel-Remove-obsolete-firmware-fields.audio new file mode 100644 index 0000000000..160fda0da0 --- /dev/null +++ b/patches/0040-ASoC-Intel-Remove-obsolete-firmware-fields.audio @@ -0,0 +1,300 @@ +From 945a1806cd6a4a599662e50215141880293589ea Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Sun, 11 Aug 2019 20:36:54 +0200 +Subject: [PATCH 040/193] ASoC: Intel: Remove obsolete firmware fields + +FW filename fields are now deprecated in favour of ones coming from +platform descriptors. This aligns with paradigm of FW being platform +specific, not board specific. +Any remaining deprecated survivors of the precedding tidal wave are +removed here too. 
+ +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/common/soc-acpi-intel-bxt-match.c | 2 -- + sound/soc/intel/common/soc-acpi-intel-byt-match.c | 2 -- + sound/soc/intel/common/soc-acpi-intel-cnl-match.c | 1 - + sound/soc/intel/common/soc-acpi-intel-glk-match.c | 3 --- + sound/soc/intel/common/soc-acpi-intel-hda-match.c | 2 -- + .../soc/intel/common/soc-acpi-intel-hsw-bdw-match.c | 4 ---- + sound/soc/intel/common/soc-acpi-intel-icl-match.c | 1 - + sound/soc/intel/common/soc-acpi-intel-kbl-match.c | 12 ------------ + sound/soc/intel/common/soc-acpi-intel-skl-match.c | 3 --- + sound/soc/intel/common/sst-dsp-priv.h | 1 - + sound/soc/intel/skylake/skl.h | 1 - + 11 files changed, 32 deletions(-) + +diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c +index 4a5adae1d785..eda799e49113 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c +@@ -50,14 +50,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = { + { + .id = "INT343A", + .drv_name = "bxt_alc298s_i2s", +- .fw_filename = "intel/dsp_fw_bxtn.bin", + .sof_fw_filename = "sof-apl.ri", + .sof_tplg_filename = "sof-apl-rt298.tplg", + }, + { + .id = "DLGS7219", + .drv_name = "bxt_da7219_max98357a", +- .fw_filename = "intel/dsp_fw_bxtn.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &bxt_codecs, + .sof_fw_filename = "sof-apl.ri", +diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c +index 1cc801ba92eb..4d2ba663ee33 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c +@@ -124,12 +124,10 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[] = { + { + .id = "10EC5640", + .drv_name = "byt-rt5640", +- .fw_filename = "intel/fw_sst_0f28.bin-48kHz_i2s_master", + }, + { + .id = "193C9890", + .drv_name = "byt-max98090", 
+- .fw_filename = "intel/fw_sst_0f28.bin-48kHz_i2s_master", + }, + {} + }; +diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c +index 985aa366c9e8..1d2d87400195 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c +@@ -28,7 +28,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = { + { + .id = "INT34C2", + .drv_name = "cnl_rt274", +- .fw_filename = "intel/dsp_fw_cnl.bin", + .pdata = &cnl_pdata, + .sof_fw_filename = "sof-cnl.ri", + .sof_tplg_filename = "sof-cnl-rt274.tplg", +diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c +index 60dea358fa04..370487d13c85 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c +@@ -18,14 +18,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = { + { + .id = "INT343A", + .drv_name = "glk_alc298s_i2s", +- .fw_filename = "intel/dsp_fw_glk.bin", + .sof_fw_filename = "sof-glk.ri", + .sof_tplg_filename = "sof-glk-alc298.tplg", + }, + { + .id = "DLGS7219", + .drv_name = "glk_da7219_max98357a", +- .fw_filename = "intel/dsp_fw_glk.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &glk_codecs, + .sof_fw_filename = "sof-glk.ri", +@@ -34,7 +32,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = { + { + .id = "10EC5682", + .drv_name = "glk_rt5682_max98357a", +- .fw_filename = "intel/dsp_fw_glk.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &glk_codecs, + .sof_fw_filename = "sof-glk.ri", +diff --git a/sound/soc/intel/common/soc-acpi-intel-hda-match.c b/sound/soc/intel/common/soc-acpi-intel-hda-match.c +index cc972d2ac691..39827d2e8634 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-hda-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-hda-match.c +@@ -19,8 +19,6 @@ struct snd_soc_acpi_mach 
snd_soc_acpi_intel_hda_machines[] = { + /* .id is not used in this file */ + .drv_name = "skl_hda_dsp_generic", + +- /* .fw_filename is dynamically set in skylake driver */ +- + /* .sof_fw_filename is dynamically set in sof/intel driver */ + + .sof_tplg_filename = "sof-hda-generic.tplg", +diff --git a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c +index 34eb0baaa951..6b113c32aff0 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c +@@ -13,7 +13,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[] = { + { + .id = "INT33CA", + .drv_name = "haswell-audio", +- .fw_filename = "intel/IntcSST1.bin", + .sof_fw_filename = "sof-hsw.ri", + .sof_tplg_filename = "sof-hsw.tplg", + }, +@@ -25,21 +24,18 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[] = { + { + .id = "INT343A", + .drv_name = "broadwell-audio", +- .fw_filename = "intel/IntcSST2.bin", + .sof_fw_filename = "sof-bdw.ri", + .sof_tplg_filename = "sof-bdw-rt286.tplg", + }, + { + .id = "RT5677CE", + .drv_name = "bdw-rt5677", +- .fw_filename = "intel/IntcSST2.bin", + .sof_fw_filename = "sof-bdw.ri", + .sof_tplg_filename = "sof-bdw-rt5677.tplg", + }, + { + .id = "INT33CA", + .drv_name = "haswell-audio", +- .fw_filename = "intel/IntcSST2.bin", + .sof_fw_filename = "sof-bdw.ri", + .sof_tplg_filename = "sof-bdw-rt5640.tplg", + }, +diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c +index 38977669b576..04a17da96f6a 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c +@@ -18,7 +18,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[] = { + { + .id = "INT34C2", + .drv_name = "icl_rt274", +- .fw_filename = "intel/dsp_fw_icl.bin", + .pdata = &icl_pdata, + .sof_fw_filename = "sof-icl.ri", + .sof_tplg_filename = 
"sof-icl-rt274.tplg", +diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c +index e200baa11011..fe82d5472aa3 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c +@@ -46,12 +46,10 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = { + { + .id = "INT343A", + .drv_name = "kbl_alc286s_i2s", +- .fw_filename = "intel/dsp_fw_kbl.bin", + }, + { + .id = "INT343B", + .drv_name = "kbl_n88l25_s4567", +- .fw_filename = "intel/dsp_fw_kbl.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &kbl_codecs, + .pdata = &skl_dmic_data, +@@ -59,7 +57,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = { + { + .id = "MX98357A", + .drv_name = "kbl_n88l25_m98357a", +- .fw_filename = "intel/dsp_fw_kbl.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &kbl_codecs, + .pdata = &skl_dmic_data, +@@ -67,7 +64,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = { + { + .id = "MX98927", + .drv_name = "kbl_r5514_5663_max", +- .fw_filename = "intel/dsp_fw_kbl.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &kbl_5663_5514_codecs, + .pdata = &skl_dmic_data, +@@ -75,7 +71,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = { + { + .id = "MX98927", + .drv_name = "kbl_rt5663_m98927", +- .fw_filename = "intel/dsp_fw_kbl.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &kbl_poppy_codecs, + .pdata = &skl_dmic_data, +@@ -83,12 +78,10 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = { + { + .id = "10EC5663", + .drv_name = "kbl_rt5663", +- .fw_filename = "intel/dsp_fw_kbl.bin", + }, + { + .id = "DLGS7219", + .drv_name = "kbl_da7219_max98357a", +- .fw_filename = "intel/dsp_fw_kbl.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &kbl_7219_98357_codecs, + .pdata = &skl_dmic_data, +@@ -96,7 +89,6 @@ struct snd_soc_acpi_mach 
snd_soc_acpi_intel_kbl_machines[] = { + { + .id = "DLGS7219", + .drv_name = "kbl_da7219_max98927", +- .fw_filename = "intel/dsp_fw_kbl.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &kbl_7219_98927_codecs, + .pdata = &skl_dmic_data +@@ -104,17 +96,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = { + { + .id = "10EC5660", + .drv_name = "kbl_rt5660", +- .fw_filename = "intel/dsp_fw_kbl.bin", + }, + { + .id = "10EC3277", + .drv_name = "kbl_rt5660", +- .fw_filename = "intel/dsp_fw_kbl.bin", + }, + { + .id = "DLGS7219", + .drv_name = "kbl_da7219_max98373", +- .fw_filename = "intel/dsp_fw_kbl.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &kbl_7219_98373_codecs, + .pdata = &skl_dmic_data +@@ -122,7 +111,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = { + { + .id = "MX98373", + .drv_name = "kbl_max98373", +- .fw_filename = "intel/dsp_fw_kbl.bin", + .pdata = &skl_dmic_data + }, + {}, +diff --git a/sound/soc/intel/common/soc-acpi-intel-skl-match.c b/sound/soc/intel/common/soc-acpi-intel-skl-match.c +index 42fa40a8d932..ec969044706c 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-skl-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-skl-match.c +@@ -21,12 +21,10 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_skl_machines[] = { + { + .id = "INT343A", + .drv_name = "skl_alc286s_i2s", +- .fw_filename = "intel/dsp_fw_release.bin", + }, + { + .id = "INT343B", + .drv_name = "skl_n88l25_s4567", +- .fw_filename = "intel/dsp_fw_release.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &skl_codecs, + .pdata = &skl_dmic_data, +@@ -34,7 +32,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_skl_machines[] = { + { + .id = "MX98357A", + .drv_name = "skl_n88l25_m98357a", +- .fw_filename = "intel/dsp_fw_release.bin", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &skl_codecs, + .pdata = &skl_dmic_data, +diff --git a/sound/soc/intel/common/sst-dsp-priv.h 
b/sound/soc/intel/common/sst-dsp-priv.h +index a4628a89d47d..1a208ef65fa8 100644 +--- a/sound/soc/intel/common/sst-dsp-priv.h ++++ b/sound/soc/intel/common/sst-dsp-priv.h +@@ -306,7 +306,6 @@ struct sst_dsp { + int sst_state; + struct skl_cl_dev cl_dev; + u32 intr_status; +- const struct firmware *fw; + struct snd_dma_buffer dmab; + }; + +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index 19e0dbb8f9d1..a7401d178183 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -76,7 +76,6 @@ struct skl_dev { + struct list_head ppl_list; + struct list_head bind_list; + +- const char *fw_name; + char tplg_name[64]; + unsigned short pci_id; + +-- +2.17.1 + diff --git a/patches/0040-drm-i915-intel_hdmi-use-cec_notifier_conn_-un-register.drm b/patches/0040-drm-i915-intel_hdmi-use-cec_notifier_conn_-un-register.drm new file mode 100644 index 0000000000..99c37aef4b --- /dev/null +++ b/patches/0040-drm-i915-intel_hdmi-use-cec_notifier_conn_-un-register.drm @@ -0,0 +1,64 @@ +From 0764b6554af8e26a9221d25f124535af519364ff Mon Sep 17 00:00:00 2001 +From: Dariusz Marcinkiewicz +Date: Wed, 14 Aug 2019 12:45:00 +0200 +Subject: [PATCH 040/690] drm/i915/intel_hdmi: use + cec_notifier_conn_(un)register +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Use the new cec_notifier_conn_(un)register() functions to +(un)register the notifier for the HDMI connector, and fill in +the cec_connector_info. 
+ +Signed-off-by: Dariusz Marcinkiewicz +Signed-off-by: Hans Verkuil +Tested-by: Hans Verkuil +Reviewed-by: Ville Syrjälä +Signed-off-by: Hans Verkuil +Link: https://patchwork.freedesktop.org/patch/msgid/20190814104520.6001-3-darekm@google.com +--- + drivers/gpu/drm/i915/display/intel_hdmi.c | 13 +++++++++---- + 1 file changed, 9 insertions(+), 4 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c +index e02f0faecf02..0f5a0c618e46 100644 +--- a/drivers/gpu/drm/i915/display/intel_hdmi.c ++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c +@@ -2751,8 +2751,9 @@ intel_hdmi_connector_register(struct drm_connector *connector) + + static void intel_hdmi_destroy(struct drm_connector *connector) + { +- if (intel_attached_hdmi(connector)->cec_notifier) +- cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier); ++ struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier; ++ ++ cec_notifier_conn_unregister(n); + + intel_connector_destroy(connector); + } +@@ -3067,6 +3068,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, + struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum port port = intel_encoder->port; ++ struct cec_connector_info conn_info; + + DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", + port_name(port)); +@@ -3119,8 +3121,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, + I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); + } + +- intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev, +- port_identifier(port)); ++ cec_fill_conn_info_from_drm(&conn_info, connector); ++ ++ intel_hdmi->cec_notifier = ++ cec_notifier_conn_register(dev->dev, port_identifier(port), ++ &conn_info); + if (!intel_hdmi->cec_notifier) + DRM_DEBUG_KMS("CEC notifier get failed\n"); + } +-- +2.17.1 + diff --git a/patches/0040-mei-add-extended-header.security 
b/patches/0040-mei-add-extended-header.security new file mode 100644 index 0000000000..6a6a70dbf9 --- /dev/null +++ b/patches/0040-mei-add-extended-header.security @@ -0,0 +1,368 @@ +From 793d9dfeec2d0a0aa52f87e70924b95c962845de Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Mon, 18 Jun 2018 18:11:40 +0300 +Subject: [PATCH 40/65] mei: add extended header + +Extend header, beyond existing 4 bytes mei message header, +currently containing 8bit vtag (virtual tag). + +Change-Id: I125b0bade2d600f4f96dd4dcbf8fb3e81fbc7e95 +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +--- + drivers/misc/mei/client.c | 76 ++++++++++++++++++++++-------------- + drivers/misc/mei/hbm.c | 14 +++---- + drivers/misc/mei/hw.h | 17 +++++++- + drivers/misc/mei/interrupt.c | 20 +++++++++- + drivers/misc/mei/mei_dev.h | 7 +++- + 5 files changed, 89 insertions(+), 45 deletions(-) + +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index 1e3edbbacb1e..c133df526fd8 100644 +--- a/drivers/misc/mei/client.c ++++ b/drivers/misc/mei/client.c +@@ -376,6 +376,7 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, + cb->cl = cl; + cb->buf_idx = 0; + cb->fop_type = type; ++ cb->vtag = 0; + return cb; + } + +@@ -1521,16 +1522,29 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) + * + * @mei_hdr: mei message header + * @cb: message callback structure ++ * ++ * Return: header length in bytes + */ +-static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb) ++static size_t mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, ++ struct mei_cl_cb *cb) + { ++ size_t hdr_len = sizeof(*mei_hdr); ++ struct mei_msg_extd_hdr *ext_hdr; ++ ++ memset(mei_hdr, 0, sizeof(*mei_hdr)); + mei_hdr->host_addr = mei_cl_host_addr(cb->cl); + mei_hdr->me_addr = mei_cl_me_id(cb->cl); +- mei_hdr->length = 0; +- mei_hdr->reserved = 0; +- mei_hdr->msg_complete = 0; +- mei_hdr->dma_ring = 0; + mei_hdr->internal = cb->internal; ++ ++ if (cb->vtag 
&& cb->buf_idx == 0) { ++ ext_hdr = (struct mei_msg_extd_hdr *)mei_hdr->extension; ++ memset(ext_hdr, 0, sizeof(*ext_hdr)); ++ mei_hdr->extended = 1; ++ ext_hdr->vtag = cb->vtag; ++ hdr_len += sizeof(*ext_hdr); ++ } ++ ++ return hdr_len; + } + + /** +@@ -1548,8 +1562,9 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + { + struct mei_device *dev; + struct mei_msg_data *buf; +- struct mei_msg_hdr mei_hdr; +- size_t hdr_len = sizeof(mei_hdr); ++ u32 __hdr[MEI_MSG_HDR_MAX]; ++ struct mei_msg_hdr *mei_hdr = (void *)__hdr; ++ size_t hdr_len; + size_t len; + size_t hbuf_len, dr_len; + int hbuf_slots; +@@ -1589,36 +1604,36 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + dr_slots = mei_dma_ring_empty_slots(dev); + dr_len = mei_slots2data(dr_slots); + +- mei_msg_hdr_init(&mei_hdr, cb); ++ hdr_len = mei_msg_hdr_init(mei_hdr, cb); + + /** + * Split the message only if we can write the whole host buffer + * otherwise wait for next time the host buffer is empty. + */ + if (len + hdr_len <= hbuf_len) { +- mei_hdr.length = len; +- mei_hdr.msg_complete = 1; ++ mei_hdr->length = len; ++ mei_hdr->msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { +- mei_hdr.dma_ring = 1; ++ mei_hdr->dma_ring = 1; + if (len > dr_len) + len = dr_len; + else +- mei_hdr.msg_complete = 1; ++ mei_hdr->msg_complete = 1; + +- mei_hdr.length = sizeof(dma_len); ++ mei_hdr->length = sizeof(dma_len); + dma_len = len; + data = &dma_len; + } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) { + len = hbuf_len - hdr_len; +- mei_hdr.length = len; ++ mei_hdr->length = len; + } else { + return 0; + } + +- if (mei_hdr.dma_ring) ++ if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data + cb->buf_idx, len); + +- rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length); ++ rets = mei_write_message(dev, mei_hdr, hdr_len, data, mei_hdr->length); + if (rets) + goto err; + +@@ -1633,7 +1648,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct 
mei_cl_cb *cb, + } + } + +- if (mei_hdr.msg_complete) ++ if (mei_hdr->msg_complete) + list_move_tail(&cb->list, &dev->write_waiting_list); + + return 0; +@@ -1657,8 +1672,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + { + struct mei_device *dev; + struct mei_msg_data *buf; +- struct mei_msg_hdr mei_hdr; +- size_t hdr_len = sizeof(mei_hdr); ++ u32 __hdr[MEI_MSG_HDR_MAX]; ++ struct mei_msg_hdr *mei_hdr = (void *)__hdr; ++ size_t hdr_len; + size_t len, hbuf_len, dr_len; + int hbuf_slots; + u32 dr_slots; +@@ -1698,7 +1714,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + if (rets < 0) + goto err; + +- mei_msg_hdr_init(&mei_hdr, cb); ++ hdr_len = mei_msg_hdr_init(mei_hdr, cb); + + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); +@@ -1723,28 +1739,28 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + dr_len = mei_slots2data(dr_slots); + + if (len + hdr_len <= hbuf_len) { +- mei_hdr.length = len; +- mei_hdr.msg_complete = 1; ++ mei_hdr->length = len; ++ mei_hdr->msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { +- mei_hdr.dma_ring = 1; ++ mei_hdr->dma_ring = 1; + if (len > dr_len) + len = dr_len; + else +- mei_hdr.msg_complete = 1; ++ mei_hdr->msg_complete = 1; + +- mei_hdr.length = sizeof(dma_len); ++ mei_hdr->length = sizeof(dma_len); + dma_len = len; + data = &dma_len; + } else { + len = hbuf_len - hdr_len; +- mei_hdr.length = len; ++ mei_hdr->length = len; + } + +- if (mei_hdr.dma_ring) ++ if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data, len); + +- rets = mei_write_message(dev, &mei_hdr, hdr_len, +- data, mei_hdr.length); ++ rets = mei_write_message(dev, mei_hdr, hdr_len, ++ data, mei_hdr->length); + if (rets) + goto err; + +@@ -1758,7 +1774,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + len = buf->size; + + out: +- if (mei_hdr.msg_complete) ++ if (mei_hdr->msg_complete) + mei_tx_cb_enqueue(cb, 
&dev->write_waiting_list); + else + mei_tx_cb_enqueue(cb, &dev->write_list); +diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c +index d71800a2f1d8..c7c2f6b10ff9 100644 +--- a/drivers/misc/mei/hbm.c ++++ b/drivers/misc/mei/hbm.c +@@ -125,19 +125,15 @@ void mei_hbm_reset(struct mei_device *dev) + /** + * mei_hbm_hdr - construct hbm header + * +- * @hdr: hbm header ++ * @mei_hdr: hbm header + * @length: payload length + */ + +-static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) ++static inline void mei_hbm_hdr(struct mei_msg_hdr *mei_hdr, size_t length) + { +- hdr->host_addr = 0; +- hdr->me_addr = 0; +- hdr->length = length; +- hdr->msg_complete = 1; +- hdr->dma_ring = 0; +- hdr->reserved = 0; +- hdr->internal = 0; ++ memset(mei_hdr, 0, sizeof(*mei_hdr)); ++ mei_hdr->length = length; ++ mei_hdr->msg_complete = 1; + } + + /** +diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h +index d9ea1b2cbd51..e1dc73de944e 100644 +--- a/drivers/misc/mei/hw.h ++++ b/drivers/misc/mei/hw.h +@@ -201,6 +201,17 @@ enum mei_cl_disconnect_status { + MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS + }; + ++/** ++ * struct mei_msg_extd_hdr - mei extended header ++ * ++ * @vtag: virtual tag. ++ * @reserved: reserved. 
++ */ ++struct mei_msg_extd_hdr { ++ u8 vtag; ++ u8 reserved[3]; ++} __packed; ++ + /** + * struct mei_msg_hdr - MEI BUS Interface Section + * +@@ -208,6 +219,7 @@ enum mei_cl_disconnect_status { + * @host_addr: host address + * @length: message length + * @reserved: reserved ++ * @extended: message has extended header + * @dma_ring: message is on dma ring + * @internal: message is internal + * @msg_complete: last packet of the message +@@ -217,14 +229,15 @@ struct mei_msg_hdr { + u32 me_addr:8; + u32 host_addr:8; + u32 length:9; +- u32 reserved:4; ++ u32 reserved:3; ++ u32 extended:1; + u32 dma_ring:1; + u32 internal:1; + u32 msg_complete:1; + u32 extension[0]; + } __packed; + +-#define MEI_MSG_HDR_MAX 2 ++#define MEI_MSG_HDR_MAX 3 + + struct mei_bus_message { + u8 hbm_cmd; +diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c +index c70a8c74cc57..32757cd6529b 100644 +--- a/drivers/misc/mei/interrupt.c ++++ b/drivers/misc/mei/interrupt.c +@@ -90,6 +90,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, + { + struct mei_device *dev = cl->dev; + struct mei_cl_cb *cb; ++ struct mei_msg_extd_hdr *ext_hdr = (void *)mei_hdr->extension; + size_t buf_sz; + u32 length; + +@@ -105,13 +106,24 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, + list_add_tail(&cb->list, &cl->rd_pending); + } + ++ if (mei_hdr->extended) { ++ cl_dbg(dev, cl, "vtag: %d\n", ext_hdr->vtag); ++ if (cb->vtag && cb->vtag != ext_hdr->vtag) { ++ cl_err(dev, cl, "mismatched tag: %d != %d\n", ++ cb->vtag, ext_hdr->vtag); ++ cb->status = -EPROTO; ++ goto discard; ++ } ++ cb->vtag = ext_hdr->vtag; ++ } ++ + if (!mei_cl_is_connected(cl)) { + cl_dbg(dev, cl, "not connected\n"); + cb->status = -ENODEV; + goto discard; + } + +- length = mei_hdr->dma_ring ? mei_hdr->extension[0] : mei_hdr->length; ++ length = mei_hdr->dma_ring ? 
mei_hdr->extension[1] : mei_hdr->length; + + buf_sz = length + cb->buf_idx; + /* catch for integer overflow */ +@@ -292,9 +304,13 @@ int mei_irq_read_handler(struct mei_device *dev, + goto end; + } + +- if (mei_hdr->dma_ring) { ++ if (mei_hdr->extended) { + dev->rd_msg_hdr[1] = mei_read_hdr(dev); + (*slots)--; ++ } ++ if (mei_hdr->dma_ring) { ++ dev->rd_msg_hdr[2] = mei_read_hdr(dev); ++ (*slots)--; + mei_hdr->length = 0; + } + +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h +index 5c92b207810b..5359ea8f93cb 100644 +--- a/drivers/misc/mei/mei_dev.h ++++ b/drivers/misc/mei/mei_dev.h +@@ -174,6 +174,7 @@ struct mei_cl; + * @fop_type: file operation type + * @buf: buffer for data associated with the callback + * @buf_idx: last read index ++ * @vtag: vm tag + * @fp: pointer to file structure + * @status: io status of the cb + * @internal: communication between driver and FW flag +@@ -185,6 +186,7 @@ struct mei_cl_cb { + enum mei_cb_file_ops fop_type; + struct mei_msg_data buf; + size_t buf_idx; ++ u8 vtag; + const struct file *fp; + int status; + u32 internal:1; +@@ -746,10 +748,11 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {} + int mei_register(struct mei_device *dev, struct device *parent); + void mei_deregister(struct mei_device *dev); + +-#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d internal=%1d comp=%1d" ++#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d ext=%1d internal=%1d comp=%1d" + #define MEI_HDR_PRM(hdr) \ + (hdr)->host_addr, (hdr)->me_addr, \ +- (hdr)->length, (hdr)->dma_ring, (hdr)->internal, (hdr)->msg_complete ++ (hdr)->length, (hdr)->dma_ring, (hdr)->extended, \ ++ (hdr)->internal, (hdr)->msg_complete + + ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len); + /** +-- +2.17.1 + diff --git a/patches/0040-net-stmmac-Set-TSN-HW-tunable-after-tsn-setup.connectivity b/patches/0040-net-stmmac-Set-TSN-HW-tunable-after-tsn-setup.connectivity new file mode 100644 index 
0000000000..1c068327e5 --- /dev/null +++ b/patches/0040-net-stmmac-Set-TSN-HW-tunable-after-tsn-setup.connectivity @@ -0,0 +1,77 @@ +From c87ce7f6fd14d5e219ee0d3d9f8d147b93176627 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Thu, 13 Jun 2019 00:20:04 +0800 +Subject: [PATCH 040/108] net: stmmac: Set TSN HW tunable after tsn setup + +TSN HW tunable data for PTP Time Offset Value(PTOV), +Current Time Offset Value(CTOV) and Time Interval Shift +Amount(TILS) are added as platform data. These platform +data are set after tsn setup. + +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 16 ++++++++++++++++ + drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 5 +++++ + include/linux/stmmac.h | 3 +++ + 3 files changed, 24 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 310f62702ebd..0f9e24e2a6d8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2674,6 +2674,22 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) + + stmmac_tsn_hw_setup(priv, priv->hw, priv->dev); + ++ /* Set TSN HW tunable */ ++ if (priv->plat->ptov) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_EST_PTOV, ++ priv->plat->ptov); ++ ++ if (priv->plat->ctov) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_EST_CTOV, ++ priv->plat->ctov); ++ ++ if (priv->plat->tils) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_EST_TILS, ++ priv->plat->tils); ++ + return 0; + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index e18b87b85544..168121240545 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -256,6 +256,11 @@ static int intel_mgbe_common_data(struct 
pci_dev *pdev, + plat->msi_rx_base_vec = 0; + plat->msi_tx_base_vec = 1; + ++ /* TSN HW tunable data */ ++ plat->ctov = 0; ++ plat->ptov = 0; ++ plat->tils = 0; ++ + return 0; + } + +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index f0eca7253577..4d3b6269198c 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -200,5 +200,8 @@ struct plat_stmmacenet_data { + bool vlan_fail_q_en; + u8 vlan_fail_q; + bool speed_2500_en; ++ u32 ptov; ++ u32 ctov; ++ u32 tils; + }; + #endif +-- +2.17.1 + diff --git a/patches/0040-sos-add-a-config-for-VHM.acrn b/patches/0040-sos-add-a-config-for-VHM.acrn new file mode 100644 index 0000000000..7e2ac8b34b --- /dev/null +++ b/patches/0040-sos-add-a-config-for-VHM.acrn @@ -0,0 +1,79 @@ +From 20622bc35b7351508eecd0db731877798012b5e7 Mon Sep 17 00:00:00 2001 +From: Shiqing Gao +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 040/150] sos: add a config for VHM + +seperate the config for ACRN and VHM + +SOS has to enable both CONFIG_ACRN and CONFIG_ACRN_VHM. +UOS only needs to enable CONFIG_ACRN. VHM is not used in UOS. 
+ +Change-Id: I8529771e1943c18d790230533f7a4bcc84966350 +Tracked-On: 224645 +Signed-off-by: Shiqing Gao +Reviewed-on: +--- + drivers/Kconfig | 2 ++ + drivers/Makefile | 2 +- + drivers/char/Makefile | 2 +- + drivers/vhm/Kconfig | 18 ++++++++++++++++++ + 4 files changed, 22 insertions(+), 2 deletions(-) + create mode 100644 drivers/vhm/Kconfig + +Index: kernel-coe-tracker/drivers/Kconfig +=================================================================== +--- kernel-coe-tracker.orig/drivers/Kconfig ++++ kernel-coe-tracker/drivers/Kconfig +@@ -233,4 +233,6 @@ source "drivers/counter/Kconfig" + source "drivers/vbs/Kconfig" + + source "drivers/acrn/Kconfig" ++ ++source "drivers/vhm/Kconfig" + endmenu +Index: kernel-coe-tracker/drivers/Makefile +=================================================================== +--- kernel-coe-tracker.orig/drivers/Makefile ++++ kernel-coe-tracker/drivers/Makefile +@@ -183,7 +183,7 @@ obj-$(CONFIG_FPGA) += fpga/ + obj-$(CONFIG_FSI) += fsi/ + obj-$(CONFIG_TEE) += tee/ + obj-$(CONFIG_MULTIPLEXER) += mux/ +-obj-$(CONFIG_ACRN_GUEST) += vhm/ ++obj-$(CONFIG_ACRN_VHM) += vhm/ + obj-$(CONFIG_ACRN_GUEST) += acrn/ + obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/ + obj-$(CONFIG_SIOX) += siox/ +Index: kernel-coe-tracker/drivers/char/Makefile +=================================================================== +--- kernel-coe-tracker.orig/drivers/char/Makefile ++++ kernel-coe-tracker/drivers/char/Makefile +@@ -53,4 +53,4 @@ obj-$(CONFIG_XILLYBUS) += xillybus/ + obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o + obj-$(CONFIG_ADI) += adi.o + obj-$(CONFIG_RPMB_SUPPORT) += rpmb/ +-obj-$(CONFIG_ACRN_GUEST) += vhm/ ++obj-$(CONFIG_ACRN_VHM) += vhm/ +Index: kernel-coe-tracker/drivers/vhm/Kconfig +=================================================================== +--- /dev/null ++++ kernel-coe-tracker/drivers/vhm/Kconfig +@@ -0,0 +1,18 @@ ++config ACRN_VHM ++ bool "Intel ACRN Hypervisor Virtio and Hypervisor service Module (VHM)" ++ depends on ACRN_GUEST ++ 
depends on DMA_CMA ++ depends on PCI_MSI ++ depends on !INTEL_IOMMU ++ depends on !VMAP_STACK ++ default n ++ ---help--- ++ This is the Virtio and Hypervisor service Module (VHM) for ++ Intel ACRN hypervisor. ++ ++ It is required for Service OS. ++ User OS doesn't need to have this config. ++ ++ Say Y for SOS and say N for UOS. ++ ++ If unsure, say N. diff --git a/patches/0040-spi-dw-pci-Fix-Chip-Select-amount-on-Intel-Elkhart-La.lpss b/patches/0040-spi-dw-pci-Fix-Chip-Select-amount-on-Intel-Elkhart-La.lpss new file mode 100644 index 0000000000..3b705eff26 --- /dev/null +++ b/patches/0040-spi-dw-pci-Fix-Chip-Select-amount-on-Intel-Elkhart-La.lpss @@ -0,0 +1,31 @@ +From 4ab363eb3057803bdf718bd613b33f54aefcb742 Mon Sep 17 00:00:00 2001 +From: Jarkko Nikula +Date: Tue, 22 Oct 2019 15:21:24 +0300 +Subject: [PATCH 40/40] spi: dw-pci: Fix Chip Select amount on Intel Elkhart + Lake PSE SPI + +Intel(R) Programmable Services Engine (Intel(R) PSE) SPI controllers in +Intel Elkhart Lake have two Chip Select signals instead of one. 
+ +Reported-by: Raymond Tan +Signed-off-by: Jarkko Nikula +--- + drivers/spi/spi-dw-pci.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c +index eebaea34e515..263d33b0a67c 100644 +--- a/drivers/spi/spi-dw-pci.c ++++ b/drivers/spi/spi-dw-pci.c +@@ -36,7 +36,7 @@ static struct spi_pci_desc spi_pci_mid_desc_2 = { + }; + + static struct spi_pci_desc spi_pci_ehl_desc = { +- .num_cs = 1, ++ .num_cs = 2, + .bus_num = -1, + .max_freq = 100000000, + }; +-- +2.17.1 + diff --git a/patches/0040-trusty-switch-to-use-version-3-of-TRUSTY_API.trusty b/patches/0040-trusty-switch-to-use-version-3-of-TRUSTY_API.trusty new file mode 100644 index 0000000000..fa46e319b6 --- /dev/null +++ b/patches/0040-trusty-switch-to-use-version-3-of-TRUSTY_API.trusty @@ -0,0 +1,83 @@ +From 2b29ee6b3060333bbd512378a4b5701ae7d429ac Mon Sep 17 00:00:00 2001 +From: "Zhong,Fangjian" +Date: Tue, 11 Jul 2017 04:44:59 +0000 +Subject: [PATCH 40/63] trusty: switch to use version 3 of TRUSTY_API + +Version 3 of Trusty API adds support for new command +(SMC_NC_VDEV_KICK_VQ) that can be used to notify virtqueue +that new item is available. This command is a parameterized +NOP, it has to be queued using trusty_enqueue_nop API and as +such can be executed concurrently on multiple CPUs. 
+ +Change-Id: I9ba615e70b59e0689a47fa6eae0a6d9ba6033841 +Signed-off-by: Zhong,Fangjian +Author: Michael Ryleev +--- + drivers/trusty/trusty-virtio.c | 18 +++++++++++++++--- + include/linux/trusty/smcall.h | 1 + + 2 files changed, 16 insertions(+), 3 deletions(-) + +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index 6bbf80ce7d7f..b2418d7da5e1 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -60,7 +60,8 @@ struct trusty_vring { + atomic_t needs_kick; + struct fw_rsc_vdev_vring *vr_descr; + struct virtqueue *vq; +- struct trusty_vdev *tvdev; ++ struct trusty_vdev *tvdev; ++ struct trusty_nop kick_nop; + }; + + struct trusty_vdev { +@@ -144,8 +145,14 @@ static bool trusty_virtio_notify(struct virtqueue *vq) + struct trusty_vdev *tvdev = tvr->tvdev; + struct trusty_ctx *tctx = tvdev->tctx; + +- atomic_set(&tvr->needs_kick, 1); +- queue_work(tctx->kick_wq, &tctx->kick_vqs); ++ u32 api_ver = trusty_get_api_version(tctx->dev->parent); ++ ++ if (api_ver < TRUSTY_API_VERSION_SMP_NOP) { ++ atomic_set(&tvr->needs_kick, 1); ++ queue_work(tctx->kick_wq, &tctx->kick_vqs); ++ } else { ++ trusty_enqueue_nop(tctx->dev->parent, &tvr->kick_nop); ++ } + + return true; + } +@@ -269,6 +276,9 @@ static void _del_vqs(struct virtio_device *vdev) + struct trusty_vring *tvr = &tvdev->vrings[0]; + + for (i = 0; i < tvdev->vring_num; i++, tvr++) { ++ /* dequeue kick_nop */ ++ trusty_dequeue_nop(tvdev->tctx->dev->parent, &tvr->kick_nop); ++ + /* delete vq */ + if (tvr->vq) { + vring_del_virtqueue(tvr->vq); +@@ -431,6 +441,8 @@ static int trusty_virtio_add_device(struct trusty_ctx *tctx, + tvr->align = vr_descr->align; + tvr->elem_num = vr_descr->num; + tvr->notifyid = vr_descr->notifyid; ++ trusty_nop_init(&tvr->kick_nop, SMC_NC_VDEV_KICK_VQ, ++ tvdev->notifyid, tvr->notifyid); + } + + /* register device */ +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index fc98b3e5b2e7..037b3fa4429e 100644 
+--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -131,5 +131,6 @@ + + #define SMC_SC_VDEV_RESET SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23) + #define SMC_SC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24) ++#define SMC_NC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 25) + + #endif /* __LINUX_TRUSTY_SMCALL_H */ +-- +2.17.1 + diff --git a/patches/0041-ASoC-enable-get_time_info-from-device-driver.audio b/patches/0041-ASoC-enable-get_time_info-from-device-driver.audio new file mode 100644 index 0000000000..f52ad95cab --- /dev/null +++ b/patches/0041-ASoC-enable-get_time_info-from-device-driver.audio @@ -0,0 +1,61 @@ +From 119c59c37024e77b9d78449d7a90835e201bbd0d Mon Sep 17 00:00:00 2001 +From: Gustaw Lewandowski +Date: Sun, 17 Feb 2019 09:45:25 +0100 +Subject: [PATCH 041/193] ASoC: enable get_time_info from device driver + +get_time_info from snd_pcm_ops is not passed to snd_soc_pcm_runtime and +it causing that skl_get_time_info and azx_get_time_info be unreachable. 
+ +Change-Id: Iee3cd6dcbc0d01c8dee5283ebf85393d879e976b +Signed-off-by: Gustaw Lewandowski +--- + sound/soc/soc-pcm.c | 26 ++++++++++++++++++++++++++ + 1 file changed, 26 insertions(+) + +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index e163dde5eab1..468cabe35c4d 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -2453,6 +2453,30 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) + return ret; + } + ++static int soc_pcm_get_time_info(struct snd_pcm_substream *substream, ++ struct timespec *system_ts, struct timespec *audio_ts, ++ struct snd_pcm_audio_tstamp_config *audio_tstamp_config, ++ struct snd_pcm_audio_tstamp_report *audio_tstamp_report) ++{ ++ struct snd_soc_pcm_runtime *rtd = substream->private_data; ++ struct snd_soc_component *component; ++ struct snd_soc_rtdcom_list *rtdcom; ++ ++ for_each_rtdcom(rtd, rtdcom) { ++ component = rtdcom->component; ++ ++ if (!component->driver->ops || ++ !component->driver->ops->get_time_info) ++ continue; ++ ++ return component->driver->ops->get_time_info(substream, ++ system_ts, audio_ts, audio_tstamp_config, ++ audio_tstamp_report); ++ } ++ ++ return -ENOSYS; ++} ++ + static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream) + { + struct snd_pcm_substream *substream = +@@ -2928,6 +2952,8 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num) + rtd->ops.ioctl = snd_soc_pcm_component_ioctl; + } + ++ rtd->ops.get_time_info = soc_pcm_get_time_info; ++ + for_each_rtdcom(rtd, rtdcom) { + const struct snd_pcm_ops *ops = rtdcom->component->driver->ops; + +-- +2.17.1 + diff --git a/patches/0041-api-doc-add-vhm-API-docs.acrn b/patches/0041-api-doc-add-vhm-API-docs.acrn new file mode 100644 index 0000000000..d4a56cd453 --- /dev/null +++ b/patches/0041-api-doc-add-vhm-API-docs.acrn @@ -0,0 +1,561 @@ +From 6b092c6d8cb9f4cf0ade025711a7dba39d415dc4 Mon Sep 17 00:00:00 2001 +From: Yin Fengwei +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 041/150] api 
doc: add vhm API docs + +Change-Id: If6df309ea215c1592ce41f7da724388ff1084087 +Tracked-On: 220254 +Signed-off-by: Yin Fengwei +Reviewed-on: +Signed-off-by: Yin Fengwei +Reviewed-on: +--- + Documentation/virtual/acrn/vhm.rst | 8 ++ + include/linux/vhm/acrn_vhm_ioreq.h | 106 +++++++++++++++++++++++++ + include/linux/vhm/acrn_vhm_mm.h | 119 ++++++++++++++++++++++++++++- + include/linux/vhm/vhm_ioctl_defs.h | 60 +++++++++++++-- + include/linux/vhm/vhm_vm_mngt.h | 73 ++++++++++++++++++ + 5 files changed, 358 insertions(+), 8 deletions(-) + +diff --git a/Documentation/virtual/acrn/vhm.rst b/Documentation/virtual/acrn/vhm.rst +index 56d498a016b0..901cff492e2b 100644 +--- a/Documentation/virtual/acrn/vhm.rst ++++ b/Documentation/virtual/acrn/vhm.rst +@@ -3,3 +3,11 @@ Virtio and Hypervisor Module (VHM) + ================================== + + The Virtio and Hypervisor service Module (VHM) in part of ACRN Project. ++ ++APIs: ++----- ++ ++.. kernel-doc:: include/linux/vhm/acrn_vhm_ioreq.h ++.. kernel-doc:: include/linux/vhm/acrn_vhm_mm.h ++.. kernel-doc:: include/linux/vhm/vhm_ioctl_defs.h ++.. kernel-doc:: include/linux/vhm/vhm_vm_mngt.h +diff --git a/include/linux/vhm/acrn_vhm_ioreq.h b/include/linux/vhm/acrn_vhm_ioreq.h +index fcec2c1e2eac..de3a8aa4eaf6 100644 +--- a/include/linux/vhm/acrn_vhm_ioreq.h ++++ b/include/linux/vhm/acrn_vhm_ioreq.h +@@ -51,6 +51,12 @@ + * + */ + ++/** ++ * @file acrn_vhm_ioreq.h ++ * ++ * @brief Virtio and Hypervisor Module(VHM) ioreq APIs ++ */ ++ + #ifndef __ACRN_VHM_IOREQ_H__ + #define __ACRN_VHM_IOREQ_H__ + +@@ -59,22 +65,122 @@ + + typedef int (*ioreq_handler_t)(int client_id, int req); + ++/** ++ * acrn_ioreq_create_client - create ioreq client ++ * ++ * @vmid: ID to identify guest ++ * @handler: ioreq_handler of ioreq client ++ * If client want request handled in client thread context, set ++ * this parameter to NULL. If client want request handled out of ++ * client thread context, set handler function pointer of its own. 
++ * VHM will create kernel thread and call handler to handle request ++ * ++ * @name: the name of ioreq client ++ * ++ * Return: client id on success, <0 on error ++ */ + int acrn_ioreq_create_client(unsigned long vmid, ioreq_handler_t handler, + char *name); ++ ++/** ++ * acrn_ioreq_destroy_client - destroy ioreq client ++ * ++ * @client_id: client id to identify ioreq client ++ * ++ * Return: ++ */ + void acrn_ioreq_destroy_client(int client_id); + ++/** ++ * acrn_ioreq_add_iorange - add iorange monitored by ioreq client ++ * ++ * @client_id: client id to identify ioreq client ++ * @type: iorange type ++ * @start: iorange start address ++ * @end: iorange end address ++ * ++ * Return: 0 on success, <0 on error ++ */ + int acrn_ioreq_add_iorange(int client_id, uint32_t type, + long start, long end); ++ ++/** ++ * acrn_ioreq_del_iorange - del iorange monitored by ioreq client ++ * ++ * @client_id: client id to identify ioreq client ++ * @type: iorange type ++ * @start: iorange start address ++ * @end: iorange end address ++ * ++ * Return: 0 on success, <0 on error ++ */ + int acrn_ioreq_del_iorange(int client_id, uint32_t type, + long start, long end); + ++/** ++ * acrn_ioreq_get_reqbuf - get request buffer ++ * request buffer is shared by all clients in one guest ++ * ++ * @client_id: client id to identify ioreq client ++ * ++ * Return: pointer to request buffer, NULL on error ++ */ + struct vhm_request *acrn_ioreq_get_reqbuf(int client_id); ++ ++/** ++ * acrn_ioreq_attach_client - start handle request for ioreq client ++ * If request is handled out of client thread context, this function is ++ * only called once to be ready to handle new request. ++ * ++ * If request is handled in client thread context, this function must ++ * be called every time after the previous request handling is completed ++ * to be ready to handle new request. 
++ * ++ * @client_id: client id to identify ioreq client ++ * @check_kthread_stop: whether check current kthread should be stopped ++ * ++ * Return: 0 on success, <0 on error, 1 if ioreq client is destroying ++ */ + int acrn_ioreq_attach_client(int client_id, bool check_kthread_stop); + ++/** ++ * acrn_ioreq_distribute_request - deliver request to corresponding client ++ * ++ * @vm: pointer to guest ++ * ++ * Return: 0 always ++ */ + int acrn_ioreq_distribute_request(struct vhm_vm *vm); ++ ++/** ++ * acrn_ioreq_complete_request - notify guest request handling is completed ++ * ++ * @client_id: client id to identify ioreq client ++ * @vcpu: identify request submitter ++ * ++ * Return: 0 on success, <0 on error ++ */ + int acrn_ioreq_complete_request(int client_id, uint64_t vcpu); + ++/** ++ * acrn_ioreq_intercept_bdf - set intercept bdf info of ioreq client ++ * ++ * @client_id: client id to identify ioreq client ++ * @bus: bus number ++ * @dev: device number ++ * @func: function number ++ * ++ * Return: ++ */ + void acrn_ioreq_intercept_bdf(int client_id, int bus, int dev, int func); ++ ++/** ++ * acrn_ioreq_unintercept_bdf - clear intercept bdf info of ioreq client ++ * ++ * @client_id: client id to identify ioreq client ++ * ++ * Return: ++ */ + void acrn_ioreq_unintercept_bdf(int client_id); + + /* IOReq APIs */ +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index ba8558949e48..ba383b354986 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -51,25 +51,115 @@ + * + */ + ++/** ++ * @file acrn_vhm_mm.h ++ * ++ * @brief Virtio and Hypervisor Module memory manager APIs ++ */ ++ + #ifndef __ACRN_VHM_MM_H__ + #define __ACRN_VHM_MM_H__ + + #include + #include + +-/* 1:1 mapping for service OS */ ++/** ++ * acrn_hpa2gpa - physical address conversion ++ * ++ * convert host physical address (hpa) to guest physical address (gpa) ++ * gpa and hpa is 1:1 mapping for service OS ++ * ++ * @hpa: host 
physical address ++ * ++ * Return: guest physical address ++ */ + static inline unsigned long acrn_hpa2gpa(unsigned long hpa) + { + return hpa; + } + ++/** ++ * map_guest_phys - map guest physical address ++ * ++ * to SOS kernel virtual address ++ * ++ * @vmid: guest vmid ++ * @uos_phy: phsical address in guest ++ * @size: the memory size mapped ++ * ++ * Return: SOS kernel virtual address, NULL on error ++ */ + void *map_guest_phys(unsigned long vmid, u64 uos_phys, size_t size); ++ ++/** ++ * unmap_guest_phys - unmap guest physical address ++ * ++ * @vmid: guest vmid ++ * @uos_phy: phsical address in guest ++ * ++ * Return: 0 on success, <0 for error. ++ */ + int unmap_guest_phys(unsigned long vmid, u64 uos_phys); ++ ++/** ++ * set_mmio_map - map mmio EPT mapping between UOS gpa and SOS gpa ++ * ++ * @vmid: guest vmid ++ * @guest_gpa: gpa of UOS ++ * @host_gpa: gpa of SOS ++ * @len: memory mapped length ++ * @mem_type: memory mapping type. Possilble value could be: ++ * MEM_TYPE_WB ++ * MEM_TYPE_WT ++ * MEM_TYPE_UC ++ * MEM_TYPE_WC ++ * MEM_TYPE_WP ++ * @mem_access_right: memory mapping access. Possible value could be: ++ * MEM_ACCESS_READ ++ * MEM_ACCESS_WRITE ++ * MEM_ACCESS_EXEC ++ * MEM_ACCESS_RWX ++ * ++ * Return: 0 on success, <0 for error. ++ */ + int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned int mem_access_right); ++ ++/** ++ * unset_mmio_map - unmap mmio mapping between UOS gpa and SOS gpa ++ * ++ * @vmid: guest vmid ++ * @guest_gpa: gpa of UOS ++ * @host_gpa: gpa of SOS ++ * @len: memory mapped length ++ * ++ * Return: 0 on success, <0 for error. 
++ */ + int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len); ++ ++/** ++ * update_memmap_attr - update mmio EPT mapping between UOS gpa and SOS gpa ++ * ++ * @vmid: guest vmid ++ * @guest_gpa: gpa of UOS ++ * @host_gpa: gpa of SOS ++ * @len: memory mapped length ++ * @mem_type: memory mapping type. Possilble value could be: ++ * MEM_TYPE_WB ++ * MEM_TYPE_WT ++ * MEM_TYPE_UC ++ * MEM_TYPE_WC ++ * MEM_TYPE_WP ++ * @mem_access_right: memory mapping access. Possible value could be: ++ * MEM_ACCESS_READ ++ * MEM_ACCESS_WRITE ++ * MEM_ACCESS_EXEC ++ * MEM_ACCESS_RWX ++ * ++ * Return: 0 on success, <0 for error. ++ */ + int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned int mem_access_right); +@@ -77,9 +167,36 @@ int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma); + + int check_guest_mem(struct vhm_vm *vm); ++ ++/** ++ * free_guest_mem - free memory of guest ++ * ++ * @vm: pointer to guest vm ++ * ++ * Return: ++ */ + void free_guest_mem(struct vhm_vm *vm); + ++/** ++ * alloc_guest_memseg - alloc memory of guest according to pre-defined ++ * memory segment info ++ * ++ * @vm: pointer to guest vm ++ * @memseg: pointer to guest memory segment info ++ * ++ * Return: ++ */ + int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg); ++ ++/** ++ * map_guest_memseg - map EPT mmapping of memory of guest according to ++ * pre-defined memory mapping info ++ * ++ * @vm: pointer to guest vm ++ * @memmap: pointer to guest memory mapping info ++ * ++ * Return: ++ */ + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap); + + #endif +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 9f2f21acbbe3..5bc7c666f2ea 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ 
b/include/linux/vhm/vhm_ioctl_defs.h +@@ -43,6 +43,12 @@ + * $FreeBSD$ + */ + ++/** ++ * @file vhm_ioctl_defs.h ++ * ++ * @brief Virtio and Hypervisor Module definition for ioctl to user space ++ */ ++ + #ifndef _VHM_IOCTL_DEFS_H_ + #define _VHM_IOCTL_DEFS_H_ + +@@ -95,6 +101,12 @@ + #define IC_SET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x03) + #define IC_RESET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x04) + ++/** ++ * struct vm_memseg - memory segment info for guest ++ * ++ * @len: length of memory segment ++ * @gpa: guest physical start address of memory segment ++ */ + struct vm_memseg { + uint64_t len; + uint64_t gpa; +@@ -103,6 +115,15 @@ struct vm_memseg { + #define VM_SYSMEM 0 + #define VM_MMIO 1 + ++/** ++ * struct vm_memmap - EPT memory mapping info for guest ++ * ++ * @type: memory mapping type ++ * @gpa: guest physical start address of memory mapping ++ * @hpa: host physical start address of memory ++ * @len: the length of memory range mapped ++ * @prot: memory mapping attribute ++ */ + struct vm_memmap { + uint32_t type; + uint32_t reserved; +@@ -112,38 +133,63 @@ struct vm_memmap { + uint32_t prot; /* RWX */ + }; + ++/** ++ * struct ic_ptdev_irq - pass thru device irq data structure ++ */ + struct ic_ptdev_irq { + #define IRQ_INTX 0 + #define IRQ_MSI 1 + #define IRQ_MSIX 2 ++ /** @type: irq type */ + uint32_t type; ++ /** @virt_bdf: virtual bdf description of pass thru device */ + uint16_t virt_bdf; /* IN: Device virtual BDF# */ ++ /** @phy_bdf: physical bdf description of pass thru device */ + uint16_t phys_bdf; /* IN: Device physical BDF# */ ++ /** union */ + union { ++ /** struct intx - info of IOAPIC/PIC interrupt */ + struct { +- uint32_t virt_pin; /* IN: virtual IOAPIC pin */ +- uint32_t phys_pin; /* IN: physical IOAPIC pin */ +- uint32_t is_pic_pin; /* IN: pin from PIC? 
*/ ++ /** @virt_pin: virtual IOAPIC pin */ ++ uint32_t virt_pin; ++ /** @phys_pin: physical IOAPIC pin */ ++ uint32_t phys_pin; ++ /** @pic_pin: PIC pin */ ++ uint32_t is_pic_pin; + } intx; ++ ++ /** struct msix - info of MSI/MSIX interrupt */ + struct { +- /* IN: vector count of MSI/MSIX, +- * Keep this filed on top of msix */ ++ /* Keep this filed on top of msix */ ++ /** @vector_cnt: vector count of MSI/MSIX */ + uint32_t vector_cnt; + +- /* IN: size of MSI-X table (round up to 4K) */ ++ /** @table_size: size of MSIX table(round up to 4K) */ + uint32_t table_size; + +- /* IN: physical address of MSI-X table */ ++ /** @table_paddr: physical address of MSIX table */ + uint64_t table_paddr; + } msix; + }; + }; + ++/** ++ * struct ioreq_notify - data strcture to notify hypervisor ioreq is handled ++ * ++ * @client_id: client id to identify ioreq client ++ * @vcpu: identify the ioreq submitter ++ */ + struct ioreq_notify { + int32_t client_id; + uint32_t vcpu; + }; + ++/** ++ * struct api_version - data structure to track VHM API version ++ * ++ * @major_version: major version of VHM API ++ * @minor_version: minor version of VHM API ++ */ + struct api_version { + uint32_t major_version; + uint32_t minor_version; +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index 5edacb31dc1b..e7bc8b2372f7 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -53,6 +53,12 @@ + * Jason Chen CJ + * + */ ++ ++/** ++ * @file vhm_vm_mngt.h ++ * ++ * @brief Virtio and Hypervisor Module(VHM) management APIs ++ */ + #ifndef VHM_VM_MNGT_H + #define VHM_VM_MNGT_H + +@@ -61,6 +67,22 @@ + extern struct list_head vhm_vm_list; + extern struct mutex vhm_vm_list_lock; + ++/** ++ * struct vhm_vm - data structure to track guest ++ * ++ * @dev: pointer to dev of linux device mode ++ * @list: list of vhm_vm ++ * @vmid: guest vmid ++ * @ioreq_fallback_client: default ioreq client ++ * @refcnt: reference count of guest ++ * @seg_lock: 
mutex to protect memseg_list ++ * @memseg_list: list of memseg ++ * @max_gfn: maximum guest page frame number ++ * @ioreq_client_lock: spinlock to protect ioreq_client_list ++ * @ioreq_client_list: list of ioreq clients ++ * @req_buf: request buffer shared between HV, SOS and UOS ++ * @pg: pointer to linux page which holds req_buf ++ */ + struct vhm_vm { + struct device *dev; + struct list_head list; +@@ -76,16 +98,67 @@ struct vhm_vm { + struct page *pg; + }; + ++/** ++ * struct vm_info - data structure to track guest info ++ * ++ * @max_vcpu: maximum vcpu number of guest ++ * @max_gfn: maximum guest page frame number ++ */ + struct vm_info { + int max_vcpu; + int max_gfn; + }; + ++/** ++ * struct find_get_vm - find and hold vhm_vm of guest according to guest vmid ++ * ++ * @vmid: guest vmid ++ * ++ * Return: pointer to vhm_vm, NULL if can't find vm matching vmid ++ */ + struct vhm_vm *find_get_vm(unsigned long vmid); ++ ++/** ++ * struct put_vm - release vhm_vm of guest according to guest vmid ++ * If the latest reference count drops to zero, free vhm_vm as well ++ * ++ * @vm: pointer to vhm_vm which identrify specific guest ++ * ++ * Return: ++ */ + void put_vm(struct vhm_vm *vm); ++ ++/** ++ * struct vhm_get_vm_info - get vm_info of specific guest ++ * ++ * @vmid: guest vmid ++ * @info: pointer to vm_info for returned vm_info ++ * ++ * Return: 0 on success, <0 on error ++ */ + int vhm_get_vm_info(unsigned long vmid, struct vm_info *info); ++ ++/** ++ * struct vhm_inject_msi - inject MSI interrupt to guest ++ * ++ * @vmid: guest vmid ++ * @msi_addr: MSI addr matches MSI spec ++ * @msi_data: MSI data matches MSI spec ++ * ++ * Return: 0 on success, <0 on error ++ */ + int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, + unsigned long msi_data); ++ ++/** ++ * struct vhm_vm_gpa2hpa - convert guest physical address to ++ * host physical address ++ * ++ * @vmid: guest vmid ++ * @gap: guest physical address ++ * ++ * Return: host physical address, <0 on 
error ++ */ + unsigned long vhm_vm_gpa2hpa(unsigned long vmid, unsigned long gpa); + + void vm_list_add(struct list_head *list); +-- +2.17.1 + diff --git a/patches/0041-drm-vc4-vc4_hdmi-fill-in-connector-info.drm b/patches/0041-drm-vc4-vc4_hdmi-fill-in-connector-info.drm new file mode 100644 index 0000000000..da13c5ff36 --- /dev/null +++ b/patches/0041-drm-vc4-vc4_hdmi-fill-in-connector-info.drm @@ -0,0 +1,56 @@ +From 2f06ad0e72b66fc1b6265721d05e83418a17ef14 Mon Sep 17 00:00:00 2001 +From: Dariusz Marcinkiewicz +Date: Fri, 23 Aug 2019 13:24:25 +0200 +Subject: [PATCH 041/690] drm/vc4/vc4_hdmi: fill in connector info + +Fill in the connector info, allowing userspace to associate +the CEC device with the drm connector. + +Tested on a Raspberry Pi 3B. + +Signed-off-by: Dariusz Marcinkiewicz +Signed-off-by: Hans Verkuil +Tested-by: Hans Verkuil +Acked-by: Eric Anholt +Link: https://patchwork.freedesktop.org/patch/msgid/20190823112427.42394-2-hverkuil-cisco@xs4all.nl +--- + drivers/gpu/drm/vc4/vc4_hdmi.c | 13 +++++++++---- + 1 file changed, 9 insertions(+), 4 deletions(-) + +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c +index ee7d4e7b0ee3..0853b980bcb3 100644 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c +@@ -1285,6 +1285,9 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = { + + static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) + { ++#ifdef CONFIG_DRM_VC4_HDMI_CEC ++ struct cec_connector_info conn_info; ++#endif + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = dev_get_drvdata(master); + struct vc4_dev *vc4 = drm->dev_private; +@@ -1403,13 +1406,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) + #ifdef CONFIG_DRM_VC4_HDMI_CEC + hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, + vc4, "vc4", +- CEC_CAP_TRANSMIT | +- CEC_CAP_LOG_ADDRS | +- CEC_CAP_PASSTHROUGH | +- CEC_CAP_RC, 1); ++ 
CEC_CAP_DEFAULTS | ++ CEC_CAP_CONNECTOR_INFO, 1); + ret = PTR_ERR_OR_ZERO(hdmi->cec_adap); + if (ret < 0) + goto err_destroy_conn; ++ ++ cec_fill_conn_info_from_drm(&conn_info, hdmi->connector); ++ cec_s_conn_info(hdmi->cec_adap, &conn_info); ++ + HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff); + value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1); + value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; +-- +2.17.1 + diff --git a/patches/0041-mei-add-vtag-support-bit-in-client-properties.security b/patches/0041-mei-add-vtag-support-bit-in-client-properties.security new file mode 100644 index 0000000000..60c415633e --- /dev/null +++ b/patches/0041-mei-add-vtag-support-bit-in-client-properties.security @@ -0,0 +1,63 @@ +From 20323f27077f8d6b5a4acd3999e41747bce2a37b Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Wed, 9 May 2018 09:18:17 +0300 +Subject: [PATCH 41/65] mei: add vtag support bit in client properties + +Virtual tag support is on client basis, the vtag capability +is communicated vi client properties. 
+ +Change-Id: I8b1c80bf0d8a5f3fea4ec1e8e604f6cc8d16a7b8 +Signed-off-by: Alexander Usyskin +--- + drivers/misc/mei/debugfs.c | 7 ++++--- + drivers/misc/mei/hw.h | 3 ++- + 2 files changed, 6 insertions(+), 4 deletions(-) + +diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c +index b98f6f9a4896..3ab1a431d810 100644 +--- a/drivers/misc/mei/debugfs.c ++++ b/drivers/misc/mei/debugfs.c +@@ -27,7 +27,7 @@ static int mei_dbgfs_meclients_show(struct seq_file *m, void *unused) + + down_read(&dev->me_clients_rwsem); + +- seq_puts(m, " |id|fix| UUID |con|msg len|sb|refc|\n"); ++ seq_puts(m, " |id|fix| UUID |con|msg len|sb|refc|vt|\n"); + + /* if the driver is not enabled the list won't be consistent */ + if (dev->dev_state != MEI_DEV_ENABLED) +@@ -37,14 +37,15 @@ static int mei_dbgfs_meclients_show(struct seq_file *m, void *unused) + if (!mei_me_cl_get(me_cl)) + continue; + +- seq_printf(m, "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n", ++ seq_printf(m, "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|%2d|\n", + i++, me_cl->client_id, + me_cl->props.fixed_address, + &me_cl->props.protocol_name, + me_cl->props.max_number_of_connections, + me_cl->props.max_msg_length, + me_cl->props.single_recv_buf, +- kref_read(&me_cl->refcnt)); ++ kref_read(&me_cl->refcnt), ++ me_cl->props.vt_supported); + mei_me_cl_put(me_cl); + } + +diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h +index e1dc73de944e..fee94e1e3973 100644 +--- a/drivers/misc/mei/hw.h ++++ b/drivers/misc/mei/hw.h +@@ -330,7 +330,8 @@ struct mei_client_properties { + u8 max_number_of_connections; + u8 fixed_address; + u8 single_recv_buf:1; +- u8 reserved:7; ++ u8 vt_supported:1; ++ u8 reserved:6; + u32 max_msg_length; + } __packed; + +-- +2.17.1 + diff --git a/patches/0041-net-stmmac-support-recalculating-of-CBS-idle-.connectivity b/patches/0041-net-stmmac-support-recalculating-of-CBS-idle-.connectivity new file mode 100644 index 0000000000..6dd8ea718e --- /dev/null +++ 
b/patches/0041-net-stmmac-support-recalculating-of-CBS-idle-.connectivity @@ -0,0 +1,303 @@ +From fc9e7d9f45c00250d3cf0764812c1480004acfc1 Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Tue, 6 Aug 2019 16:52:54 +0800 +Subject: [PATCH 041/108] net: stmmac: support recalculating of CBS idle slope + under EST + +When EST enabled, credit is accumulated only when then gate is open. +Hence, the effective data rate of the idleSlope must be increased to +reflect the duty cycle of the transmission gate associated with the +queue. The new idleSlope is calculated using the equation below: + +idleSlope = (operIdleSlope(N) * OperCycle/GateOpenTime) + +operIdleSlope = calculated idle slope before EST enabled +N = queue number +OperCycle = Cycle time for queue N +GateOpenTime = Total gate open time for queue N + +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 1 + + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 3 + + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 4 +- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 9 +- + .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 43 ++++++++- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 95 ++++++++++++++++++- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 3 + + 7 files changed, 153 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 85327b7fe77a..454ed0d1e31b 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -1192,6 +1192,7 @@ const struct stmmac_ops dwmac510_ops = { + .get_est_gcc = tsn_est_gcc_get, + .est_irq_status = tsn_est_irq_status, + .dump_tsn_mmc = tsn_mmc_dump, ++ .cbs_recal_idleslope = tsn_cbs_recal_idleslope, + }; + + static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index 
98ab4013d0d9..af55842bef08 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -161,6 +161,9 @@ + #define EST_PTOV_MAX 0xff /* Max PTP time offset */ + #define EST_CTOV_MAX 0xfff /* Max Current time offset */ + ++/* CBS Global defines */ ++#define CBS_IDLESLOPE_MAX 0x1fffff ++ + /* MAC Core Version */ + #define TSN_VER_MASK 0xFF + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index 41e67df455c9..b2fecb07be6c 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -167,11 +167,13 @@ static u32 dwmac5_est_get_txqcnt(void __iomem *ioaddr) + + static void dwmac5_est_get_max(u32 *ptov_max, + u32 *ctov_max, +- u32 *cycle_max) ++ u32 *cycle_max, ++ u32 *idleslope_max) + { + *ptov_max = EST_PTOV_MAX; + *ctov_max = EST_CTOV_MAX; + *cycle_max = EST_CTR_HI_MAX; ++ *idleslope_max = CBS_IDLESLOPE_MAX; + } + + static int dwmac5_est_write_gcl_config(void __iomem *ioaddr, u32 data, u32 addr, +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index d511788fdb1c..b6110b35dcae 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -436,6 +436,10 @@ struct stmmac_ops { + struct net_device *dev); + int (*dump_tsn_mmc)(struct mac_device_info *hw, int index, + unsigned long *count, const char **desc); ++ int (*cbs_recal_idleslope)(struct mac_device_info *hw, ++ struct net_device *dev, ++ u32 queue, ++ u32 *idle_slope); + }; + + #define stmmac_core_init(__priv, __args...) \ +@@ -564,6 +568,8 @@ struct stmmac_ops { + stmmac_do_void_callback(__priv, mac, est_irq_status, __args) + #define stmmac_dump_tsn_mmc(__priv, __args...) \ + stmmac_do_callback(__priv, mac, dump_tsn_mmc, __args) ++#define stmmac_cbs_recal_idleslope(__priv, __args...) 
\ ++ stmmac_do_callback(__priv, mac, cbs_recal_idleslope, __args) + + /* Helpers for serdes */ + struct stmmac_serdes_ops { +@@ -735,7 +741,8 @@ struct tsnif_ops { + u32 (*est_get_gcl_depth)(void __iomem *ioaddr); + u32 (*est_get_ti_width)(void __iomem *ioaddr); + u32 (*est_get_txqcnt)(void __iomem *ioaddr); +- void (*est_get_max)(u32 *ptov_max, u32 *ctov_max, u32 *ct_max); ++ void (*est_get_max)(u32 *ptov_max, u32 *ctov_max, u32 *ct_max, ++ u32 *idleslope_max); + int (*est_write_gcl_config)(void __iomem *ioaddr, u32 data, u32 addr, + bool is_gcrr, + u32 dbgb, bool is_dbgm); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +index 2a04bb9ae919..cb871bf2707f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +@@ -688,8 +688,47 @@ static int tc_setup_taprio(struct stmmac_priv *priv, + egcrr.base_nsec = base_ns; + egcrr.ter_nsec = extension_ns; + +- return stmmac_set_est_gcrr_times(priv, priv->hw, priv->dev, +- &egcrr, 0, 0); ++ ret = stmmac_set_est_gcrr_times(priv, priv->hw, priv->dev, ++ &egcrr, 0, 0); ++ if (ret) { ++ dev_err(priv->device, ++ "EST: fail to program GCRR times into HW\n"); ++ return ret; ++ } ++ ++ if (priv->plat->tx_queues_to_use > 1) { ++ u32 queue; ++ ++ for (queue = 1; queue < priv->plat->tx_queues_to_use; queue++) { ++ u32 new_idle_slope; ++ ++ struct stmmac_txq_cfg *txqcfg = ++ &priv->plat->tx_queues_cfg[queue]; ++ ++ if (txqcfg->mode_to_use == MTL_QUEUE_DCB) ++ continue; ++ ++ new_idle_slope = txqcfg->idle_slope; ++ ret = stmmac_cbs_recal_idleslope(priv, priv->hw, ++ priv->dev, queue, ++ &new_idle_slope); ++ ++ if (ret) { ++ dev_err(priv->device, ++ "Recal idleslope failed.\n"); ++ break; ++ } ++ ++ stmmac_config_cbs(priv, priv->hw, ++ txqcfg->send_slope, ++ new_idle_slope, ++ txqcfg->high_credit, ++ txqcfg->low_credit, ++ queue); ++ } ++ } ++ ++ return ret; + } + + const struct stmmac_tc_ops dwmac510_tc_ops = { +diff 
--git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index 3288f1f54179..a76a5f68353f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -24,6 +24,48 @@ static u32 est_get_gcl_total_intervals_nsec(struct est_gc_config *gcc, + return nsec; + } + ++static u64 est_get_all_open_time(struct est_gc_config *est_gcc, ++ u32 bank, ++ u64 cycle_ns, ++ u32 queue) ++{ ++ u32 gate = 0x1 << queue; ++ u64 tti_ns = 0; ++ u64 total = 0; ++ struct est_gc_entry *gcl; ++ u32 gcl_len; ++ int row; ++ ++ gcl_len = est_gcc->gcb[bank].gcrr.llr; ++ gcl = est_gcc->gcb[bank].gcl; ++ ++ /* GCL which exceeds the cycle time will be truncated. ++ * So, time interval that exceeds the cycle time will not be ++ * included. ++ */ ++ for (row = 0; row < gcl_len; row++) { ++ tti_ns += gcl->ti_nsec; ++ ++ if (gcl->gates & gate) { ++ if (tti_ns <= cycle_ns) ++ total += gcl->ti_nsec; ++ else ++ total += gcl->ti_nsec - ++ (tti_ns - cycle_ns); ++ } ++ ++ gcl++; ++ } ++ ++ /* The gates wihtout any setting of open/close within ++ * the cycle time are considered as open. ++ */ ++ if (tti_ns < cycle_ns) ++ total += cycle_ns - tti_ns; ++ ++ return total; ++} ++ + int tsn_init(struct mac_device_info *hw, struct net_device *dev) + { + struct tsnif_info *info = &hw->tsn_info; +@@ -95,7 +137,8 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + tils_max = (tsnif_has_tsn_cap(hw, ioaddr, TSN_FEAT_ID_EST) ? 
3 : 0); + tils_max = (1 << tils_max) - 1; + cap->tils_max = tils_max; +- tsnif_est_get_max(hw, &cap->ptov_max, &cap->ctov_max, &cap->cycle_max); ++ tsnif_est_get_max(hw, &cap->ptov_max, &cap->ctov_max, ++ &cap->cycle_max, &cap->idleslope_max); + cap->est_support = 1; + + dev_info(pdev, "EST: depth=%u, ti_wid=%u, ter_max=%uns, tils_max=%u, tqcnt=%u\n", +@@ -716,3 +759,53 @@ int tsn_mmc_dump(struct mac_device_info *hw, + *desc = (mmc_desc + index)->desc; + return 0; + } ++ ++int tsn_cbs_recal_idleslope(struct mac_device_info *hw, struct net_device *dev, ++ u32 queue, u32 *idle_slope) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ void __iomem *ioaddr = hw->pcsr; ++ u64 scaling = 0; ++ struct est_gc_config *est_gcc; ++ struct tsn_hw_cap *cap; ++ u64 new_idle_slope; ++ u64 cycle_time_ns; ++ u32 open_time; ++ u32 hw_bank; ++ ++ cap = &info->cap; ++ est_gcc = &info->est_gcc; ++ hw_bank = tsnif_est_get_bank(hw, ioaddr, 1); ++ ++ cycle_time_ns = (est_gcc->gcb[hw_bank].gcrr.cycle_sec * ++ NSEC_PER_SEC) + ++ est_gcc->gcb[hw_bank].gcrr.cycle_nsec; ++ ++ if (!cycle_time_ns) { ++ netdev_warn(dev, "EST: Cycle time is 0.\n"); ++ netdev_warn(dev, "CBS idle slope will not be reconfigured.\n"); ++ ++ return -EINVAL; ++ } ++ ++ open_time = est_get_all_open_time(est_gcc, hw_bank, ++ cycle_time_ns, queue); ++ ++ if (!open_time) { ++ netdev_warn(dev, "EST: Total gate open time for queue %d is 0\n", ++ queue); ++ ++ return -EINVAL; ++ } ++ ++ scaling = cycle_time_ns; ++ do_div(scaling, open_time); ++ ++ new_idle_slope = *idle_slope * scaling; ++ if (new_idle_slope > cap->idleslope_max) ++ new_idle_slope = cap->idleslope_max; ++ ++ *idle_slope = new_idle_slope; ++ ++ return 0; ++} +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index b855b33526e3..6571fa92d4f8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -51,6 +51,7 @@ struct tsn_hw_cap { + 
u32 tils_max; /* Max time interval left shift */ + u32 ptov_max; /* Max PTP Offset */ + u32 ctov_max; /* Max Current Time Offset */ ++ u32 idleslope_max; /* Max idle slope */ + }; + + /* EST Gate Control Entry */ +@@ -135,5 +136,7 @@ int tsn_est_gcc_get(struct mac_device_info *hw, struct net_device *dev, + void tsn_est_irq_status(struct mac_device_info *hw, struct net_device *dev); + int tsn_mmc_dump(struct mac_device_info *hw, + int index, unsigned long *count, const char **desc); ++int tsn_cbs_recal_idleslope(struct mac_device_info *hw, struct net_device *dev, ++ u32 queue, u32 *idle_slope); + + #endif /* __STMMAC_TSN_H__ */ +-- +2.17.1 + diff --git a/patches/0041-trusty-add-support-for-SM-Wall-object.trusty b/patches/0041-trusty-add-support-for-SM-Wall-object.trusty new file mode 100644 index 0000000000..73cd3bd502 --- /dev/null +++ b/patches/0041-trusty-add-support-for-SM-Wall-object.trusty @@ -0,0 +1,497 @@ +From 953edf23b12889e6f802f130e256b602f454330b Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Tue, 11 Jul 2017 05:03:20 +0000 +Subject: [PATCH 41/63] trusty: add support for SM Wall object + +SM Wall is a shared memory buffer established between secure and +non-secure side that allows for secure side to publish in efficient +manner certain state that non-secure side might acts. + +This patch adds support for such buffer in a generic way, an API to +setup such buffer with secure side and an API to locate it's content +based on well object known id's. 
+ +Change-Id: Ibc4d43bdb7f47e803939461ece2ed848fda5738d +Signed-off-by: Zhong,Fangjian +Author: Michael Ryleev +Author: Zhong,Fangjian +--- + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-irq.c | 20 ---- + drivers/trusty/trusty-wall.c | 199 ++++++++++++++++++++++++++++++++++ + drivers/trusty/trusty.c | 22 +++- + include/linux/trusty/smcall.h | 21 +++- + include/linux/trusty/smwall.h | 90 +++++++++++++++ + include/linux/trusty/trusty.h | 13 +++ + 7 files changed, 343 insertions(+), 23 deletions(-) + create mode 100644 drivers/trusty/trusty-wall.c + create mode 100644 include/linux/trusty/smwall.h + +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index 9ca451e50dee..c1afb140ee00 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -11,3 +11,4 @@ obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o + obj-$(CONFIG_TRUSTY) += trusty-mem.o + obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o + obj-$(CONFIG_TRUSTY_VIRTIO_IPC) += trusty-ipc.o ++obj-$(CONFIG_TRUSTY) += trusty-wall.o +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index 5b4686f4f85f..eda0bff48c40 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -59,24 +59,6 @@ struct trusty_irq_state { + + static enum cpuhp_state trusty_irq_online; + +-#define TRUSTY_VMCALL_PENDING_INTR 0x74727505 +-static inline void set_pending_intr_to_lk(uint8_t vector) +-{ +- __asm__ __volatile__( +- "vmcall" +- ::"a"(TRUSTY_VMCALL_PENDING_INTR), "b"(vector) +- ); +-} +- +-#define TRUSTY_VMCALL_IRQ_DONE 0x74727506 +-static inline void irq_register_done(void) +-{ +- __asm__ __volatile__( +- "vmcall" +- ::"a"(TRUSTY_VMCALL_IRQ_DONE) +- ); +-} +- + static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is, + struct trusty_irq_irqset *irqset, + bool percpu) +@@ -580,8 +562,6 @@ static int trusty_irq_probe(struct platform_device *pdev) + irq = trusty_irq_init_one(is, irq, false); + + ret = trusty_irq_cpu_notif_add(is); +- irq_register_done(); +- 
+ if (ret) { + dev_err(&pdev->dev, "register_cpu_notifier failed %d\n", ret); + goto err_register_hotcpu_notifier; +diff --git a/drivers/trusty/trusty-wall.c b/drivers/trusty/trusty-wall.c +new file mode 100644 +index 000000000000..3c33d724b3fa +--- /dev/null ++++ b/drivers/trusty/trusty-wall.c +@@ -0,0 +1,199 @@ ++/* ++ * Copyright (C) 2017 Intel, Inc. ++ * Copyright (C) 2016 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++void *trusty_wall_base(struct device *dev) ++{ ++ struct trusty_wall_dev_state *s; ++ ++ s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (NULL == s) ++ return NULL; ++ ++ return s->va; ++} ++EXPORT_SYMBOL(trusty_wall_base); ++ ++void *trusty_wall_per_cpu_item_ptr(struct device *dev, unsigned int cpu, ++ u32 item_id, size_t exp_sz) ++{ ++ uint i; ++ struct sm_wall_toc *toc; ++ struct sm_wall_toc_item *item; ++ struct trusty_wall_dev_state *s; ++ ++ s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (!s->va) { ++ dev_dbg(s->dev, "No smwall buffer is set\n"); ++ return NULL; ++ } ++ ++ toc = (struct sm_wall_toc *)s->va; ++ if (toc->version != SM_WALL_TOC_VER) { ++ dev_err(s->dev, "Unexpected toc version: %d\n", toc->version); ++ return NULL; ++ } ++ ++ if (cpu >= toc->cpu_num) { ++ dev_err(s->dev, "Unsupported cpu (%d) requested\n", cpu); ++ return NULL; ++ } ++ ++ item = (struct sm_wall_toc_item *)((uintptr_t)toc + ++ toc->per_cpu_toc_offset); ++ for (i = 0; i < toc->per_cpu_num_items; i++, item++) { ++ if 
(item->id != item_id) ++ continue; ++ ++ if (item->size != exp_sz) { ++ dev_err(s->dev, ++ "Size mismatch (%zd vs. %zd) for item_id %d\n", ++ (size_t)item->size, exp_sz, item_id); ++ return NULL; ++ } ++ ++ return s->va + toc->per_cpu_base_offset + ++ cpu * toc->per_cpu_region_size + item->offset; ++ } ++ return NULL; ++} ++EXPORT_SYMBOL(trusty_wall_per_cpu_item_ptr); ++ ++static int trusty_wall_setup(struct trusty_wall_dev_state *s) ++{ ++ int ret; ++ void *va; ++ size_t sz; ++ ++ /* check if wall feature is supported by Trusted OS */ ++ ret = trusty_fast_call32(s->trusty_dev, SMC_FC_GET_WALL_SIZE, 0, 0, 0); ++ if (ret == SM_ERR_UNDEFINED_SMC || ret == SM_ERR_NOT_SUPPORTED) { ++ /* wall is not supported */ ++ dev_notice(s->dev, "smwall: is not supported by Trusted OS\n"); ++ return 0; ++ } else if (ret < 0) { ++ dev_err(s->dev, "smwall: failed (%d) to query buffer size\n", ++ ret); ++ return ret; ++ } else if (ret == 0) { ++ dev_notice(s->dev, "smwall: zero-sized buffer requested\n"); ++ return 0; ++ } ++ sz = (size_t)ret; ++ ++ /* allocate memory for shared buffer */ ++ va = alloc_pages_exact(sz, GFP_KERNEL | __GFP_ZERO); ++ if (!va) { ++ dev_err(s->dev, "smwall: failed to allocate buffer\n"); ++ return -ENOMEM; ++ } ++ ++ /* call into Trusted OS to setup wall */ ++ ret = trusty_call32_mem_buf(s->trusty_dev, SMC_SC_SETUP_WALL, ++ virt_to_page(va), sz, PAGE_KERNEL); ++ if (ret < 0) { ++ dev_err(s->dev, "smwall: TEE returned (%d)\n", ret); ++ free_pages_exact(va, sz); ++ return -ENODEV; ++ } ++ ++ dev_info(s->dev, "smwall: initialized %zu bytes\n", sz); ++ ++ s->va = va; ++ s->sz = sz; ++ ++ return 0; ++} ++ ++static void trusty_wall_destroy(struct trusty_wall_dev_state *s) ++{ ++ int ret; ++ ++ ret = trusty_std_call32(s->trusty_dev, SMC_SC_DESTROY_WALL, 0, 0, 0); ++ if (ret) { ++ /** ++ * It should never happen, but if it happens, it is ++ * unsafe to free buffer so we have to leak memory ++ */ ++ dev_err(s->dev, "Failed (%d) to destroy the wall buffer\n", ++ 
ret); ++ } else { ++ free_pages_exact(s->va, s->sz); ++ } ++} ++ ++static int trusty_wall_probe(struct platform_device *pdev) ++{ ++ int ret; ++ struct trusty_wall_dev_state *s; ++ ++ dev_dbg(&pdev->dev, "%s\n", __func__); ++ ++ s = kzalloc(sizeof(*s), GFP_KERNEL); ++ if (!s) ++ return -ENOMEM; ++ ++ s->dev = &pdev->dev; ++ s->trusty_dev = s->dev->parent; ++ platform_set_drvdata(pdev, s); ++ ++ ret = trusty_wall_setup(s); ++ if (ret < 0) { ++ dev_warn(s->dev, "Failed (%d) to setup the wall\n", ret); ++ kfree(s); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int trusty_wall_remove(struct platform_device *pdev) ++{ ++ struct trusty_wall_dev_state *s = platform_get_drvdata(pdev); ++ ++ trusty_wall_destroy(s); ++ ++ return 0; ++} ++ ++static const struct of_device_id trusty_wall_of_match[] = { ++ { .compatible = "android, trusty-wall-v1", }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, trusty_wall_of_match); ++ ++static struct platform_driver trusty_wall_driver = { ++ .probe = trusty_wall_probe, ++ .remove = trusty_wall_remove, ++ .driver = { ++ .name = "trusty-wall", ++ .owner = THIS_MODULE, ++ .of_match_table = trusty_wall_of_match, ++ }, ++}; ++ ++module_platform_driver(trusty_wall_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Trusty smwall driver"); +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 4aa4a89799dc..0b3e75823be1 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -610,12 +611,17 @@ static struct platform_driver trusty_driver = { + }, + }; + +-void trusty_dev_release(struct device *dev) ++void trusty_dev_release(struct device *dev) + { + dev_dbg(dev, "%s() is called()\n", __func__); + return; + } + ++static struct device_node trusty_wall_node = { ++ .name = "trusty-wall", ++ .sibling = NULL, ++}; ++ + static struct device_node trusty_irq_node = { + .name = "trusty-irq", + .sibling = NULL, +@@ -679,11 +685,23 
@@ static struct platform_device trusty_platform_dev_irq = { + }, + }; + ++static struct platform_device trusty_platform_dev_wall = { ++ .name = "trusty-wall", ++ .id = -1, ++ .num_resources = 0, ++ .dev = { ++ .release = trusty_dev_release, ++ .parent = &trusty_platform_dev.dev, ++ .of_node = &trusty_wall_node, ++ }, ++}; ++ + static struct platform_device *trusty_devices[] __initdata = { + &trusty_platform_dev, + &trusty_platform_dev_log, + &trusty_platform_dev_virtio, +- &trusty_platform_dev_irq ++ &trusty_platform_dev_irq, ++ &trusty_platform_dev_wall + }; + static int __init trusty_driver_init(void) + { +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index 037b3fa4429e..ee5dda2560b6 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2013-2014 Google Inc. All rights reserved ++ * Copyright (c) 2013-2016 Google Inc. All rights reserved + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files +@@ -124,6 +124,25 @@ + #define TRUSTY_API_VERSION_CURRENT (3) + #define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11) + ++/* ++ * SM Wall is a shared memory buffer established between secure and non-secure ++ * side that allows for secure side to publish certain state that non-secure ++ * side might acts on. One known example is a state of per CPU timer on ++ * platforms that require migration to broadcast timer in deep idle states. ++ * ++ * SMC_FC_GET_WALL_SIZE - retrieves the size of memory buffer that will be ++ * required to setup the SM Wall object. ++ * ++ * SMC_SC_SETUP_WALL - specifies location, size and attributes of memory buffer ++ * allocated by non-secure side to setup the SM Wall object. ++ * ++ * SMC_SC_DESTROY_WALL - notifies secure side that previously specifies SM Wall ++ * object should be released usually as part of normal shutdown sequence. 
++ */ ++#define SMC_FC_GET_WALL_SIZE SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 12) ++#define SMC_SC_SETUP_WALL SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 12) ++#define SMC_SC_DESTROY_WALL SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 13) ++ + /* TRUSTED_OS entity calls */ + #define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20) + #define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21) +diff --git a/include/linux/trusty/smwall.h b/include/linux/trusty/smwall.h +new file mode 100644 +index 000000000000..370d8b32f26a +--- /dev/null ++++ b/include/linux/trusty/smwall.h +@@ -0,0 +1,90 @@ ++/* ++ * Copyright (c) 2016 Google Inc. All rights reserved ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files ++ * (the "Software"), to deal in the Software without restriction, ++ * including without limitation the rights to use, copy, modify, merge, ++ * publish, distribute, sublicense, and/or sell copies of the Software, ++ * and to permit persons to whom the Software is furnished to do so, ++ * subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ++ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++#ifndef __LINUX_TRUSTY_SMWALL_H ++#define __LINUX_TRUSTY_SMWALL_H ++ ++/** ++ * DOC: Introduction ++ * ++ * SM Wall buffer is formatted by secure side to contain the location of ++ * objects it exports: ++ * ++ * In general it starts with sm_wall_toc header struct followed ++ * by array of sm_wall_toc_item objects describing location of ++ * individual objects within SM Wall buffer. ++ */ ++ ++/* current version of TOC structure */ ++#define SM_WALL_TOC_VER 1 ++ ++/** ++ * struct sm_wall_toc_item - describes individual table of content item ++ * @id: item id ++ * @offset: item offset relative to appropriate base. For global items ++ * it is relative to SM wall buffer base address. For per cpu item, this is an ++ * offset within each individual per cpu region. ++ * @size: item size ++ * @reserved: reserved: must be set to zero ++ */ ++struct sm_wall_toc_item { ++ u32 id; ++ u32 offset; ++ u32 size; ++ u32 reserved; ++}; ++ ++/** ++ * struct sm_wall_toc - describes sm_wall table of content structure ++ * @version: current toc structure version ++ * @cpu_num: number of cpus supported ++ * @per_cpu_toc_offset: offset of the start of per_cpu item table relative to ++ * SM wall buffer base address. ++ * @per_cpu_num_items: number of per cpu toc items located at position ++ * specified by @per_cpu_toc_offset. ++ * @per_cpu_base_offset: offset of the start of a sequence of per cpu data ++ * regions (@cpu_num total) relative to SM wall buffer ++ * base address. ++ * @per_cpu_region_size: size of each per cpu data region. ++ * @global_toc_offset: offset of the start of global item table relative to ++ * SM wall buffer base address. 
++ * @global_num_items: number of items in global item table ++ */ ++struct sm_wall_toc { ++ u32 version; ++ u32 cpu_num; ++ u32 per_cpu_toc_offset; ++ u32 per_cpu_num_items; ++ u32 per_cpu_base_offset; ++ u32 per_cpu_region_size; ++ u32 global_toc_offset; ++ u32 global_num_items; ++}; ++ ++struct trusty_wall_dev_state { ++ struct device *dev; ++ struct device *trusty_dev; ++ void *va; ++ size_t sz; ++}; ++ ++#endif /* __LINUX_TRUSTY_SMWALL_H */ +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index eaa833bdea73..029b0986566f 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -85,6 +85,19 @@ static inline void trusty_nop_init(struct trusty_nop *nop, + void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop); + void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop); + ++#define TRUSTY_VMCALL_PENDING_INTR 0x74727505 ++static inline void set_pending_intr_to_lk(uint8_t vector) ++{ ++ __asm__ __volatile__( ++ "vmcall" ++ ::"a"(TRUSTY_VMCALL_PENDING_INTR), "b"(vector) ++ ); ++} ++ ++void trusty_update_wall_info(struct device *dev, void *va, size_t sz); ++void *trusty_wall_base(struct device *dev); ++void *trusty_wall_per_cpu_item_ptr(struct device *dev, unsigned int cpu, ++ u32 item_id, size_t exp_sz); + + /* CPUID leaf 0x3 is used because eVMM will trap this leaf.*/ + #define EVMM_SIGNATURE_CORP 0x43544E49 /* "INTC", edx */ +-- +2.17.1 + diff --git a/patches/0042-ASoC-Intel-Skylake-Fix-for-SNDRV_PCM_AUDIO_TSTAMP_TY.audio b/patches/0042-ASoC-Intel-Skylake-Fix-for-SNDRV_PCM_AUDIO_TSTAMP_TY.audio new file mode 100644 index 0000000000..3a88d8a214 --- /dev/null +++ b/patches/0042-ASoC-Intel-Skylake-Fix-for-SNDRV_PCM_AUDIO_TSTAMP_TY.audio @@ -0,0 +1,33 @@ +From 9da9509322907cdb37d62bc2d446b6a30ea04d26 Mon Sep 17 00:00:00 2001 +From: Gustaw Lewandowski +Date: Mon, 11 Mar 2019 13:45:45 +0100 +Subject: [PATCH 042/193] ASoC: Intel: Skylake: Fix for + SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK requests + 
+Wrong mask passed to snd_hdac_stream_timecounter_init caused that +timecounter has been never initialised and finally skl_get_time_info +calls timecounter_read on uninitialised pointer causes NULL pointer +dereference. + +Change-Id: I82d465a6e0a8b73e271a24dd438721b9943893b7 +Signed-off-by: Gustaw Lewandowski +--- + sound/soc/intel/skylake/skl-pcm.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c +index 20a7c497a6aa..690b1f90320e 100644 +--- a/sound/soc/intel/skylake/skl-pcm.c ++++ b/sound/soc/intel/skylake/skl-pcm.c +@@ -458,7 +458,7 @@ static int skl_decoupled_trigger(struct snd_pcm_substream *substream, + + if (start) { + snd_hdac_stream_start(hdac_stream(stream), true); +- snd_hdac_stream_timecounter_init(hstr, 0); ++ snd_hdac_stream_timecounter_init(hstr, 1 << hstr->index); + } else { + snd_hdac_stream_stop(hdac_stream(stream)); + } +-- +2.17.1 + diff --git a/patches/0042-api-doc-update-ACRN-VBS-API-docs.acrn b/patches/0042-api-doc-update-ACRN-VBS-API-docs.acrn new file mode 100644 index 0000000000..1e87402de3 --- /dev/null +++ b/patches/0042-api-doc-update-ACRN-VBS-API-docs.acrn @@ -0,0 +1,83 @@ +From 20c09c1b1dd80a7fa6f56689ae8bba50dabc4954 Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 042/150] api doc: update ACRN VBS API docs + +Change-Id: I9e56502a114019297ac04d7e8a2a07230e7adcfe +Tracked-On: 220254 +Signed-off-by: Hao Li +Reviewed-on: +--- + include/linux/vbs/vq.h | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/include/linux/vbs/vq.h b/include/linux/vbs/vq.h +index 9e865b8dff05..647466567db4 100644 +--- a/include/linux/vbs/vq.h ++++ b/include/linux/vbs/vq.h +@@ -106,7 +106,7 @@ + * + * @qsz: size of raw data in a certain virtqueue + * +- * Return: size of a certain virtqueue ++ * Return: size of a certain virtqueue, in bytes + */ + static inline size_t 
virtio_vq_ring_size(unsigned int qsz) + { +@@ -158,7 +158,7 @@ static inline int virtio_vq_has_descs(struct virtio_vq_info *vq) + * @dev: Pointer to struct virtio_dev_info + * @vq: Pointer to struct virtio_vq_info + * +- * Return: NULL ++ * Return: N/A + */ + static inline void virtio_vq_interrupt(struct virtio_dev_info *dev, + struct virtio_vq_info *vq) +@@ -195,7 +195,7 @@ static inline void virtio_vq_interrupt(struct virtio_dev_info *dev, + * @vq: Pointer to struct virtio_vq_info + * @pfn: page frame number in guest physical address space + * +- * Return: NULL ++ * Return: N/A + */ + void virtio_vq_init(struct virtio_vq_info *vq, uint32_t pfn); + +@@ -204,7 +204,7 @@ void virtio_vq_init(struct virtio_vq_info *vq, uint32_t pfn); + * + * @vq: Pointer to struct virtio_vq_info + * +- * Return: NULL ++ * Return: N/A + */ + void virtio_vq_reset(struct virtio_vq_info *vq); + +@@ -233,7 +233,7 @@ int virtio_vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx, + * + * @vq: Pointer to struct virtio_vq_info + * +- * Return: NULL ++ * Return: N/A + */ + void virtio_vq_retchain(struct virtio_vq_info *vq); + +@@ -245,7 +245,7 @@ void virtio_vq_retchain(struct virtio_vq_info *vq); + * @idx: Pointer to available ring position, returned by vq_getchain() + * @iolen: Number of data bytes to be returned to frontend + * +- * Return: NULL ++ * Return: N/A + */ + void virtio_vq_relchain(struct virtio_vq_info *vq, uint16_t idx, + uint32_t iolen); +@@ -259,7 +259,7 @@ void virtio_vq_relchain(struct virtio_vq_info *vq, uint16_t idx, + * @vq: Pointer to struct virtio_vq_info + * @used_all_avail: Flag indicating if driver used all available chains + * +- * Return: NULL ++ * Return: N/A + */ + void virtio_vq_endchains(struct virtio_vq_info *vq, int used_all_avail); + +-- +2.17.1 + diff --git a/patches/0042-drm-i915-selftests-Markup-impossible-error-pointers.drm b/patches/0042-drm-i915-selftests-Markup-impossible-error-pointers.drm new file mode 100644 index 0000000000..b9d5419d9f --- 
/dev/null +++ b/patches/0042-drm-i915-selftests-Markup-impossible-error-pointers.drm @@ -0,0 +1,41 @@ +From 89179c4d4b274965e13c90cb2be6d880008cdc1e Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Tue, 27 Aug 2019 10:49:33 +0100 +Subject: [PATCH 042/690] drm/i915/selftests: Markup impossible error pointers + +If we create a new live_context() we should have a mapping for each +engine. Document that assumption with an assertion. + +Reported-by: Dan Carpenter +Signed-off-by: Chris Wilson +Cc: Matthew Auld +Reviewed-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190827094933.13778-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +index 3adb60c2fd1f..37a177e37665 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c ++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +@@ -397,6 +397,7 @@ static int igt_ctx_exec(void *arg) + } + + ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); ++ GEM_BUG_ON(IS_ERR(ce)); + + if (!obj) { + obj = create_test_object(ce->vm, file, &objects); +@@ -521,6 +522,8 @@ static int igt_shared_ctx_exec(void *arg) + __assign_ppgtt(ctx, parent->vm); + + ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); ++ GEM_BUG_ON(IS_ERR(ce)); ++ + if (!obj) { + obj = create_test_object(parent->vm, file, &objects); + if (IS_ERR(obj)) { +-- +2.17.1 + diff --git a/patches/0042-mei-bump-hbm-version-to-2.2.security b/patches/0042-mei-bump-hbm-version-to-2.2.security new file mode 100644 index 0000000000..ef9f8cd942 --- /dev/null +++ b/patches/0042-mei-bump-hbm-version-to-2.2.security @@ -0,0 +1,30 @@ +From 152ea9b43ad223cb17b2b6f56cc5ded964905a35 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Mon, 7 May 2018 13:34:46 +0300 +Subject: [PATCH 42/65] mei: bump hbm version to 2.2 + +Bump HBM 
version to 2.2 to indicate vtag support. + +Change-Id: Iabcc95fe541ca612b477e8f9068e38ed297928ca +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/hw.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h +index fee94e1e3973..efb9f9e019b0 100644 +--- a/drivers/misc/mei/hw.h ++++ b/drivers/misc/mei/hw.h +@@ -25,7 +25,7 @@ + /* + * MEI Version + */ +-#define HBM_MINOR_VERSION 1 ++#define HBM_MINOR_VERSION 2 + #define HBM_MAJOR_VERSION 2 + + /* +-- +2.17.1 + diff --git a/patches/0042-trusty-add-support-for-trusty-backup-timer.trusty b/patches/0042-trusty-add-support-for-trusty-backup-timer.trusty new file mode 100644 index 0000000000..f5f6c5dd5a --- /dev/null +++ b/patches/0042-trusty-add-support-for-trusty-backup-timer.trusty @@ -0,0 +1,301 @@ +From de9eef3ccfe105cda1b3d72c3021f90fc00296ab Mon Sep 17 00:00:00 2001 +From: "Zhong,Fangjian" +Date: Tue, 11 Jul 2017 05:09:10 +0000 +Subject: [PATCH 42/63] trusty: add support for trusty backup timer + +On some platforms, in certain cpu idle modes, Trusty might +lose the state of secure timer that it is using for work +scheduling. In such cases, non-secure side would typically +migrate such timers to alternative implementations that +does not lose their state.Ideally, secure side should +have similar mechanizm, but it might not be always +feasible due to hardware limitations. + +This patch introduces a generic workaround for this +issue but adding backup non-secure timers that is used +to kick cpus out of deep idle modes when appropriate. 
+ +Change-Id: I7ce18d45db67cc650f7875395451da7a2ed1ab2d +Signed-off-by: Zhong,Fangjian +Author: Michael Ryleev +Author: Zhong,Fangjian +--- + drivers/trusty/Kconfig | 13 +++ + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-timer.c | 166 ++++++++++++++++++++++++++++++++++ + drivers/trusty/trusty.c | 19 +++- + include/linux/trusty/smwall.h | 14 +++ + 5 files changed, 212 insertions(+), 1 deletion(-) + create mode 100644 drivers/trusty/trusty-timer.c + +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index 0b6b88e3a718..7b58db5e9a21 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -49,4 +49,17 @@ config TRUSTY_VIRTIO_IPC + If you choose to build a module, it'll be called trusty-ipc. + Say N if unsure. + ++config TRUSTY_BACKUP_TIMER ++ tristate "Trusty backup timer" ++ depends on TRUSTY ++ default y ++ help ++ This module adds support for Trusty backup timer. Trusty backup ++ timer might be required on platforms that might loose state of ++ secure timer in deep idle state. ++ ++ If you choose to build a module, it'll be called trusty-timer. ++ Say N if unsure. ++ ++ + endmenu +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index c1afb140ee00..69a78688f1b0 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -12,3 +12,4 @@ obj-$(CONFIG_TRUSTY) += trusty-mem.o + obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o + obj-$(CONFIG_TRUSTY_VIRTIO_IPC) += trusty-ipc.o + obj-$(CONFIG_TRUSTY) += trusty-wall.o ++obj-$(CONFIG_TRUSTY_BACKUP_TIMER) += trusty-timer.o +diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c +new file mode 100644 +index 000000000000..0998e027984b +--- /dev/null ++++ b/drivers/trusty/trusty-timer.c +@@ -0,0 +1,166 @@ ++/* ++ * Copyright (C) 2017 Intel, Inc. ++ * Copyright (C) 2016 Google, Inc. 
++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct trusty_timer { ++ struct sec_timer_state *sts; ++ struct hrtimer tm; ++}; ++ ++struct trusty_timer_dev_state { ++ struct device *dev; ++ struct device *smwall_dev; ++ struct device *trusty_dev; ++ struct notifier_block call_notifier; ++ struct trusty_timer timer; ++}; ++ ++static enum hrtimer_restart trusty_timer_cb(struct hrtimer *tm) ++{ ++ struct trusty_timer_dev_state *s; ++ ++ s = container_of(tm, struct trusty_timer_dev_state, timer.tm); ++ ++ set_pending_intr_to_lk(0x31); ++ trusty_enqueue_nop(s->trusty_dev, NULL); ++ ++ return HRTIMER_NORESTART; ++} ++ ++static int trusty_timer_call_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct trusty_timer *tt; ++ struct sec_timer_state *sts; ++ struct trusty_timer_dev_state *s; ++ ++ if (action != TRUSTY_CALL_RETURNED) ++ return NOTIFY_DONE; ++ ++ s = container_of(nb, struct trusty_timer_dev_state, call_notifier); ++ ++ /* this notifier is executed in non-preemptible context */ ++ tt = &s->timer; ++ sts = tt->sts; ++ ++ if (sts->tv_ns > sts->cv_ns) { ++ hrtimer_cancel(&tt->tm); ++ } else if (sts->cv_ns > sts->tv_ns) { ++ /* need to set/reset timer */ ++ hrtimer_start(&tt->tm, ns_to_ktime(sts->cv_ns - sts->tv_ns), ++ HRTIMER_MODE_REL_PINNED); ++ } ++ ++ sts->cv_ns = 0ULL; ++ sts->tv_ns = 0ULL; ++ ++ return NOTIFY_OK; ++} ++ ++static int trusty_timer_probe(struct platform_device *pdev) ++{ ++ int ret; ++ 
unsigned int cpu; ++ struct trusty_timer_dev_state *s; ++ struct trusty_timer *tt; ++ ++ dev_dbg(&pdev->dev, "%s\n", __func__); ++ ++ if (!trusty_wall_base(pdev->dev.parent)) { ++ dev_notice(&pdev->dev, "smwall: is not setup by parent\n"); ++ return -ENODEV; ++ } ++ ++ s = kzalloc(sizeof(*s), GFP_KERNEL); ++ if (!s) ++ return -ENOMEM; ++ ++ s->dev = &pdev->dev; ++ s->smwall_dev = s->dev->parent; ++ s->trusty_dev = s->smwall_dev->parent; ++ platform_set_drvdata(pdev, s); ++ ++ tt = &s->timer; ++ ++ hrtimer_init(&tt->tm, CLOCK_BOOTTIME, HRTIMER_MODE_REL_PINNED); ++ tt->tm.function = trusty_timer_cb; ++ tt->sts = ++ trusty_wall_per_cpu_item_ptr(s->smwall_dev, 0, ++ SM_WALL_PER_CPU_SEC_TIMER_ID, ++ sizeof(*tt->sts)); ++ WARN_ON(!tt->sts); ++ ++ ++ /* register notifier */ ++ s->call_notifier.notifier_call = trusty_timer_call_notify; ++ ret = trusty_call_notifier_register(s->trusty_dev, &s->call_notifier); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Failed to register call notifier\n"); ++ kfree(s); ++ return ret; ++ } ++ ++ dev_info(s->dev, "initialized\n"); ++ ++ return 0; ++ ++} ++ ++static int trusty_timer_remove(struct platform_device *pdev) ++{ ++ unsigned int cpu; ++ struct trusty_timer_dev_state *s = platform_get_drvdata(pdev); ++ struct trusty_timer *tt; ++ ++ ++ dev_dbg(&pdev->dev, "%s\n", __func__); ++ ++ /* unregister notifier */ ++ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); ++ ++ tt = &s->timer; ++ hrtimer_cancel(&tt->tm); ++ ++ /* free state */ ++ kfree(s); ++ return 0; ++} ++ ++static const struct of_device_id trusty_test_of_match[] = { ++ { .compatible = "android,trusty-timer-v1", }, ++ {}, ++}; ++ ++static struct platform_driver trusty_timer_driver = { ++ .probe = trusty_timer_probe, ++ .remove = trusty_timer_remove, ++ .driver = { ++ .name = "trusty-timer", ++ .owner = THIS_MODULE, ++ .of_match_table = trusty_test_of_match, ++ }, ++}; ++ ++module_platform_driver(trusty_timer_driver); ++ ++MODULE_LICENSE("GPL v2"); 
++MODULE_DESCRIPTION("Trusty timer driver"); +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 0b3e75823be1..1568849e4501 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -617,6 +617,11 @@ void trusty_dev_release(struct device *dev) + return; + } + ++static struct device_node trusty_timer_node = { ++ .name = "trusty-timer", ++ .sibling = NULL, ++}; ++ + static struct device_node trusty_wall_node = { + .name = "trusty-wall", + .sibling = NULL, +@@ -696,12 +701,24 @@ static struct platform_device trusty_platform_dev_wall = { + }, + }; + ++static struct platform_device trusty_platform_dev_timer = { ++ .name = "trusty-timer", ++ .id = -1, ++ .num_resources = 0, ++ .dev = { ++ .release = trusty_dev_release, ++ .parent = &trusty_platform_dev_wall.dev, ++ .of_node = &trusty_timer_node, ++ }, ++}; ++ + static struct platform_device *trusty_devices[] __initdata = { + &trusty_platform_dev, + &trusty_platform_dev_log, + &trusty_platform_dev_virtio, + &trusty_platform_dev_irq, +- &trusty_platform_dev_wall ++ &trusty_platform_dev_wall, ++ &trusty_platform_dev_timer + }; + static int __init trusty_driver_init(void) + { +diff --git a/include/linux/trusty/smwall.h b/include/linux/trusty/smwall.h +index 370d8b32f26a..66368de8c137 100644 +--- a/include/linux/trusty/smwall.h ++++ b/include/linux/trusty/smwall.h +@@ -87,4 +87,18 @@ struct trusty_wall_dev_state { + size_t sz; + }; + ++/* ID's of well known wall objects */ ++#define SM_WALL_PER_CPU_SEC_TIMER_ID 1 ++ ++/** ++ * struct sec_timer_state - structure to hold secute timer state ++ * @tv_ns: If non-zero this field contains snapshot of timers ++ * current time (ns). 
++ * @cv_ns: next timer event configured (ns) ++ */ ++struct sec_timer_state { ++ u64 tv_ns; ++ u64 cv_ns; ++}; ++ + #endif /* __LINUX_TRUSTY_SMWALL_H */ +-- +2.17.1 + diff --git a/patches/0042-udp-hook-into-time-based-transmission.connectivity b/patches/0042-udp-hook-into-time-based-transmission.connectivity new file mode 100644 index 0000000000..95aa871d17 --- /dev/null +++ b/patches/0042-udp-hook-into-time-based-transmission.connectivity @@ -0,0 +1,34 @@ +From 7e9bb57b0e3f9d62fb0d920ac5051afd0c5cd42c Mon Sep 17 00:00:00 2001 +From: Kweh Hock Leong +Date: Fri, 17 Nov 2017 08:38:15 +0800 +Subject: [PATCH 042/108] udp: hook into time based transmission + +For udp packets, copy the desired future transmit time +from the CMSG cookie into the skb. + +Signed-off-by: Kweh Hock Leong +Signed-off-by: Ong Boon Leong +Signed-off-by: Voon Weifeng +--- + net/ipv4/udp.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index 14bc654b6842..5455d3fc3987 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -1174,8 +1174,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + sizeof(struct udphdr), &ipc, &rt, + &cork, msg->msg_flags); + err = PTR_ERR(skb); +- if (!IS_ERR_OR_NULL(skb)) ++ if (!IS_ERR_OR_NULL(skb)) { ++ skb->tstamp = ipc.sockc.transmit_time; + err = udp_send_skb(skb, fl4, &cork); ++ } + goto out; + } + +-- +2.17.1 + diff --git a/patches/0043-ASoC-Intel-Skylake-Add-asynch-time-info-feature.audio b/patches/0043-ASoC-Intel-Skylake-Add-asynch-time-info-feature.audio new file mode 100644 index 0000000000..ca06e8933f --- /dev/null +++ b/patches/0043-ASoC-Intel-Skylake-Add-asynch-time-info-feature.audio @@ -0,0 +1,284 @@ +From 50c2bfa57e7afcd18a0b9390fcea516a9b297c5f Mon Sep 17 00:00:00 2001 +From: Gustaw Lewandowski +Date: Fri, 15 Mar 2019 01:15:41 +0100 +Subject: [PATCH 043/193] ASoC: Intel: Skylake: Add asynch time info feature + +Implementation of asynch time info - feature named HammockHarbor. 
+Driver gets pcm_get_time_info request from user space and sends HHTSE +to last copier instance (BE) which talks to SSP. FW returns timestamp +data asynch via IPC notification, so pcm_get_time_info call is blocked +waiting for it. + +Change-Id: I57877f5d347e59354cfebf9480c709fcb96d2ee6 +Signed-off-by: Gustaw Lewandowski +--- + sound/soc/intel/skylake/skl-pcm.c | 76 +++++++++++++++++++++++++- + sound/soc/intel/skylake/skl-sst-ipc.c | 40 ++++++++++++++ + sound/soc/intel/skylake/skl-sst-ipc.h | 8 +++ + sound/soc/intel/skylake/skl-topology.c | 2 + + sound/soc/intel/skylake/skl-topology.h | 10 ++++ + 5 files changed, 134 insertions(+), 2 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c +index 690b1f90320e..ac87d177229a 100644 +--- a/sound/soc/intel/skylake/skl-pcm.c ++++ b/sound/soc/intel/skylake/skl-pcm.c +@@ -35,7 +35,8 @@ static const struct snd_pcm_hardware azx_pcm_hw = { + SNDRV_PCM_INFO_SYNC_START | + SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */ + SNDRV_PCM_INFO_HAS_LINK_ATIME | +- SNDRV_PCM_INFO_NO_PERIOD_WAKEUP), ++ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP | ++ SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME), + .formats = SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S32_LE | + SNDRV_PCM_FMTBIT_S24_LE, +@@ -1245,6 +1246,41 @@ static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream, + return (nsec > codec_nsecs) ? 
nsec - codec_nsecs : 0; + } + ++static struct skl_module_cfg * ++skl_find_first_be_mconfig(struct snd_pcm_substream *substream) ++{ ++ struct snd_soc_pcm_runtime *rtd = substream->private_data; ++ struct snd_soc_dai *be_dai = NULL; ++ struct snd_soc_dpcm *dpcm; ++ struct snd_pcm_substream *be_substream; ++ struct snd_soc_pcm_runtime *be_rtd; ++ struct skl_module_cfg *mconfig; ++ ++ /* find first BE copier for given substream */ ++ for_each_dpcm_be(rtd, substream->stream, dpcm) { ++ be_rtd = dpcm->be; ++ be_substream = snd_soc_dpcm_get_substream(be_rtd, ++ substream->stream); ++ be_rtd = snd_pcm_substream_chip(be_substream); ++ be_dai = be_rtd->cpu_dai; ++ break; ++ } ++ ++ if (!be_dai) { ++ dev_err(rtd->dev, "%s: Could not find BE DAI\n", ++ __func__); ++ return NULL; ++ } ++ ++ mconfig = skl_tplg_be_get_cpr_module(be_dai, substream->stream); ++ if (!mconfig) ++ dev_err(rtd->dev, "%s: Could not find copier in BE\n", ++ __func__); ++ ++ return mconfig; ++} ++ ++#define SKL_LOCAL_TSCTRL_HHTSE (1 << 7) + static int skl_get_time_info(struct snd_pcm_substream *substream, + struct timespec *system_ts, struct timespec *audio_ts, + struct snd_pcm_audio_tstamp_config *audio_tstamp_config, +@@ -1252,7 +1288,9 @@ static int skl_get_time_info(struct snd_pcm_substream *substream, + { + struct hdac_ext_stream *sstream = get_hdac_ext_stream(substream); + struct hdac_stream *hstr = hdac_stream(sstream); ++ struct snd_soc_pcm_runtime *rtd = substream->private_data; + u64 nsec; ++ int ret = 0; + + if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) && + (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) { +@@ -1269,12 +1307,46 @@ static int skl_get_time_info(struct snd_pcm_substream *substream, + audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK; + audio_tstamp_report->accuracy_report = 1; /* rest of struct is valid */ + audio_tstamp_report->accuracy = 42; /* 24MHzWallClk == 42ns resolution */ ++ } else if 
((substream->runtime->hw.info & ++ SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME) && ++ (audio_tstamp_config->type_requested == ++ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)) { ++ struct skl_module_cfg *mconfig = ++ skl_find_first_be_mconfig(substream); ++ u32 local_ts_control = SKL_LOCAL_TSCTRL_HHTSE; ++ struct skl_dev *skl = bus_to_skl(hstr->bus); ++ ++ if (!mconfig) ++ return -EINVAL; ++ ++ ret = skl_set_module_params(skl, &local_ts_control, ++ sizeof(local_ts_control), SKL_COPIER_TIMESTAMP_INIT, ++ mconfig); ++ if (ret < 0) { ++ dev_err(rtd->dev, "%s: Could not send timestamp init\n", ++ __func__); ++ return ret; ++ } + ++ ret = wait_for_completion_interruptible_timeout( ++ &mconfig->ts_completion, msecs_to_jiffies(1000)); ++ if (ret <= 0) { ++ dev_warn(rtd->dev, "%s: timestamp notification timeout\n", ++ __func__); ++ return ret ? ret : -ETIMEDOUT; ++ } ++ snd_pcm_gettime(substream->runtime, system_ts); ++ audio_tstamp_report->actual_type = ++ audio_tstamp_config->type_requested; ++ /* rest of struct is valid */ ++ audio_tstamp_report->accuracy_report = 1; ++ *system_ts = ns_to_timespec(mconfig->ts.local_walclk); ++ *audio_ts = ns_to_timespec(mconfig->ts.time_stamp_cnt); + } else { + audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT; + } + +- return 0; ++ return ret; + } + + static const struct snd_pcm_ops skl_platform_ops = { +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c +index 72d7284d2fff..1d9492b97879 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.c ++++ b/sound/soc/intel/skylake/skl-sst-ipc.c +@@ -13,6 +13,7 @@ + #include "skl-sst-ipc.h" + #include "skl-topology.h" + #include "sound/hdaudio_ext.h" ++#include "skl-topology.h" + + + #define IPC_IXC_STATUS_BITS 24 +@@ -278,6 +279,12 @@ enum skl_ipc_module_msg { + IPC_MOD_SET_D0IX = 8 + }; + ++struct skl_event_timestamp_notify { ++ u32 module_instance_id; ++ u32 node_id; ++ struct skl_event_timestamp ts; ++} __packed; ++ + void 
skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data, + size_t tx_size) + { +@@ -343,6 +350,36 @@ static struct ipc_message *skl_ipc_reply_get_msg(struct sst_generic_ipc *ipc, + + } + ++static ++int skl_process_timestamp_notification(struct skl_dev *skl) ++{ ++ struct skl_module_cfg *mconfig; ++ struct skl_event_timestamp_notify ts_notif; ++ struct skl_pipeline *ppl; ++ struct skl_pipe_module *m; ++ u32 instance_id; ++ int copier_id = skl_get_module_id(skl, &skl_copier_mod_uuid); ++ int ret = -ENXIO; ++ ++ sst_dsp_inbox_read(skl->dsp, &ts_notif, sizeof(ts_notif)); ++ instance_id = ts_notif.module_instance_id & IPC_MOD_INSTANCE_ID_MASK; ++ dev_dbg(skl->dev, "%s copier instance:%d\n", __func__, instance_id); ++ ++ list_for_each_entry(ppl, &skl->ppl_list, node) ++ list_for_each_entry(m, &ppl->pipe->w_list, node) { ++ mconfig = m->w->priv; ++ if ((mconfig->id.module_id == copier_id) && ++ (mconfig->id.pvt_id == instance_id)) { ++ mconfig->ts = ts_notif.ts; ++ complete(&mconfig->ts_completion); ++ ret = 0; ++ break; ++ } ++ } ++ ++ return ret; ++} ++ + int skl_ipc_process_notification(struct sst_generic_ipc *ipc, + struct skl_ipc_header header) + { +@@ -378,6 +415,9 @@ int skl_ipc_process_notification(struct sst_generic_ipc *ipc, + skl->miscbdcg_disabled = true; + break; + ++ case IPC_GLB_NOTIFY_TIMESTAMP_CAPTURED: ++ return skl_process_timestamp_notification(skl); ++ + default: + dev_err(ipc->dev, "ipc: Unhandled error msg=%x\n", + header.primary); +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h +index 50fedc213837..7d58a2f05af6 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.h ++++ b/sound/soc/intel/skylake/skl-sst-ipc.h +@@ -138,6 +138,10 @@ struct skl_tlv { + u8 value[0]; + }; + ++static const guid_t skl_copier_mod_uuid = ++ GUID_INIT(0x9BA00C83, 0xCA12, 0x4A83, 0x94, 0x3C, ++ 0x1F, 0xA2, 0xE8, 0x2F, 0x9D, 0xDA); ++ + enum skl_ipc_pipeline_state { + PPL_INVALID_STATE = 0, + PPL_UNINITIALIZED = 1, +@@ -149,6 
+153,10 @@ enum skl_ipc_pipeline_state { + PPL_RESTORED = 7 + }; + ++enum skl_copier_runtime_param { ++ SKL_COPIER_TIMESTAMP_INIT = 1, ++}; ++ + struct skl_ipc_dxstate_info { + u32 core_mask; + u32 dx_mask; +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index 3af4bcaacf92..ed30cb454794 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ b/sound/soc/intel/skylake/skl-topology.c +@@ -2887,6 +2887,8 @@ static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index, + if (!mconfig) + return -ENOMEM; + ++ init_completion(&mconfig->ts_completion); ++ + if (skl->nr_modules == 0) { + mconfig->module = devm_kzalloc(bus->dev, + sizeof(*mconfig->module), GFP_KERNEL); +diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h +index dbc416c30846..514ccd4a6cb0 100644 +--- a/sound/soc/intel/skylake/skl-topology.h ++++ b/sound/soc/intel/skylake/skl-topology.h +@@ -13,6 +13,7 @@ + #define __SKL_TOPOLOGY_H__ + + #include ++#include + + #include + #include +@@ -248,6 +249,13 @@ struct skl_module { + struct skl_module_iface formats[SKL_MAX_MODULE_FORMATS]; + }; + ++struct skl_event_timestamp { ++ u32 isoffset; ++ u64 local_sample; ++ u64 local_walclk; ++ u64 time_stamp_cnt; ++} __packed; ++ + struct skl_module_cfg { + u8 guid[16]; + struct skl_module_inst_id id; +@@ -286,6 +294,8 @@ struct skl_module_cfg { + struct skl_pipe *pipe; + struct skl_specific_cfg formats_config; + struct skl_pipe_mcfg mod_cfg[SKL_MAX_MODULES_IN_PIPE]; ++ struct skl_event_timestamp ts; ++ struct completion ts_completion; + }; + + struct skl_algo_data { +-- +2.17.1 + diff --git a/patches/0043-drm-i915-Only-activate-i915_active-debugobject-once.drm b/patches/0043-drm-i915-Only-activate-i915_active-debugobject-once.drm new file mode 100644 index 0000000000..d416c5913c --- /dev/null +++ b/patches/0043-drm-i915-Only-activate-i915_active-debugobject-once.drm @@ -0,0 +1,46 @@ +From 
d1c67a827bd73d6c25db81ca10f30df798686bdf Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Tue, 27 Aug 2019 14:26:31 +0100 +Subject: [PATCH 043/690] drm/i915: Only activate i915_active debugobject once + +The point of debug_object_activate is to mark the first, and only the +first, acquisition. The object then remains active until the last +release. However, we marked up all successful first acquires even though +we allowed concurrent parties to try and acquire the i915_active +simultaneously (serialised by the i915_active.mutex). + +Testcase: igt/gem_mmap_gtt/fault-concurrent +Signed-off-by: Chris Wilson +Cc: Matthew Auld +Reviewed-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190827132631.18627-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/i915_active.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c +index 48e16ad93bbd..6a447f1d0110 100644 +--- a/drivers/gpu/drm/i915/i915_active.c ++++ b/drivers/gpu/drm/i915/i915_active.c +@@ -92,12 +92,16 @@ static void debug_active_init(struct i915_active *ref) + + static void debug_active_activate(struct i915_active *ref) + { +- debug_object_activate(ref, &active_debug_desc); ++ lockdep_assert_held(&ref->mutex); ++ if (!atomic_read(&ref->count)) /* before the first inc */ ++ debug_object_activate(ref, &active_debug_desc); + } + + static void debug_active_deactivate(struct i915_active *ref) + { +- debug_object_deactivate(ref, &active_debug_desc); ++ lockdep_assert_held(&ref->mutex); ++ if (!atomic_read(&ref->count)) /* after the last dec */ ++ debug_object_deactivate(ref, &active_debug_desc); + } + + static void debug_active_fini(struct i915_active *ref) +-- +2.17.1 + diff --git a/patches/0043-license-update-intel-license-for-ACRN-VBS.acrn b/patches/0043-license-update-intel-license-for-ACRN-VBS.acrn new file mode 100644 index 0000000000..60664ede64 --- /dev/null +++ 
b/patches/0043-license-update-intel-license-for-ACRN-VBS.acrn @@ -0,0 +1,153 @@ +From 3fc30465196604972def7fda18f1a1b391636462 Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 043/150] license: update intel license for ACRN VBS + +Change-Id: I9afb8d0b45075051e59307b41f6754ebabfe6808 +Tracked-On: 219330 +Signed-off-by: Hao Li +Reviewed-on: +--- + drivers/vbs/vbs.c | 4 ++-- + drivers/vbs/vbs_rng.c | 4 ++-- + drivers/vbs/vq.c | 4 ++-- + include/linux/vbs/vbs.h | 4 ++-- + include/linux/vbs/vbs_common_if.h | 4 ++-- + include/linux/vbs/vq.h | 4 ++-- + 6 files changed, 12 insertions(+), 12 deletions(-) + +diff --git a/drivers/vbs/vbs.c b/drivers/vbs/vbs.c +index 9d96f45b9644..6c364364db3c 100644 +--- a/drivers/vbs/vbs.c ++++ b/drivers/vbs/vbs.c +@@ -7,7 +7,7 @@ + * + * GPL LICENSE SUMMARY + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -22,7 +22,7 @@ + * + * BSD LICENSE + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions +diff --git a/drivers/vbs/vbs_rng.c b/drivers/vbs/vbs_rng.c +index 2c71186801e7..cdfed63bb1ad 100644 +--- a/drivers/vbs/vbs_rng.c ++++ b/drivers/vbs/vbs_rng.c +@@ -7,7 +7,7 @@ + * + * GPL LICENSE SUMMARY + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -22,7 +22,7 @@ + * + * BSD LICENSE + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions +diff --git a/drivers/vbs/vq.c b/drivers/vbs/vq.c +index c344002d4005..f929a29dfffd 100644 +--- a/drivers/vbs/vq.c ++++ b/drivers/vbs/vq.c +@@ -7,7 +7,7 @@ + * + * GPL LICENSE SUMMARY + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -22,7 +22,7 @@ + * + * BSD LICENSE + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions +diff --git a/include/linux/vbs/vbs.h b/include/linux/vbs/vbs.h +index ef54960e59c4..32666ee0870c 100644 +--- a/include/linux/vbs/vbs.h ++++ b/include/linux/vbs/vbs.h +@@ -7,7 +7,7 @@ + * + * GPL LICENSE SUMMARY + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -22,7 +22,7 @@ + * + * BSD LICENSE + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions +diff --git a/include/linux/vbs/vbs_common_if.h b/include/linux/vbs/vbs_common_if.h +index 1736174a2651..8da239aac224 100644 +--- a/include/linux/vbs/vbs_common_if.h ++++ b/include/linux/vbs/vbs_common_if.h +@@ -7,7 +7,7 @@ + * + * GPL LICENSE SUMMARY + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -22,7 +22,7 @@ + * + * BSD LICENSE + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions +diff --git a/include/linux/vbs/vq.h b/include/linux/vbs/vq.h +index 647466567db4..ca14f8f34888 100644 +--- a/include/linux/vbs/vq.h ++++ b/include/linux/vbs/vq.h +@@ -7,7 +7,7 @@ + * + * GPL LICENSE SUMMARY + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -22,7 +22,7 @@ + * + * BSD LICENSE + * +- * Copyright (c) 2017 Intel Corporation. All rights reserved. ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions +-- +2.17.1 + diff --git a/patches/0043-mei-add-a-spin-lock-to-protect-rd_completed-queue.security b/patches/0043-mei-add-a-spin-lock-to-protect-rd_completed-queue.security new file mode 100644 index 0000000000..37e7974dda --- /dev/null +++ b/patches/0043-mei-add-a-spin-lock-to-protect-rd_completed-queue.security @@ -0,0 +1,191 @@ +From e43a8eddcd96d72d3e479d3dff57b5da5e5f584e Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Sun, 12 Aug 2018 14:21:27 +0300 +Subject: [PATCH 43/65] mei: add a spin lock to protect rd_completed queue + +This will add ability to access read completed queue +outside of the driver big lock. + +Change-Id: I5713f32bca80b4b6ac3bf7cb9b21726b3ce7a224 +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/bus.c | 6 +++--- + drivers/misc/mei/client.c | 19 +++++++++++++------ + drivers/misc/mei/client.h | 20 ++++++++++++++++++-- + drivers/misc/mei/main.c | 6 +++--- + drivers/misc/mei/mei_dev.h | 2 ++ + 5 files changed, 39 insertions(+), 14 deletions(-) + +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index d5ba41177392..af1a6c66a342 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -152,7 +152,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, + if (timeout) { + rets = wait_event_interruptible_timeout + (cl->rx_wait, +- (!list_empty(&cl->rd_completed)) || ++ mei_cl_read_cb(cl, NULL) || + (!mei_cl_is_connected(cl)), + msecs_to_jiffies(timeout)); + if (rets == 0) +@@ -165,7 +165,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, + } else { + if (wait_event_interruptible + (cl->rx_wait, +- (!list_empty(&cl->rd_completed)) || ++ mei_cl_read_cb(cl, NULL) || + (!mei_cl_is_connected(cl)))) { + if (signal_pending(current)) + return -EINTR; +@@ -198,7 +198,7 @@ ssize_t __mei_cl_recv(struct mei_cl 
*cl, u8 *buf, size_t length, + rets = r_length; + + free: +- mei_io_cb_free(cb); ++ mei_cl_del_rd_completed(cl, cb); + out: + mutex_unlock(&bus->device_lock); + +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index c133df526fd8..37761525ac9b 100644 +--- a/drivers/misc/mei/client.c ++++ b/drivers/misc/mei/client.c +@@ -504,15 +504,19 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length, + * + * Return: cb on success, NULL if cb is not found + */ +-struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp) ++struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp) + { + struct mei_cl_cb *cb; ++ struct mei_cl_cb *ret_cb = NULL; + ++ spin_lock(&cl->rd_completed_lock); + list_for_each_entry(cb, &cl->rd_completed, list) +- if (!fp || fp == cb->fp) +- return cb; +- +- return NULL; ++ if (!fp || fp == cb->fp) { ++ ret_cb = cb; ++ break; ++ } ++ spin_unlock(&cl->rd_completed_lock); ++ return ret_cb; + } + + /** +@@ -538,7 +542,9 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) + mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl); + mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl); + mei_io_list_free_fp(&cl->rd_pending, fp); ++ spin_lock(&cl->rd_completed_lock); + mei_io_list_free_fp(&cl->rd_completed, fp); ++ spin_unlock(&cl->rd_completed_lock); + + return 0; + } +@@ -556,6 +562,7 @@ static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) + init_waitqueue_head(&cl->rx_wait); + init_waitqueue_head(&cl->tx_wait); + init_waitqueue_head(&cl->ev_wait); ++ spin_lock_init(&cl->rd_completed_lock); + INIT_LIST_HEAD(&cl->rd_completed); + INIT_LIST_HEAD(&cl->rd_pending); + INIT_LIST_HEAD(&cl->link); +@@ -1834,7 +1841,7 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) + break; + + case MEI_FOP_READ: +- list_add_tail(&cb->list, &cl->rd_completed); ++ mei_cl_add_rd_completed(cl, cb); + if (!mei_cl_is_fixed_address(cl) && + !WARN_ON(!cl->rx_flow_ctrl_creds)) 
+ cl->rx_flow_ctrl_creds--; +diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h +index c1f9e810cf81..557c7fa5f052 100644 +--- a/drivers/misc/mei/client.h ++++ b/drivers/misc/mei/client.h +@@ -85,8 +85,24 @@ int mei_cl_unlink(struct mei_cl *cl); + + struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev); + +-struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, +- const struct file *fp); ++struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp); ++ ++static inline void mei_cl_add_rd_completed(struct mei_cl *cl, ++ struct mei_cl_cb *cb) ++{ ++ spin_lock(&cl->rd_completed_lock); ++ list_add_tail(&cb->list, &cl->rd_completed); ++ spin_unlock(&cl->rd_completed_lock); ++} ++ ++static inline void mei_cl_del_rd_completed(struct mei_cl *cl, ++ struct mei_cl_cb *cb) ++{ ++ spin_lock(&cl->rd_completed_lock); ++ mei_io_cb_free(cb); ++ spin_unlock(&cl->rd_completed_lock); ++} ++ + struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, + enum mei_cb_file_ops type, + const struct file *fp); +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index 4ef6e37caafc..e7069559408c 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -178,7 +178,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, + + mutex_unlock(&dev->device_lock); + if (wait_event_interruptible(cl->rx_wait, +- !list_empty(&cl->rd_completed) || ++ mei_cl_read_cb(cl, file) || + !mei_cl_is_connected(cl))) { + if (signal_pending(current)) + return -EINTR; +@@ -229,7 +229,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, + goto out; + + free: +- mei_io_cb_free(cb); ++ mei_cl_del_rd_completed(cl, cb); + *offset = 0; + + out: +@@ -590,7 +590,7 @@ static __poll_t mei_poll(struct file *file, poll_table *wait) + if (req_events & (EPOLLIN | EPOLLRDNORM)) { + poll_wait(file, &cl->rx_wait, wait); + +- if (!list_empty(&cl->rd_completed)) ++ if (mei_cl_read_cb(cl, file)) + mask |= EPOLLIN | EPOLLRDNORM; + else + 
mei_cl_read_start(cl, mei_cl_mtu(cl), file); +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h +index 5359ea8f93cb..afe35ce1b9a3 100644 +--- a/drivers/misc/mei/mei_dev.h ++++ b/drivers/misc/mei/mei_dev.h +@@ -217,6 +217,7 @@ struct mei_cl_cb { + * @tx_cb_queued: number of tx callbacks in queue + * @writing_state: state of the tx + * @rd_pending: pending read credits ++ * @rd_completed_lock: protects rd_completed queue + * @rd_completed: completed read + * + * @cldev: device on the mei client bus +@@ -242,6 +243,7 @@ struct mei_cl { + u8 tx_cb_queued; + enum mei_file_transaction_states writing_state; + struct list_head rd_pending; ++ spinlock_t rd_completed_lock; /* protects rd_completed queue */ + struct list_head rd_completed; + + struct mei_cl_device *cldev; +-- +2.17.1 + diff --git a/patches/0043-net-stmmac-introduce-Enhanced-Tx-Descriptor-f.connectivity b/patches/0043-net-stmmac-introduce-Enhanced-Tx-Descriptor-f.connectivity new file mode 100644 index 0000000000..ba25c75cb1 --- /dev/null +++ b/patches/0043-net-stmmac-introduce-Enhanced-Tx-Descriptor-f.connectivity @@ -0,0 +1,173 @@ +From 412b5ee8ef0e2f76f8bd1b0142a4182116e74294 Mon Sep 17 00:00:00 2001 +From: Kweh Hock Leong +Date: Thu, 8 Aug 2019 17:43:30 +0800 +Subject: [PATCH 043/108] net: stmmac: introduce Enhanced Tx Descriptor for + Time-Based Scheduling + +DWMAC EQoS IPv5.xx newly introduces Time-Based Scheduling (TBS) that allow +per-packet transmission setting through Enhanced Tx Descriptor. The TxTime +is set in Launch Time field of the descriptor. 
+ +Signed-off-by: Kweh Hock Leong +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 2 + + drivers/net/ethernet/stmicro/stmmac/descs.h | 17 ++++ + .../ethernet/stmicro/stmmac/dwmac4_descs.c | 87 +++++++++++++++++++ + 3 files changed, 106 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index 842219fa7931..e37380f01275 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -408,6 +408,7 @@ struct dma_features { + + #define STMMAC_CHAIN_MODE 0x1 + #define STMMAC_RING_MODE 0x2 ++#define STMMAC_ENHANCED_TX_MODE 0x3 + + #define JUMBO_LEN 9000 + +@@ -511,5 +512,6 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); + extern const struct stmmac_mode_ops ring_mode_ops; + extern const struct stmmac_mode_ops chain_mode_ops; + extern const struct stmmac_desc_ops dwmac4_desc_ops; ++extern const struct stmmac_desc_ops dwmac5_desc_ops; + + #endif /* __COMMON_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h +index 9f0b9a9e63b3..8bab8c83121f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/descs.h ++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h +@@ -154,6 +154,14 @@ + #define RDES_PTP_SIGNALING 0xa + #define RDES_PTP_PKT_RESERVED_TYPE 0xf + ++/* Enhanced TX descriptor definitions (DWMAC v5.xx) */ ++#define ETDESC4_LTV BIT(31) ++#define ETDESC4_GSN GENMASK(11, 8) ++#define ETDESC4_GSN_SHIFT 8 ++#define ETDESC4_LT_SEC GENMASK(7, 0) ++#define ETDESC5_LT_NANOSEC GENMASK(31, 8) ++#define ETDESC5_LT_NANOSEC_SHIFT 8 ++ + /* Basic descriptor structure for normal and alternate descriptors */ + struct dma_desc { + __le32 des0; +@@ -171,6 +179,15 @@ struct dma_extended_desc { + __le32 des7; /* Tx/Rx Timestamp High */ + }; + ++/* Enhanced TX descriptor structure (e.g. 
>= databook 5.00) */ ++struct dma_enhanced_tx_desc { ++ __le32 etdes4; /* Launch Time (s), GSN, LTV */ ++ __le32 etdes5; /* Launch Time (us) */ ++ __le32 etdes6; /* Reserved */ ++ __le32 etdes7; /* Reserved */ ++ struct dma_desc basic; /* Basic descriptors */ ++}; ++ + /* Transmit checksum insertion control */ + #define TX_CIC_FULL 3 /* Include IP header and pseudoheader */ + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +index 4677dd96148a..082cb1dce13f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +@@ -534,6 +534,93 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { + .set_vlan = dwmac4_set_vlan, + }; + ++static void dwmac5_rd_init_tx_desc(struct dma_desc *p, int mode, int end) ++{ ++ p->des0 = 0; ++ p->des1 = 0; ++ p->des2 = 0; ++ p->des3 = 0; ++ ++ if (mode == STMMAC_ENHANCED_TX_MODE) { ++ struct dma_enhanced_tx_desc *enhtxdesc; ++ ++ enhtxdesc = container_of(p, struct dma_enhanced_tx_desc, basic); ++ enhtxdesc->etdes4 = 0; ++ enhtxdesc->etdes5 = 0; ++ enhtxdesc->etdes6 = 0; ++ enhtxdesc->etdes7 = 0; ++ } ++} ++ ++static void dwmac5_release_tx_desc(struct dma_desc *p, int mode) ++{ ++ p->des2 = 0; ++ p->des3 = 0; ++ ++ if (mode == STMMAC_ENHANCED_TX_MODE) { ++ struct dma_enhanced_tx_desc *enhtxdesc; ++ ++ enhtxdesc = container_of(p, struct dma_enhanced_tx_desc, basic); ++ enhtxdesc->etdes4 = 0; ++ enhtxdesc->etdes5 = 0; ++ } ++} ++ ++static void dwmac5_display_ring(void *head, unsigned int size, bool rx) ++{ ++ struct dma_enhanced_tx_desc *enhp = (struct dma_enhanced_tx_desc *)head; ++ struct dma_desc *p = (struct dma_desc *)head; ++ int i; ++ ++ pr_info("%s descriptor ring:\n", rx ? 
"RX" : "TX"); ++ ++ for (i = 0; i < size; i++) { ++ if (rx) { ++ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", ++ i, (unsigned int)virt_to_phys(p), ++ le32_to_cpu(p->des0), le32_to_cpu(p->des1), ++ le32_to_cpu(p->des2), le32_to_cpu(p->des3)); ++ p++; ++ } else { ++ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", ++ i, (unsigned int)virt_to_phys(enhp), ++ le32_to_cpu(enhp->basic.des0), ++ le32_to_cpu(enhp->basic.des1), ++ le32_to_cpu(enhp->basic.des2), ++ le32_to_cpu(enhp->basic.des3)); ++ enhp++; ++ } ++ } ++} ++ ++const struct stmmac_desc_ops dwmac5_desc_ops = { ++ .tx_status = dwmac4_wrback_get_tx_status, ++ .rx_status = dwmac4_wrback_get_rx_status, ++ .get_tx_len = dwmac4_rd_get_tx_len, ++ .get_tx_owner = dwmac4_get_tx_owner, ++ .set_tx_owner = dwmac4_set_tx_owner, ++ .set_rx_owner = dwmac4_set_rx_owner, ++ .get_tx_ls = dwmac4_get_tx_ls, ++ .get_rx_vlan_tci = dwmac4_wrback_get_rx_vlan_tci, ++ .get_rx_vlan_valid = dwmac4_wrback_get_rx_vlan_valid, ++ .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, ++ .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, ++ .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, ++ .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status, ++ .get_timestamp = dwmac4_get_timestamp, ++ .set_tx_ic = dwmac4_rd_set_tx_ic, ++ .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, ++ .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, ++ .release_tx_desc = dwmac5_release_tx_desc, ++ .init_rx_desc = dwmac4_rd_init_rx_desc, ++ .init_tx_desc = dwmac5_rd_init_tx_desc, ++ .display_ring = dwmac5_display_ring, ++ .set_mss = dwmac4_set_mss_ctxt, ++ .get_addr = dwmac4_get_addr, ++ .set_addr = dwmac4_set_addr, ++ .clear = dwmac4_clear, ++}; ++ + const struct stmmac_mode_ops dwmac4_ring_mode_ops = { + .set_16kib_bfsize = set_16kib_bfsize, + }; +-- +2.17.1 + diff --git a/patches/0043-trusty-kernel-driver-code-refine.trusty b/patches/0043-trusty-kernel-driver-code-refine.trusty new file mode 100644 index 0000000000..35def3ab92 --- /dev/null 
+++ b/patches/0043-trusty-kernel-driver-code-refine.trusty @@ -0,0 +1,242 @@ +From c96d640ceab7009579705ddc6da4e216febb33b1 Mon Sep 17 00:00:00 2001 +From: yingbinx +Date: Thu, 10 Aug 2017 06:57:08 +0000 +Subject: [PATCH 43/63] trusty kernel driver code refine + +Merge the code refine change form kerenl 4.4 code. +Use define CONFIG_X86 to sperate IA code and ARM code. +Use NR_CPUS to replace the hardcode value. +Inprove the debug log. + +Change-Id: I11075d594e6b119913cc38d79fe5fa3032ca254e +Tracked-On: OAM-48360 +Signed-off-by: yingbinx +Signed-off-by: Sheng, W +--- + drivers/trusty/trusty-irq.c | 4 +- + drivers/trusty/trusty-log.c | 6 ++- + drivers/trusty/trusty-mem.c | 71 ++++++++++++++++++++++++++++++++++- + drivers/trusty/trusty.c | 4 +- + include/linux/trusty/trusty.h | 2 + + 5 files changed, 80 insertions(+), 7 deletions(-) + mode change 100644 => 100755 drivers/trusty/trusty-log.c + mode change 100644 => 100755 drivers/trusty/trusty-mem.c + mode change 100644 => 100755 drivers/trusty/trusty.c + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index eda0bff48c40..b576729ec868 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -405,7 +405,7 @@ static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq) + struct trusty_irq *trusty_irq; + struct trusty_irq_irqset *irqset; + +- if (cpu >= 32) ++ if (cpu >= NR_CPUS) + return -EINVAL; + trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); + irqset = per_cpu_ptr(is->percpu_irqs, cpu); +@@ -430,7 +430,7 @@ static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq) + for_each_possible_cpu(cpu) { + struct trusty_irq *trusty_irq; + +- if (cpu >= 32) ++ if (cpu >= NR_CPUS) + return -EINVAL; + trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); + hlist_del(&trusty_irq->node); +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +old mode 100644 +new mode 100755 +index c5a85ccaf222..b58715cc2ef3 +--- 
a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -156,10 +156,12 @@ static void trusty_vmm_dump_header(struct deadloop_dump *dump) + return; + + header = &(dump->header); ++ pr_info("-----------VMM PANIC HEADER-----------\n"); + pr_info("VMM version = %s\n", header->vmm_version); + pr_info("Signature = %s\n", header->signature); + pr_info("Error_info = %s\n", header->error_info); + pr_info("Cpuid = %d\n", header->cpuid); ++ pr_info("-----------END OF VMM PANIC HEADER-----------\n"); + } + + static void trusty_vmm_dump_data(struct deadloop_dump *dump) +@@ -172,6 +174,7 @@ static void trusty_vmm_dump_data(struct deadloop_dump *dump) + + dump_data = &(dump->data); + ++ pr_info("-----------VMM PANIC DATA INFO-----------\n"); + pstr = (char *)dump_data->data; + for (p = pstr; p < ((char *)dump_data->data + dump_data->length); p++) { + if (*p == '\r') { +@@ -187,12 +190,13 @@ static void trusty_vmm_dump_data(struct deadloop_dump *dump) + *p = 0x00; + pr_info("%s\n", pstr); + } ++ pr_info("-----------END OF VMM PANIC DATA INFO-----------\n"); + } + + static int trusty_vmm_panic_notify(struct notifier_block *nb, + unsigned long action, void *data) + { +- struct deadloop_dump *dump_info; ++ struct deadloop_dump *dump_info = NULL; + + if (g_vmm_debug_buf) { + dump_info = (struct deadloop_dump *)g_vmm_debug_buf; +diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c +old mode 100644 +new mode 100755 +index 1317ec734315..fc299e348581 +--- a/drivers/trusty/trusty-mem.c ++++ b/drivers/trusty/trusty-mem.c +@@ -26,7 +26,58 @@ + + static int get_mem_attr(struct page *page, pgprot_t pgprot) + { +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) ++#if defined(CONFIG_ARM64) ++ uint64_t mair; ++ uint attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2; ++ ++ asm ("mrs %0, mair_el1\n" : "=&r" (mair)); ++ return (mair >> (attr_index * 8)) & 0xff; ++ ++#elif defined(CONFIG_ARM_LPAE) ++ uint32_t mair; ++ uint attr_index = ((pgprot_val(pgprot) & 
L_PTE_MT_MASK) >> 2); ++ ++ if (attr_index >= 4) { ++ attr_index -= 4; ++ asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair)); ++ } else { ++ asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair)); ++ } ++ return (mair >> (attr_index * 8)) & 0xff; ++ ++#elif defined(CONFIG_ARM) ++ /* check memory type */ ++ switch (pgprot_val(pgprot) & L_PTE_MT_MASK) { ++ case L_PTE_MT_WRITEALLOC: ++ /* Normal: write back write allocate */ ++ return 0xFF; ++ ++ case L_PTE_MT_BUFFERABLE: ++ /* Normal: non-cacheble */ ++ return 0x44; ++ ++ case L_PTE_MT_WRITEBACK: ++ /* Normal: writeback, read allocate */ ++ return 0xEE; ++ ++ case L_PTE_MT_WRITETHROUGH: ++ /* Normal: write through */ ++ return 0xAA; ++ ++ case L_PTE_MT_UNCACHED: ++ /* strongly ordered */ ++ return 0x00; ++ ++ case L_PTE_MT_DEV_SHARED: ++ case L_PTE_MT_DEV_NONSHARED: ++ /* device */ ++ return 0x04; ++ ++ default: ++ return -EINVAL; ++ } ++#elif defined(CONFIG_X86) ++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + /* The porting to CHT kernel (3.14.55) is in the #else clause. + ** For BXT kernel (4.1.0), the function get_page_memtype() is static. + ** +@@ -42,7 +93,7 @@ static int get_mem_attr(struct page *page, pgprot_t pgprot) + ** with SMP, which only allow UNCACHED. 
+ */ + return NS_MAIR_NORMAL_UNCACHED; +-#else ++ #else + unsigned long type; + int ret_mem_attr = 0; + +@@ -73,6 +124,9 @@ static int get_mem_attr(struct page *page, pgprot_t pgprot) + ret_mem_attr = -EINVAL; + } + return ret_mem_attr; ++ #endif ++#else ++ return 0; + #endif + } + +@@ -92,10 +146,23 @@ int trusty_encode_page_info(struct ns_mem_page_info *inf, + mem_attr = get_mem_attr(page, pgprot); + if (mem_attr < 0) + return mem_attr; ++ ++ /* add other attributes */ ++#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE) ++ pte |= pgprot_val(pgprot); ++#elif defined(CONFIG_ARM) ++ if (pgprot_val(pgprot) & L_PTE_USER) ++ pte |= (1 << 6); ++ if (pgprot_val(pgprot) & L_PTE_RDONLY) ++ pte |= (1 << 7); ++ if (pgprot_val(pgprot) & L_PTE_SHARED) ++ pte |= (3 << 8); /* inner sharable */ ++#elif defined(CONFIG_X86) + if (pgprot_val(pgprot) & _PAGE_USER) + pte |= (1 << 6); + if (!(pgprot_val(pgprot) & _PAGE_RW)) + pte |= (1 << 7); ++#endif + + inf->attr = (pte & 0x0000FFFFFFFFFFFFull) | ((uint64_t)mem_attr << 48); + return 0; +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +old mode 100644 +new mode 100755 +index 1568849e4501..d4eeb40e2b60 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -209,7 +209,7 @@ static long trusty_std_call32_work(void *args) + + BUG_ON(!args); + +- work_args = args; ++ work_args = (struct trusty_std_call32_args *)args; + dev = work_args->dev; + s = platform_get_drvdata(to_platform_device(dev)); + +@@ -332,7 +332,7 @@ static void trusty_init_version(struct trusty_state *s, struct device *dev) + } + s->version_str[i] = '\0'; + +- dev_info(dev, "trusty version: Built: %s\n", s->version_str); ++ dev_info(dev, "trusty version: %s\n", s->version_str); + + ret = device_create_file(dev, &dev_attr_trusty_version); + if (ret) +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index 029b0986566f..1e9b4559d1b6 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ 
-88,10 +88,12 @@ void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop); + #define TRUSTY_VMCALL_PENDING_INTR 0x74727505 + static inline void set_pending_intr_to_lk(uint8_t vector) + { ++#ifdef CONFIG_X86 + __asm__ __volatile__( + "vmcall" + ::"a"(TRUSTY_VMCALL_PENDING_INTR), "b"(vector) + ); ++#endif + } + + void trusty_update_wall_info(struct device *dev, void *va, size_t sz); +-- +2.17.1 + diff --git a/patches/0044-ASoC-Intel-Skylake-Add-delete-instance-IPC.audio b/patches/0044-ASoC-Intel-Skylake-Add-delete-instance-IPC.audio new file mode 100644 index 0000000000..5d0b47b1a8 --- /dev/null +++ b/patches/0044-ASoC-Intel-Skylake-Add-delete-instance-IPC.audio @@ -0,0 +1,76 @@ +From d6787361e7e0dffa4eb4ff82cedfb474d9a3fe1a Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Tue, 19 Mar 2019 14:43:54 +0100 +Subject: [PATCH 044/193] ASoC: Intel: Skylake: Add delete instance IPC + +Standalone modules - with no parent pipeline assigned - have to be +deleted explicitly using Delete Instance IPC request. When owned by +pipeline, this is not required as pipeline takes care of module cleanup +during its deletion. 
+ +Change-Id: Iafce0d81f8cc1b4531c2ee61fa98a20ca51c039b +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-sst-ipc.c | 25 ++++++++++++++++++++++++- + sound/soc/intel/skylake/skl-sst-ipc.h | 2 ++ + 2 files changed, 26 insertions(+), 1 deletion(-) + +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c +index 1d9492b97879..b7d422e6d95b 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.c ++++ b/sound/soc/intel/skylake/skl-sst-ipc.c +@@ -276,7 +276,8 @@ enum skl_ipc_module_msg { + IPC_MOD_BIND = 5, + IPC_MOD_UNBIND = 6, + IPC_MOD_SET_DX = 7, +- IPC_MOD_SET_D0IX = 8 ++ IPC_MOD_SET_D0IX = 8, ++ IPC_MOD_DELETE_INSTANCE = 11 + }; + + struct skl_event_timestamp_notify { +@@ -1096,6 +1097,28 @@ int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc, struct skl_ipc_d0ix_msg *msg) + } + EXPORT_SYMBOL_GPL(skl_ipc_set_d0ix); + ++int skl_ipc_delete_instance(struct sst_generic_ipc *ipc, ++ unsigned int module_id, unsigned int instance_id) ++{ ++ struct skl_ipc_header header = {0}; ++ struct sst_ipc_message request = {0}; ++ int ret; ++ ++ header.primary = IPC_MSG_TARGET(IPC_MOD_MSG); ++ header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST); ++ header.primary |= IPC_GLB_TYPE(IPC_MOD_DELETE_INSTANCE); ++ header.primary |= IPC_MOD_INSTANCE_ID(instance_id); ++ header.primary |= IPC_MOD_ID(module_id); ++ request.header = *(u64 *)&header; ++ ++ ret = sst_ipc_tx_message_wait(ipc, request, NULL); ++ if (ret < 0) ++ dev_err(ipc->dev, "ipc: delete instance failed, ret %d\n", ret); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(skl_ipc_delete_instance); ++ + int skl_ipc_fw_cfg_get(struct sst_generic_ipc *ipc, struct skl_fw_cfg *cfg) + { + struct skl_ipc_large_config_msg msg = {0}; +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h +index 7d58a2f05af6..3ffaafb2e8a4 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.h ++++ b/sound/soc/intel/skylake/skl-sst-ipc.h +@@ -391,6 +391,8 @@ int skl_sst_ipc_load_library(struct 
sst_generic_ipc *ipc, + + int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc, + struct skl_ipc_d0ix_msg *msg); ++int skl_ipc_delete_instance(struct sst_generic_ipc *ipc, ++ unsigned int module_id, unsigned int instance_id); + + int skl_ipc_check_D0i0(struct sst_dsp *dsp, bool state); + +-- +2.17.1 + diff --git a/patches/0044-Change-Trusty-Kconfig-to-build-for-X86-Arch-only.trusty b/patches/0044-Change-Trusty-Kconfig-to-build-for-X86-Arch-only.trusty new file mode 100644 index 0000000000..3a7bd19f59 --- /dev/null +++ b/patches/0044-Change-Trusty-Kconfig-to-build-for-X86-Arch-only.trusty @@ -0,0 +1,30 @@ +From a4d9915132b5cd7c89db3111992e0266d218c845 Mon Sep 17 00:00:00 2001 +From: weideng +Date: Fri, 21 Apr 2017 00:52:03 +0000 +Subject: [PATCH 44/63] Change Trusty Kconfig to build for X86 Arch only + +Currently Trusty only works on x86, so the module should never build +for other archs except x86. Add this patch to add 'depends' part on +drivers/trusty/Kconfig to disable them. + +Change-Id: Ic18f351696a9c1c31d57621a4af3e8993cc73de5 +Signed-off-by: weideng +--- + drivers/trusty/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index 7b58db5e9a21..7d26922ed84c 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -6,6 +6,7 @@ menu "Trusty" + + config TRUSTY + tristate "Trusty" ++ depends on X86 + default n + + config TRUSTY_FIQ +-- +2.17.1 + diff --git a/patches/0044-VBS-K-fix-compilation-warnings-in-VBS-K-reference-dri.acrn b/patches/0044-VBS-K-fix-compilation-warnings-in-VBS-K-reference-dri.acrn new file mode 100644 index 0000000000..b598094e78 --- /dev/null +++ b/patches/0044-VBS-K-fix-compilation-warnings-in-VBS-K-reference-dri.acrn @@ -0,0 +1,121 @@ +From 2a204ada7faf3d786dd44a5205e8b53f520cb285 Mon Sep 17 00:00:00 2001 +From: Hao Li +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 044/150] VBS-K: fix compilation warnings in VBS-K reference + driver + +Change-Id: 
I0e67e625145082d7e2a9a44f744de015855aa7ce +Tracked-On: 218445 +Signed-off-by: Hao Li +Reviewed-on: +--- + drivers/vbs/vbs_rng.c | 50 +++++++++++++++++++++++-------------------- + 1 file changed, 27 insertions(+), 23 deletions(-) + +diff --git a/drivers/vbs/vbs_rng.c b/drivers/vbs/vbs_rng.c +index cdfed63bb1ad..88f6108ca2e8 100644 +--- a/drivers/vbs/vbs_rng.c ++++ b/drivers/vbs/vbs_rng.c +@@ -124,15 +124,17 @@ static int vbs_rng_connection_cnt = 0; + /* function declarations */ + static int handle_kick(int client_id, int req_cnt); + static void vbs_rng_reset(struct vbs_rng *rng); +-static void vbs_rng_disable_vq(struct vbs_rng *rng, +- struct virtio_vq_info *vq); ++static void vbs_rng_stop(struct vbs_rng *rng); ++static void vbs_rng_flush(struct vbs_rng *rng); ++#ifdef RUNTIME_CTRL + static int vbs_rng_enable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq); ++static void vbs_rng_disable_vq(struct vbs_rng *rng, ++ struct virtio_vq_info *vq); + static void vbs_rng_stop_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq); +-static void vbs_rng_stop(struct vbs_rng *rng); + static void vbs_rng_flush_vq(struct vbs_rng *rng, int index); +-static void vbs_rng_flush(struct vbs_rng *rng); ++#endif + + /* hash table related functions */ + static void vbs_rng_hash_init(void) +@@ -347,17 +349,6 @@ static int vbs_rng_release(struct inode *inode, struct file *f) + return 0; + } + +-static struct hwrng get_hwrng(struct vbs_rng *rng) +-{ +- return rng->hwrng; +-} +- +-/* Set feature bits in kernel side device */ +-static int vbs_rng_set_features(struct vbs_rng *rng, u64 features) +-{ +- return 0; +-} +- + static long vbs_rng_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) + { +@@ -424,11 +415,17 @@ static void vbs_rng_reset(struct vbs_rng *rng) + } + + /* device specific function */ +-static void vbs_rng_disable_vq(struct vbs_rng *rng, +- struct virtio_vq_info *vq) ++static void vbs_rng_stop(struct vbs_rng *rng) ++{ ++ virtio_dev_deregister(&rng->dev); ++} ++ 
++/* device specific function */ ++static void vbs_rng_flush(struct vbs_rng *rng) + { + } + ++#ifdef RUNTIME_CTRL + /* device specific function */ + static int vbs_rng_enable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq) +@@ -437,15 +434,15 @@ static int vbs_rng_enable_vq(struct vbs_rng *rng, + } + + /* device specific function */ +-static void vbs_rng_stop_vq(struct vbs_rng *rng, +- struct virtio_vq_info *vq) ++static void vbs_rng_disable_vq(struct vbs_rng *rng, ++ struct virtio_vq_info *vq) + { + } + + /* device specific function */ +-static void vbs_rng_stop(struct vbs_rng *rng) ++static void vbs_rng_stop_vq(struct vbs_rng *rng, ++ struct virtio_vq_info *vq) + { +- virtio_dev_deregister(&rng->dev); + } + + /* device specific function */ +@@ -453,10 +450,17 @@ static void vbs_rng_flush_vq(struct vbs_rng *rng, int index) + { + } + +-/* device specific function */ +-static void vbs_rng_flush(struct vbs_rng *rng) ++static struct hwrng get_hwrng(struct vbs_rng *rng) + { ++ return rng->hwrng; ++} ++ ++/* Set feature bits in kernel side device */ ++static int vbs_rng_set_features(struct vbs_rng *rng, u64 features) ++{ ++ return 0; + } ++#endif + + static const struct file_operations vbs_rng_fops = { + .owner = THIS_MODULE, +-- +2.17.1 + diff --git a/patches/0044-drm-i915-Make-engine-s-batch-pool-safe-for-use-with-vi.drm b/patches/0044-drm-i915-Make-engine-s-batch-pool-safe-for-use-with-vi.drm new file mode 100644 index 0000000000..576f045ace --- /dev/null +++ b/patches/0044-drm-i915-Make-engine-s-batch-pool-safe-for-use-with-vi.drm @@ -0,0 +1,143 @@ +From 3ba7190bcdc9a88633610b7631eb056580533f40 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Tue, 27 Aug 2019 14:59:35 +0100 +Subject: [PATCH 044/690] drm/i915: Make engine's batch pool safe for use with + virtual engines + +A virtual engine itself does not have a batch pool, but we can gleefully +use any of its siblings instead. 
+ +Signed-off-by: Chris Wilson +Cc: Matthew Auld +Reviewed-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190827135935.3831-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4 ++-- + drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 4 ++-- + drivers/gpu/drm/i915/gt/intel_engine_pool.c | 12 +++++++++++- + drivers/gpu/drm/i915/gt/intel_engine_pool.h | 2 +- + drivers/gpu/drm/i915/gt/intel_lrc.c | 12 ++++++++++++ + drivers/gpu/drm/i915/gt/intel_lrc.h | 4 ++++ + 6 files changed, 32 insertions(+), 6 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +index b5f6937369ea..7b1d8c4e5ef5 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +@@ -1145,7 +1145,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, + u32 *cmd; + int err; + +- pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE); ++ pool = intel_engine_get_pool(eb->engine, PAGE_SIZE); + if (IS_ERR(pool)) + return PTR_ERR(pool); + +@@ -1961,7 +1961,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) + struct i915_vma *vma; + int err; + +- pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len); ++ pool = intel_engine_get_pool(eb->engine, eb->batch_len); + if (IS_ERR(pool)) + return ERR_CAST(pool); + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +index 6415f9a17e2d..5bd8de124d74 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +@@ -32,7 +32,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, + count = div_u64(vma->size, block_size); + size = (1 + 8 * count) * sizeof(u32); + size = round_up(size, PAGE_SIZE); +- pool = intel_engine_pool_get(&ce->engine->pool, size); ++ pool = intel_engine_get_pool(ce->engine, size); + if 
(IS_ERR(pool)) { + err = PTR_ERR(pool); + goto out_pm; +@@ -216,7 +216,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, + count = div_u64(dst->size, block_size); + size = (1 + 11 * count) * sizeof(u32); + size = round_up(size, PAGE_SIZE); +- pool = intel_engine_pool_get(&ce->engine->pool, size); ++ pool = intel_engine_get_pool(ce->engine, size); + if (IS_ERR(pool)) { + err = PTR_ERR(pool); + goto out_pm; +diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c +index 4cd54c569911..97d36cca8ded 100644 +--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c ++++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c +@@ -107,9 +107,19 @@ node_create(struct intel_engine_pool *pool, size_t sz) + return node; + } + ++static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine) ++{ ++ if (intel_engine_is_virtual(engine)) ++ engine = intel_virtual_engine_get_sibling(engine, 0); ++ ++ GEM_BUG_ON(!engine); ++ return &engine->pool; ++} ++ + struct intel_engine_pool_node * +-intel_engine_pool_get(struct intel_engine_pool *pool, size_t size) ++intel_engine_get_pool(struct intel_engine_cs *engine, size_t size) + { ++ struct intel_engine_pool *pool = lookup_pool(engine); + struct intel_engine_pool_node *node; + struct list_head *list; + unsigned long flags; +diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h +index 8d069efd9457..7e2123b33594 100644 +--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h ++++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h +@@ -12,7 +12,7 @@ + #include "i915_request.h" + + struct intel_engine_pool_node * +-intel_engine_pool_get(struct intel_engine_pool *pool, size_t size); ++intel_engine_get_pool(struct intel_engine_cs *engine, size_t size); + + static inline int + intel_engine_pool_mark_active(struct intel_engine_pool_node *node, +diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c +index 
06a506c29463..08da6e8662bc 100644 +--- a/drivers/gpu/drm/i915/gt/intel_lrc.c ++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c +@@ -3899,6 +3899,18 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, + return 0; + } + ++struct intel_engine_cs * ++intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, ++ unsigned int sibling) ++{ ++ struct virtual_engine *ve = to_virtual_engine(engine); ++ ++ if (sibling >= ve->num_siblings) ++ return NULL; ++ ++ return ve->siblings[sibling]; ++} ++ + void intel_execlists_show_requests(struct intel_engine_cs *engine, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, +diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h +index c2bba82bcc16..dc0252e0589e 100644 +--- a/drivers/gpu/drm/i915/gt/intel_lrc.h ++++ b/drivers/gpu/drm/i915/gt/intel_lrc.h +@@ -131,4 +131,8 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, + const struct intel_engine_cs *master, + const struct intel_engine_cs *sibling); + ++struct intel_engine_cs * ++intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, ++ unsigned int sibling); ++ + #endif /* _INTEL_LRC_H_ */ +-- +2.17.1 + diff --git a/patches/0044-mei-add-connect-with-vtag-ioctl.security b/patches/0044-mei-add-connect-with-vtag-ioctl.security new file mode 100644 index 0000000000..5a43558615 --- /dev/null +++ b/patches/0044-mei-add-connect-with-vtag-ioctl.security @@ -0,0 +1,589 @@ +From 0e55c6f2ebb7e50a4c8390b026846af8d5efc216 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Thu, 2 Aug 2018 14:54:47 +0300 +Subject: [PATCH 44/65] mei: add connect with vtag ioctl + +Change-Id: I3d68af4fd71bacac1180b80ace6d3b843142069c +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/client.c | 82 +++++++++++++- + drivers/misc/mei/client.h | 10 +- + drivers/misc/mei/main.c | 225 ++++++++++++++++++++++++++++++++++--- + drivers/misc/mei/mei_dev.h | 9 ++ + 
include/uapi/linux/mei.h | 19 ++++ + 5 files changed, 318 insertions(+), 27 deletions(-) + +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index 37761525ac9b..011de8eaf6a1 100644 +--- a/drivers/misc/mei/client.c ++++ b/drivers/misc/mei/client.c +@@ -352,6 +352,19 @@ static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb) + mei_io_cb_free(cb); + } + ++static void mei_cl_set_read_by_fp(const struct mei_cl *cl, ++ const struct file *fp) ++{ ++ struct mei_cl_vtag *cl_vtag; ++ ++ list_for_each_entry(cl_vtag, &cl->vtag_map, list) { ++ if (cl_vtag->fp == fp) { ++ cl_vtag->pending_read = true; ++ return; ++ } ++ } ++} ++ + /** + * mei_io_cb_init - allocate and initialize io callback + * +@@ -377,6 +390,7 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, + cb->buf_idx = 0; + cb->fop_type = type; + cb->vtag = 0; ++ + return cb; + } + +@@ -562,6 +576,7 @@ static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) + init_waitqueue_head(&cl->rx_wait); + init_waitqueue_head(&cl->tx_wait); + init_waitqueue_head(&cl->ev_wait); ++ INIT_LIST_HEAD(&cl->vtag_map); + spin_lock_init(&cl->rd_completed_lock); + INIT_LIST_HEAD(&cl->rd_completed); + INIT_LIST_HEAD(&cl->rd_pending); +@@ -1234,6 +1249,61 @@ static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl) + return 0; + } + ++const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag) ++{ ++ struct mei_cl_vtag *vtag_l; ++ ++ list_for_each_entry(vtag_l, &cl->vtag_map, list) ++ if (vtag_l->vtag == vtag) ++ return vtag_l->fp; ++ ++ return NULL; ++} ++ ++static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag) ++{ ++ struct mei_cl_vtag *vtag_l; ++ ++ list_for_each_entry(vtag_l, &cl->vtag_map, list) { ++ if (vtag_l->vtag == vtag) { ++ vtag_l->pending_read = false; ++ break; ++ } ++ } ++} ++ ++static void mei_cl_read_vtag_add_fc(struct mei_cl *cl) ++{ ++ struct mei_cl_vtag *cl_vtag; ++ ++ list_for_each_entry(cl_vtag, &cl->vtag_map, list) { ++ if 
(cl_vtag->pending_read) { ++ if (mei_cl_enqueue_ctrl_wr_cb(cl, ++ mei_cl_mtu(cl), ++ MEI_FOP_READ, ++ cl_vtag->fp)) ++ cl->rx_flow_ctrl_creds++; ++ break; ++ } ++ } ++} ++ ++void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb) ++{ ++ const struct file *fp; ++ ++ fp = mei_cl_fp_by_vtag(cl, cb->vtag); ++ if (fp) ++ cb->fp = fp; ++ mei_cl_reset_read_by_vtag(cl, cb->vtag); ++ ++ spin_lock(&cl->rd_completed_lock); ++ list_add_tail(&cb->list, &cl->rd_completed); ++ spin_unlock(&cl->rd_completed_lock); ++ ++ mei_cl_read_vtag_add_fc(cl); ++} ++ + /** + * mei_cl_notify_fop2req - convert fop to proper request + * +@@ -1489,13 +1559,17 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) + return 0; + + /* HW currently supports only one pending read */ +- if (cl->rx_flow_ctrl_creds) ++ if (cl->rx_flow_ctrl_creds) { ++ mei_cl_set_read_by_fp(cl, fp); + return -EBUSY; ++ } + + cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp); + if (!cb) + return -ENOMEM; + ++ mei_cl_set_read_by_fp(cl, fp); ++ + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); +@@ -1613,6 +1687,9 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + + hdr_len = mei_msg_hdr_init(mei_hdr, cb); + ++ cl_dbg(dev, cl, "Extend Header %d vtag = %d\n", ++ mei_hdr->extended, cb->vtag); ++ + /** + * Split the message only if we can write the whole host buffer + * otherwise wait for next time the host buffer is empty. 
+@@ -1723,6 +1800,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + + hdr_len = mei_msg_hdr_init(mei_hdr, cb); + ++ cl_dbg(dev, cl, "Extend Header %d vtag = %d\n", ++ mei_hdr->extended, cb->vtag); ++ + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + rets = len; +diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h +index 557c7fa5f052..2e2385e562a9 100644 +--- a/drivers/misc/mei/client.h ++++ b/drivers/misc/mei/client.h +@@ -86,14 +86,7 @@ int mei_cl_unlink(struct mei_cl *cl); + struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev); + + struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp); +- +-static inline void mei_cl_add_rd_completed(struct mei_cl *cl, +- struct mei_cl_cb *cb) +-{ +- spin_lock(&cl->rd_completed_lock); +- list_add_tail(&cb->list, &cl->rd_completed); +- spin_unlock(&cl->rd_completed_lock); +-} ++void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb); + + static inline void mei_cl_del_rd_completed(struct mei_cl *cl, + struct mei_cl_cb *cb) +@@ -111,6 +104,7 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length, + const struct file *fp); + int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp); + ++const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag); + /* + * MEI input output function prototype + */ +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index e7069559408c..afad9ca037be 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -80,6 +80,20 @@ static int mei_open(struct inode *inode, struct file *file) + return err; + } + ++static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl, ++ const struct file *fp) ++{ ++ struct mei_cl_vtag *vtag_l, *next; ++ ++ list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) { ++ if (vtag_l->fp == fp) { ++ list_del(&vtag_l->list); ++ kfree(vtag_l); ++ return; ++ } ++ } ++} ++ + /** + * mei_release - the 
release function + * +@@ -101,17 +115,26 @@ static int mei_release(struct inode *inode, struct file *file) + + mutex_lock(&dev->device_lock); + ++ mei_cl_vtag_remove_by_fp(cl, file); ++ ++ if (!list_empty(&cl->vtag_map)) { ++ cl_dbg(dev, cl, "not the last vtag\n"); ++ mei_cl_flush_queues(cl, file); ++ rets = 0; ++ goto out; ++ } ++ + rets = mei_cl_disconnect(cl); + + mei_cl_flush_queues(cl, file); + cl_dbg(dev, cl, "removing\n"); + + mei_cl_unlink(cl); ++ kfree(cl); + ++out: + file->private_data = NULL; + +- kfree(cl); +- + mutex_unlock(&dev->device_lock); + return rets; + } +@@ -237,6 +260,20 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, + mutex_unlock(&dev->device_lock); + return rets; + } ++ ++static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp) ++{ ++ struct mei_cl_vtag *cl_vtag; ++ ++ if (!fp) ++ return 0; ++ ++ list_for_each_entry(cl_vtag, &cl->vtag_map, list) ++ if (cl_vtag->fp == fp) ++ return cl_vtag->vtag; ++ return 0; ++} ++ + /** + * mei_write - the write function. + * +@@ -314,6 +351,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, + rets = -ENOMEM; + goto out; + } ++ cb->vtag = mei_cl_vtag_by_fp(cl, file); + + rets = copy_from_user(cb->buf.data, ubuf, length); + if (rets) { +@@ -333,17 +371,18 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, + * mei_ioctl_connect_client - the connect to fw client IOCTL function + * + * @file: private data of the file object +- * @data: IOCTL connect data, input and output parameters ++ * @in_client_uuid: requested UUID for connection ++ * @client: IOCTL connect data, output parameters + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success, <0 on failure. 
+ */ + static int mei_ioctl_connect_client(struct file *file, +- struct mei_connect_client_data *data) ++ const uuid_le *in_client_uuid, ++ struct mei_client *client) + { + struct mei_device *dev; +- struct mei_client *client; + struct mei_me_client *me_cl; + struct mei_cl *cl; + int rets; +@@ -351,18 +390,15 @@ static int mei_ioctl_connect_client(struct file *file, + cl = file->private_data; + dev = cl->dev; + +- if (dev->dev_state != MEI_DEV_ENABLED) +- return -ENODEV; +- + if (cl->state != MEI_FILE_INITIALIZING && + cl->state != MEI_FILE_DISCONNECTED) + return -EBUSY; + + /* find ME client we're trying to connect to */ +- me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid); ++ me_cl = mei_me_cl_by_uuid(dev, in_client_uuid); + if (!me_cl) { + dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", +- &data->in_client_uuid); ++ in_client_uuid); + rets = -ENOTTY; + goto end; + } +@@ -372,7 +408,7 @@ static int mei_ioctl_connect_client(struct file *file, + !dev->allow_fixed_address : !dev->hbm_f_fa_supported; + if (forbidden) { + dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n", +- &data->in_client_uuid); ++ in_client_uuid); + rets = -ENOTTY; + goto end; + } +@@ -386,7 +422,6 @@ static int mei_ioctl_connect_client(struct file *file, + me_cl->props.max_msg_length); + + /* prepare the output buffer */ +- client = &data->out_client_properties; + client->max_msg_length = me_cl->props.max_msg_length; + client->protocol_version = me_cl->props.protocol_version; + dev_dbg(dev->dev, "Can connect?\n"); +@@ -398,6 +433,111 @@ static int mei_ioctl_connect_client(struct file *file, + return rets; + } + ++static int mei_cl_vt_support_check(struct mei_device *dev, const uuid_le *uuid) ++{ ++ struct mei_me_client *me_cl; ++ int ret; ++ ++ if (!dev->hbm_f_vt_supported) ++ return -EOPNOTSUPP; ++ ++ me_cl = mei_me_cl_by_uuid(dev, uuid); ++ if (!me_cl) { ++ dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", ++ uuid); ++ return -ENOTTY; ++ } ++ 
ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP; ++ mei_me_cl_put(me_cl); ++ ++ return ret; ++} ++ ++static struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag) ++{ ++ struct mei_cl_vtag *cl_vtag; ++ ++ cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL); ++ if (!cl_vtag) ++ return ERR_PTR(-ENOMEM); ++ ++ INIT_LIST_HEAD(&cl_vtag->list); ++ cl_vtag->vtag = vtag; ++ cl_vtag->fp = fp; ++ ++ return cl_vtag; ++} ++ ++static int mei_ioctl_connect_vtag(struct file *file, ++ const uuid_le *in_client_uuid, ++ struct mei_client *client, ++ u8 vtag) ++{ ++ struct mei_device *dev; ++ struct mei_cl *cl; ++ struct mei_cl *pos; ++ struct mei_cl_vtag *cl_vtag; ++ ++ cl = file->private_data; ++ dev = cl->dev; ++ ++ dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag); ++ ++ if (cl->state != MEI_FILE_INITIALIZING && ++ cl->state != MEI_FILE_DISCONNECTED) ++ return -EBUSY; ++ ++ list_for_each_entry(pos, &dev->file_list, link) { ++ if (pos == cl) ++ continue; ++ if (!pos->me_cl) ++ continue; ++ ++ /* FIXME: just compare me_cl addr */ ++ if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid)) ++ continue; ++ ++ /* if tag already exist try another fp */ ++ if (mei_cl_fp_by_vtag(pos, vtag)) ++ continue; ++ ++ /* replace cl with acquired one */ ++ dev_dbg(dev->dev, "replacing with existing cl\n"); ++ mei_cl_unlink(cl); ++ kfree(cl); ++ file->private_data = pos; ++ cl = pos; ++ break; ++ } ++ ++ cl_vtag = mei_cl_vtag_alloc(file, vtag); ++ if (IS_ERR(cl_vtag)) ++ return -ENOMEM; ++ ++ list_add_tail(&cl_vtag->list, &cl->vtag_map); ++ ++ while (cl->state != MEI_FILE_INITIALIZING && ++ cl->state != MEI_FILE_DISCONNECTED && ++ cl->state != MEI_FILE_CONNECTED) { ++ mutex_unlock(&dev->device_lock); ++ wait_event_timeout(cl->wait, ++ (cl->state == MEI_FILE_CONNECTED || ++ cl->state == MEI_FILE_DISCONNECTED || ++ cl->state == MEI_FILE_DISCONNECT_REQUIRED || ++ cl->state == MEI_FILE_DISCONNECT_REPLY), ++ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); ++ 
mutex_lock(&dev->device_lock); ++ } ++ ++ if (!mei_cl_is_connected(cl)) ++ return mei_ioctl_connect_client(file, in_client_uuid, client); ++ ++ client->max_msg_length = cl->me_cl->props.max_msg_length; ++ client->protocol_version = cl->me_cl->props.protocol_version; ++ ++ return 0; ++} ++ + /** + * mei_ioctl_client_notify_request - + * propagate event notification request to client +@@ -454,7 +594,11 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) + { + struct mei_device *dev; + struct mei_cl *cl = file->private_data; +- struct mei_connect_client_data connect_data; ++ struct mei_connect_client_data conn; ++ struct mei_connect_client_data_vtag conn_vtag; ++ const uuid_le *cl_uuid; ++ struct mei_client *props; ++ u8 vtag; + u32 notify_get, notify_req; + int rets; + +@@ -475,20 +619,65 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) + switch (cmd) { + case IOCTL_MEI_CONNECT_CLIENT: + dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); +- if (copy_from_user(&connect_data, (char __user *)data, +- sizeof(struct mei_connect_client_data))) { ++ if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) { + dev_dbg(dev->dev, "failed to copy data from userland\n"); + rets = -EFAULT; + goto out; + } ++ cl_uuid = &conn.in_client_uuid; ++ props = &conn.out_client_properties; ++ vtag = 0; ++ ++ if (!mei_cl_vt_support_check(dev, cl_uuid)) ++ rets = mei_ioctl_connect_vtag(file, cl_uuid, props, ++ vtag); ++ else ++ rets = mei_ioctl_connect_client(file, cl_uuid, props); ++ if (rets) ++ goto out; ++ ++ /* if all is ok, copying the data back to user. 
*/ ++ if (copy_to_user((char __user *)data, &conn, sizeof(conn))) { ++ dev_dbg(dev->dev, "failed to copy data to userland\n"); ++ rets = -EFAULT; ++ goto out; ++ } ++ ++ break; ++ ++ case IOCTL_MEI_CONNECT_CLIENT_VTAG: ++ dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n"); ++ if (copy_from_user(&conn_vtag, (char __user *)data, ++ sizeof(conn_vtag))) { ++ dev_dbg(dev->dev, "failed to copy data from userland\n"); ++ rets = -EFAULT; ++ goto out; ++ } ++ ++ cl_uuid = &conn_vtag.connect.in_client_uuid; ++ props = &conn_vtag.out_client_properties; ++ vtag = conn_vtag.connect.vtag; ++ ++ if (mei_cl_vt_support_check(dev, cl_uuid)) { ++ dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n", ++ cl_uuid); ++ rets = -EOPNOTSUPP; ++ goto out; ++ } ++ ++ if (!vtag) { ++ dev_dbg(dev->dev, "vtag can't be zero\n"); ++ rets = -EINVAL; ++ goto out; ++ } + +- rets = mei_ioctl_connect_client(file, &connect_data); ++ rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag); + if (rets) + goto out; + + /* if all is ok, copying the data back to user. 
*/ +- if (copy_to_user((char __user *)data, &connect_data, +- sizeof(struct mei_connect_client_data))) { ++ if (copy_to_user((char __user *)data, &conn_vtag, ++ sizeof(conn_vtag))) { + dev_dbg(dev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + goto out; +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h +index afe35ce1b9a3..edab5f98665a 100644 +--- a/drivers/misc/mei/mei_dev.h ++++ b/drivers/misc/mei/mei_dev.h +@@ -193,6 +193,13 @@ struct mei_cl_cb { + u32 blocking:1; + }; + ++struct mei_cl_vtag { ++ struct list_head list; ++ const struct file *fp; ++ u8 vtag; ++ u8 pending_read:1; ++}; ++ + /** + * struct mei_cl - me client host representation + * carried in file->private_data +@@ -209,6 +216,7 @@ struct mei_cl_cb { + * @me_cl: fw client connected + * @fp: file associated with client + * @host_client_id: host id ++ * @vtag_map: vm tag map + * @tx_flow_ctrl_creds: transmit flow credentials + * @rx_flow_ctrl_creds: receive flow credentials + * @timer_count: watchdog timer for operation completion +@@ -235,6 +243,7 @@ struct mei_cl { + struct mei_me_client *me_cl; + const struct file *fp; + u8 host_client_id; ++ struct list_head vtag_map; + u8 tx_flow_ctrl_creds; + u8 rx_flow_ctrl_creds; + u8 timer_count; +diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h +index c6aec86cc5de..cc46028ca1ed 100644 +--- a/include/uapi/linux/mei.h ++++ b/include/uapi/linux/mei.h +@@ -66,4 +66,23 @@ struct mei_connect_client_data { + */ + #define IOCTL_MEI_NOTIFY_GET _IOR('H', 0x03, __u32) + ++/* ++ * IOCTL Connect Client Data structure with vtag ++ */ ++struct mei_connect_client_vtag { ++ uuid_le in_client_uuid; ++ __u8 vtag; ++ __u8 reserved[3]; ++}; ++ ++struct mei_connect_client_data_vtag { ++ union { ++ struct mei_connect_client_vtag connect; ++ struct mei_client out_client_properties; ++ }; ++}; ++ ++#define IOCTL_MEI_CONNECT_CLIENT_VTAG \ ++ _IOWR('H', 0x04, struct mei_connect_client_data_vtag) ++ + #endif /* _LINUX_MEI_H */ +-- 
+2.17.1 + diff --git a/patches/0044-net-stmmac-add-DMA-Tx-chan-init-for-Time-Base.connectivity b/patches/0044-net-stmmac-add-DMA-Tx-chan-init-for-Time-Base.connectivity new file mode 100644 index 0000000000..692c4afa55 --- /dev/null +++ b/patches/0044-net-stmmac-add-DMA-Tx-chan-init-for-Time-Base.connectivity @@ -0,0 +1,112 @@ +From 3ca78f77a9c2802abba8a671878f2cc223c6fc34 Mon Sep 17 00:00:00 2001 +From: Kweh Hock Leong +Date: Thu, 8 Aug 2019 18:44:56 +0800 +Subject: [PATCH 044/108] net: stmmac: add DMA Tx chan init for Time-Based + Scheduling + +Enable Enhanced Descriptor Enable (EDSE) in DMA Tx Channel Control for TBS. + +Signed-off-by: Kweh Hock Leong +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/dwmac4_dma.c | 46 +++++++++++++++++++ + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 4 ++ + drivers/net/ethernet/stmicro/stmmac/hwif.h | 1 + + 3 files changed, 51 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +index 68c157979b94..87c283b88408 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +@@ -13,6 +13,7 @@ + #include + #include "dwmac4.h" + #include "dwmac4_dma.h" ++#include "dwmac5.h" + + static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) + { +@@ -486,3 +487,48 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { + .qmode = dwmac4_qmode, + .set_bfsize = dwmac4_set_bfsize, + }; ++ ++static void dwmac5_dma_init_tx_chan(void __iomem *ioaddr, ++ struct stmmac_dma_cfg *dma_cfg, ++ dma_addr_t dma_tx_phy, u32 chan) ++{ ++ u32 txpbl = dma_cfg->txpbl ? 
0 : dma_cfg->pbl; ++ u32 value; ++ ++ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); ++ value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT) | DMA_CONTROL_EDSE; ++ ++ /* Enable OSP to get best performance */ ++ value |= DMA_CONTROL_OSP; ++ ++ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); ++ writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); ++} ++ ++const struct stmmac_dma_ops dwmac5_dma_ops = { ++ .reset = dwmac4_dma_reset, ++ .init = dwmac4_dma_init, ++ .init_chan = dwmac4_dma_init_channel, ++ .init_rx_chan = dwmac4_dma_init_rx_chan, ++ .init_tx_chan = dwmac5_dma_init_tx_chan, ++ .axi = dwmac4_dma_axi, ++ .dump_regs = dwmac4_dump_dma_regs, ++ .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, ++ .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, ++ .enable_dma_irq = dwmac410_enable_dma_irq, ++ .disable_dma_irq = dwmac4_disable_dma_irq, ++ .start_tx = dwmac4_dma_start_tx, ++ .stop_tx = dwmac4_dma_stop_tx, ++ .start_rx = dwmac4_dma_start_rx, ++ .stop_rx = dwmac4_dma_stop_rx, ++ .dma_interrupt = dwmac4_dma_interrupt, ++ .get_hw_feature = dwmac4_get_hw_feature, ++ .rx_watchdog = dwmac4_rx_watchdog, ++ .set_rx_ring_len = dwmac4_set_rx_ring_len, ++ .set_tx_ring_len = dwmac4_set_tx_ring_len, ++ .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, ++ .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, ++ .enable_tso = dwmac4_enable_tso, ++ .qmode = dwmac4_qmode, ++ .set_bfsize = dwmac4_set_bfsize, ++}; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index af55842bef08..ae0d77c09634 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -164,6 +164,9 @@ + /* CBS Global defines */ + #define CBS_IDLESLOPE_MAX 0x1fffff + ++/* DMA Tx Channel X Control register TBS bits defines */ ++#define DMA_CONTROL_EDSE BIT(28) ++ + /* MAC Core Version */ + #define TSN_VER_MASK 0xFF + +@@ -179,4 +182,5 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, 
bool enable, + u32 sub_second_inc, u32 systime_flags); + void dwmac510_tsnif_setup(struct mac_device_info *mac); ++ + #endif /* __DWMAC5_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index b6110b35dcae..8bd666a520ce 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -696,6 +696,7 @@ extern const struct stmmac_dma_ops dwmac4_dma_ops; + extern const struct stmmac_ops dwmac410_ops; + extern const struct stmmac_dma_ops dwmac410_dma_ops; + extern const struct stmmac_ops dwmac510_ops; ++extern const struct stmmac_dma_ops dwmac5_dma_ops; + extern const struct stmmac_tc_ops dwmac510_tc_ops; + extern const struct stmmac_ops dwxgmac210_ops; + extern const struct stmmac_dma_ops dwxgmac210_dma_ops; +-- +2.17.1 + diff --git a/patches/0045-ALSA-hda-Allow-for-compress-stream-to-hdac_ext_strea.audio b/patches/0045-ALSA-hda-Allow-for-compress-stream-to-hdac_ext_strea.audio new file mode 100644 index 0000000000..0b52028b79 --- /dev/null +++ b/patches/0045-ALSA-hda-Allow-for-compress-stream-to-hdac_ext_strea.audio @@ -0,0 +1,140 @@ +From e0dbebb50cff9e5313b175d948c64ea5a8400c2f Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 11:04:16 +0100 +Subject: [PATCH 045/193] ALSA: hda: Allow for compress stream to + hdac_ext_stream assignment + +Currently only PCM streams can enlist hdac_stream for their data +transfer. Add cstream field to hdac_ext_stream to expose possibility of +compress stream assignment in place of PCM one. +Limited to HOST-type only. + +Rather than copying entire hdac_ext_host_stream_assign, declare separate +PCM and compress wrappers and reuse it for both cases. 
+ +Change-Id: I54d3c597d26f0eea21cceac0c3c37d67877d7775 +Signed-off-by: Cezary Rojewski +--- + include/sound/hdaudio.h | 1 + + include/sound/hdaudio_ext.h | 2 ++ + sound/hda/ext/hdac_ext_stream.c | 41 ++++++++++++++++++++++++++++----- + 3 files changed, 38 insertions(+), 6 deletions(-) + +diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h +index b260c5fd2337..afb3d04f699d 100644 +--- a/include/sound/hdaudio.h ++++ b/include/sound/hdaudio.h +@@ -481,6 +481,7 @@ struct hdac_stream { + struct snd_pcm_substream *substream; /* assigned substream, + * set in PCM open + */ ++ struct snd_compr_stream *cstream; + unsigned int format_val; /* format value to be set in the + * controller and the codec + */ +diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h +index ef88b20c7b0a..3e16c23a9221 100644 +--- a/include/sound/hdaudio_ext.h ++++ b/include/sound/hdaudio_ext.h +@@ -84,6 +84,8 @@ int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx, + int num_stream, int dir); + void snd_hdac_stream_free_all(struct hdac_bus *bus); + void snd_hdac_link_free_all(struct hdac_bus *bus); ++struct hdac_ext_stream *hdac_ext_host_stream_compr_assign(struct hdac_bus *bus, ++ struct snd_compr_stream *cstream); + struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus, + struct snd_pcm_substream *substream, + int type); +diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c +index 6b1b4b834bae..de3369c666cd 100644 +--- a/sound/hda/ext/hdac_ext_stream.c ++++ b/sound/hda/ext/hdac_ext_stream.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + /** + * snd_hdac_ext_stream_init - initialize each stream (aka device) +@@ -281,8 +282,7 @@ hdac_ext_link_stream_assign(struct hdac_bus *bus, + } + + static struct hdac_ext_stream * +-hdac_ext_host_stream_assign(struct hdac_bus *bus, +- struct snd_pcm_substream *substream) ++hdac_ext_host_stream_assign(struct hdac_bus *bus, int direction) + { + struct 
hdac_ext_stream *res = NULL; + struct hdac_stream *stream = NULL; +@@ -296,12 +296,13 @@ hdac_ext_host_stream_assign(struct hdac_bus *bus, + struct hdac_ext_stream *hstream = container_of(stream, + struct hdac_ext_stream, + hstream); +- if (stream->direction != substream->stream) ++ if (stream->direction != direction) + continue; + + if (!stream->opened) { + if (!hstream->decoupled) +- snd_hdac_ext_stream_decouple(bus, hstream, true); ++ snd_hdac_ext_stream_decouple(bus, ++ hstream, true); + res = hstream; + break; + } +@@ -310,13 +311,41 @@ hdac_ext_host_stream_assign(struct hdac_bus *bus, + spin_lock_irq(&bus->reg_lock); + res->hstream.opened = 1; + res->hstream.running = 0; +- res->hstream.substream = substream; ++ res->hstream.substream = NULL; ++ res->hstream.cstream = NULL; + spin_unlock_irq(&bus->reg_lock); + } + + return res; + } + ++static struct hdac_ext_stream * ++hdac_ext_host_stream_pcm_assign(struct hdac_bus *bus, ++ struct snd_pcm_substream *substream) ++{ ++ struct hdac_ext_stream *res; ++ ++ res = hdac_ext_host_stream_assign(bus, substream->stream); ++ if (res) ++ res->hstream.substream = substream; ++ ++ return res; ++} ++ ++struct hdac_ext_stream * ++hdac_ext_host_stream_compr_assign(struct hdac_bus *bus, ++ struct snd_compr_stream *cstream) ++{ ++ struct hdac_ext_stream *res; ++ ++ res = hdac_ext_host_stream_assign(bus, cstream->direction); ++ if (res) ++ res->hstream.cstream = cstream; ++ ++ return res; ++} ++EXPORT_SYMBOL_GPL(hdac_ext_host_stream_compr_assign); ++ + /** + * snd_hdac_ext_stream_assign - assign a stream for the PCM + * @bus: HD-audio core bus +@@ -350,7 +379,7 @@ struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus, + return hstream; + + case HDAC_EXT_STREAM_TYPE_HOST: +- return hdac_ext_host_stream_assign(bus, substream); ++ return hdac_ext_host_stream_pcm_assign(bus, substream); + + case HDAC_EXT_STREAM_TYPE_LINK: + return hdac_ext_link_stream_assign(bus, substream); +-- +2.17.1 + diff --git 
a/patches/0045-Cleanup-Kconfig.acrn b/patches/0045-Cleanup-Kconfig.acrn new file mode 100644 index 0000000000..72d5ad07b4 --- /dev/null +++ b/patches/0045-Cleanup-Kconfig.acrn @@ -0,0 +1,78 @@ +From 858e92b4ab814ae7cf1e8eba4db5dd56198df958 Mon Sep 17 00:00:00 2001 +From: "Yan, Like" +Date: Fri, 31 Aug 2018 10:58:59 +0800 +Subject: [PATCH 045/150] Cleanup Kconfig + +Change-Id: I219bc9343fe47a1cdab70c247370beb9e425fcd4 +Signed-off-by: Yan, Like +--- + drivers/acrn/Kconfig | 5 +++-- + drivers/vbs/Kconfig | 4 ++-- + drivers/vhm/Kconfig | 1 - + 3 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/acrn/Kconfig b/drivers/acrn/Kconfig +index 9056a4f1f20a..9fc4cae04a56 100644 +--- a/drivers/acrn/Kconfig ++++ b/drivers/acrn/Kconfig +@@ -1,12 +1,13 @@ + config ACRN_SHARED_BUFFER + bool "Intel ACRN SHARED BUFFER" ++ depends on ACRN_VHM + ---help--- + Ring buffer shared between ACRN Hypervisor and its SOS. + Help ACRN performance profiling. + + config ACRN_TRACE + tristate "Intel ACRN Hypervisor Trace support" +- select ACRN_SHARED_BUFFER ++ depends on ACRN_SHARED_BUFFER + ---help--- + This is the Trace driver for the Intel ACRN hypervisor. + You can say y to build it into the kernel, or m to build +@@ -14,7 +15,7 @@ config ACRN_TRACE + + config ACRN_HVLOG + bool "Intel ACRN Hypervisor Logmsg support" +- select ACRN_SHARED_BUFFER ++ depends on ACRN_SHARED_BUFFER + ---help--- + This is the Trace driver for the Intel ACRN hypervisor log. + You can say y to build it into the kernel. +diff --git a/drivers/vbs/Kconfig b/drivers/vbs/Kconfig +index da189ec0eea4..b5141a20daaf 100644 +--- a/drivers/vbs/Kconfig ++++ b/drivers/vbs/Kconfig +@@ -2,7 +2,7 @@ + # This Kconfig describes VBS for ACRN hypervisor + # + config VBS +- tristate "Enable VBS framework for ACRN hypervisor" ++ bool "Enable VBS framework for ACRN hypervisor" + depends on ACRN_GUEST + depends on ACRN_VHM + default n +@@ -20,7 +20,7 @@ config VBS_DEBUG + say N here. This enables ACRN VBS debugging. 
+ + config VBS_RNG +- tristate "ACRN VBS reference driver: virtio RNG" ++ bool "ACRN VBS reference driver: virtio RNG" + depends on VBS != n + default n + ---help--- +diff --git a/drivers/vhm/Kconfig b/drivers/vhm/Kconfig +index 5edf0d165095..64465431673b 100644 +--- a/drivers/vhm/Kconfig ++++ b/drivers/vhm/Kconfig +@@ -3,7 +3,6 @@ config ACRN_VHM + depends on ACRN_GUEST + depends on DMA_CMA + depends on PCI_MSI +- depends on !INTEL_IOMMU + depends on !VMAP_STACK + default n + ---help--- +-- +2.17.1 + diff --git a/patches/0045-drm-i915-tgl-Guard-and-warn-if-more-than-one-eDP-panel.drm b/patches/0045-drm-i915-tgl-Guard-and-warn-if-more-than-one-eDP-panel.drm new file mode 100644 index 0000000000..946f1b8f49 --- /dev/null +++ b/patches/0045-drm-i915-tgl-Guard-and-warn-if-more-than-one-eDP-panel.drm @@ -0,0 +1,54 @@ +From d6b96b509ef898a6e2af260131cd02cb381ccd8d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= +Date: Fri, 23 Aug 2019 01:20:37 -0700 +Subject: [PATCH 045/690] drm/i915/tgl: Guard and warn if more than one eDP + panel is present +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +On TGL+ it's possible to have PSR1 enabled in other ports besides DDIA. +PSR2 is still limited to DDIA. However currently we handle only one +instance of PSR struct. Lets guard intel_psr_init_dpcd() against +multiple eDP panels and warn about it. 
+ +v2: Reword commit message to be TGL+ only and with the info where +PSR1/PSR2 are supported (Lucas) + +Cc: Dhinakaran Pandiyan +Cc: Rodrigo Vivi +Signed-off-by: José Roberto de Souza +Signed-off-by: Lucas De Marchi +Reviewed-by: Anshuman Gupta +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-6-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/display/intel_psr.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c +index 28b62e587204..78e920015a00 100644 +--- a/drivers/gpu/drm/i915/display/intel_psr.c ++++ b/drivers/gpu/drm/i915/display/intel_psr.c +@@ -283,6 +283,11 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) + struct drm_i915_private *dev_priv = + to_i915(dp_to_dig_port(intel_dp)->base.base.dev); + ++ if (dev_priv->psr.dp) { ++ DRM_WARN("More than one eDP panel found, PSR support should be extended\n"); ++ return; ++ } ++ + drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, + sizeof(intel_dp->psr_dpcd)); + +@@ -305,7 +310,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) + dev_priv->psr.sink_sync_latency = + intel_dp_get_sink_sync_latency(intel_dp); + +- WARN_ON(dev_priv->psr.dp); + dev_priv->psr.dp = intel_dp; + + if (INTEL_GEN(dev_priv) >= 9 && +-- +2.17.1 + diff --git a/patches/0045-mei-keep-pending-read-on-one-client-disconnect.security b/patches/0045-mei-keep-pending-read-on-one-client-disconnect.security new file mode 100644 index 0000000000..cb12780bcb --- /dev/null +++ b/patches/0045-mei-keep-pending-read-on-one-client-disconnect.security @@ -0,0 +1,149 @@ +From 77f6b376ae3b2ff9b3341e773b7add8d6cf711df Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Thu, 18 Oct 2018 17:40:29 +0300 +Subject: [PATCH 45/65] mei: keep pending read on one client disconnect + +Keep pending read callback for sake of other clients. +Drop data that came for already disconnected client. 
+ +Change-Id: I36fc4c3799b70ca7774ca4875a36a5ca43d06c18 +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/client.c | 45 +++++++++++++++++++++++++++++++++------ + drivers/misc/mei/main.c | 16 ++++++++------ + 2 files changed, 49 insertions(+), 12 deletions(-) + +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index 011de8eaf6a1..393aad218513 100644 +--- a/drivers/misc/mei/client.c ++++ b/drivers/misc/mei/client.c +@@ -446,6 +446,20 @@ static void mei_io_list_free_fp(struct list_head *head, const struct file *fp) + mei_io_cb_free(cb); + } + ++/** ++ * mei_cl_free_pending - free pending cb ++ * ++ * @cl: host client ++ */ ++static void mei_cl_free_pending(struct mei_cl *cl) ++{ ++ struct mei_cl_cb *cb; ++ ++ cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); ++ if (cb) ++ mei_io_cb_free(cb); ++} ++ + /** + * mei_cl_alloc_cb - a convenient wrapper for allocating read cb + * +@@ -555,7 +569,8 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) + mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl); + mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl); + mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl); +- mei_io_list_free_fp(&cl->rd_pending, fp); ++ if (!fp) ++ mei_cl_free_pending(cl); + spin_lock(&cl->rd_completed_lock); + mei_io_list_free_fp(&cl->rd_completed, fp); + spin_unlock(&cl->rd_completed_lock); +@@ -1288,20 +1303,38 @@ static void mei_cl_read_vtag_add_fc(struct mei_cl *cl) + } + } + ++static int mei_cl_vt_support_check(struct mei_cl *cl) ++{ ++ struct mei_device *dev = cl->dev; ++ ++ if (!dev->hbm_f_vt_supported) ++ return -EOPNOTSUPP; ++ ++ if (!cl->me_cl) ++ return 0; ++ ++ return cl->me_cl->props.vt_supported ? 
0 : -EOPNOTSUPP; ++} ++ + void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb) + { + const struct file *fp; + +- fp = mei_cl_fp_by_vtag(cl, cb->vtag); +- if (fp) ++ if (!mei_cl_vt_support_check(cl)) { ++ fp = mei_cl_fp_by_vtag(cl, cb->vtag); ++ if (!fp) { ++ /* client already disconnected, discarding */ ++ mei_io_cb_free(cb); ++ return; ++ } + cb->fp = fp; +- mei_cl_reset_read_by_vtag(cl, cb->vtag); ++ mei_cl_reset_read_by_vtag(cl, cb->vtag); ++ mei_cl_read_vtag_add_fc(cl); ++ } + + spin_lock(&cl->rd_completed_lock); + list_add_tail(&cb->list, &cl->rd_completed); + spin_unlock(&cl->rd_completed_lock); +- +- mei_cl_read_vtag_add_fc(cl); + } + + /** +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index afad9ca037be..6e89a442e03c 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -126,7 +126,7 @@ static int mei_release(struct inode *inode, struct file *file) + + rets = mei_cl_disconnect(cl); + +- mei_cl_flush_queues(cl, file); ++ mei_cl_flush_queues(cl, NULL); + cl_dbg(dev, cl, "removing\n"); + + mei_cl_unlink(cl); +@@ -433,7 +433,7 @@ static int mei_ioctl_connect_client(struct file *file, + return rets; + } + +-static int mei_cl_vt_support_check(struct mei_device *dev, const uuid_le *uuid) ++static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid) + { + struct mei_me_client *me_cl; + int ret; +@@ -628,7 +628,11 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) + props = &conn.out_client_properties; + vtag = 0; + +- if (!mei_cl_vt_support_check(dev, cl_uuid)) ++ rets = mei_vt_support_check(dev, cl_uuid); ++ if (rets == -ENOTTY) ++ goto out; ++ ++ if (!rets) + rets = mei_ioctl_connect_vtag(file, cl_uuid, props, + vtag); + else +@@ -658,12 +662,12 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) + props = &conn_vtag.out_client_properties; + vtag = conn_vtag.connect.vtag; + +- if (mei_cl_vt_support_check(dev, cl_uuid)) { ++ rets = 
mei_vt_support_check(dev, cl_uuid); ++ if (rets == -EOPNOTSUPP) + dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n", + cl_uuid); +- rets = -EOPNOTSUPP; ++ if (rets) + goto out; +- } + + if (!vtag) { + dev_dbg(dev->dev, "vtag can't be zero\n"); +-- +2.17.1 + diff --git a/patches/0045-net-stmmac-add-Enhanced-Tx-Descriptor-support.connectivity b/patches/0045-net-stmmac-add-Enhanced-Tx-Descriptor-support.connectivity new file mode 100644 index 0000000000..6824126d52 --- /dev/null +++ b/patches/0045-net-stmmac-add-Enhanced-Tx-Descriptor-support.connectivity @@ -0,0 +1,310 @@ +From 395bea0bac89fb0576e1160f9068d1dfc1ac8cf3 Mon Sep 17 00:00:00 2001 +From: Kweh Hock Leong +Date: Fri, 9 Aug 2019 00:22:00 +0800 +Subject: [PATCH 045/108] net: stmmac: add Enhanced Tx Descriptor support in + main flow + +Add support for Enhanced Tx Descriptor in stmmac_main.c for all +Tx related functions. + +For TSO, it is available in DWMAC v4.0 and above, so it is not +applicable for extended descriptor which is used in DWMAC v3.5. + +The tail pointer for supporting enhanced and extended descriptor +is made to be updated correctly. 
+ +Signed-off-by: Kweh Hock Leong +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2 + + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 121 ++++++++++++++++-- + 2 files changed, 109 insertions(+), 14 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +index 703d87d0c178..78d6c5dbda15 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -50,6 +50,7 @@ struct stmmac_tx_queue { + struct timer_list txtimer; + u32 queue_index; + struct stmmac_priv *priv_data; ++ struct dma_enhanced_tx_desc *dma_enhtx ____cacheline_aligned_in_smp; + struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; + struct dma_desc *dma_tx; + struct sk_buff **tx_skbuff; +@@ -205,6 +206,7 @@ struct stmmac_priv { + int tx_lpi_enabled; + unsigned int mode; + unsigned int chain_mode; ++ int enhanced_tx_desc; + int extend_desc; + struct hwtstamp_config tstamp_config; + struct ptp_clock *ptp_clock; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 0f9e24e2a6d8..1270c34f9258 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -1114,6 +1114,8 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) + + if (priv->extend_desc) + head_tx = (void *)tx_q->dma_etx; ++ else if (priv->enhanced_tx_desc) ++ head_tx = (void *)tx_q->dma_enhtx; + else + head_tx = (void *)tx_q->dma_tx; + +@@ -1188,7 +1190,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) + for (i = 0; i < DMA_TX_SIZE; i++) + if (priv->extend_desc) + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, +- priv->mode, (i == DMA_TX_SIZE - 1)); ++ priv->mode, ++ (i == DMA_TX_SIZE - 1)); ++ else if (priv->enhanced_tx_desc) ++ stmmac_init_tx_desc(priv, &tx_q->dma_enhtx[i].basic, ++ priv->mode, ++ (i == 
DMA_TX_SIZE - 1)); + else + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], + priv->mode, (i == DMA_TX_SIZE - 1)); +@@ -1416,7 +1423,12 @@ static int init_dma_tx_desc_rings(struct net_device *dev) + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, tx_q->dma_etx, +- tx_q->dma_tx_phy, DMA_TX_SIZE, 1); ++ tx_q->dma_tx_phy, ++ DMA_TX_SIZE, 1); ++ else if (priv->enhanced_tx_desc) ++ stmmac_mode_init(priv, tx_q->dma_enhtx, ++ tx_q->dma_tx_phy, ++ DMA_TX_SIZE, 1); + else + stmmac_mode_init(priv, tx_q->dma_tx, + tx_q->dma_tx_phy, DMA_TX_SIZE, 0); +@@ -1426,6 +1438,8 @@ static int init_dma_tx_desc_rings(struct net_device *dev) + struct dma_desc *p; + if (priv->extend_desc) + p = &((tx_q->dma_etx + i)->basic); ++ else if (priv->enhanced_tx_desc) ++ p = &((tx_q->dma_enhtx + i)->basic); + else + p = tx_q->dma_tx + i; + +@@ -1552,14 +1566,18 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv) + dma_free_tx_skbufs(priv, queue); + + /* Free DMA regions of consistent memory previously allocated */ +- if (!priv->extend_desc) +- dma_free_coherent(priv->device, +- DMA_TX_SIZE * sizeof(struct dma_desc), +- tx_q->dma_tx, tx_q->dma_tx_phy); +- else ++ if (priv->extend_desc) + dma_free_coherent(priv->device, DMA_TX_SIZE * + sizeof(struct dma_extended_desc), + tx_q->dma_etx, tx_q->dma_tx_phy); ++ else if (priv->enhanced_tx_desc) ++ dma_free_coherent(priv->device, DMA_TX_SIZE * ++ sizeof(struct dma_enhanced_tx_desc), ++ tx_q->dma_enhtx, tx_q->dma_tx_phy); ++ else ++ dma_free_coherent(priv->device, DMA_TX_SIZE * ++ sizeof(struct dma_desc), ++ tx_q->dma_tx, tx_q->dma_tx_phy); + + kfree(tx_q->tx_skbuff_dma); + kfree(tx_q->tx_skbuff); +@@ -1675,6 +1693,15 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) + GFP_KERNEL); + if (!tx_q->dma_etx) + goto err_dma; ++ } else if (priv->enhanced_tx_desc) { ++ tx_q->dma_enhtx = dma_alloc_coherent(priv->device, ++ DMA_TX_SIZE * ++ sizeof(struct ++ dma_enhanced_tx_desc), ++ 
&tx_q->dma_tx_phy, ++ GFP_KERNEL); ++ if (!tx_q->dma_enhtx) ++ goto err_dma; + } else { + tx_q->dma_tx = dma_alloc_coherent(priv->device, + DMA_TX_SIZE * sizeof(struct dma_desc), +@@ -1921,6 +1948,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) + + if (priv->extend_desc) + p = (struct dma_desc *)(tx_q->dma_etx + entry); ++ else if (priv->enhanced_tx_desc) ++ p = &(tx_q->dma_enhtx + entry)->basic; + else + p = tx_q->dma_tx + entry; + +@@ -2028,7 +2057,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) + for (i = 0; i < DMA_TX_SIZE; i++) + if (priv->extend_desc) + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, +- priv->mode, (i == DMA_TX_SIZE - 1)); ++ priv->mode, ++ (i == DMA_TX_SIZE - 1)); ++ else if (priv->enhanced_tx_desc) ++ stmmac_init_tx_desc(priv, &tx_q->dma_enhtx[i].basic, ++ priv->mode, ++ (i == DMA_TX_SIZE - 1)); + else + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], + priv->mode, (i == DMA_TX_SIZE - 1)); +@@ -3140,7 +3174,11 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, + + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); +- desc = tx_q->dma_tx + tx_q->cur_tx; ++ /* TSO is not available in DWMAC v3.5 */ ++ if (priv->enhanced_tx_desc) ++ desc = &(tx_q->dma_enhtx + tx_q->cur_tx)->basic; ++ else ++ desc = tx_q->dma_tx + tx_q->cur_tx; + + curr_addr = des + (total_len - tmp_len); + if (priv->dma_cap.addr64 <= 32) +@@ -3227,7 +3265,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) + + /* set new MSS value if needed */ + if (mss != tx_q->mss) { +- mss_desc = tx_q->dma_tx + tx_q->cur_tx; ++ /* TSO is not available in DWMAC v3.5 */ ++ if (priv->enhanced_tx_desc) ++ mss_desc = &(tx_q->dma_enhtx + tx_q->cur_tx)->basic; ++ else ++ mss_desc = tx_q->dma_tx + tx_q->cur_tx; + stmmac_set_mss(priv, mss_desc, mss); + tx_q->mss = mss; + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); +@@ 
-3246,8 +3288,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) + + first_entry = tx_q->cur_tx; + WARN_ON(tx_q->tx_skbuff[first_entry]); ++ /* TSO is not available in DWMAC v3.5 */ ++ if (priv->enhanced_tx_desc) ++ desc = &(tx_q->dma_enhtx + first_entry)->basic; ++ else ++ desc = tx_q->dma_tx + first_entry; + +- desc = tx_q->dma_tx + first_entry; + first = desc; + + if (has_vlan) +@@ -3374,13 +3420,29 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) + + stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); + ++ /* TSO is not available in DWMAC v3.5 */ ++ if (priv->enhanced_tx_desc) ++ stmmac_display_ring(priv, (void *)tx_q->dma_enhtx, ++ DMA_TX_SIZE, 0); ++ else ++ stmmac_display_ring(priv, (void *)tx_q->dma_tx, ++ DMA_TX_SIZE, 0); + pr_info(">>> frame to be transmitted: "); + print_pkt(skb->data, skb_headlen(skb)); + } + + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + +- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); ++ /* TSO is not available in DWMAC v3.5 */ ++ if (priv->enhanced_tx_desc) ++ tx_q->tx_tail_addr = tx_q->dma_tx_phy + ++ (tx_q->cur_tx * ++ sizeof(struct dma_enhanced_tx_desc)); ++ else ++ tx_q->tx_tail_addr = tx_q->dma_tx_phy + ++ (tx_q->cur_tx * ++ sizeof(struct dma_desc)); ++ + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + + return NETDEV_TX_OK; +@@ -3449,6 +3511,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + + if (likely(priv->extend_desc)) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); ++ else if (priv->enhanced_tx_desc) ++ desc = &tx_q->dma_enhtx[entry].basic; + else + desc = tx_q->dma_tx + entry; + +@@ -3478,6 +3542,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + + if (likely(priv->extend_desc)) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); ++ else if (priv->enhanced_tx_desc) ++ desc = 
&tx_q->dma_enhtx[entry].basic; + else + desc = tx_q->dma_tx + entry; + +@@ -3520,6 +3586,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + + if (priv->extend_desc) + tx_head = (void *)tx_q->dma_etx; ++ else if (priv->enhanced_tx_desc) ++ tx_head = (void *)tx_q->dma_enhtx; + else + tx_head = (void *)tx_q->dma_tx; + +@@ -3603,7 +3671,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + +- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); ++ if (priv->extend_desc) ++ tx_q->tx_tail_addr = tx_q->dma_tx_phy + ++ (tx_q->cur_tx * ++ sizeof(struct dma_extended_desc)); ++ else if (priv->enhanced_tx_desc) ++ tx_q->tx_tail_addr = tx_q->dma_tx_phy + ++ (tx_q->cur_tx * ++ sizeof(struct dma_enhanced_tx_desc)); ++ else ++ tx_q->tx_tail_addr = tx_q->dma_tx_phy + ++ (tx_q->cur_tx * ++ sizeof(struct dma_desc)); ++ + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + + return NETDEV_TX_OK; +@@ -4453,11 +4533,20 @@ static void sysfs_display_ring(void *head, int size, int extend_desc, + struct seq_file *seq) + { + int i; ++ struct dma_enhanced_tx_desc *enhp = (struct dma_enhanced_tx_desc *)head; + struct dma_extended_desc *ep = (struct dma_extended_desc *)head; + struct dma_desc *p = (struct dma_desc *)head; + + for (i = 0; i < size; i++) { +- if (extend_desc) { ++ if (extend_desc == 2) { ++ seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", ++ i, (unsigned int)virt_to_phys(enhp), ++ le32_to_cpu(enhp->basic.des0), ++ le32_to_cpu(enhp->basic.des1), ++ le32_to_cpu(enhp->basic.des2), ++ le32_to_cpu(enhp->basic.des3)); ++ enhp++; ++ } else if (extend_desc == 1) { + seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(ep), + le32_to_cpu(ep->basic.des0), +@@ -4512,6 +4601,10 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v) + seq_printf(seq, "Extended descriptor ring:\n"); + 
sysfs_display_ring((void *)tx_q->dma_etx, + DMA_TX_SIZE, 1, seq); ++ } else if (priv->enhanced_tx_desc) { ++ seq_printf(seq, "Enhanced descriptor ring:\n"); ++ sysfs_display_ring((void *)tx_q->dma_enhtx, ++ DMA_TX_SIZE, 2, seq); + } else { + seq_printf(seq, "Descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_tx, +-- +2.17.1 + diff --git a/patches/0045-trusty-Add-null-check-pointer-before-deference.trusty b/patches/0045-trusty-Add-null-check-pointer-before-deference.trusty new file mode 100644 index 0000000000..7359e89027 --- /dev/null +++ b/patches/0045-trusty-Add-null-check-pointer-before-deference.trusty @@ -0,0 +1,40 @@ +From 234c0ee2b5fe764699ba40ee4ff18905105c139d Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Mon, 6 Nov 2017 12:35:31 +0800 +Subject: [PATCH 45/63] trusty: Add null check pointer before deference + +Add null check before deference pointer. + +Change-Id: Icc9d61e17e3ecadfa1bd7fc252cf5e3d7aabb636 +Signed-off-by: Zhang, Qi +--- + drivers/trusty/trusty-irq.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index b576729ec868..868a31c01f19 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -199,6 +199,9 @@ static int trusty_irq_cpu_up(unsigned int cpu, struct hlist_node *node) + unsigned long irq_flags; + struct trusty_irq_state *is = hlist_entry_safe(node, struct trusty_irq_state, node); + ++ if(is == NULL) ++ return 0; ++ + dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id()); + + local_irq_save(irq_flags); +@@ -212,6 +215,9 @@ static int trusty_irq_cpu_down(unsigned int cpu, struct hlist_node *node) + unsigned long irq_flags; + struct trusty_irq_state *is = hlist_entry_safe(node, struct trusty_irq_state, node); + ++ if(is == NULL) ++ return 0; ++ + dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id()); + + local_irq_save(irq_flags); +-- +2.17.1 + diff --git 
a/patches/0046-ALSA-hda-Interrupt-servicing-and-BDL-setup-for-compr.audio b/patches/0046-ALSA-hda-Interrupt-servicing-and-BDL-setup-for-compr.audio new file mode 100644 index 0000000000..adaf169617 --- /dev/null +++ b/patches/0046-ALSA-hda-Interrupt-servicing-and-BDL-setup-for-compr.audio @@ -0,0 +1,145 @@ +From 464cd099ed4c403f97826df172493b12edb37b9b Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 11:51:32 +0100 +Subject: [PATCH 046/193] ALSA: hda: Interrupt servicing and BDL setup for + compress streams + +Account for compress streams when receiving and servicing buffer +completed interrupts. In case of compress stream enlisting hdac_stream +for data transfer, setup BDL entries much like it is the case for PCM +streams. + +Change-Id: Ia0d0ed4e1f49146136b93bf24ec83213bee394a9 +Signed-off-by: Cezary Rojewski +Signed-off-by: Divya Prakash +--- + sound/hda/hdac_controller.c | 4 ++-- + sound/hda/hdac_stream.c | 47 +++++++++++++++++++++++-------------- + 2 files changed, 32 insertions(+), 19 deletions(-) + +diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c +index d3999e7b0705..86f95a05595f 100644 +--- a/sound/hda/hdac_controller.c ++++ b/sound/hda/hdac_controller.c +@@ -555,8 +555,8 @@ int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, + sd_status = snd_hdac_stream_readb(azx_dev, SD_STS); + snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); + handled |= 1 << azx_dev->index; +- if (!azx_dev->substream || !azx_dev->running || +- !(sd_status & SD_INT_COMPLETE)) ++ if ((!azx_dev->substream && !azx_dev->cstream) || ++ !azx_dev->running || !(sd_status & SD_INT_COMPLETE)) + continue; + if (ack) + ack(bus, azx_dev); +diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c +index d8fe7ff0cd58..471833dde8b9 100644 +--- a/sound/hda/hdac_stream.c ++++ b/sound/hda/hdac_stream.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include "trace.h" + + /** +@@ -405,11 +406,20 @@ int 
snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev) + { + struct hdac_bus *bus = azx_dev->bus; + struct snd_pcm_substream *substream = azx_dev->substream; +- struct snd_pcm_runtime *runtime = substream->runtime; ++ struct snd_compr_stream *cstream = azx_dev->cstream; ++ struct snd_pcm_runtime *runtime = NULL; ++ struct snd_dma_buffer *dma_buffer_p = NULL; + __le32 *bdl; + int i, ofs, periods, period_bytes; + int pos_adj, pos_align; + ++ if (substream) { ++ runtime = substream->runtime; ++ dma_buffer_p = snd_pcm_get_dma_buf(substream); ++ } else { ++ dma_buffer_p = cstream->runtime->dma_buffer_p; ++ } ++ + /* reset BDL address */ + snd_hdac_stream_writel(azx_dev, SD_BDLPL, 0); + snd_hdac_stream_writel(azx_dev, SD_BDLPU, 0); +@@ -423,7 +433,7 @@ int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev) + azx_dev->frags = 0; + + pos_adj = bus->bdl_pos_adj; +- if (!azx_dev->no_period_wakeup && pos_adj > 0) { ++ if (runtime && !azx_dev->no_period_wakeup && pos_adj > 0) { + pos_align = pos_adj; + pos_adj = (pos_adj * runtime->rate + 47999) / 48000; + if (!pos_adj) +@@ -437,8 +447,7 @@ int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev) + pos_adj); + pos_adj = 0; + } else { +- ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), +- azx_dev, ++ ofs = setup_bdle(bus, dma_buffer_p, azx_dev, + &bdl, ofs, pos_adj, true); + if (ofs < 0) + goto error; +@@ -448,13 +457,11 @@ int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev) + + for (i = 0; i < periods; i++) { + if (i == periods - 1 && pos_adj) +- ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), +- azx_dev, &bdl, ofs, +- period_bytes - pos_adj, 0); ++ ofs = setup_bdle(bus, dma_buffer_p, azx_dev, ++ &bdl, ofs, period_bytes - pos_adj, 0); + else +- ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), +- azx_dev, &bdl, ofs, +- period_bytes, ++ ofs = setup_bdle(bus, dma_buffer_p, azx_dev, ++ &bdl, ofs, period_bytes, + !azx_dev->no_period_wakeup); + if (ofs < 0) + goto error; +@@ -482,23 
+489,29 @@ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev, + + unsigned int bufsize, period_bytes; + struct snd_pcm_substream *substream = azx_dev->substream; +- struct snd_pcm_runtime *runtime; ++ struct snd_compr_stream *cstream = azx_dev->cstream; ++ unsigned int no_period_wakeup = 0; + int err; + +- if (!substream) ++ if (substream) { ++ bufsize = snd_pcm_lib_buffer_bytes(substream); ++ period_bytes = snd_pcm_lib_period_bytes(substream); ++ no_period_wakeup = substream->runtime->no_period_wakeup; ++ } else if (cstream) { ++ bufsize = cstream->runtime->buffer_size; ++ period_bytes = cstream->runtime->fragment_size; ++ } else { + return -EINVAL; +- runtime = substream->runtime; +- bufsize = snd_pcm_lib_buffer_bytes(substream); +- period_bytes = snd_pcm_lib_period_bytes(substream); ++ } + + if (bufsize != azx_dev->bufsize || + period_bytes != azx_dev->period_bytes || + format_val != azx_dev->format_val || +- runtime->no_period_wakeup != azx_dev->no_period_wakeup) { ++ no_period_wakeup != azx_dev->no_period_wakeup) { + azx_dev->bufsize = bufsize; + azx_dev->period_bytes = period_bytes; + azx_dev->format_val = format_val; +- azx_dev->no_period_wakeup = runtime->no_period_wakeup; ++ azx_dev->no_period_wakeup = no_period_wakeup; + err = snd_hdac_stream_setup_periods(azx_dev); + if (err < 0) + return err; +-- +2.17.1 + diff --git a/patches/0046-drm-i915-Do-not-read-PSR2-register-in-transcoders-with.drm b/patches/0046-drm-i915-Do-not-read-PSR2-register-in-transcoders-with.drm new file mode 100644 index 0000000000..761c827cf7 --- /dev/null +++ b/patches/0046-drm-i915-Do-not-read-PSR2-register-in-transcoders-with.drm @@ -0,0 +1,119 @@ +From a96c594add5aea790ef016892b660d9cc5b1406c Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= +Date: Sat, 17 Aug 2019 02:38:33 -0700 +Subject: [PATCH 046/690] drm/i915: Do not read PSR2 register in transcoders + without PSR2 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 
+Content-Transfer-Encoding: 8bit + +This fix unclaimed access warnings: + +[ 245.525788] ------------[ cut here ]------------ +[ 245.525884] Unclaimed read from register 0x62900 +[ 245.526154] WARNING: CPU: 0 PID: 1234 at drivers/gpu/drm/i915/intel_uncore.c:1100 __unclaimed_reg_debug+0x40/0x50 [i915] +[ 245.526160] Modules linked in: i915 x86_pkg_temp_thermal ax88179_178a coretemp usbnet crct10dif_pclmul mii crc32_pclmul ghash_clmulni_intel e1000e [last unloaded: i915] +[ 245.526191] CPU: 0 PID: 1234 Comm: kms_fullmodeset Not tainted 5.1.0-rc6+ #915 +[ 245.526197] Hardware name: Intel Corporation Tiger Lake Client Platform/TigerLake U DDR4 SODIMM RVP, BIOS TGLSFWR1.D00.2081.A10.1904182155 04/18/2019 +[ 245.526273] RIP: 0010:__unclaimed_reg_debug+0x40/0x50 [i915] +[ 245.526281] Code: 74 05 5b 5d 41 5c c3 45 84 e4 48 c7 c0 76 97 21 a0 48 c7 c6 6c 97 21 a0 89 ea 48 0f 44 f0 48 c7 c7 7f 97 21 a0 e8 4f 1e fe e0 <0f> 0b 83 2d 6f d9 1c 00 01 5b 5d 41 5c c3 66 90 41 57 41 56 41 55 +[ 245.526288] RSP: 0018:ffffc900006bf7d8 EFLAGS: 00010086 +[ 245.526297] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 +[ 245.526304] RDX: 0000000000000007 RSI: 0000000000000000 RDI: 00000000ffffffff +[ 245.526310] RBP: 0000000000061900 R08: 0000000000000000 R09: 0000000000000001 +[ 245.526317] R10: 0000000000000006 R11: 0000000000000000 R12: 0000000000000001 +[ 245.526324] R13: 0000000000000000 R14: ffff8882914f0d58 R15: 0000000000000206 +[ 245.526332] FS: 00007fed2a3c39c0(0000) GS:ffff8882a8600000(0000) knlGS:0000000000000000 +[ 245.526340] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 245.526347] CR2: 00007fed28dff000 CR3: 00000002a086c006 CR4: 0000000000760ef0 +[ 245.526354] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +[ 245.526361] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 +[ 245.526367] PKRU: 55555554 +[ 245.526373] Call Trace: +[ 245.526454] gen11_fwtable_read32+0x219/0x250 [i915] +[ 245.526576] 
intel_psr_activate+0x57/0x400 [i915] +[ 245.526697] intel_psr_enable_locked+0x367/0x4b0 [i915] +[ 245.526828] intel_psr_enable+0xa4/0xd0 [i915] +[ 245.526946] intel_enable_ddi+0x127/0x2f0 [i915] +[ 245.527075] intel_encoders_enable.isra.79+0x62/0x90 [i915] +[ 245.527202] haswell_crtc_enable+0x2a2/0x850 [i915] +[ 245.527337] intel_update_crtc+0x51/0x360 [i915] +[ 245.527466] skl_update_crtcs+0x26c/0x300 [i915] +[ 245.527603] intel_atomic_commit_tail+0x3e5/0x13c0 [i915] +[ 245.527757] intel_atomic_commit+0x24d/0x2d0 [i915] +[ 245.527782] drm_atomic_helper_set_config+0x7b/0x90 +[ 245.527799] drm_mode_setcrtc+0x1b4/0x6f0 +[ 245.527856] ? drm_mode_getcrtc+0x180/0x180 +[ 245.527867] drm_ioctl_kernel+0xad/0xf0 +[ 245.527886] drm_ioctl+0x2f4/0x3b0 +[ 245.527902] ? drm_mode_getcrtc+0x180/0x180 +[ 245.527935] ? rcu_read_lock_sched_held+0x6f/0x80 +[ 245.527956] do_vfs_ioctl+0xa0/0x6d0 +[ 245.527970] ? __task_pid_nr_ns+0xb6/0x200 +[ 245.527991] ksys_ioctl+0x35/0x70 +[ 245.528009] __x64_sys_ioctl+0x11/0x20 +[ 245.528020] do_syscall_64+0x55/0x180 +[ 245.528034] entry_SYSCALL_64_after_hwframe+0x49/0xbe +[ 245.528042] RIP: 0033:0x7fed2cc7c3c7 +[ 245.528050] Code: 00 00 90 48 8b 05 c9 3a 0d 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 99 3a 0d 00 f7 d8 64 89 01 48 +[ 245.528057] RSP: 002b:00007ffe36944378 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 +[ 245.528067] RAX: ffffffffffffffda RBX: 00007ffe369443b0 RCX: 00007fed2cc7c3c7 +[ 245.528074] RDX: 00007ffe369443b0 RSI: 00000000c06864a2 RDI: 0000000000000003 +[ 245.528081] RBP: 00007ffe369443b0 R08: 0000000000000000 R09: 0000564c0173ae98 +[ 245.528088] R10: 0000564c0173aeb8 R11: 0000000000000246 R12: 00000000c06864a2 +[ 245.528095] R13: 0000000000000003 R14: 0000000000000000 R15: 0000000000000000 +[ 245.528128] irq event stamp: 140866 +[ 245.528138] hardirqs last enabled at (140865): [] _raw_spin_unlock_irqrestore+0x4c/0x60 +[ 245.528148] 
hardirqs last disabled at (140866): [] _raw_spin_lock_irqsave+0xd/0x50 +[ 245.528158] softirqs last enabled at (140860): [] __do_softirq+0x38c/0x499 +[ 245.528170] softirqs last disabled at (140853): [] irq_exit+0xa9/0xc0 +[ 245.528247] WARNING: CPU: 0 PID: 1234 at drivers/gpu/drm/i915/intel_uncore.c:1100 __unclaimed_reg_debug+0x40/0x50 [i915] +[ 245.528254] ---[ end trace 366069676e98a410 ]--- + +Signed-off-by: José Roberto de Souza +Signed-off-by: Lucas De Marchi +Reviewed-by: Imre Deak +Reviewed-by: Lucas De Marchi +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-7-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/display/intel_psr.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c +index 78e920015a00..dafd3737cc5a 100644 +--- a/drivers/gpu/drm/i915/display/intel_psr.c ++++ b/drivers/gpu/drm/i915/display/intel_psr.c +@@ -541,7 +541,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) + static bool + transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) + { +- if (INTEL_GEN(dev_priv) >= 12) ++ if (INTEL_GEN(dev_priv) < 9) ++ return false; ++ else if (INTEL_GEN(dev_priv) >= 12) + return trans == TRANSCODER_A; + else + return trans == TRANSCODER_EDP; +@@ -667,8 +669,9 @@ static void intel_psr_activate(struct intel_dp *intel_dp) + { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + +- if (INTEL_GEN(dev_priv) >= 9) ++ if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) + WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); ++ + WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); + WARN_ON(dev_priv->psr.active); + lockdep_assert_held(&dev_priv->psr.lock); +@@ -821,7 +824,7 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv) + u32 val; + + if (!dev_priv->psr.active) { +- if (INTEL_GEN(dev_priv) >= 9) { ++ if 
(transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { + val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); + WARN_ON(val & EDP_PSR2_ENABLE); + } +-- +2.17.1 + diff --git a/patches/0046-mei-flush-only-objects-belonging-to-fp-in-non-fin.security b/patches/0046-mei-flush-only-objects-belonging-to-fp-in-non-fin.security new file mode 100644 index 0000000000..ea73ef65aa --- /dev/null +++ b/patches/0046-mei-flush-only-objects-belonging-to-fp-in-non-fin.security @@ -0,0 +1,68 @@ +From b5fef342dcfe42249b98126e9f21942fecf46652 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Sun, 21 Oct 2018 17:09:17 +0300 +Subject: [PATCH 46/65] mei: flush only objects belonging to fp in non-final + flush + +Change-Id: Ibbec077c1aa8fd114491fa4e63f2fa2f4751994e +Signed-off-by: Alexander Usyskin +--- + drivers/misc/mei/client.c | 21 ++++++++++++--------- + 1 file changed, 12 insertions(+), 9 deletions(-) + +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index 393aad218513..55ed821a49e3 100644 +--- a/drivers/misc/mei/client.c ++++ b/drivers/misc/mei/client.c +@@ -419,14 +419,16 @@ static void mei_io_list_flush_cl(struct list_head *head, + * + * @head: An instance of our list structure + * @cl: host client ++ * @fp: file pointer (matching cb file object), may be NULL + */ + static void mei_io_tx_list_free_cl(struct list_head *head, +- const struct mei_cl *cl) ++ const struct mei_cl *cl, ++ const struct file *fp) + { + struct mei_cl_cb *cb, *next; + + list_for_each_entry_safe(cb, next, head, list) { +- if (cl == cb->cl) ++ if (cl == cb->cl && (!fp || fp == cb->fp)) + mei_tx_cb_dequeue(cb); + } + } +@@ -565,12 +567,13 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) + dev = cl->dev; + + cl_dbg(dev, cl, "remove list entry belonging to cl\n"); +- mei_io_tx_list_free_cl(&cl->dev->write_list, cl); +- mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl); +- mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl); +- 
mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl); +- if (!fp) ++ mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp); ++ mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp); ++ if (!fp) { ++ mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl); ++ mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl); + mei_cl_free_pending(cl); ++ } + spin_lock(&cl->rd_completed_lock); + mei_io_list_free_fp(&cl->rd_completed, fp); + spin_unlock(&cl->rd_completed_lock); +@@ -788,8 +791,8 @@ static void mei_cl_set_disconnected(struct mei_cl *cl) + return; + + cl->state = MEI_FILE_DISCONNECTED; +- mei_io_tx_list_free_cl(&dev->write_list, cl); +- mei_io_tx_list_free_cl(&dev->write_waiting_list, cl); ++ mei_io_tx_list_free_cl(&dev->write_list, cl, NULL); ++ mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL); + mei_io_list_flush_cl(&dev->ctrl_rd_list, cl); + mei_io_list_flush_cl(&dev->ctrl_wr_list, cl); + mei_cl_wake_all(cl); +-- +2.17.1 + diff --git a/patches/0046-net-stmmac-scan-for-Time-Based-Scheduling-in-.connectivity b/patches/0046-net-stmmac-scan-for-Time-Based-Scheduling-in-.connectivity new file mode 100644 index 0000000000..1d31d94acb --- /dev/null +++ b/patches/0046-net-stmmac-scan-for-Time-Based-Scheduling-in-.connectivity @@ -0,0 +1,275 @@ +From f46524abb1ec9c048e77c810135f4a49bb9391a2 Mon Sep 17 00:00:00 2001 +From: Kweh Hock Leong +Date: Fri, 9 Aug 2019 08:10:27 +0800 +Subject: [PATCH 046/108] net: stmmac: scan for Time-Based Scheduling in DWMAC + HW and setup + +For DWMAC v5.1 and above, the capability for TBS is announced through +MAC_HW_Feature3 and the current design allow driver to TBS to be selected +through platform data:- + +a) has_tbs : to load v5.1x support for Enhanced Tx Descriptor in desc and + dma. 
+b) tsn_tbs_en: to enable/disable TBS capability + +Signed-off-by: Kweh Hock Leong +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 1 + + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 2 + + drivers/net/ethernet/stmicro/stmmac/hwif.c | 38 ++++++++++++++++++- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 7 ++++ + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 2 + + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 8 ++++ + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 2 + + include/linux/stmmac.h | 2 + + 8 files changed, 60 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index ae0d77c09634..40bbb93db382 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -87,6 +87,7 @@ + */ + + /* MAC HW features3 bitmap */ ++#define GMAC_HW_FEAT_TBSSEL BIT(27) + #define GMAC_HW_FEAT_ESTWID GENMASK(21, 20) + #define GMAC_HW_FEAT_ESTWID_SHIFT 20 + #define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index b2fecb07be6c..84c979c5a5dc 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -78,6 +78,8 @@ static bool dwmac5_has_tsn_cap(void __iomem *ioaddr, enum tsn_feat_id featid) + switch (featid) { + case TSN_FEAT_ID_EST: + return (hw_cap3 & GMAC_HW_FEAT_ESTSEL); ++ case TSN_FEAT_ID_TBS: ++ return (hw_cap3 & GMAC_HW_FEAT_TBSSEL); + default: + return false; + }; +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c +index b5dc33cb7054..35fa88057860 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c +@@ -75,6 +75,7 @@ static const struct stmmac_hwif_entry { + bool xgmac; + bool mdio_intr_en; + bool has_serdes; ++ bool tbs; + 
u32 min_id; + const struct stmmac_regs_off regs; + const void *desc; +@@ -95,6 +96,7 @@ static const struct stmmac_hwif_entry { + .xgmac = false, + .mdio_intr_en = false, + .has_serdes = false, ++ .tbs = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, +@@ -116,6 +118,7 @@ static const struct stmmac_hwif_entry { + .xgmac = false, + .mdio_intr_en = false, + .has_serdes = false, ++ .tbs = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, +@@ -137,6 +140,7 @@ static const struct stmmac_hwif_entry { + .xgmac = false, + .mdio_intr_en = false, + .has_serdes = false, ++ .tbs = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -158,6 +162,7 @@ static const struct stmmac_hwif_entry { + .xgmac = false, + .mdio_intr_en = false, + .has_serdes = false, ++ .tbs = false, + .min_id = DWMAC_CORE_4_00, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -179,6 +184,7 @@ static const struct stmmac_hwif_entry { + .xgmac = false, + .mdio_intr_en = false, + .has_serdes = false, ++ .tbs = false, + .min_id = DWMAC_CORE_4_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -200,6 +206,7 @@ static const struct stmmac_hwif_entry { + .xgmac = false, + .mdio_intr_en = true, + .has_serdes = false, ++ .tbs = true, + .min_id = DWMAC_CORE_5_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, +@@ -221,6 +228,7 @@ static const struct stmmac_hwif_entry { + .xgmac = true, + .mdio_intr_en = false, + .has_serdes = false, ++ .tbs = false, + .min_id = DWXGMAC_CORE_2_10, + .regs = { + .ptp_off = PTP_XGMAC_OFFSET, +@@ -236,19 +244,42 @@ static const struct stmmac_hwif_entry { + .mmc = &dwxgmac_mmc_ops, + .setup = dwxgmac2_setup, + .quirks = NULL, ++ }, { ++ .gmac = false, ++ .gmac4 = true, ++ .xgmac = false, ++ .mdio_intr_en = true, ++ .has_serdes = false, ++ .tbs = true, ++ .min_id = DWMAC_CORE_5_10, ++ .regs = { ++ .ptp_off = PTP_GMAC4_OFFSET, ++ .mmc_off = MMC_GMAC4_OFFSET, ++ }, ++ .desc = &dwmac5_desc_ops, ++ .dma = &dwmac5_dma_ops, ++ .mac = &dwmac510_ops, ++ 
.serdes = NULL, ++ .hwtimestamp = &stmmac_ptp, ++ .mode = &dwmac4_ring_mode_ops, ++ .tc = &dwmac510_tc_ops, ++ .mmc = &dwmac_mmc_ops, ++ .setup = dwmac4_setup, ++ .quirks = NULL, + }, { + .gmac = false, + .gmac4 = true, + .xgmac = false, + .mdio_intr_en = true, + .has_serdes = true, ++ .tbs = true, + .min_id = DWMAC_CORE_5_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, +- .desc = &dwmac4_desc_ops, +- .dma = &dwmac410_dma_ops, ++ .desc = &dwmac5_desc_ops, ++ .dma = &dwmac5_dma_ops, + .mac = &dwmac510_ops, + .serdes = &intel_serdes_ops, + .hwtimestamp = &stmmac_ptp, +@@ -266,6 +297,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv) + bool needs_gmac4 = priv->plat->has_gmac4; + bool needs_gmac = priv->plat->has_gmac; + bool needs_serdes = priv->plat->has_serdes; ++ bool needs_tbs = priv->plat->has_tbs; + const struct stmmac_hwif_entry *entry; + struct mac_device_info *mac; + bool needs_setup = true; +@@ -312,6 +344,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv) + continue; + if (needs_serdes ^ entry->has_serdes) + continue; ++ if (needs_tbs ^ entry->tbs) ++ continue; + /* Use synopsys_id var because some setups can override this */ + if (priv->synopsys_id < entry->min_id) + continue; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 1270c34f9258..de87a1f10247 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -5080,6 +5080,13 @@ int stmmac_dvr_probe(struct device *device, + true); + dev_info(priv->device, "EST feature enabled\n"); + } ++ if (priv->hw->tsn_info.cap.tbs_support && priv->plat->tsn_tbs_en) { ++ stmmac_set_tsn_feat(priv, priv->hw, ndev, TSN_FEAT_ID_TBS, ++ true); ++ priv->enhanced_tx_desc = 1; ++ priv->mode = STMMAC_ENHANCED_TX_MODE; ++ dev_info(priv->device, "TBS feature enabled\n"); ++ } + + #ifdef STMMAC_VLAN_TAG_USED + /* Both mac100 and gmac support receive VLAN tag 
detection */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 168121240545..d176404fdb02 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -158,9 +158,11 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + plat->clk_trail_n = 2; + plat->has_gmac = 0; + plat->has_gmac4 = 1; ++ plat->has_tbs = 1; + plat->force_sf_dma_mode = 0; + plat->tso_en = 1; + plat->tsn_est_en = 1; ++ plat->tsn_tbs_en = 1; + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index a76a5f68353f..d2e615659f4a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -97,6 +97,14 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + return 0; + } + ++ if (!tsnif_has_tsn_cap(hw, ioaddr, TSN_FEAT_ID_TBS)) { ++ dev_info(pdev, "TBS NOT supported\n"); ++ cap->tbs_support = 0; ++ } else { ++ dev_info(pdev, "TBS capable\n"); ++ cap->tbs_support = 1; ++ } ++ + gcl_depth = tsnif_est_get_gcl_depth(hw, ioaddr); + if (gcl_depth < 0) { + dev_err(pdev, "EST GCL depth(%d) < 0\n", gcl_depth); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index 6571fa92d4f8..46de14829eff 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -22,6 +22,7 @@ enum tsn_hwtunable_id { + /* TSN Feature Enabled List */ + enum tsn_feat_id { + TSN_FEAT_ID_EST = 0, ++ TSN_FEAT_ID_TBS = 2, + TSN_FEAT_ID_MAX, + }; + +@@ -43,6 +44,7 @@ enum tsn_gcl_param_idx { + /* TSN HW Capabilities */ + struct tsn_hw_cap { + bool est_support; /* 1: supported */ ++ bool tbs_support; /* 1: supported */ + u32 txqcnt; /* Number of TxQ (control gate) */ + u32 gcl_depth; /* GCL depth. 
*/ + u32 ti_wid; /* time interval width */ +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 4d3b6269198c..5c14b0c0cd90 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -181,10 +181,12 @@ struct plat_stmmacenet_data { + struct stmmac_axi *axi; + int has_gmac4; + int has_serdes; ++ int has_tbs; + bool has_sun8i; + bool tso_en; + int rss_en; + bool tsn_est_en; ++ bool tsn_tbs_en; + int mac_port_sel_speed; + bool en_tx_lpi_clockgating; + int has_xgmac; +-- +2.17.1 + diff --git a/patches/0046-skip-sbuf-and-vhm-initialization-when-booting-nativel.acrn b/patches/0046-skip-sbuf-and-vhm-initialization-when-booting-nativel.acrn new file mode 100644 index 0000000000..1fb2e8352c --- /dev/null +++ b/patches/0046-skip-sbuf-and-vhm-initialization-when-booting-nativel.acrn @@ -0,0 +1,60 @@ +From beceb68b8a52057f0736bb2be5183d557386dbde Mon Sep 17 00:00:00 2001 +From: Jack Ren +Date: Fri, 31 Aug 2018 10:59:00 +0800 +Subject: [PATCH 046/150] skip sbuf and vhm initialization when booting + natively + +Change-Id: Ib5cd72c208f6e7cc905418671cd655054132806f +Tracked-On: 229665 +Signed-off-by: Jack Ren +Reviewed-on: +--- + drivers/acrn/sbuf.c | 4 ++++ + drivers/char/vhm/vhm_dev.c | 8 +++----- + 2 files changed, 7 insertions(+), 5 deletions(-) + +diff --git a/drivers/acrn/sbuf.c b/drivers/acrn/sbuf.c +index a3582325d9b9..b51ee04e12fa 100644 +--- a/drivers/acrn/sbuf.c ++++ b/drivers/acrn/sbuf.c +@@ -57,6 +57,7 @@ + + #include + #include ++#include + #include + #include + #include "sbuf.h" +@@ -170,6 +171,9 @@ int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf) + { + struct sbuf_setup_param ssp; + ++ if (x86_hyper_type != X86_HYPER_ACRN) ++ return -ENODEV; ++ + ssp.pcpu_id = pcpu_id; + ssp.sbuf_id = sbuf_id; + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 200fb0c0ebb8..3c0ebc3a3ca8 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -495,12 +495,10 @@ static int __init 
vhm_init(void) + unsigned long flag; + struct hc_api_version api_version = {0, 0}; + +- pr_info("vhm: initializing\n"); ++ if (x86_hyper_type != X86_HYPER_ACRN) ++ return -ENODEV; + +- if (x86_hyper_type != X86_HYPER_ACRN) { +- pr_err("vhm: not support acrn hypervisor!\n"); +- return -EINVAL; +- } ++ pr_info("vhm: initializing\n"); + + if (hcall_get_api_version(virt_to_phys(&api_version)) < 0) { + pr_err("vhm: failed to get api version from Hypervisor !\n"); +-- +2.17.1 + diff --git a/patches/0046-trusty-Check-if-eVmm-is-available-before-init-drive.trusty b/patches/0046-trusty-Check-if-eVmm-is-available-before-init-drive.trusty new file mode 100644 index 0000000000..f473fac909 --- /dev/null +++ b/patches/0046-trusty-Check-if-eVmm-is-available-before-init-drive.trusty @@ -0,0 +1,64 @@ +From ce6781af06c19c95d4e58633f5c127152f70bc27 Mon Sep 17 00:00:00 2001 +From: Zhou Furong +Date: Tue, 21 Nov 2017 14:13:27 +0800 +Subject: [PATCH 46/63] trusty: Check if eVmm is available before init driver + +eVmm not available in recovery mode, there is a kernel panic if + trusty driver initialize without check it. 
+ +Change-Id: I21f788ab7186cddbc0b0d1a10f7896b5523d257a +Tracked-On: PKT-472 +--- + drivers/trusty/trusty-timer.c | 8 ++++++-- + drivers/trusty/trusty-wall.c | 6 ++++++ + 2 files changed, 12 insertions(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c +index 0998e027984b..e88dc5f4cdf8 100644 +--- a/drivers/trusty/trusty-timer.c ++++ b/drivers/trusty/trusty-timer.c +@@ -79,10 +79,15 @@ static int trusty_timer_call_notify(struct notifier_block *nb, + static int trusty_timer_probe(struct platform_device *pdev) + { + int ret; +- unsigned int cpu; + struct trusty_timer_dev_state *s; + struct trusty_timer *tt; + ++ ret = trusty_check_cpuid(NULL); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ return -EINVAL; ++ } ++ + dev_dbg(&pdev->dev, "%s\n", __func__); + + if (!trusty_wall_base(pdev->dev.parent)) { +@@ -127,7 +132,6 @@ static int trusty_timer_probe(struct platform_device *pdev) + + static int trusty_timer_remove(struct platform_device *pdev) + { +- unsigned int cpu; + struct trusty_timer_dev_state *s = platform_get_drvdata(pdev); + struct trusty_timer *tt; + +diff --git a/drivers/trusty/trusty-wall.c b/drivers/trusty/trusty-wall.c +index 3c33d724b3fa..64368480c309 100644 +--- a/drivers/trusty/trusty-wall.c ++++ b/drivers/trusty/trusty-wall.c +@@ -147,6 +147,12 @@ static int trusty_wall_probe(struct platform_device *pdev) + int ret; + struct trusty_wall_dev_state *s; + ++ ret = trusty_check_cpuid(NULL); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ return -EINVAL; ++ } ++ + dev_dbg(&pdev->dev, "%s\n", __func__); + + s = kzalloc(sizeof(*s), GFP_KERNEL); +-- +2.17.1 + diff --git a/patches/0047-ALSA-core-Implement-compress-page-allocation-and-fre.audio b/patches/0047-ALSA-core-Implement-compress-page-allocation-and-fre.audio new file mode 100644 index 0000000000..b6491d799d --- /dev/null +++ 
b/patches/0047-ALSA-core-Implement-compress-page-allocation-and-fre.audio @@ -0,0 +1,164 @@ +From 5ad63a2ba24264e883eb63dcd1b1aa45c1368add Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 13:35:01 +0100 +Subject: [PATCH 047/193] ALSA: core: Implement compress page allocation and + free routines + +Add simple malloc and free methods for memory management for compress +streams. Based on snd_pcm_lib_malloc_pages and snd_pcm_lib_free_pages +implementation. + +Change-Id: Ic59813296ccc173d64907c5791b4f6a35c5c17e8 +Signed-off-by: Cezary Rojewski +Signed-off-by: Divya Prakash +--- + include/sound/compress_driver.h | 39 +++++++++++++++++++++++------- + sound/core/compress_offload.c | 42 +++++++++++++++++++++++++++++++++ + 2 files changed, 73 insertions(+), 8 deletions(-) + +diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h +index bc88d6f964da..130ad5ff687f 100644 +--- a/include/sound/compress_driver.h ++++ b/include/sound/compress_driver.h +@@ -34,11 +34,14 @@ struct snd_compr_ops; + * @total_bytes_transferred: cumulative bytes transferred by offload DSP + * @sleep: poll sleep + * @private_data: driver private data pointer ++ * @dma_area: DMA area ++ * @dma_addr: physical bus address (not accessible from main CPU) ++ * @dma_bytes: size of DMA area ++ * @dma_buffer_p: pointer to allocated buffer + */ + struct snd_compr_runtime { + snd_pcm_state_t state; + struct snd_compr_ops *ops; +- struct snd_dma_buffer *dma_buffer_p; + void *buffer; + u64 buffer_size; + u32 fragment_size; +@@ -47,6 +50,11 @@ struct snd_compr_runtime { + u64 total_bytes_transferred; + wait_queue_head_t sleep; + void *private_data; ++ ++ unsigned char *dma_area; ++ dma_addr_t dma_addr; ++ size_t dma_bytes; ++ struct snd_dma_buffer *dma_buffer_p; + }; + + /** +@@ -60,6 +68,7 @@ struct snd_compr_runtime { + * @metadata_set: metadata set flag, true when set + * @next_track: has userspace signal next track transition, true when set + * @private_data: pointer 
to DSP private data ++ * @dma_buffer: allocated buffer if any + */ + struct snd_compr_stream { + const char *name; +@@ -71,6 +80,7 @@ struct snd_compr_stream { + bool metadata_set; + bool next_track; + void *private_data; ++ struct snd_dma_buffer dma_buffer; + }; + + /** +@@ -180,21 +190,34 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) + + /** + * snd_compr_set_runtime_buffer - Set the Compress runtime buffer +- * @substream: compress substream to set ++ * @stream: compress stream to set + * @bufp: the buffer information, NULL to clear + * + * Copy the buffer information to runtime buffer when @bufp is non-NULL. + * Otherwise it clears the current buffer information. + */ +-static inline void snd_compr_set_runtime_buffer( +- struct snd_compr_stream *substream, +- struct snd_dma_buffer *bufp) ++static inline void ++snd_compr_set_runtime_buffer(struct snd_compr_stream *stream, ++ struct snd_dma_buffer *bufp) + { +- struct snd_compr_runtime *runtime = substream->runtime; +- +- runtime->dma_buffer_p = bufp; ++ struct snd_compr_runtime *runtime = stream->runtime; ++ ++ if (bufp) { ++ runtime->dma_buffer_p = bufp; ++ runtime->dma_area = bufp->area; ++ runtime->dma_addr = bufp->addr; ++ runtime->dma_bytes = bufp->bytes; ++ } else { ++ runtime->dma_buffer_p = NULL; ++ runtime->dma_area = NULL; ++ runtime->dma_addr = 0; ++ runtime->dma_bytes = 0; ++ } + } + ++int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size); ++int snd_compr_free_pages(struct snd_compr_stream *stream); ++ + int snd_compr_stop_error(struct snd_compr_stream *stream, + snd_pcm_state_t state); + +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c +index 41905afada63..0e08a71d95eb 100644 +--- a/sound/core/compress_offload.c ++++ b/sound/core/compress_offload.c +@@ -488,6 +488,48 @@ snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg) + } + #endif /* !COMPR_CODEC_CAPS_OVERFLOW */ + ++int 
snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size) ++{ ++ struct snd_dma_buffer *dmab; ++ int ret; ++ ++ if (PCM_RUNTIME_CHECK(stream)) ++ return -EINVAL; ++ dmab = kzalloc(sizeof(*dmab), GFP_KERNEL); ++ if (!dmab) ++ return -ENOMEM; ++ dmab->dev = stream->dma_buffer.dev; ++ ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab); ++ if (ret < 0) { ++ kfree(dmab); ++ return ret; ++ } ++ ++ snd_compr_set_runtime_buffer(stream, dmab); ++ stream->runtime->dma_bytes = size; ++ return 1; ++} ++EXPORT_SYMBOL(snd_compr_malloc_pages); ++ ++int snd_compr_free_pages(struct snd_compr_stream *stream) ++{ ++ struct snd_compr_runtime *runtime = stream->runtime; ++ ++ if (PCM_RUNTIME_CHECK(stream)) ++ return -EINVAL; ++ if (runtime->dma_area == NULL) ++ return 0; ++ if (runtime->dma_buffer_p != &stream->dma_buffer) { ++ /* It's a newly allocated buffer. Release it now. */ ++ snd_dma_free_pages(runtime->dma_buffer_p); ++ kfree(runtime->dma_buffer_p); ++ } ++ ++ snd_compr_set_runtime_buffer(stream, NULL); ++ return 0; ++} ++EXPORT_SYMBOL(snd_compr_free_pages); ++ + /* revisit this with snd_pcm_preallocate_xxx */ + static int snd_compr_allocate_buffer(struct snd_compr_stream *stream, + struct snd_compr_params *params) +-- +2.17.1 + diff --git a/patches/0047-VHM-add-hugetlb-page-ept-mapping-support.acrn b/patches/0047-VHM-add-hugetlb-page-ept-mapping-support.acrn new file mode 100644 index 0000000000..11b04b52a1 --- /dev/null +++ b/patches/0047-VHM-add-hugetlb-page-ept-mapping-support.acrn @@ -0,0 +1,261 @@ +From 6a5b6fc7ede2d29004bad744ee2e18600d199305 Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:59:00 +0800 +Subject: [PATCH 047/150] VHM: add hugetlb page ept mapping support + +unlike cma, hugetlb allocates hugepage under user space, so VHM only +need take care of ept mapping for these allocated huge pages. 
+this patch add hugepage_map_guest function, it gets huge page struct +pointer according to user virtual address input from ioctl IC_SET_MEMSEG, +then build all required parameters for recording guest memseg and mapping +ept entry through this page struct. + +Change-Id: I0b333613dc20fce41b9b091c72892bbac6b07735 +Signed-off-by: Jason Chen CJ +Reviewed-on: +--- + drivers/char/vhm/vhm_dev.c | 1 + + drivers/vhm/Kconfig | 1 + + drivers/vhm/vhm_mm.c | 101 +++++++++++++++++++++++++---- + include/linux/vhm/vhm_ioctl_defs.h | 27 +++++--- + include/linux/vhm/vhm_vm_mngt.h | 1 + + 5 files changed, 110 insertions(+), 21 deletions(-) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 3c0ebc3a3ca8..fd2d43b176ea 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -117,6 +117,7 @@ static int vhm_dev_open(struct inode *inodep, struct file *filep) + + vm_mutex_lock(&vhm_vm_list_lock); + vm->refcnt = 1; ++ vm->hugetlb_enabled = 0; + vm_list_add(&vm->list); + vm_mutex_unlock(&vhm_vm_list_lock); + filep->private_data = vm; +diff --git a/drivers/vhm/Kconfig b/drivers/vhm/Kconfig +index 64465431673b..373dbf161805 100644 +--- a/drivers/vhm/Kconfig ++++ b/drivers/vhm/Kconfig +@@ -3,6 +3,7 @@ config ACRN_VHM + depends on ACRN_GUEST + depends on DMA_CMA + depends on PCI_MSI ++ depends on HUGETLBFS + depends on !VMAP_STACK + default n + ---help--- +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index be6a47afad9a..cc08fd9d0965 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -110,31 +110,25 @@ static bool _free_memblk(struct device *dev, u64 vm0_gpa, size_t len) + return dma_release_from_contiguous(dev, page, count); + } + +-int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) ++static int add_guest_memseg(struct vhm_vm *vm, unsigned long vm0_gpa, ++ unsigned long guest_gpa, unsigned long len) + { + struct guest_memseg *seg; +- u64 vm0_gpa; + int max_gfn; + + seg = kzalloc(sizeof(struct 
guest_memseg), GFP_KERNEL); + if (seg == NULL) + return -ENOMEM; + +- vm0_gpa = _alloc_memblk(vm->dev, memseg->len); +- if (vm0_gpa == 0ULL) { +- kfree(seg); +- return -ENOMEM; +- } +- + seg->vm0_gpa = vm0_gpa; +- seg->len = memseg->len; +- seg->gpa = memseg->gpa; ++ seg->gpa = guest_gpa; ++ seg->len = len; + + max_gfn = (seg->gpa + seg->len) >> PAGE_SHIFT; + if (vm->max_gfn < max_gfn) + vm->max_gfn = max_gfn; + +- pr_info("VHM: alloc memseg with len=0x%lx, vm0_gpa=0x%llx," ++ pr_info("VHM: add memseg with len=0x%lx, vm0_gpa=0x%llx," + " and its guest gpa = 0x%llx, vm max_gfn 0x%x\n", + seg->len, seg->vm0_gpa, seg->gpa, vm->max_gfn); + +@@ -146,6 +140,22 @@ int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) + return 0; + } + ++int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) ++{ ++ unsigned long vm0_gpa; ++ int ret; ++ ++ vm0_gpa = _alloc_memblk(vm->dev, memseg->len); ++ if (vm0_gpa == 0ULL) ++ return -ENOMEM; ++ ++ ret = add_guest_memseg(vm, vm0_gpa, memseg->gpa, memseg->len); ++ if (ret < 0) ++ _free_memblk(vm->dev, vm0_gpa, memseg->len); ++ ++ return ret; ++} ++ + static int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned int mem_access_right, +@@ -197,6 +207,61 @@ int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + mem_type, mem_access_right, MAP_MEM); + } + ++static int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap) ++{ ++ struct page *page; ++ unsigned long len, guest_gpa, vma; ++ unsigned int type; ++ unsigned int mem_type, mem_access_right; ++ int ret; ++ ++ if (vm == NULL || memmap == NULL) ++ return -EINVAL; ++ ++ len = memmap->len; ++ vma = memmap->vma_base; ++ guest_gpa = memmap->gpa; ++ ++ while (len > 0) { ++ unsigned long vm0_gpa, pagesize; ++ ++ ret = get_user_pages_fast(vma, 1, 1, &page); ++ if (unlikely(ret != 1) || (page == NULL)) { ++ pr_err("failed to pin huge page!\n"); ++ return 
-ENOMEM; ++ } ++ ++ vm0_gpa = page_to_phys(page); ++ pagesize = PAGE_SIZE << compound_order(page); ++ ++ ret = add_guest_memseg(vm, vm0_gpa, guest_gpa, pagesize); ++ if (ret < 0) { ++ pr_err("failed to add memseg for huge page!\n"); ++ put_page(page); ++ return ret; ++ } ++ ++ /* TODO: do batch hypercall for multi ept mapping */ ++ mem_type = MEM_TYPE_WB; ++ mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); ++ type = MAP_MEM; ++ if (_mem_set_memmap(vm->vmid, guest_gpa, vm0_gpa, pagesize, ++ mem_type, mem_access_right, type) < 0) { ++ pr_err("vhm: failed to set memmap %ld!\n", vm->vmid); ++ put_page(page); ++ return -EFAULT; ++ } ++ ++ len -= pagesize; ++ vma += pagesize; ++ guest_gpa += pagesize; ++ } ++ ++ vm->hugetlb_enabled = 1; ++ ++ return 0; ++} ++ + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + { + struct guest_memseg *seg = NULL; +@@ -204,8 +269,13 @@ int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + unsigned int mem_type, mem_access_right; + unsigned long guest_gpa, host_gpa; + ++ /* hugetlb use vma to do the mapping */ ++ if (memmap->type == VM_SYSMEM && memmap->using_vma) ++ return hugepage_map_guest(vm, memmap); ++ + mutex_lock(&vm->seg_lock); + ++ /* cma or mmio */ + if (memmap->type == VM_SYSMEM) { + list_for_each_entry(seg, &vm->memseg_list, list) { + if (seg->gpa == memmap->gpa +@@ -249,8 +319,13 @@ void free_guest_mem(struct vhm_vm *vm) + while (!list_empty(&vm->memseg_list)) { + seg = list_first_entry(&vm->memseg_list, + struct guest_memseg, list); +- if (!_free_memblk(vm->dev, seg->vm0_gpa, seg->len)) +- pr_warn("failed to free memblk\n"); ++ if (vm->hugetlb_enabled) { ++ /* just put_page to unpin huge page */ ++ put_page(pfn_to_page(seg->vm0_gpa >> PAGE_SHIFT)); ++ } else { ++ if (!_free_memblk(vm->dev, seg->vm0_gpa, seg->len)) ++ pr_warn("failed to free memblk\n"); ++ } + list_del(&seg->list); + kfree(seg); + } +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h 
+index 5bc7c666f2ea..a0a830dec3fa 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -117,19 +117,30 @@ struct vm_memseg { + + /** + * struct vm_memmap - EPT memory mapping info for guest +- * +- * @type: memory mapping type +- * @gpa: guest physical start address of memory mapping +- * @hpa: host physical start address of memory +- * @len: the length of memory range mapped +- * @prot: memory mapping attribute + */ + struct vm_memmap { ++ /** @type: memory mapping type */ + uint32_t type; +- uint32_t reserved; ++ /** @using_vma: using vma_base to get vm0_gpa, ++ * only for type == VM_SYSTEM ++ */ ++ uint32_t using_vma; ++ /** @gpa: user OS guest physical start address of memory mapping */ + uint64_t gpa; +- uint64_t hpa; /* only for type == VM_MMIO */ ++ /** union */ ++ union { ++ /** @hpa: host physical start address of memory, ++ * only for type == VM_MMIO ++ */ ++ uint64_t hpa; ++ /** @vma_base: service OS user virtual start address of ++ * memory, only for type == VM_SYSMEM && using_vma == true ++ */ ++ uint64_t vma_base; ++ }; ++ /** @len: the length of memory range mapped */ + uint64_t len; /* mmap length */ ++ /** @prot: memory mapping attribute */ + uint32_t prot; /* RWX */ + }; + +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index e7bc8b2372f7..306bd54c4103 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -96,6 +96,7 @@ struct vhm_vm { + struct list_head ioreq_client_list; + struct vhm_request_buffer *req_buf; + struct page *pg; ++ int hugetlb_enabled; + }; + + /** +-- +2.17.1 + diff --git a/patches/0047-drm-i915-tgl-Add-maximum-resolution-supported-by-PSR2-.drm b/patches/0047-drm-i915-tgl-Add-maximum-resolution-supported-by-PSR2-.drm new file mode 100644 index 0000000000..bf44330a32 --- /dev/null +++ b/patches/0047-drm-i915-tgl-Add-maximum-resolution-supported-by-PSR2-.drm @@ -0,0 +1,42 @@ +From da35d51d058dae452f713e9eb83fd9e9abc8ac47 
Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= +Date: Fri, 23 Aug 2019 01:20:41 -0700 +Subject: [PATCH 047/690] drm/i915/tgl: Add maximum resolution supported by + PSR2 HW +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +TGL PSR2 HW supports a bigger resolution, so lets add it + +BSpec: 50422, 49199 +Cc: Dhinakaran Pandiyan +Cc: Rodrigo Vivi +Signed-off-by: José Roberto de Souza +Signed-off-by: Lucas De Marchi +Reviewed-by: Lucas De Marchi +Reviewed-by: Anshuman Gupta +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-10-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/display/intel_psr.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c +index dafd3737cc5a..2af3826121fa 100644 +--- a/drivers/gpu/drm/i915/display/intel_psr.c ++++ b/drivers/gpu/drm/i915/display/intel_psr.c +@@ -576,7 +576,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, + return false; + } + +- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { ++ if (INTEL_GEN(dev_priv) >= 12) { ++ psr_max_h = 5120; ++ psr_max_v = 3200; ++ } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + psr_max_h = 4096; + psr_max_v = 2304; + } else if (IS_GEN(dev_priv, 9)) { +-- +2.17.1 + diff --git a/patches/0047-mei-bus-use-zero-vtag-for-bus-clients.security b/patches/0047-mei-bus-use-zero-vtag-for-bus-clients.security new file mode 100644 index 0000000000..aa03419f3c --- /dev/null +++ b/patches/0047-mei-bus-use-zero-vtag-for-bus-clients.security @@ -0,0 +1,183 @@ +From 2b298aa6563c75c1ffea5dbed0779b56d2f5b701 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Wed, 24 Oct 2018 14:30:40 +0300 +Subject: [PATCH 47/65] mei: bus: use zero vtag for bus clients. 
+ +Once vtags are enabled a zero vtag is required +for the read flow to work for also for devices on mei client bus. + +Change-Id: I932ca3227348fa3415a8a7b4689b4da32e363594 +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/bus.c | 32 ++++++++++++++++++++++++++++++++ + drivers/misc/mei/client.c | 19 +++++++++++++++++-- + drivers/misc/mei/client.h | 1 + + drivers/misc/mei/main.c | 17 +---------------- + 4 files changed, 51 insertions(+), 18 deletions(-) + +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index af1a6c66a342..2cc72da613b4 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -495,6 +495,16 @@ static void mei_cl_bus_module_put(struct mei_cl_device *cldev) + module_put(cldev->bus->dev->driver->owner); + } + ++static int mei_cldev_vt_support_check(struct mei_cl_device *cldev) ++{ ++ struct mei_device *bus = cldev->bus; ++ ++ if (!bus->hbm_f_vt_supported) ++ return -EOPNOTSUPP; ++ ++ return cldev->me_cl->props.vt_supported ? 
0 : -EOPNOTSUPP; ++} ++ + /** + * mei_cldev_enable - enable me client device + * create connection with me client +@@ -507,6 +517,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev) + { + struct mei_device *bus = cldev->bus; + struct mei_cl *cl; ++ struct mei_cl_vtag *cl_vtag; + int ret; + + cl = cldev->cl; +@@ -531,6 +542,19 @@ int mei_cldev_enable(struct mei_cl_device *cldev) + goto out; + } + ++ if (!mei_cldev_vt_support_check(cldev)) { ++ if (!list_first_entry_or_null(&cl->vtag_map, ++ struct mei_cl_vtag, list)) { ++ cl_vtag = mei_cl_vtag_alloc(NULL, 0); ++ if (IS_ERR(cl_vtag)) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ list_add_tail(&cl_vtag->list, &cl->vtag_map); ++ } ++ } ++ + ret = mei_cl_connect(cl, cldev->me_cl, NULL); + if (ret < 0) + dev_err(&cldev->dev, "cannot connect\n"); +@@ -573,6 +597,7 @@ int mei_cldev_disable(struct mei_cl_device *cldev) + { + struct mei_device *bus; + struct mei_cl *cl; ++ struct mei_cl_vtag *cl_vtag; + int err; + + if (!cldev) +@@ -586,6 +611,13 @@ int mei_cldev_disable(struct mei_cl_device *cldev) + + mutex_lock(&bus->device_lock); + ++ cl_vtag = list_first_entry_or_null(&cl->vtag_map, ++ struct mei_cl_vtag, list); ++ if (cl_vtag) { ++ list_del(&cl_vtag->list); ++ kfree(cl_vtag); ++ } ++ + if (!mei_cl_is_connected(cl)) { + dev_dbg(bus->dev, "Already disconnected\n"); + err = 0; +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index 55ed821a49e3..fcf12edf6fc6 100644 +--- a/drivers/misc/mei/client.c ++++ b/drivers/misc/mei/client.c +@@ -1267,6 +1267,21 @@ static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl) + return 0; + } + ++struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag) ++{ ++ struct mei_cl_vtag *cl_vtag; ++ ++ cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL); ++ if (!cl_vtag) ++ return ERR_PTR(-ENOMEM); ++ ++ INIT_LIST_HEAD(&cl_vtag->list); ++ cl_vtag->vtag = vtag; ++ cl_vtag->fp = fp; ++ ++ return cl_vtag; ++} ++ + const struct file *mei_cl_fp_by_vtag(const struct mei_cl 
*cl, u8 vtag) + { + struct mei_cl_vtag *vtag_l; +@@ -1275,7 +1290,7 @@ const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag) + if (vtag_l->vtag == vtag) + return vtag_l->fp; + +- return NULL; ++ return ERR_PTR(-ENOENT); + } + + static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag) +@@ -1325,7 +1340,7 @@ void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb) + + if (!mei_cl_vt_support_check(cl)) { + fp = mei_cl_fp_by_vtag(cl, cb->vtag); +- if (!fp) { ++ if (IS_ERR(fp)) { + /* client already disconnected, discarding */ + mei_io_cb_free(cb); + return; +diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h +index 2e2385e562a9..f138419ab1d3 100644 +--- a/drivers/misc/mei/client.h ++++ b/drivers/misc/mei/client.h +@@ -104,6 +104,7 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length, + const struct file *fp); + int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp); + ++struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag); + const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag); + /* + * MEI input output function prototype +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index 6e89a442e03c..888d804a90de 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -453,21 +453,6 @@ static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid) + return ret; + } + +-static struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag) +-{ +- struct mei_cl_vtag *cl_vtag; +- +- cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL); +- if (!cl_vtag) +- return ERR_PTR(-ENOMEM); +- +- INIT_LIST_HEAD(&cl_vtag->list); +- cl_vtag->vtag = vtag; +- cl_vtag->fp = fp; +- +- return cl_vtag; +-} +- + static int mei_ioctl_connect_vtag(struct file *file, + const uuid_le *in_client_uuid, + struct mei_client *client, +@@ -498,7 +483,7 @@ static int mei_ioctl_connect_vtag(struct file *file, + continue; + + /* if tag already 
exist try another fp */ +- if (mei_cl_fp_by_vtag(pos, vtag)) ++ if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag))) + continue; + + /* replace cl with acquired one */ +-- +2.17.1 + diff --git a/patches/0047-net-stmmac-add-support-for-ETF-QDISC-for-Time.connectivity b/patches/0047-net-stmmac-add-support-for-ETF-QDISC-for-Time.connectivity new file mode 100644 index 0000000000..1b7bae7999 --- /dev/null +++ b/patches/0047-net-stmmac-add-support-for-ETF-QDISC-for-Time.connectivity @@ -0,0 +1,174 @@ +From f1579a1936a5d388a83a2856f87addc3de7cff35 Mon Sep 17 00:00:00 2001 +From: Weifeng Voon +Date: Mon, 22 Oct 2018 15:36:14 +0800 +Subject: [PATCH 047/108] net: stmmac: add support for ETF QDISC for Time-Based + Scheduling + +This adds support for ETF configuration using TC application and +the setting of LaunchTime (per-packet TxTime) through Enhanced +Tx Descriptor. + +Please also take note that both TSO and TBS cannot coexist, so +we add a warning there. + +Signed-off-by: Weifeng Voon +--- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 5 +++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 40 +++++++++++++++++++ + .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 21 ++++++++++ + include/linux/stmmac.h | 1 + + 4 files changed, 67 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 8bd666a520ce..d8e76cfaf6be 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -642,6 +642,7 @@ struct tc_cls_u32_offload; + struct tc_cbs_qopt_offload; + struct flow_cls_offload; + struct tc_taprio_qopt_offload; ++struct tc_etf_qopt_offload; + + struct stmmac_tc_ops { + int (*init)(struct stmmac_priv *priv); +@@ -653,6 +654,8 @@ struct stmmac_tc_ops { + struct flow_cls_offload *cls); + int (*setup_taprio)(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt); ++ int (*setup_etf)(struct stmmac_priv *priv, ++ struct tc_etf_qopt_offload *qopt); + }; + + #define 
stmmac_tc_init(__priv, __args...) \ +@@ -665,6 +668,8 @@ struct stmmac_tc_ops { + stmmac_do_callback(__priv, tc, setup_cls, __args) + #define stmmac_tc_setup_taprio(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_taprio, __args) ++#define stmmac_tc_setup_etf(__priv, __args...) \ ++ stmmac_do_callback(__priv, tc, setup_etf, __args) + + struct stmmac_counters; + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index de87a1f10247..6dcd8392cad6 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2428,6 +2428,25 @@ static void stmmac_configure_cbs(struct stmmac_priv *priv) + } + } + ++static int stmmac_set_tbs_launchtime(struct stmmac_priv *priv, ++ struct dma_desc *desc, ++ u64 tx_time) ++{ ++ struct dma_enhanced_tx_desc *enhtxdesc; ++ u32 launchtime_ns; ++ u8 launchtime_s; ++ ++ enhtxdesc = container_of(desc, struct dma_enhanced_tx_desc, basic); ++ launchtime_ns = do_div(tx_time, NSEC_PER_SEC); ++ launchtime_s = tx_time; ++ ++ enhtxdesc->etdes4 = launchtime_s & ETDESC4_LT_SEC; ++ enhtxdesc->etdes5 = launchtime_ns & ETDESC5_LT_NANOSEC; ++ enhtxdesc->etdes4 |= ETDESC4_LTV; ++ ++ return 0; ++} ++ + /** + * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel + * @priv: driver private structure +@@ -3521,6 +3540,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + if (has_vlan) + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); + ++ /* Fill the Enhanced Tx Descriptor with launch time. ++ * If skb is fragmented, only the 1st descriptor will be filled. ++ * Drop the skb and show warning when launch time value ++ * is invalid. 
++ */ ++ if (priv->plat->tx_queues_cfg[queue].tbs_en && skb->tstamp && ++ priv->enhanced_tx_desc) { ++ if (stmmac_set_tbs_launchtime(priv, first, ++ ktime_to_ns(skb->tstamp))) { ++ netdev_warn(priv->dev, "Launch time setting failed\n"); ++ goto tbs_err; ++ } ++ } ++ + enh_desc = priv->plat->enh_desc; + /* To program the descriptors according to the size of the frame */ + if (enh_desc) +@@ -3693,6 +3726,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; + return NETDEV_TX_OK; ++ ++tbs_err: ++ dev_kfree_skb(skb); ++ priv->dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; + } + + static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) +@@ -4491,6 +4529,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, + return stmmac_tc_setup_cbs(priv, priv, type_data); + case TC_SETUP_QDISC_TAPRIO: + return stmmac_tc_setup_taprio(priv, priv, type_data); ++ case TC_SETUP_QDISC_ETF: ++ return stmmac_tc_setup_etf(priv, priv, type_data); + default: + return -EOPNOTSUPP; + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +index cb871bf2707f..4a2cfa35675f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +@@ -731,10 +731,31 @@ static int tc_setup_taprio(struct stmmac_priv *priv, + return ret; + } + ++static int tc_setup_etf(struct stmmac_priv *priv, ++ struct tc_etf_qopt_offload *qopt) ++{ ++ if (!priv->enhanced_tx_desc) ++ return -EOPNOTSUPP; ++ ++ if (priv->speed == SPEED_10) ++ return -EOPNOTSUPP; ++ ++ if (priv->tso && qopt->enable) { ++ dev_warn(priv->device, ++ "TSO is ON, please disable it to enable TBS\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ priv->plat->tx_queues_cfg[qopt->queue].tbs_en = qopt->enable; ++ ++ return 0; ++} ++ + const struct stmmac_tc_ops dwmac510_tc_ops = { + .init = tc_init, + .setup_cls_u32 = 
tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, + .setup_cls = tc_setup_cls, + .setup_taprio = tc_setup_taprio, ++ .setup_etf = tc_setup_etf, + }; +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 5c14b0c0cd90..1f667368edfa 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -126,6 +126,7 @@ struct stmmac_txq_cfg { + u32 low_credit; + bool use_prio; + u32 prio; ++ bool tbs_en; + }; + + struct plat_stmmacenet_data { +-- +2.17.1 + diff --git a/patches/0047-trusty-Update-Trusty-timer-solution.trusty b/patches/0047-trusty-Update-Trusty-timer-solution.trusty new file mode 100644 index 0000000000..e2dd2a5e4f --- /dev/null +++ b/patches/0047-trusty-Update-Trusty-timer-solution.trusty @@ -0,0 +1,152 @@ +From 9a8ef5853d910e89f8272e83adf4ef138effbb98 Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Tue, 12 Dec 2017 15:16:21 +0800 +Subject: [PATCH 47/63] trusty: Update Trusty timer solution + +1. Add new customized SMC calls +2. Move send pending interrupt implementation to trusty-irq.c +3. 
Invokes new added standard SMC call to inject timer interrupt to +secure side + +Change-Id: I6c9a94c8ff50f42b58abd2e2b2dd5efd26c126e2 +Signed-off-by: Zhong,Fangjian +--- + drivers/trusty/trusty-irq.c | 9 +++++++++ + drivers/trusty/trusty-timer.c | 38 +++++++++++++++++++++++++++++++++-- + include/linux/trusty/trusty.h | 11 ---------- + 3 files changed, 45 insertions(+), 13 deletions(-) + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index 868a31c01f19..04df531bf9d0 100644 +--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -59,6 +59,15 @@ struct trusty_irq_state { + + static enum cpuhp_state trusty_irq_online; + ++#define TRUSTY_VMCALL_PENDING_INTR 0x74727505 ++static inline void set_pending_intr_to_lk(uint8_t vector) ++{ ++ __asm__ __volatile__( ++ "vmcall" ++ ::"a"(TRUSTY_VMCALL_PENDING_INTR), "b"(vector) ++ ); ++} ++ + static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is, + struct trusty_irq_irqset *irqset, + bool percpu) +diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c +index e88dc5f4cdf8..43e43265c3c6 100644 +--- a/drivers/trusty/trusty-timer.c ++++ b/drivers/trusty/trusty-timer.c +@@ -24,6 +24,7 @@ + struct trusty_timer { + struct sec_timer_state *sts; + struct hrtimer tm; ++ struct work_struct work; + }; + + struct trusty_timer_dev_state { +@@ -32,16 +33,33 @@ struct trusty_timer_dev_state { + struct device *trusty_dev; + struct notifier_block call_notifier; + struct trusty_timer timer; ++ struct workqueue_struct *workqueue; + }; + ++/* Max entity defined as SMC_NUM_ENTITIES(64) */ ++#define SMC_ENTITY_SMC_X86 63 /* Used for customized SMC calls */ ++ ++#define SMC_SC_LK_TIMER SMC_STDCALL_NR(SMC_ENTITY_SMC_X86, 0) ++ ++static void timer_work_func(struct work_struct *work) ++{ ++ int ret; ++ struct trusty_timer_dev_state *s; ++ ++ s = container_of(work, struct trusty_timer_dev_state, timer.work); ++ ++ ret = trusty_std_call32(s->trusty_dev, SMC_SC_LK_TIMER, 0, 0, 0); 
++ if (ret != 0) ++ dev_err(s->dev, "%s failed %d\n", __func__, ret); ++} ++ + static enum hrtimer_restart trusty_timer_cb(struct hrtimer *tm) + { + struct trusty_timer_dev_state *s; + + s = container_of(tm, struct trusty_timer_dev_state, timer.tm); + +- set_pending_intr_to_lk(0x31); +- trusty_enqueue_nop(s->trusty_dev, NULL); ++ queue_work(s->workqueue, &s->timer.work); + + return HRTIMER_NORESTART; + } +@@ -114,6 +132,12 @@ static int trusty_timer_probe(struct platform_device *pdev) + sizeof(*tt->sts)); + WARN_ON(!tt->sts); + ++ s->workqueue = alloc_workqueue("trusty-timer-wq", WQ_CPU_INTENSIVE, 0); ++ if (!s->workqueue) { ++ ret = -ENODEV; ++ dev_err(&pdev->dev, "Failed to allocate work queue\n"); ++ goto err_allocate_work_queue; ++ } + + /* register notifier */ + s->call_notifier.notifier_call = trusty_timer_call_notify; +@@ -124,10 +148,18 @@ static int trusty_timer_probe(struct platform_device *pdev) + return ret; + } + ++ INIT_WORK(&s->timer.work, timer_work_func); ++ + dev_info(s->dev, "initialized\n"); + + return 0; + ++err_register_call_notifier: ++ destroy_workqueue(s->workqueue); ++err_allocate_work_queue: ++ kfree(s); ++ return ret; ++ + } + + static int trusty_timer_remove(struct platform_device *pdev) +@@ -144,6 +176,8 @@ static int trusty_timer_remove(struct platform_device *pdev) + tt = &s->timer; + hrtimer_cancel(&tt->tm); + ++ flush_work(&tt->work); ++ destroy_workqueue(s->workqueue); + /* free state */ + kfree(s); + return 0; +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index 1e9b4559d1b6..3189c7ec967c 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -85,17 +85,6 @@ static inline void trusty_nop_init(struct trusty_nop *nop, + void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop); + void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop); + +-#define TRUSTY_VMCALL_PENDING_INTR 0x74727505 +-static inline void set_pending_intr_to_lk(uint8_t vector) +-{ 
+-#ifdef CONFIG_X86 +- __asm__ __volatile__( +- "vmcall" +- ::"a"(TRUSTY_VMCALL_PENDING_INTR), "b"(vector) +- ); +-#endif +-} +- + void trusty_update_wall_info(struct device *dev, void *va, size_t sz); + void *trusty_wall_base(struct device *dev); + void *trusty_wall_per_cpu_item_ptr(struct device *dev, unsigned int cpu, +-- +2.17.1 + diff --git a/patches/0048-ASoC-compress-Power-up-components-before-startup.audio b/patches/0048-ASoC-compress-Power-up-components-before-startup.audio new file mode 100644 index 0000000000..fe5f6fe094 --- /dev/null +++ b/patches/0048-ASoC-compress-Power-up-components-before-startup.audio @@ -0,0 +1,87 @@ +From 3308bf2b0e1686f2507d71a5a26814bc3d87597c Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 13:52:12 +0100 +Subject: [PATCH 048/193] ASoC: compress: Power up components before startup + +For some devices, components need to be power up before stream startup +sequence commences. Update soc_compr_open to provide such functionality. +Based on soc_pcm_open. Adjust soc_compr_free accordingly to power down +components once compress stream is closed. 
+ +Change-Id: Ida63574ab8a7f601088a5d7b27c2b8dbe172b3ef +Signed-off-by: Cezary Rojewski +--- + sound/soc/soc-compress.c | 25 +++++++++++++++++++++++++ + 1 file changed, 25 insertions(+) + +diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c +index 9e54d8ae6d2c..0056a2247974 100644 +--- a/sound/soc/soc-compress.c ++++ b/sound/soc/soc-compress.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + + static int soc_compr_components_open(struct snd_compr_stream *cstream, + struct snd_soc_component **last) +@@ -77,10 +78,16 @@ static int soc_compr_open(struct snd_compr_stream *cstream) + { + struct snd_soc_pcm_runtime *rtd = cstream->private_data; + struct snd_soc_component *component; ++ struct snd_soc_rtdcom_list *rtdcom; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; + int ret; + + mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass); ++ for_each_rtdcom(rtd, rtdcom) { ++ component = rtdcom->component; ++ ++ pm_runtime_get_sync(component->dev); ++ } + + if (cpu_dai->driver->cops && cpu_dai->driver->cops->startup) { + ret = cpu_dai->driver->cops->startup(cstream, cpu_dai); +@@ -119,6 +126,14 @@ static int soc_compr_open(struct snd_compr_stream *cstream) + cpu_dai->driver->cops->shutdown(cstream, cpu_dai); + out: + mutex_unlock(&rtd->card->pcm_mutex); ++ ++ for_each_rtdcom(rtd, rtdcom) { ++ component = rtdcom->component; ++ ++ pm_runtime_mark_last_busy(component->dev); ++ pm_runtime_put_autosuspend(component->dev); ++ } ++ + return ret; + } + +@@ -245,6 +260,8 @@ static void close_delayed_work(struct work_struct *work) + static int soc_compr_free(struct snd_compr_stream *cstream) + { + struct snd_soc_pcm_runtime *rtd = cstream->private_data; ++ struct snd_soc_component *component; ++ struct snd_soc_rtdcom_list *rtdcom; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; + struct snd_soc_dai *codec_dai = rtd->codec_dai; + int stream; +@@ -293,6 +310,14 @@ static int soc_compr_free(struct snd_compr_stream *cstream) + } + + 
mutex_unlock(&rtd->card->pcm_mutex); ++ ++ for_each_rtdcom(rtd, rtdcom) { ++ component = rtdcom->component; ++ ++ pm_runtime_mark_last_busy(component->dev); ++ pm_runtime_put_autosuspend(component->dev); ++ } ++ + return 0; + } + +-- +2.17.1 + diff --git a/patches/0048-VHM-change-VM_SYSMEM-VM_MMIO-to-VM_MEMMAP_SYSMEM-VM_M.acrn b/patches/0048-VHM-change-VM_SYSMEM-VM_MMIO-to-VM_MEMMAP_SYSMEM-VM_M.acrn new file mode 100644 index 0000000000..e2ac4a4395 --- /dev/null +++ b/patches/0048-VHM-change-VM_SYSMEM-VM_MMIO-to-VM_MEMMAP_SYSMEM-VM_M.acrn @@ -0,0 +1,67 @@ +From cae5dd7d3fc75eb64adcad736863f265169d338d Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:59:00 +0800 +Subject: [PATCH 048/150] VHM: change VM_SYSMEM/VM_MMIO to + VM_MEMMAP_SYSMEM/VM_MEMMAP_MMIO + +Change-Id: I7dc07502530ae47c6f9a3bc6a29fc271a053e8da +Signed-off-by: Jason Chen CJ +Reviewed-on: +--- + drivers/vhm/vhm_mm.c | 4 ++-- + include/linux/vhm/vhm_ioctl_defs.h | 9 +++++---- + 2 files changed, 7 insertions(+), 6 deletions(-) + +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index cc08fd9d0965..728998d0341d 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -270,13 +270,13 @@ int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + unsigned long guest_gpa, host_gpa; + + /* hugetlb use vma to do the mapping */ +- if (memmap->type == VM_SYSMEM && memmap->using_vma) ++ if (memmap->type == VM_MEMMAP_SYSMEM && memmap->using_vma) + return hugepage_map_guest(vm, memmap); + + mutex_lock(&vm->seg_lock); + + /* cma or mmio */ +- if (memmap->type == VM_SYSMEM) { ++ if (memmap->type == VM_MEMMAP_SYSMEM) { + list_for_each_entry(seg, &vm->memseg_list, list) { + if (seg->gpa == memmap->gpa + && seg->len == memmap->len) +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index a0a830dec3fa..eb8d0d08a89d 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -112,8 +112,8 @@ struct 
vm_memseg { + uint64_t gpa; + }; + +-#define VM_SYSMEM 0 +-#define VM_MMIO 1 ++#define VM_MEMMAP_SYSMEM 0 ++#define VM_MEMMAP_MMIO 1 + + /** + * struct vm_memmap - EPT memory mapping info for guest +@@ -130,11 +130,12 @@ struct vm_memmap { + /** union */ + union { + /** @hpa: host physical start address of memory, +- * only for type == VM_MMIO ++ * only for type == VM_MEMMAP_MMIO + */ + uint64_t hpa; + /** @vma_base: service OS user virtual start address of +- * memory, only for type == VM_SYSMEM && using_vma == true ++ * memory, only for type == VM_MEMMAP_SYSMEM && ++ * using_vma == true + */ + uint64_t vma_base; + }; +-- +2.17.1 + diff --git a/patches/0048-drm-Add-for_each_oldnew_intel_crtc_in_state_reverse.drm b/patches/0048-drm-Add-for_each_oldnew_intel_crtc_in_state_reverse.drm new file mode 100644 index 0000000000..b198731181 --- /dev/null +++ b/patches/0048-drm-Add-for_each_oldnew_intel_crtc_in_state_reverse.drm @@ -0,0 +1,48 @@ +From 101c1e1e64123db4902b2f63a87fc678ccdae22e Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= +Date: Fri, 23 Aug 2019 01:20:43 -0700 +Subject: [PATCH 048/690] drm: Add + for_each_oldnew_intel_crtc_in_state_reverse() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Same as for_each_oldnew_intel_crtc_in_state() but iterates in reverse +order. 
+ +v2: Fix additional blank line +v3: Rebase + +Cc: Rodrigo Vivi +Cc: Ville Syrjälä +Signed-off-by: José Roberto de Souza +Signed-off-by: Lucas De Marchi +Reviewed-by: Mika Kahola +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-12-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/display/intel_display.h | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h +index 01fa87ad3270..9752a4c09753 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.h ++++ b/drivers/gpu/drm/i915/display/intel_display.h +@@ -411,6 +411,15 @@ enum phy_fia { + (__i)++) \ + for_each_if(crtc) + ++#define for_each_oldnew_intel_crtc_in_state_reverse(__state, crtc, old_crtc_state, new_crtc_state, __i) \ ++ for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \ ++ (__i) >= 0 && \ ++ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \ ++ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \ ++ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \ ++ (__i)--) \ ++ for_each_if(crtc) ++ + void intel_link_compute_m_n(u16 bpp, int nlanes, + int pixel_clock, int link_clock, + struct intel_link_m_n *m_n, +-- +2.17.1 + diff --git a/patches/0048-mei-bus-add-more-client-attributes-to-sysfs.security b/patches/0048-mei-bus-add-more-client-attributes-to-sysfs.security new file mode 100644 index 0000000000..0ca74ddcb4 --- /dev/null +++ b/patches/0048-mei-bus-add-more-client-attributes-to-sysfs.security @@ -0,0 +1,171 @@ +From 59c49bf1dc00d1b01b45464b2c19e07bd575176f Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Mon, 3 Sep 2018 14:37:27 +0300 +Subject: [PATCH 48/65] mei: bus: add more client attributes to sysfs + +Change-Id: Icfb5dfec59927718373432c8f5a1cf455bccf064 +Signed-off-by: Alexander Usyskin +--- + Documentation/ABI/testing/sysfs-bus-mei | 28 +++++++++++++++ + drivers/misc/mei/bus.c | 44 
+++++++++++++++++++++++ + drivers/misc/mei/client.h | 48 +++++++++++++++++++++++++ + 3 files changed, 120 insertions(+) + +diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei +index 3f8701e8fa24..631547c7c195 100644 +--- a/Documentation/ABI/testing/sysfs-bus-mei ++++ b/Documentation/ABI/testing/sysfs-bus-mei +@@ -26,3 +26,31 @@ KernelVersion: 4.3 + Contact: Tomas Winkler + Description: Stores mei client protocol version + Format: %d ++ ++What: /sys/bus/mei/devices/.../max_conn ++Date: Sep 2018 ++KernelVersion: 4.19 ++Contact: Tomas Winkler ++Description: Stores mei client maximum number of connections ++ Format: %d ++ ++What: /sys/bus/mei/devices/.../fixed ++Date: Sep 2018 ++KernelVersion: 4.19 ++Contact: Tomas Winkler ++Description: Stores mei client fixed address, if any ++ Format: %d ++ ++What: /sys/bus/mei/devices/.../vtag ++Date: Sep 2018 ++KernelVersion: 4.19 ++Contact: Tomas Winkler ++Description: Stores mei client v tag support status ++ Format: %d ++ ++What: /sys/bus/mei/devices/.../max_len ++Date: Sep 2018 ++KernelVersion: 4.19 ++Contact: Tomas Winkler ++Description: Stores mei client maximum message length ++ Format: %d +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index 2cc72da613b4..be03b781625c 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -823,11 +823,55 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, + } + static DEVICE_ATTR_RO(modalias); + ++static ssize_t max_conn_show(struct device *dev, struct device_attribute *a, ++ char *buf) ++{ ++ struct mei_cl_device *cldev = to_mei_cl_device(dev); ++ u8 maxconn = mei_me_cl_max_conn(cldev->me_cl); ++ ++ return scnprintf(buf, PAGE_SIZE, "%d", maxconn); ++} ++static DEVICE_ATTR_RO(max_conn); ++ ++static ssize_t fixed_show(struct device *dev, struct device_attribute *a, ++ char *buf) ++{ ++ struct mei_cl_device *cldev = to_mei_cl_device(dev); ++ u8 fixed = mei_me_cl_fixed(cldev->me_cl); ++ ++ 
return scnprintf(buf, PAGE_SIZE, "%d", fixed); ++} ++static DEVICE_ATTR_RO(fixed); ++ ++static ssize_t vtag_show(struct device *dev, struct device_attribute *a, ++ char *buf) ++{ ++ struct mei_cl_device *cldev = to_mei_cl_device(dev); ++ bool vt = mei_me_cl_vt(cldev->me_cl); ++ ++ return scnprintf(buf, PAGE_SIZE, "%d", vt); ++} ++static DEVICE_ATTR_RO(vtag); ++ ++static ssize_t max_len_show(struct device *dev, struct device_attribute *a, ++ char *buf) ++{ ++ struct mei_cl_device *cldev = to_mei_cl_device(dev); ++ u32 maxlen = mei_me_cl_max_len(cldev->me_cl); ++ ++ return scnprintf(buf, PAGE_SIZE, "%u", maxlen); ++} ++static DEVICE_ATTR_RO(max_len); ++ + static struct attribute *mei_cldev_attrs[] = { + &dev_attr_name.attr, + &dev_attr_uuid.attr, + &dev_attr_version.attr, + &dev_attr_modalias.attr, ++ &dev_attr_max_conn.attr, ++ &dev_attr_fixed.attr, ++ &dev_attr_vtag.attr, ++ &dev_attr_max_len.attr, + NULL, + }; + ATTRIBUTE_GROUPS(mei_cldev); +diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h +index f138419ab1d3..75cb727fc48b 100644 +--- a/drivers/misc/mei/client.h ++++ b/drivers/misc/mei/client.h +@@ -69,6 +69,54 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl) + return me_cl->props.protocol_version; + } + ++/** ++ * mei_me_cl_max_conn - return me client max number of connections ++ * ++ * @me_cl: me client ++ * ++ * Return: me client max number of connections ++ */ ++static inline u8 mei_me_cl_max_conn(const struct mei_me_client *me_cl) ++{ ++ return me_cl->props.max_number_of_connections; ++} ++ ++/** ++ * mei_me_cl_fixed - return me client fixed address, if any ++ * ++ * @me_cl: me client ++ * ++ * Return: me client fixed address ++ */ ++static inline u8 mei_me_cl_fixed(const struct mei_me_client *me_cl) ++{ ++ return me_cl->props.fixed_address; ++} ++ ++/** ++ * mei_me_cl_vt - return me client vtag supported status ++ * ++ * @me_cl: me client ++ * ++ * Return: true if me client supports vt tagging ++ */ ++static inline bool 
mei_me_cl_vt(const struct mei_me_client *me_cl) ++{ ++ return me_cl->props.vt_supported == 1; ++} ++ ++/** ++ * mei_me_cl_max_len - return me client max msg length ++ * ++ * @me_cl: me client ++ * ++ * Return: me client max msg length ++ */ ++static inline u32 mei_me_cl_max_len(const struct mei_me_client *me_cl) ++{ ++ return me_cl->props.max_msg_length; ++} ++ + /* + * MEI IO Functions + */ +-- +2.17.1 + diff --git a/patches/0048-net-stmmac-add-Time-Based-Scheduling-mode-lau.connectivity b/patches/0048-net-stmmac-add-Time-Based-Scheduling-mode-lau.connectivity new file mode 100644 index 0000000000..161c6213a7 --- /dev/null +++ b/patches/0048-net-stmmac-add-Time-Based-Scheduling-mode-lau.connectivity @@ -0,0 +1,333 @@ +From c2c33dbb91391152278c8d0c5f28c365720d4f0b Mon Sep 17 00:00:00 2001 +From: Kweh Hock Leong +Date: Fri, 9 Aug 2019 11:14:59 +0800 +Subject: [PATCH 048/108] net: stmmac: add Time-Based Scheduling mode & launch + time offset setting + +Introduce functions for setting TBS mode, Launch Time offset & Launch Time +GSN offset under TSN hardware tunable framework. 
+ +Signed-off-by: Kweh Hock Leong +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 13 +++ + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 67 +++++++++++ + drivers/net/ethernet/stmicro/stmmac/hwif.h | 15 +++ + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 105 ++++++++++++++++++ + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 5 + + 5 files changed, 205 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index 40bbb93db382..acb24eb68429 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -168,6 +168,19 @@ + /* DMA Tx Channel X Control register TBS bits defines */ + #define DMA_CONTROL_EDSE BIT(28) + ++/* MTL TBS Control register */ ++#define MTL_TBS_CTRL 0x00000c40 ++#define MTL_TBS_CTRL_LEOS GENMASK(31, 8) ++#define MTL_TBS_CTRL_LEOS_SHIFT 8 ++#define MTL_TBS_CTRL_LEGOS GENMASK(6, 4) ++#define MTL_TBS_CTRL_LEGOS_SHIFT 4 ++#define MTL_TBS_CTRL_LEOV BIT(1) ++#define MTL_TBS_CTRL_ESTM BIT(0) ++ ++/* TBS Global defines */ ++#define TBS_LEOS_MAX 999999999 /* Max LEOS (ns) */ ++#define TBS_LEGOS_MAX 7 /* Max LE GSN Slot */ ++ + /* MAC Core Version */ + #define TSN_VER_MASK 0xFF + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index 84c979c5a5dc..d33a72225741 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -403,6 +403,69 @@ int dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + return status; + } + ++static void dwmac5_tbs_get_max(u32 *leos_max, ++ u32 *legos_max) ++{ ++ *leos_max = TBS_LEOS_MAX; ++ *legos_max = TBS_LEGOS_MAX; ++} ++ ++static void dwmac5_tbs_set_estm(void __iomem *ioaddr, const u32 estm) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_TBS_CTRL); ++ if (estm) ++ value |= MTL_TBS_CTRL_ESTM; ++ else ++ value &= 
MTL_TBS_CTRL_ESTM; ++ ++ writel(value, ioaddr + MTL_TBS_CTRL); ++} ++ ++static void dwmac5_tbs_set_leos(void __iomem *ioaddr, const u32 leos, ++ const u32 estm) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_TBS_CTRL); ++ ++ /* Launch expiry offset not valid when launch ++ * expiry offset value is 0 and vice versa ++ */ ++ if (leos || (estm && leos)) ++ value |= MTL_TBS_CTRL_LEOV; ++ else ++ value &= ~MTL_TBS_CTRL_LEOV; ++ ++ /* Launch expiry offset is in unit of 256ns ++ * Get the actual leos ns value ++ */ ++ value &= ~MTL_TBS_CTRL_LEOS; ++ value |= (leos & MTL_TBS_CTRL_LEOS); ++ ++ writel(value, ioaddr + MTL_TBS_CTRL); ++} ++ ++static void dwmac5_tbs_set_legos(void __iomem *ioaddr, const u32 legos, ++ const u32 leos) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_TBS_CTRL); ++ ++ if (leos || legos) ++ value |= MTL_TBS_CTRL_LEOV; ++ else ++ value &= ~MTL_TBS_CTRL_LEOV; ++ ++ value &= ~MTL_TBS_CTRL_LEGOS; ++ value |= MTL_TBS_CTRL_LEGOS & ++ (legos << MTL_TBS_CTRL_LEGOS_SHIFT); ++ ++ writel(value, ioaddr + MTL_TBS_CTRL); ++} ++ + const struct tsnif_ops dwmac510_tsnif_ops = { + .read_hwid = dwmac5_read_hwid, + .has_tsn_cap = dwmac5_has_tsn_cap, +@@ -422,6 +485,10 @@ const struct tsnif_ops dwmac510_tsnif_ops = { + .est_get_bank = dwmac5_est_get_bank, + .est_switch_swol = dwmac5_est_switch_swol, + .est_irq_status = dwmac5_est_irq_status, ++ .tbs_get_max = dwmac5_tbs_get_max, ++ .tbs_set_estm = dwmac5_tbs_set_estm, ++ .tbs_set_leos = dwmac5_tbs_set_leos, ++ .tbs_set_legos = dwmac5_tbs_set_legos, + }; + + void dwmac510_tsnif_setup(struct mac_device_info *mac) +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index d8e76cfaf6be..6ab71a7ec873 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -769,6 +769,13 @@ struct tsnif_ops { + int (*est_irq_status)(void *ioaddr, struct net_device *dev, + struct tsn_mmc_stat *mmc_stat, + unsigned int txqcnt); ++ /* 
Time-Based Scheduling (TBS) */ ++ void (*tbs_get_max)(u32 *leos_max, u32 *legos_max); ++ void (*tbs_set_estm)(void __iomem *ioaddr, const u32 estm); ++ void (*tbs_set_leos)(void __iomem *ioaddr, const u32 leos, ++ const u32 estm); ++ void (*tbs_set_legos)(void __iomem *ioaddr, const u32 legos, ++ const u32 leos); + }; + + #define tsnif_read_hwid(__hw, __args...) \ +@@ -807,5 +814,13 @@ struct tsnif_ops { + tsnif_do_void_callback(__hw, est_switch_swol, __args) + #define tsnif_est_irq_status(__hw, __args...) \ + tsnif_do_callback(__hw, est_irq_status, __args) ++#define tsnif_tbs_get_max(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, tbs_get_max, __args) ++#define tsnif_tbs_set_estm(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, tbs_set_estm, __args) ++#define tsnif_tbs_set_leos(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, tbs_set_leos, __args) ++#define tsnif_tbs_set_legos(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, tbs_set_legos, __args) + + #endif /* __STMMAC_HWIF_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index d2e615659f4a..553e511ad47d 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -149,9 +149,15 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + &cap->cycle_max, &cap->idleslope_max); + cap->est_support = 1; + ++ tsnif_tbs_get_max(hw, &cap->leos_max, &cap->legos_max); ++ + dev_info(pdev, "EST: depth=%u, ti_wid=%u, ter_max=%uns, tils_max=%u, tqcnt=%u\n", + gcl_depth, ti_wid, cap->ext_max, tils_max, cap->txqcnt); + ++ if (cap->tbs_support) ++ dev_info(pdev, "TBS: leos_max=%u, legos_max=%u\n", ++ cap->leos_max, cap->legos_max); ++ + return 0; + } + +@@ -199,6 +205,11 @@ int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + struct tsn_hw_cap *cap = &info->cap; + void __iomem *ioaddr = hw->pcsr; + int ret = 0; ++ struct est_gc_bank *gcbc; ++ u32 hw_bank; 
++ u32 estm; ++ u32 leos; ++ + + switch (id) { + case TSN_HWTUNA_TX_EST_TILS: +@@ -209,6 +220,14 @@ int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + return -ENOTSUPP; + } + break; ++ case TSN_HWTUNA_TX_TBS_ESTM: ++ case TSN_HWTUNA_TX_TBS_LEOS: ++ case TSN_HWTUNA_TX_TBS_LEGOS: ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_TBS)) { ++ netdev_info(dev, "TBS: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ break; + default: + netdev_warn(dev, "TSN: invalid tunable id(%u)\n", id); + return -EINVAL; +@@ -256,6 +275,84 @@ int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + netdev_info(dev, "EST: Set CTOV = %u\n", data); + } + break; ++ case TSN_HWTUNA_TX_TBS_ESTM: ++ if (!data && data != 1) { ++ netdev_warn(dev, ++ "TBS: invalid ESTM(%u) - 0 or 1 only\n", ++ data); ++ ++ return -EINVAL; ++ } ++ ++ if (data == 1 && !tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) { ++ netdev_warn(dev, ++ "TBS: ESTM(%u) but EST is OFF\n", ++ data); ++ ++ return -EINVAL; ++ } ++ ++ if (data != info->hwtunable[TSN_HWTUNA_TX_TBS_ESTM]) { ++ tsnif_tbs_set_estm(hw, ioaddr, data); ++ info->hwtunable[TSN_HWTUNA_TX_TBS_ESTM] = data; ++ netdev_info(dev, "TBS: Set ESTM = %u\n", data); ++ } ++ break; ++ case TSN_HWTUNA_TX_TBS_LEOS: ++ estm = info->hwtunable[TSN_HWTUNA_TX_TBS_ESTM]; ++ ++ if (data > cap->leos_max) { ++ netdev_warn(dev, ++ "TBS: invalid LEOS(%u), max=%u\n", ++ data, cap->leos_max); ++ ++ return -EINVAL; ++ } ++ ++ /* For EST mode, make sure leos does not exceed cycle time */ ++ if (estm) { ++ hw_bank = tsnif_est_get_bank(hw, ioaddr, 0); ++ gcbc = &info->est_gcc.gcb[hw_bank]; ++ ++ if (data > (gcbc->gcrr.cycle_nsec - 1)) { ++ netdev_warn(dev, ++ "TBS: LEOS > (cycle time - 1ns)\n"); ++ ++ return -EINVAL; ++ } ++ } ++ ++ if (data != info->hwtunable[TSN_HWTUNA_TX_TBS_LEOS]) { ++ tsnif_tbs_set_leos(hw, ioaddr, data, estm); ++ info->hwtunable[TSN_HWTUNA_TX_TBS_LEOS] = data; ++ netdev_info(dev, "TBS: Set LEOS = %u\n", data); ++ } ++ break; ++ case 
TSN_HWTUNA_TX_TBS_LEGOS: ++ estm = info->hwtunable[TSN_HWTUNA_TX_TBS_ESTM]; ++ leos = info->hwtunable[TSN_HWTUNA_TX_TBS_LEOS]; ++ ++ /* if EST not turn on, ret fail */ ++ if (!(tsn_has_feat(hw, dev, TSN_FEAT_ID_EST) && estm)) { ++ netdev_warn(dev, "TBS EST mode is not enabled\n"); ++ ++ return -EINVAL; ++ } ++ ++ if (data > cap->legos_max) { ++ netdev_warn(dev, ++ "TBS: LEGOS(%u) > max=%u\n", ++ data, cap->legos_max); ++ ++ return -EINVAL; ++ } ++ ++ if (data != info->hwtunable[TSN_HWTUNA_TX_TBS_LEGOS]) { ++ tsnif_tbs_set_legos(hw, ioaddr, data, leos); ++ info->hwtunable[TSN_HWTUNA_TX_TBS_LEGOS] = data; ++ netdev_info(dev, "TBS: Set LEGOS = %u\n", data); ++ } ++ break; + default: + netdev_warn(dev, "TSN: invalid tunable id(%u)\n", id); + ret = -EINVAL; +@@ -278,6 +375,14 @@ int tsn_hwtunable_get(struct mac_device_info *hw, struct net_device *dev, + return -ENOTSUPP; + } + break; ++ case TSN_HWTUNA_TX_TBS_ESTM: ++ case TSN_HWTUNA_TX_TBS_LEOS: ++ case TSN_HWTUNA_TX_TBS_LEGOS: ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_TBS)) { ++ netdev_info(dev, "TBS: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ break; + default: + netdev_warn(dev, "TSN: invalid tunable id(%u)\n", id); + return -EINVAL; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index 46de14829eff..abdc4635175b 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -16,6 +16,9 @@ enum tsn_hwtunable_id { + TSN_HWTUNA_TX_EST_TILS = 0, + TSN_HWTUNA_TX_EST_PTOV, + TSN_HWTUNA_TX_EST_CTOV, ++ TSN_HWTUNA_TX_TBS_ESTM, /* TBS Absolute or EST mode */ ++ TSN_HWTUNA_TX_TBS_LEOS, ++ TSN_HWTUNA_TX_TBS_LEGOS, + TSN_HWTUNA_MAX, + }; + +@@ -54,6 +57,8 @@ struct tsn_hw_cap { + u32 ptov_max; /* Max PTP Offset */ + u32 ctov_max; /* Max Current Time Offset */ + u32 idleslope_max; /* Max idle slope */ ++ u32 leos_max; /* Launch Expiry Offset */ ++ u32 legos_max; /* Launch Expiry GSN Offset */ 
+ }; + + /* EST Gate Control Entry */ +-- +2.17.1 + diff --git a/patches/0048-trusty-detect-vmm-when-load-trusty-driver.trusty b/patches/0048-trusty-detect-vmm-when-load-trusty-driver.trusty new file mode 100644 index 0000000000..972a2b3661 --- /dev/null +++ b/patches/0048-trusty-detect-vmm-when-load-trusty-driver.trusty @@ -0,0 +1,287 @@ +From ba22c567e127295271c737a807017da3f0dbf18a Mon Sep 17 00:00:00 2001 +From: "Qi, Yadong" +Date: Fri, 2 Feb 2018 13:12:40 +0800 +Subject: [PATCH 48/63] trusty: detect vmm when load trusty driver + +Use hypervisor_cpuid_base() to detect VMM which support Trusty. +Currently, there are 2 hypervisors support trusty: CWP and EVMM. +Use different hypercall to implement SMC for EVMM and CWP. + +Change-Id: I45a9c69862c785aba3d2911ca439b5e3d8cf0cf6 +Signed-off-by: Qi, Yadong +Tracked-On: OAM-56970 +--- + drivers/trusty/trusty-ipc.c | 4 +-- + drivers/trusty/trusty-irq.c | 4 +-- + drivers/trusty/trusty-log.c | 10 +++---- + drivers/trusty/trusty-timer.c | 4 +-- + drivers/trusty/trusty-virtio.c | 4 +-- + drivers/trusty/trusty-wall.c | 4 +-- + drivers/trusty/trusty.c | 52 +++++++++++++++++++++++++++------- + include/linux/trusty/trusty.h | 33 +++++++++++---------- + 8 files changed, 74 insertions(+), 41 deletions(-) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index 93003b45eb32..a2bc3fcba29a 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -1525,9 +1525,9 @@ static int tipc_virtio_probe(struct virtio_device *vdev) + vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb}; + const char *vq_names[] = { "rx", "tx" }; + +- err = trusty_check_cpuid(NULL); ++ err = trusty_detect_vmm(); + if (err < 0) { +- dev_err(&vdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ dev_err(&vdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +index 04df531bf9d0..af2af6ee37ba 100644 
+--- a/drivers/trusty/trusty-irq.c ++++ b/drivers/trusty/trusty-irq.c +@@ -539,9 +539,9 @@ static int trusty_irq_probe(struct platform_device *pdev) + unsigned long irq_flags; + struct trusty_irq_state *is; + +- ret = trusty_check_cpuid(NULL); ++ ret = trusty_detect_vmm(); + if (ret < 0) { +- dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +index b58715cc2ef3..d2446a1f34c9 100755 +--- a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -259,13 +259,13 @@ static int trusty_log_probe(struct platform_device *pdev) + { + struct trusty_log_state *s; + int result; +- u32 vmm_signature; ++ int vmm_id; + phys_addr_t pa; + struct deadloop_dump *dump; + +- result = trusty_check_cpuid(&vmm_signature); +- if (result < 0) { +- dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ vmm_id = trusty_detect_vmm(); ++ if (vmm_id < 0) { ++ dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + +@@ -321,7 +321,7 @@ static int trusty_log_probe(struct platform_device *pdev) + goto error_panic_notifier; + } + +- if(vmm_signature == EVMM_SIGNATURE_VMM) { ++ if(vmm_id == VMM_ID_EVMM) { + /* allocate debug buffer for vmm panic dump */ + g_vmm_debug_buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); + if (!g_vmm_debug_buf) { +diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c +index 43e43265c3c6..5d4466d4e157 100644 +--- a/drivers/trusty/trusty-timer.c ++++ b/drivers/trusty/trusty-timer.c +@@ -100,9 +100,9 @@ static int trusty_timer_probe(struct platform_device *pdev) + struct trusty_timer_dev_state *s; + struct trusty_timer *tt; + +- ret = trusty_check_cpuid(NULL); ++ ret = trusty_detect_vmm(); + if (ret < 0) { +- dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver 
initialization!"); ++ dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index b2418d7da5e1..743a4789772f 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -641,9 +641,9 @@ static int trusty_virtio_probe(struct platform_device *pdev) + int ret; + struct trusty_ctx *tctx; + +- ret = trusty_check_cpuid(NULL); ++ ret = trusty_detect_vmm(); + if (ret < 0) { +- dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + +diff --git a/drivers/trusty/trusty-wall.c b/drivers/trusty/trusty-wall.c +index 64368480c309..2345f56a6405 100644 +--- a/drivers/trusty/trusty-wall.c ++++ b/drivers/trusty/trusty-wall.c +@@ -147,9 +147,9 @@ static int trusty_wall_probe(struct platform_device *pdev) + int ret; + struct trusty_wall_dev_state *s; + +- ret = trusty_check_cpuid(NULL); ++ ret = trusty_detect_vmm(); + if (ret < 0) { +- dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index d4eeb40e2b60..98c866487a3e 100755 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -26,7 +26,8 @@ + #include + #include + +-#define TRUSTY_VMCALL_SMC 0x74727500 ++#define EVMM_SMC_HC_ID 0x74727500 ++#define CWP_SMC_HC_ID 0x80000071 + + struct trusty_state; + +@@ -53,13 +54,28 @@ struct trusty_smc_interface { + ulong args[5]; + }; + +-static inline ulong smc(ulong r0, ulong r1, ulong r2, ulong r3) ++static ulong (*smc)(ulong, ulong, ulong, ulong); ++ ++#define asm_smc_vmcall(smc_id, rdi, rsi, rdx, rbx) \ ++do { \ ++ __asm__ __volatile__( \ ++ "vmcall; \n" \ ++ : "=D"(rdi) \ ++ : "r"(smc_id), "D"(rdi), "S"(rsi), "d"(rdx), "b"(rbx) \ 
++ ); \ ++} while (0) ++ ++static inline ulong smc_evmm(ulong r0, ulong r1, ulong r2, ulong r3) + { +- __asm__ __volatile__( +- "vmcall; \n" +- : "=D"(r0) +- : "a"(TRUSTY_VMCALL_SMC), "D"(r0), "S"(r1), "d"(r2), "b"(r3) +- ); ++ register unsigned long smc_id asm("rax") = EVMM_SMC_HC_ID; ++ asm_smc_vmcall(smc_id, r0, r1, r2, r3); ++ return r0; ++} ++ ++static inline ulong smc_cwp(ulong r0, ulong r1, ulong r2, ulong r3) ++{ ++ register unsigned long smc_id asm("r8") = CWP_SMC_HC_ID; ++ asm_smc_vmcall(smc_id, r0, r1, r2, r3); + return r0; + } + +@@ -443,6 +459,19 @@ static void nop_work_func(struct work_struct *work) + dev_dbg(s->dev, "%s: done\n", __func__); + } + ++static void trusty_init_smc(int vmm_id) ++{ ++ if (vmm_id == VMM_ID_EVMM) { ++ smc = smc_evmm; ++ } else if (vmm_id == VMM_ID_CWP) { ++ smc = smc_cwp; ++ } else { ++ pr_err("%s: No smc supports VMM[%d](sig:%s)!", ++ __func__, vmm_id, vmm_signature[vmm_id]); ++ BUG(); ++ } ++} ++ + void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop) + { + unsigned long flags; +@@ -479,8 +508,6 @@ void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop) + } + EXPORT_SYMBOL(trusty_dequeue_nop); + +- +- + static int trusty_probe(struct platform_device *pdev) + { + int ret; +@@ -489,11 +516,14 @@ static int trusty_probe(struct platform_device *pdev) + struct trusty_state *s; + struct device_node *node = pdev->dev.of_node; + +- ret = trusty_check_cpuid(NULL); ++ ret = trusty_detect_vmm(); + if (ret < 0) { +- dev_err(&pdev->dev, "CPUID Error: Cannot find eVmm in trusty driver initialization!"); ++ dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } ++ dev_dbg(&pdev->dev, "Detected VMM: sig=%s\n", vmm_signature[ret]); ++ ++ trusty_init_smc(ret); + + if (!node) { + dev_err(&pdev->dev, "of_node required\n"); +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index 3189c7ec967c..48e1ea716889 100644 +--- a/include/linux/trusty/trusty.h ++++ 
b/include/linux/trusty/trusty.h +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + + + #if IS_ENABLED(CONFIG_TRUSTY) +@@ -90,25 +91,27 @@ void *trusty_wall_base(struct device *dev); + void *trusty_wall_per_cpu_item_ptr(struct device *dev, unsigned int cpu, + u32 item_id, size_t exp_sz); + +-/* CPUID leaf 0x3 is used because eVMM will trap this leaf.*/ +-#define EVMM_SIGNATURE_CORP 0x43544E49 /* "INTC", edx */ +-#define EVMM_SIGNATURE_VMM 0x4D4D5645 /* "EVMM", ecx */ +- +-static inline int trusty_check_cpuid(u32 *vmm_signature) +-{ +- u32 eax, ebx, ecx, edx; ++enum { ++ VMM_ID_EVMM = 0, ++ VMM_ID_CWP, ++ VMM_SUPPORTED_NUM ++}; + +- cpuid(3, &eax, &ebx, &ecx, &edx); +- if ((ecx != EVMM_SIGNATURE_VMM) || +- (edx != EVMM_SIGNATURE_CORP)) { +- return -EINVAL; +- } ++static const char *vmm_signature[] = { ++ [VMM_ID_EVMM] = "EVMMEVMMEVMM", ++ [VMM_ID_CWP] = "CWPCWPCWP\0\0" ++}; + +- if(vmm_signature) { +- *vmm_signature = ecx; ++/* Detect VMM and return vmm_id */ ++static inline int trusty_detect_vmm(void) ++{ ++ int i; ++ for (i = 0; i < VMM_SUPPORTED_NUM; i++) { ++ if (hypervisor_cpuid_base(vmm_signature[i], 0)) ++ return i; + } + +- return 0; ++ return -EINVAL; + } + + /* High 32 bits of unsigned 64-bit integer*/ +-- +2.17.1 + diff --git a/patches/0049-ASoC-Intel-Skylake-Account-for-compress-streams-when.audio b/patches/0049-ASoC-Intel-Skylake-Account-for-compress-streams-when.audio new file mode 100644 index 0000000000..c1ce18acc2 --- /dev/null +++ b/patches/0049-ASoC-Intel-Skylake-Account-for-compress-streams-when.audio @@ -0,0 +1,70 @@ +From ae7eacfb234beb09da3d699a63c8631ef7fee0b0 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 19:39:45 +0100 +Subject: [PATCH 049/193] ASoC: Intel: Skylake: Account for compress streams + when servicing IRQs + +Update skl_stream_update definition to correctly set hdac_stream current +position when servicing stream interrupts for compress streams. 
+ +Change-Id: I1d3773ccebbe698721fc621e45d9fe5b7776c5e6 +Signed-off-by: Cezary Rojewski +Signed-off-by: Divya Prakash +--- + include/sound/hdaudio.h | 1 + + sound/soc/intel/skylake/skl.c | 24 +++++++++++++++++++++++- + 2 files changed, 24 insertions(+), 1 deletion(-) + +diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h +index afb3d04f699d..f420be11d618 100644 +--- a/include/sound/hdaudio.h ++++ b/include/sound/hdaudio.h +@@ -495,6 +495,7 @@ struct hdac_stream { + bool no_period_wakeup:1; + bool locked:1; + ++ unsigned long curr_pos; + /* timestamp */ + unsigned long start_wallclk; /* start + minimum wallclk */ + unsigned long period_wallclk; /* wallclk for period */ +diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c +index 3225f4f8793e..1ad69cce4bf0 100644 +--- a/sound/soc/intel/skylake/skl.c ++++ b/sound/soc/intel/skylake/skl.c +@@ -206,10 +206,32 @@ static void skl_dum_set(struct hdac_bus *bus) + snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM); + } + ++static void skl_set_total_bytes_transferred(struct hdac_stream *hstream) ++{ ++ int pos, no_of_bytes; ++ unsigned int prev_pos; ++ u64 buffer_size = hstream->cstream->runtime->buffer_size; ++ ++ div_u64_rem(hstream->curr_pos, buffer_size, &prev_pos); ++ pos = snd_hdac_stream_get_pos_posbuf(hstream); ++ ++ if (pos < prev_pos) ++ no_of_bytes = (buffer_size - prev_pos) + pos; ++ else ++ no_of_bytes = pos - prev_pos; ++ ++ hstream->curr_pos += no_of_bytes; ++} ++ + /* called from IRQ */ + static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr) + { +- snd_pcm_period_elapsed(hstr->substream); ++ if (hstr->substream) { ++ snd_pcm_period_elapsed(hstr->substream); ++ } else if (hstr->cstream) { ++ skl_set_total_bytes_transferred(hstr); ++ snd_compr_fragment_elapsed(hstr->cstream); ++ } + } + + static irqreturn_t skl_interrupt(int irq, void *dev_id) +-- +2.17.1 + diff --git a/patches/0049-Remove-unused-label-to-depress-compile-warning.trusty 
b/patches/0049-Remove-unused-label-to-depress-compile-warning.trusty new file mode 100644 index 0000000000..7d4febbcd2 --- /dev/null +++ b/patches/0049-Remove-unused-label-to-depress-compile-warning.trusty @@ -0,0 +1,27 @@ +From 9bd1904f5908b614fc753438bcf097aa6c47d4c4 Mon Sep 17 00:00:00 2001 +From: Zhou Furong +Date: Wed, 14 Feb 2018 15:14:20 +0800 +Subject: [PATCH 49/63] Remove unused label to depress compile warning + +As title, remove a unused label to depress compile wrning + +Change-Id: I8a6daa1d85b9a95ec9a475ef39990e74c84e89e9 +--- + drivers/trusty/trusty-timer.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c +index 5d4466d4e157..18e315c25067 100644 +--- a/drivers/trusty/trusty-timer.c ++++ b/drivers/trusty/trusty-timer.c +@@ -154,7 +154,6 @@ static int trusty_timer_probe(struct platform_device *pdev) + + return 0; + +-err_register_call_notifier: + destroy_workqueue(s->workqueue); + err_allocate_work_queue: + kfree(s); +-- +2.17.1 + diff --git a/patches/0049-VHM-add-hash-table-support-for-huge-pages.acrn b/patches/0049-VHM-add-hash-table-support-for-huge-pages.acrn new file mode 100644 index 0000000000..4149ec0469 --- /dev/null +++ b/patches/0049-VHM-add-hash-table-support-for-huge-pages.acrn @@ -0,0 +1,524 @@ +From 74c878f8813bac5b31e6d73d1e2632b930800392 Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:59:00 +0800 +Subject: [PATCH 049/150] VHM: add hash table support for huge pages + +use HUGEPAGE_2M_HLIST_ARRAY_SIZE(16) for 2M hash table size, +HUGEPAGE_1G_HLIST_ARRAY_SIZE(1) for 1G hash table size. + +The assumption is that we only support 2M & 1G huge pages. 
+ +Change-Id: I08d331d7b7ff7e6a96f36e8c496db3644628aa9e +Signed-off-by: Jason Chen CJ +Reviewed-on: +--- + drivers/char/vhm/vhm_dev.c | 5 + + drivers/vhm/Makefile | 2 +- + drivers/vhm/vhm_hugetlb.c | 273 ++++++++++++++++++++++++++++++++ + drivers/vhm/vhm_mm.c | 83 +++------- + include/linux/vhm/acrn_vhm_mm.h | 8 + + include/linux/vhm/vhm_vm_mngt.h | 10 ++ + 6 files changed, 315 insertions(+), 66 deletions(-) + create mode 100644 drivers/vhm/vhm_hugetlb.c + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index fd2d43b176ea..6cd1859beb24 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -100,6 +100,7 @@ static atomic_t ioreq_retry = ATOMIC_INIT(0); + static int vhm_dev_open(struct inode *inodep, struct file *filep) + { + struct vhm_vm *vm; ++ int i; + + vm = kzalloc(sizeof(struct vhm_vm), GFP_KERNEL); + pr_info("vhm_dev_open: opening device node\n"); +@@ -112,6 +113,10 @@ static int vhm_dev_open(struct inode *inodep, struct file *filep) + INIT_LIST_HEAD(&vm->memseg_list); + mutex_init(&vm->seg_lock); + ++ for (i = 0; i < HUGEPAGE_HLIST_ARRAY_SIZE; i++) ++ INIT_HLIST_HEAD(&vm->hugepage_hlist[i]); ++ mutex_init(&vm->hugepage_lock); ++ + INIT_LIST_HEAD(&vm->ioreq_client_list); + spin_lock_init(&vm->ioreq_client_lock); + +diff --git a/drivers/vhm/Makefile b/drivers/vhm/Makefile +index 4bd960d564b3..a3dd5c92f12c 100644 +--- a/drivers/vhm/Makefile ++++ b/drivers/vhm/Makefile +@@ -1 +1 @@ +-obj-y += vhm_mm.o vhm_ioreq.o vhm_vm_mngt.o vhm_hypercall.o ++obj-y += vhm_mm.o vhm_ioreq.o vhm_vm_mngt.o vhm_hypercall.o vhm_hugetlb.o +diff --git a/drivers/vhm/vhm_hugetlb.c b/drivers/vhm/vhm_hugetlb.c +new file mode 100644 +index 000000000000..afab8ab52567 +--- /dev/null ++++ b/drivers/vhm/vhm_hugetlb.c +@@ -0,0 +1,273 @@ ++/* ++ * virtio and hyperviosr service module (VHM): hugetlb ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. 
++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright (c) 2018 Intel Corporation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * BSD LICENSE ++ * ++ * Copyright (C) 2018 Intel Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * ++ * Jason Chen CJ ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#define HUGEPAGE_2M_SHIFT 21 ++#define HUGEPAGE_1G_SHIFT 30 ++ ++#define HUGEPAGE_1G_HLIST_IDX (HUGEPAGE_HLIST_ARRAY_SIZE - 1) ++ ++struct hugepage_map { ++ struct hlist_node hlist; ++ u64 vm0_gpa; ++ size_t size; ++ u64 guest_gpa; ++}; ++ ++static inline struct hlist_head *hlist_2m_hash(struct vhm_vm *vm, ++ unsigned long guest_gpa) ++{ ++ return &vm->hugepage_hlist[guest_gpa >> HUGEPAGE_2M_SHIFT & ++ (HUGEPAGE_2M_HLIST_ARRAY_SIZE - 1)]; ++} ++ ++static int add_guest_map(struct vhm_vm *vm, unsigned long vm0_gpa, ++ unsigned long guest_gpa, unsigned long size) ++{ ++ struct hugepage_map *map; ++ int max_gfn; ++ ++ map = kzalloc(sizeof(struct hugepage_map), GFP_KERNEL); ++ if (map == NULL) ++ return -ENOMEM; ++ ++ map->vm0_gpa = vm0_gpa; ++ map->guest_gpa = guest_gpa; ++ map->size = size; ++ ++ INIT_HLIST_NODE(&map->hlist); ++ ++ max_gfn = (map->guest_gpa + map->size) >> PAGE_SHIFT; ++ if (vm->max_gfn < max_gfn) ++ vm->max_gfn = max_gfn; ++ ++ pr_info("VHM: add hugepage with size=0x%lx, vm0_gpa=0x%llx," ++ " and its guest gpa = 0x%llx, vm max_gfn 0x%x\n", ++ map->size, map->vm0_gpa, map->guest_gpa, vm->max_gfn); ++ ++ mutex_lock(&vm->hugepage_lock); ++ /* 1G hugepage? 
*/ ++ if (map->size == (1UL << HUGEPAGE_1G_SHIFT)) ++ hlist_add_head(&map->hlist, ++ &vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX]); ++ else ++ hlist_add_head(&map->hlist, ++ hlist_2m_hash(vm, map->guest_gpa)); ++ mutex_unlock(&vm->hugepage_lock); ++ ++ return 0; ++} ++ ++int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap) ++{ ++ struct page *page; ++ unsigned long len, guest_gpa, vma; ++ unsigned int type; ++ unsigned int mem_type, mem_access_right; ++ int ret; ++ ++ if (vm == NULL || memmap == NULL) ++ return -EINVAL; ++ ++ len = memmap->len; ++ vma = memmap->vma_base; ++ guest_gpa = memmap->gpa; ++ ++ while (len > 0) { ++ unsigned long vm0_gpa, pagesize; ++ ++ ret = get_user_pages_fast(vma, 1, 1, &page); ++ if (unlikely(ret != 1) || (page == NULL)) { ++ pr_err("failed to pin huge page!\n"); ++ return -ENOMEM; ++ } ++ ++ vm0_gpa = page_to_phys(page); ++ pagesize = PAGE_SIZE << compound_order(page); ++ ++ ret = add_guest_map(vm, vm0_gpa, guest_gpa, pagesize); ++ if (ret < 0) { ++ pr_err("failed to add memseg for huge page!\n"); ++ put_page(page); ++ return ret; ++ } ++ ++ /* TODO: do batch hypercall for multi ept mapping */ ++ mem_type = MEM_TYPE_WB; ++ mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); ++ type = MAP_MEM; ++ if (_mem_set_memmap(vm->vmid, guest_gpa, vm0_gpa, pagesize, ++ mem_type, mem_access_right, type) < 0) { ++ pr_err("vhm: failed to set memmap %ld!\n", vm->vmid); ++ put_page(page); ++ return -EFAULT; ++ } ++ ++ len -= pagesize; ++ vma += pagesize; ++ guest_gpa += pagesize; ++ } ++ ++ vm->hugetlb_enabled = 1; ++ ++ return 0; ++} ++ ++void hugepage_free_guest(struct vhm_vm *vm) ++{ ++ struct hlist_node *htmp; ++ struct hugepage_map *map; ++ int i; ++ ++ mutex_lock(&vm->hugepage_lock); ++ for (i = 0; i < HUGEPAGE_HLIST_ARRAY_SIZE; i++) { ++ if (!hlist_empty(&vm->hugepage_hlist[i])) { ++ hlist_for_each_entry_safe(map, htmp, ++ &vm->hugepage_hlist[i], hlist) { ++ hlist_del(&map->hlist); ++ /* put_page to unpin huge page */ ++ 
put_page(pfn_to_page( ++ map->vm0_gpa >> PAGE_SHIFT)); ++ kfree(map); ++ } ++ } ++ } ++ mutex_unlock(&vm->hugepage_lock); ++} ++ ++void *hugepage_map_guest_phys(struct vhm_vm *vm, u64 guest_phys, size_t size) ++{ ++ struct hlist_node *htmp; ++ struct hugepage_map *map; ++ ++ mutex_lock(&vm->hugepage_lock); ++ /* check 1G hlist first */ ++ if (!hlist_empty(&vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX])) { ++ hlist_for_each_entry_safe(map, htmp, ++ &vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX], hlist) { ++ if (map->guest_gpa >= guest_phys + size || ++ guest_phys >= map->guest_gpa + map->size) ++ continue; ++ ++ if (guest_phys + size > map->guest_gpa + map->size || ++ guest_phys < map->guest_gpa) ++ goto err; ++ ++ mutex_unlock(&vm->hugepage_lock); ++ return phys_to_virt(map->vm0_gpa + ++ guest_phys - map->guest_gpa); ++ } ++ } ++ ++ /* check 2m hlist */ ++ hlist_for_each_entry_safe(map, htmp, ++ hlist_2m_hash(vm, guest_phys), hlist) { ++ if (map->guest_gpa >= guest_phys + size || ++ guest_phys >= map->guest_gpa + map->size) ++ continue; ++ ++ if (guest_phys + size > map->guest_gpa + map->size || ++ guest_phys < map->guest_gpa) ++ goto err; ++ ++ mutex_unlock(&vm->hugepage_lock); ++ return phys_to_virt(map->vm0_gpa + ++ guest_phys - map->guest_gpa); ++ } ++ ++err: ++ mutex_unlock(&vm->hugepage_lock); ++ printk(KERN_WARNING "cannot find correct mem map, please check the " ++ "input's range or alignment"); ++ return NULL; ++} ++ ++int hugepage_unmap_guest_phys(struct vhm_vm *vm, u64 guest_phys) ++{ ++ struct hlist_node *htmp; ++ struct hugepage_map *map; ++ ++ mutex_lock(&vm->hugepage_lock); ++ /* check 1G hlist first */ ++ if (!hlist_empty(&vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX])) { ++ hlist_for_each_entry_safe(map, htmp, ++ &vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX], hlist) { ++ if (map->guest_gpa <= guest_phys && ++ guest_phys < map->guest_gpa + map->size) { ++ mutex_unlock(&vm->hugepage_lock); ++ return 0; ++ } ++ } ++ } ++ /* check 2m hlist */ ++ 
hlist_for_each_entry_safe(map, htmp, ++ hlist_2m_hash(vm, guest_phys), hlist) { ++ if (map->guest_gpa <= guest_phys && ++ guest_phys < map->guest_gpa + map->size) { ++ mutex_unlock(&vm->hugepage_lock); ++ return 0; ++ } ++ } ++ mutex_unlock(&vm->hugepage_lock); ++ return -ESRCH; ++} +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index 728998d0341d..070327e616d6 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -156,7 +156,7 @@ int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) + return ret; + } + +-static int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, ++int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned int mem_access_right, + unsigned int type) +@@ -207,61 +207,6 @@ int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + mem_type, mem_access_right, MAP_MEM); + } + +-static int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap) +-{ +- struct page *page; +- unsigned long len, guest_gpa, vma; +- unsigned int type; +- unsigned int mem_type, mem_access_right; +- int ret; +- +- if (vm == NULL || memmap == NULL) +- return -EINVAL; +- +- len = memmap->len; +- vma = memmap->vma_base; +- guest_gpa = memmap->gpa; +- +- while (len > 0) { +- unsigned long vm0_gpa, pagesize; +- +- ret = get_user_pages_fast(vma, 1, 1, &page); +- if (unlikely(ret != 1) || (page == NULL)) { +- pr_err("failed to pin huge page!\n"); +- return -ENOMEM; +- } +- +- vm0_gpa = page_to_phys(page); +- pagesize = PAGE_SIZE << compound_order(page); +- +- ret = add_guest_memseg(vm, vm0_gpa, guest_gpa, pagesize); +- if (ret < 0) { +- pr_err("failed to add memseg for huge page!\n"); +- put_page(page); +- return ret; +- } +- +- /* TODO: do batch hypercall for multi ept mapping */ +- mem_type = MEM_TYPE_WB; +- mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); +- type = MAP_MEM; +- if (_mem_set_memmap(vm->vmid, guest_gpa, 
vm0_gpa, pagesize, +- mem_type, mem_access_right, type) < 0) { +- pr_err("vhm: failed to set memmap %ld!\n", vm->vmid); +- put_page(page); +- return -EFAULT; +- } +- +- len -= pagesize; +- vma += pagesize; +- guest_gpa += pagesize; +- } +- +- vm->hugetlb_enabled = 1; +- +- return 0; +-} +- + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + { + struct guest_memseg *seg = NULL; +@@ -315,17 +260,15 @@ void free_guest_mem(struct vhm_vm *vm) + { + struct guest_memseg *seg; + ++ if (vm->hugetlb_enabled) ++ return hugepage_free_guest(vm); ++ + mutex_lock(&vm->seg_lock); + while (!list_empty(&vm->memseg_list)) { + seg = list_first_entry(&vm->memseg_list, + struct guest_memseg, list); +- if (vm->hugetlb_enabled) { +- /* just put_page to unpin huge page */ +- put_page(pfn_to_page(seg->vm0_gpa >> PAGE_SHIFT)); +- } else { +- if (!_free_memblk(vm->dev, seg->vm0_gpa, seg->len)) +- pr_warn("failed to free memblk\n"); +- } ++ if (!_free_memblk(vm->dev, seg->vm0_gpa, seg->len)) ++ pr_warn("failed to free memblk\n"); + list_del(&seg->list); + kfree(seg); + } +@@ -412,6 +355,9 @@ int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma) + size_t len = vma->vm_end - vma->vm_start; + int ret; + ++ if (vm->hugetlb_enabled) ++ return -EINVAL; ++ + mutex_lock(&vm->seg_lock); + list_for_each_entry(seg, &vm->memseg_list, list) { + if (seg->gpa != offset || seg->len != len) +@@ -456,7 +402,10 @@ void *map_guest_phys(unsigned long vmid, u64 guest_phys, size_t size) + if (vm == NULL) + return NULL; + +- ret = do_map_guest_phys(vm, guest_phys, size); ++ if (vm->hugetlb_enabled) ++ ret = hugepage_map_guest_phys(vm, guest_phys, size); ++ else ++ ret = do_map_guest_phys(vm, guest_phys, size); + + put_vm(vm); + +@@ -492,7 +441,11 @@ int unmap_guest_phys(unsigned long vmid, u64 guest_phys) + return -ESRCH; + } + +- ret = do_unmap_guest_phys(vm, guest_phys); ++ if (vm->hugetlb_enabled) ++ ret = hugepage_unmap_guest_phys(vm, guest_phys); ++ else ++ ret = 
do_unmap_guest_phys(vm, guest_phys); ++ + put_vm(vm); + return ret; + } +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index ba383b354986..9be6749d12e2 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -199,4 +199,12 @@ int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg); + */ + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap); + ++int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, ++ unsigned long host_gpa, unsigned long len, ++ unsigned int mem_type, unsigned int mem_access_right, ++ unsigned int type); ++int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap); ++void hugepage_free_guest(struct vhm_vm *vm); ++void *hugepage_map_guest_phys(struct vhm_vm *vm, u64 guest_phys, size_t size); ++int hugepage_unmap_guest_phys(struct vhm_vm *vm, u64 guest_phys); + #endif +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index 306bd54c4103..00ee5c9ec300 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -63,10 +63,16 @@ + #define VHM_VM_MNGT_H + + #include ++#include ++#include + + extern struct list_head vhm_vm_list; + extern struct mutex vhm_vm_list_lock; + ++#define HUGEPAGE_2M_HLIST_ARRAY_SIZE 16 ++#define HUGEPAGE_1G_HLIST_ARRAY_SIZE 1 ++#define HUGEPAGE_HLIST_ARRAY_SIZE (HUGEPAGE_2M_HLIST_ARRAY_SIZE + \ ++ HUGEPAGE_1G_HLIST_ARRAY_SIZE) + /** + * struct vhm_vm - data structure to track guest + * +@@ -77,6 +83,8 @@ extern struct mutex vhm_vm_list_lock; + * @refcnt: reference count of guest + * @seg_lock: mutex to protect memseg_list + * @memseg_list: list of memseg ++ * @hugepage_lock: mutex to protect hugepage_hlist ++ * @hugepage_hlist: hash list of hugepage + * @max_gfn: maximum guest page frame number + * @ioreq_client_lock: spinlock to protect ioreq_client_list + * @ioreq_client_list: list of ioreq clients +@@ -91,6 +99,8 @@ struct vhm_vm { + long refcnt; + struct 
mutex seg_lock; + struct list_head memseg_list; ++ struct mutex hugepage_lock; ++ struct hlist_head hugepage_hlist[HUGEPAGE_HLIST_ARRAY_SIZE]; + int max_gfn; + spinlock_t ioreq_client_lock; + struct list_head ioreq_client_list; +-- +2.17.1 + diff --git a/patches/0049-drm-i915-Disable-pipes-in-reverse-order.drm b/patches/0049-drm-i915-Disable-pipes-in-reverse-order.drm new file mode 100644 index 0000000000..be199b6c40 --- /dev/null +++ b/patches/0049-drm-i915-Disable-pipes-in-reverse-order.drm @@ -0,0 +1,48 @@ +From e8a12f6de2b937cc6d3b38f16c9d557617719738 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= +Date: Fri, 23 Aug 2019 01:20:44 -0700 +Subject: [PATCH 049/690] drm/i915: Disable pipes in reverse order +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Disable CRTC/pipes in reverse order because some features (MST in +TGL+) requires master and slave relationship between pipes, so it +should always pick the lowest pipe as master as it will be enabled +first and disable in the reverse order so the master will be the last +one to be disabled. 
+ +Cc: Rodrigo Vivi +Cc: Ville Syrjälä +Signed-off-by: José Roberto de Souza +Signed-off-by: Lucas De Marchi +Reviewed-by: Mika Kahola +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-13-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/display/intel_display.c | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c +index 231741fe7098..822581deaaac 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.c ++++ b/drivers/gpu/drm/i915/display/intel_display.c +@@ -13925,7 +13925,15 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) + if (state->modeset) + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); + +- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { ++ /* ++ * Disable CRTC/pipes in reverse order because some features(MST in ++ * TGL+) requires master and slave relationship between pipes, so it ++ * should always pick the lowest pipe as master as it will be enabled ++ * first and disable in the reverse order so the master will be the ++ * last one to be disabled. 
++ */ ++ for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, ++ new_crtc_state, i) { + if (needs_modeset(new_crtc_state) || + new_crtc_state->update_pipe) { + +-- +2.17.1 + diff --git a/patches/0049-mei-bus-unconditionally-enable-clients-with-vt-su.security b/patches/0049-mei-bus-unconditionally-enable-clients-with-vt-su.security new file mode 100644 index 0000000000..e4ba4e0294 --- /dev/null +++ b/patches/0049-mei-bus-unconditionally-enable-clients-with-vt-su.security @@ -0,0 +1,47 @@ +From 5771aa24b7e4699834c096e1bdd7f41eea016a6c Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Sun, 16 Sep 2018 10:22:51 +0300 +Subject: [PATCH 49/65] mei: bus: unconditionally enable clients with vt + support + +Change-Id: I9d3677d7cf1486ce4d15560a08fed3c6265b603a +Signed-off-by: Alexander Usyskin +--- + drivers/misc/mei/bus-fixup.c | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c +index 9ad9c01ddf41..b46967889ec5 100644 +--- a/drivers/misc/mei/bus-fixup.c ++++ b/drivers/misc/mei/bus-fixup.c +@@ -464,6 +464,19 @@ static void mei_nfc(struct mei_cl_device *cldev) + dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match); + } + ++/** ++ * vt_support - enable on bus clients with vtag support ++ * ++ * @cldev: me clients device ++ */ ++static void vt_support(struct mei_cl_device *cldev) ++{ ++ dev_dbg(&cldev->dev, "running hook %s\n", __func__); ++ ++ if (cldev->me_cl->props.vt_supported == 1) ++ cldev->do_match = 1; ++} ++ + #define MEI_FIXUP(_uuid, _hook) { _uuid, _hook } + + static struct mei_fixup { +@@ -477,6 +490,7 @@ static struct mei_fixup { + MEI_FIXUP(MEI_UUID_WD, mei_wd), + MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), + MEI_FIXUP(MEI_UUID_HDCP, whitelist), ++ MEI_FIXUP(MEI_UUID_ANY, vt_support), + }; + + /** +-- +2.17.1 + diff --git a/patches/0049-net-stmmac-add-TBS-fetch-time-offset-setting-.connectivity 
b/patches/0049-net-stmmac-add-TBS-fetch-time-offset-setting-.connectivity new file mode 100644 index 0000000000..9c8c292f89 --- /dev/null +++ b/patches/0049-net-stmmac-add-TBS-fetch-time-offset-setting-.connectivity @@ -0,0 +1,301 @@ +From ddea49db4b4debb5817d4ff0a1d4ace0fe9580d7 Mon Sep 17 00:00:00 2001 +From: Kweh Hock Leong +Date: Fri, 9 Aug 2019 15:10:34 +0800 +Subject: [PATCH 049/108] net: stmmac: add TBS fetch time offset setting + functions + +Introduce tbs_set_ftos() & tbs_set_fgos() to configure the TBS fetch time +offset (FTOS) or TBS fetch GSN slot offset (FGOS) under TSN HW tunable +framework. + +Signed-off-by: Kweh Hock Leong +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 10 +++ + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 52 +++++++++++++- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 11 ++- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 72 ++++++++++++++++++- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 4 ++ + 5 files changed, 144 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index acb24eb68429..d222599b1896 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -177,9 +177,19 @@ + #define MTL_TBS_CTRL_LEOV BIT(1) + #define MTL_TBS_CTRL_ESTM BIT(0) + ++/* DMA TBS control register */ ++#define DMA_TBS_CTRL 0x00001050 ++#define DMA_TBS_CTRL_FTOS GENMASK(31, 8) /* Fetch time offset */ ++#define DMA_TBS_CTRL_FTOS_SHIFT 8 ++#define DMA_TBS_CTRL_FGOS GENMASK(6, 4) /* Fetch GSN offset */ ++#define DMA_TBS_CTRL_FGOS_SHIFT 4 ++#define DMA_TBS_CTRL_FTOV BIT(0) /* Valid bit */ ++ + /* TBS Global defines */ + #define TBS_LEOS_MAX 999999999 /* Max LEOS (ns) */ + #define TBS_LEGOS_MAX 7 /* Max LE GSN Slot */ ++#define TBS_FTOS_MAX 999999999 /* Max FTOS (ns) */ ++#define TBS_FGOS_MAX 7 /* Max FT GSN Slot */ + + /* MAC Core Version */ + #define TSN_VER_MASK 0xFF +diff 
--git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index d33a72225741..8bd5a3ee695f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -404,10 +404,14 @@ int dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + } + + static void dwmac5_tbs_get_max(u32 *leos_max, +- u32 *legos_max) ++ u32 *legos_max, ++ u32 *ftos_max, ++ u32 *fgos_max) + { + *leos_max = TBS_LEOS_MAX; + *legos_max = TBS_LEGOS_MAX; ++ *ftos_max = TBS_FTOS_MAX; ++ *fgos_max = TBS_FGOS_MAX; + } + + static void dwmac5_tbs_set_estm(void __iomem *ioaddr, const u32 estm) +@@ -466,6 +470,50 @@ static void dwmac5_tbs_set_legos(void __iomem *ioaddr, const u32 legos, + writel(value, ioaddr + MTL_TBS_CTRL); + } + ++static void dwmac5_tbs_set_ftos(void __iomem *ioaddr, const u32 ftos, ++ const u32 estm, const u32 fgos) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + DMA_TBS_CTRL); ++ ++ /* unset the valid bit for updating new fetch time ftos */ ++ value &= ~DMA_TBS_CTRL_FTOV; ++ writel(value, ioaddr + DMA_TBS_CTRL); ++ ++ value &= ~DMA_TBS_CTRL_FTOS; ++ value |= DMA_TBS_CTRL_FTOS & ++ (ftos << DMA_TBS_CTRL_FTOS_SHIFT); ++ ++ /* disable fetch time while it is zero */ ++ if (ftos || (estm && fgos)) ++ value |= DMA_TBS_CTRL_FTOV; ++ ++ writel(value, ioaddr + DMA_TBS_CTRL); ++} ++ ++static void dwmac5_tbs_set_fgos(void __iomem *ioaddr, const u32 fgos, ++ const u32 ftos) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + DMA_TBS_CTRL); ++ ++ /* Unset the valid bit for updating new fetch GSN slot */ ++ value &= ~DMA_TBS_CTRL_FTOV; ++ writel(value, ioaddr + DMA_TBS_CTRL); ++ ++ value &= ~DMA_TBS_CTRL_FGOS; ++ value |= DMA_TBS_CTRL_FGOS & ++ (fgos << DMA_TBS_CTRL_FGOS_SHIFT); ++ ++ /* Disable fetch time while it is zero */ ++ if (ftos || fgos) ++ value |= DMA_TBS_CTRL_FTOV; ++ ++ writel(value, ioaddr + DMA_TBS_CTRL); ++} ++ + const struct tsnif_ops dwmac510_tsnif_ops = { + 
.read_hwid = dwmac5_read_hwid, + .has_tsn_cap = dwmac5_has_tsn_cap, +@@ -489,6 +537,8 @@ const struct tsnif_ops dwmac510_tsnif_ops = { + .tbs_set_estm = dwmac5_tbs_set_estm, + .tbs_set_leos = dwmac5_tbs_set_leos, + .tbs_set_legos = dwmac5_tbs_set_legos, ++ .tbs_set_ftos = dwmac5_tbs_set_ftos, ++ .tbs_set_fgos = dwmac5_tbs_set_fgos, + }; + + void dwmac510_tsnif_setup(struct mac_device_info *mac) +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 6ab71a7ec873..a4714bcc2c50 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -770,12 +770,17 @@ struct tsnif_ops { + struct tsn_mmc_stat *mmc_stat, + unsigned int txqcnt); + /* Time-Based Scheduling (TBS) */ +- void (*tbs_get_max)(u32 *leos_max, u32 *legos_max); ++ void (*tbs_get_max)(u32 *leos_max, u32 *legos_max, ++ u32 *ftos_max, u32 *fgos_max); + void (*tbs_set_estm)(void __iomem *ioaddr, const u32 estm); + void (*tbs_set_leos)(void __iomem *ioaddr, const u32 leos, + const u32 estm); + void (*tbs_set_legos)(void __iomem *ioaddr, const u32 legos, + const u32 leos); ++ void (*tbs_set_ftos)(void __iomem *ioaddr, const u32 ftos, ++ const u32 estm, const u32 fgos); ++ void (*tbs_set_fgos)(void __iomem *ioaddr, const u32 fgos, ++ const u32 ftos); + }; + + #define tsnif_read_hwid(__hw, __args...) \ +@@ -822,5 +827,9 @@ struct tsnif_ops { + tsnif_do_void_callback(__hw, tbs_set_leos, __args) + #define tsnif_tbs_set_legos(__hw, __args...) \ + tsnif_do_void_callback(__hw, tbs_set_legos, __args) ++#define tsnif_tbs_set_ftos(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, tbs_set_ftos, __args) ++#define tsnif_tbs_set_fgos(__hw, __args...) 
\ ++ tsnif_do_void_callback(__hw, tbs_set_fgos, __args) + + #endif /* __STMMAC_HWIF_H__ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index 553e511ad47d..a8f4aa430636 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -147,16 +147,21 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + cap->tils_max = tils_max; + tsnif_est_get_max(hw, &cap->ptov_max, &cap->ctov_max, + &cap->cycle_max, &cap->idleslope_max); ++ + cap->est_support = 1; + +- tsnif_tbs_get_max(hw, &cap->leos_max, &cap->legos_max); ++ tsnif_tbs_get_max(hw, &cap->leos_max, &cap->legos_max, ++ &cap->ftos_max, &cap->fgos_max); + + dev_info(pdev, "EST: depth=%u, ti_wid=%u, ter_max=%uns, tils_max=%u, tqcnt=%u\n", + gcl_depth, ti_wid, cap->ext_max, tils_max, cap->txqcnt); + +- if (cap->tbs_support) ++ if (cap->tbs_support) { + dev_info(pdev, "TBS: leos_max=%u, legos_max=%u\n", + cap->leos_max, cap->legos_max); ++ dev_info(pdev, "TBS: ftos_max=%u, fgos_max=%u\n", ++ cap->ftos_max, cap->fgos_max); ++ } + + return 0; + } +@@ -209,7 +214,8 @@ int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + u32 hw_bank; + u32 estm; + u32 leos; +- ++ u32 ftos; ++ u32 fgos; + + switch (id) { + case TSN_HWTUNA_TX_EST_TILS: +@@ -223,6 +229,8 @@ int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + case TSN_HWTUNA_TX_TBS_ESTM: + case TSN_HWTUNA_TX_TBS_LEOS: + case TSN_HWTUNA_TX_TBS_LEGOS: ++ case TSN_HWTUNA_TX_TBS_FTOS: ++ case TSN_HWTUNA_TX_TBS_FGOS: + if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_TBS)) { + netdev_info(dev, "TBS: feature unsupported\n"); + return -ENOTSUPP; +@@ -353,6 +361,62 @@ int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + netdev_info(dev, "TBS: Set LEGOS = %u\n", data); + } + break; ++ case TSN_HWTUNA_TX_TBS_FTOS: ++ estm = info->hwtunable[TSN_HWTUNA_TX_TBS_ESTM]; ++ fgos = 
info->hwtunable[TSN_HWTUNA_TX_TBS_FGOS]; ++ ++ if (data > cap->ftos_max) { ++ netdev_warn(dev, ++ "TBS: invalid FTOS(%u), max=%u\n", ++ data, cap->ftos_max); ++ ++ return -EINVAL; ++ } ++ ++ /* For EST mode, make sure leos does not exceed cycle time */ ++ if (estm) { ++ hw_bank = tsnif_est_get_bank(hw, ioaddr, 0); ++ gcbc = &info->est_gcc.gcb[hw_bank]; ++ ++ if (data > (gcbc->gcrr.cycle_nsec - 1)) { ++ netdev_warn(dev, ++ "TBS: FTOS > (cycle time - 1ns)\n"); ++ ++ return -EINVAL; ++ } ++ } ++ ++ if (data != info->hwtunable[TSN_HWTUNA_TX_TBS_FTOS]) { ++ tsnif_tbs_set_ftos(hw, ioaddr, data, estm, fgos); ++ info->hwtunable[TSN_HWTUNA_TX_TBS_FTOS] = data; ++ netdev_info(dev, "TBS: Set FTOS = %u\n", data); ++ } ++ break; ++ case TSN_HWTUNA_TX_TBS_FGOS: ++ estm = info->hwtunable[TSN_HWTUNA_TX_TBS_ESTM]; ++ ftos = info->hwtunable[TSN_HWTUNA_TX_TBS_FTOS]; ++ ++ /* if EST not turn on, ret fail */ ++ if (!(tsn_has_feat(hw, dev, TSN_FEAT_ID_EST) && estm)) { ++ netdev_warn(dev, "TBS EST mode is not enabled\n"); ++ ++ return -EINVAL; ++ } ++ ++ if (data > cap->fgos_max) { ++ netdev_warn(dev, ++ "TBS: invalid FGOS(%u), max=%u\n", ++ data, cap->fgos_max); ++ ++ return -EINVAL; ++ } ++ ++ if (data != info->hwtunable[TSN_HWTUNA_TX_TBS_FGOS]) { ++ tsnif_tbs_set_fgos(hw, ioaddr, data, ftos); ++ info->hwtunable[TSN_HWTUNA_TX_TBS_FGOS] = data; ++ netdev_info(dev, "TBS: Set FGOS = %u\n", data); ++ } ++ break; + default: + netdev_warn(dev, "TSN: invalid tunable id(%u)\n", id); + ret = -EINVAL; +@@ -378,6 +442,8 @@ int tsn_hwtunable_get(struct mac_device_info *hw, struct net_device *dev, + case TSN_HWTUNA_TX_TBS_ESTM: + case TSN_HWTUNA_TX_TBS_LEOS: + case TSN_HWTUNA_TX_TBS_LEGOS: ++ case TSN_HWTUNA_TX_TBS_FTOS: ++ case TSN_HWTUNA_TX_TBS_FGOS: + if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_TBS)) { + netdev_info(dev, "TBS: feature unsupported\n"); + return -ENOTSUPP; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index 
abdc4635175b..dd366db0c8c8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -19,6 +19,8 @@ enum tsn_hwtunable_id { + TSN_HWTUNA_TX_TBS_ESTM, /* TBS Absolute or EST mode */ + TSN_HWTUNA_TX_TBS_LEOS, + TSN_HWTUNA_TX_TBS_LEGOS, ++ TSN_HWTUNA_TX_TBS_FTOS, /* Fetch time offset (nsec) */ ++ TSN_HWTUNA_TX_TBS_FGOS, /* Fetch GSN offset (slot number) */ + TSN_HWTUNA_MAX, + }; + +@@ -59,6 +61,8 @@ struct tsn_hw_cap { + u32 idleslope_max; /* Max idle slope */ + u32 leos_max; /* Launch Expiry Offset */ + u32 legos_max; /* Launch Expiry GSN Offset */ ++ u32 ftos_max; /* Max Fetch Time Offset */ ++ u32 fgos_max; /* Max Fetch GSN Offset */ + }; + + /* EST Gate Control Entry */ +-- +2.17.1 + diff --git a/patches/0050-ASoC-Intel-Skylake-Implement-Probe-IPC-API.audio b/patches/0050-ASoC-Intel-Skylake-Implement-Probe-IPC-API.audio new file mode 100644 index 0000000000..77c4fc11d2 --- /dev/null +++ b/patches/0050-ASoC-Intel-Skylake-Implement-Probe-IPC-API.audio @@ -0,0 +1,203 @@ +From b7312239275e08e7dc48cd45593194954bec3a5c Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 14:21:46 +0100 +Subject: [PATCH 050/193] ASoC: Intel: Skylake: Implement Probe IPC API + +Probe is one of cAVS firmware features which allows for data extraction +and injection directly from or to module. +Exposes six IPCs: set & get of injection DMAs, injection DMA detach, +set & get of connected probe points and probe points disconnection. +This change adds all required types and methods to support each and +every request that driver could sent to firmware. 
+ +Change-Id: Ie48c6a53f42e9569057f60e9ebab7ac7e8d74d01 +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-messages.c | 100 +++++++++++++++++++++++++ + sound/soc/intel/skylake/skl-sst-ipc.h | 57 ++++++++++++++ + 2 files changed, 157 insertions(+) + +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index d4127fed12ad..d18d95eee130 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -1232,3 +1232,103 @@ int skl_get_module_params(struct skl_dev *skl, u32 *params, int size, + + return skl_ipc_get_large_config(&skl->ipc, &msg, ¶ms, &bytes); + } ++ ++int skl_probe_get_dma(struct skl_dev *skl, ++ struct skl_probe_dma **dma, size_t *num_dma) ++{ ++ struct skl_ipc_large_config_msg msg = {0}; ++ u32 *payload = NULL; ++ size_t bytes = 0; ++ int ret; ++ ++ *dma = NULL; ++ *num_dma = 0; ++ msg.module_id = skl_get_module_id(skl, &skl_probe_mod_uuid); ++ msg.instance_id = 0; ++ msg.large_param_id = SKL_PROBE_INJECTION_DMA; ++ ++ ret = skl_ipc_get_large_config(&skl->ipc, &msg, &payload, &bytes); ++ if (ret < 0 || !bytes) ++ return ret; ++ ++ *dma = (struct skl_probe_dma *)payload; ++ *num_dma = bytes / sizeof(**dma); ++ ++ return 0; ++} ++ ++int skl_probe_dma_attach(struct skl_dev *skl, ++ struct skl_probe_dma *dma, size_t num_dma) ++{ ++ struct skl_ipc_large_config_msg msg = {0}; ++ ++ msg.module_id = skl_get_module_id(skl, &skl_probe_mod_uuid); ++ msg.instance_id = 0; ++ msg.param_data_size = sizeof(*dma) * num_dma; ++ msg.large_param_id = SKL_PROBE_INJECTION_DMA; ++ ++ return skl_ipc_set_large_config(&skl->ipc, &msg, (u32 *)dma); ++} ++ ++int skl_probe_dma_detach(struct skl_dev *skl, ++ union skl_connector_node_id *node_id, size_t num_node_id) ++{ ++ struct skl_ipc_large_config_msg msg = {0}; ++ ++ msg.module_id = skl_get_module_id(skl, &skl_probe_mod_uuid); ++ msg.instance_id = 0; ++ msg.param_data_size = sizeof(*node_id) * num_node_id; ++ msg.large_param_id = 
SKL_PROBE_INJECTION_DMA_DETACH; ++ ++ return skl_ipc_set_large_config(&skl->ipc, &msg, (u32 *)node_id); ++} ++ ++int skl_probe_get_points(struct skl_dev *skl, ++ struct skl_probe_point_desc **desc, size_t *num_desc) ++{ ++ struct skl_ipc_large_config_msg msg = {0}; ++ u32 *payload = NULL; ++ size_t bytes = 0; ++ int ret; ++ ++ *desc = NULL; ++ *num_desc = 0; ++ msg.module_id = skl_get_module_id(skl, &skl_probe_mod_uuid); ++ msg.instance_id = 0; ++ msg.large_param_id = SKL_PROBE_POINTS; ++ ++ ret = skl_ipc_get_large_config(&skl->ipc, &msg, &payload, &bytes); ++ if (ret < 0 || !bytes) ++ return ret; ++ ++ *desc = (struct skl_probe_point_desc *)payload; ++ *num_desc = bytes / sizeof(**desc); ++ ++ return 0; ++} ++ ++int skl_probe_points_connect(struct skl_dev *skl, ++ struct skl_probe_point_desc *desc, size_t num_desc) ++{ ++ struct skl_ipc_large_config_msg msg = {0}; ++ ++ msg.module_id = skl_get_module_id(skl, &skl_probe_mod_uuid); ++ msg.instance_id = 0; ++ msg.param_data_size = sizeof(*desc) * num_desc; ++ msg.large_param_id = SKL_PROBE_POINTS; ++ ++ return skl_ipc_set_large_config(&skl->ipc, &msg, (u32 *)desc); ++} ++ ++int skl_probe_points_disconnect(struct skl_dev *skl, ++ union skl_probe_point_id *id, size_t num_id) ++{ ++ struct skl_ipc_large_config_msg msg = {0}; ++ ++ msg.module_id = skl_get_module_id(skl, &skl_probe_mod_uuid); ++ msg.instance_id = 0; ++ msg.param_data_size = sizeof(*id) * num_id; ++ msg.large_param_id = SKL_PROBE_POINTS_DISCONNECT; ++ ++ return skl_ipc_set_large_config(&skl->ipc, &msg, (u32 *)id); ++} +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h +index 3ffaafb2e8a4..205f93e6f207 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.h ++++ b/sound/soc/intel/skylake/skl-sst-ipc.h +@@ -142,6 +142,50 @@ static const guid_t skl_copier_mod_uuid = + GUID_INIT(0x9BA00C83, 0xCA12, 0x4A83, 0x94, 0x3C, + 0x1F, 0xA2, 0xE8, 0x2F, 0x9D, 0xDA); + ++static const guid_t skl_probe_mod_uuid = ++ 
GUID_INIT(0x7CAD0808, 0xAB10, 0xCD23, 0xEF, 0x45, ++ 0x12, 0xAB, 0x34, 0xCD, 0x56, 0xEF); ++ ++enum skl_probe_runtime_param { ++ SKL_PROBE_INJECTION_DMA = 1, ++ SKL_PROBE_INJECTION_DMA_DETACH, ++ SKL_PROBE_POINTS, ++ SKL_PROBE_POINTS_DISCONNECT, ++}; ++ ++struct skl_probe_dma { ++ union skl_connector_node_id node_id; ++ unsigned int dma_buffer_size; ++} __packed; ++ ++enum skl_probe_type { ++ SKL_PROBE_TYPE_INPUT = 0, ++ SKL_PROBE_TYPE_OUTPUT, ++ SKL_PROBE_TYPE_INTERNAL ++}; ++ ++union skl_probe_point_id { ++ unsigned int value; ++ struct { ++ unsigned int module_id:16; ++ unsigned int instance_id:8; ++ enum skl_probe_type type:2; ++ unsigned int index:6; ++ } id; ++} __packed; ++ ++enum skl_connection_purpose { ++ SKL_CONNECTION_PURPOSE_EXTRACT = 0, ++ SKL_CONNECTION_PURPOSE_INJECT, ++ SKL_CONNECTION_PURPOSE_INJECT_REEXTRACT, ++}; ++ ++struct skl_probe_point_desc { ++ union skl_probe_point_id id; ++ enum skl_connection_purpose purpose __aligned(4); ++ union skl_connector_node_id node_id; ++} __packed; ++ + enum skl_ipc_pipeline_state { + PPL_INVALID_STATE = 0, + PPL_UNINITIALIZED = 1, +@@ -415,4 +459,17 @@ void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data, + int skl_ipc_fw_cfg_get(struct sst_generic_ipc *ipc, struct skl_fw_cfg *cfg); + int skl_ipc_hw_cfg_get(struct sst_generic_ipc *ipc, struct skl_hw_cfg *cfg); + ++int skl_probe_get_dma(struct skl_dev *skl, ++ struct skl_probe_dma **dma, size_t *num_dma); ++int skl_probe_dma_attach(struct skl_dev *skl, ++ struct skl_probe_dma *dma, size_t num_dma); ++int skl_probe_dma_detach(struct skl_dev *skl, ++ union skl_connector_node_id *node_id, size_t num_node_id); ++int skl_probe_get_points(struct skl_dev *skl, ++ struct skl_probe_point_desc **desc, size_t *num_desc); ++int skl_probe_points_connect(struct skl_dev *skl, ++ struct skl_probe_point_desc *desc, size_t num_desc); ++int skl_probe_points_disconnect(struct skl_dev *skl, ++ union skl_probe_point_id *id, size_t num_id); ++ + #endif /* __SKL_IPC_H */ 
+-- +2.17.1 + diff --git a/patches/0050-VHM-add-service-to-support-px-data-transition.acrn b/patches/0050-VHM-add-service-to-support-px-data-transition.acrn new file mode 100644 index 0000000000..3ecc4cc27c --- /dev/null +++ b/patches/0050-VHM-add-service-to-support-px-data-transition.acrn @@ -0,0 +1,170 @@ +From fa023db334b51dc9d75ff37ebd587500304d41bf Mon Sep 17 00:00:00 2001 +From: Victor Sun +Date: Fri, 31 Aug 2018 10:59:00 +0800 +Subject: [PATCH 050/150] VHM: add service to support px data transition + +The px data is hard coded within HV, DM will get these data to build +DSDT for UOS. With this DSDT, UOS would have capability on Px control +if acpi-cpufreq driver is enabled in kernel. + +So this patch is to add the service to interact with both HV and DM. + +The detailed working rationale is illustrated in HV patch set. + +Change-Id: Icfd01880dcfe0fd938a05c6f31614dfdcd48631a +Tracked-On: 212378 +Signed-off-by: Victor Sun +Reviewed-on: +--- + drivers/char/vhm/vhm_dev.c | 40 ++++++++++++++++++++++++++++++ + drivers/vhm/vhm_hypercall.c | 5 ++++ + include/linux/vhm/acrn_common.h | 23 +++++++++++++++++ + include/linux/vhm/acrn_hv_defs.h | 4 +++ + include/linux/vhm/vhm_hypercall.h | 1 + + include/linux/vhm/vhm_ioctl_defs.h | 4 +++ + 6 files changed, 77 insertions(+) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 6cd1859beb24..4c9cdabc0028 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -437,6 +437,46 @@ static long vhm_dev_ioctl(struct file *filep, + break; + } + ++ case IC_PM_GET_CPU_STATE: { ++ uint64_t cmd; ++ ++ if (copy_from_user(&cmd, ++ (void *)ioctl_param, sizeof(cmd))) ++ return -EFAULT; ++ ++ switch (cmd & PMCMD_TYPE_MASK) { ++ case PMCMD_GET_PX_CNT: { ++ uint8_t px_cnt; ++ ++ ret = hcall_get_cpu_state(cmd, virt_to_phys(&px_cnt)); ++ if (ret < 0) ++ return -EFAULT; ++ ++ if (copy_to_user((void *)ioctl_param, ++ &px_cnt, sizeof(px_cnt))) ++ ret = -EFAULT; ++ ++ break; ++ } ++ case 
PMCMD_GET_PX_DATA: { ++ struct cpu_px_data px_data; ++ ++ ret = hcall_get_cpu_state(cmd, virt_to_phys(&px_data)); ++ if (ret < 0) ++ return -EFAULT; ++ ++ if (copy_to_user((void *)ioctl_param, ++ &px_data, sizeof(px_data))) ++ ret = -EFAULT; ++ break; ++ } ++ default: ++ ret = -EFAULT; ++ break; ++ } ++ break; ++ } ++ + default: + pr_warn("Unknown IOCTL 0x%x\n", ioctl_num); + ret = 0; +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index d0da22f2a88b..df87febaf60d 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -82,6 +82,11 @@ inline long hcall_setup_sbuf(unsigned long sbuf_head) + return acrn_hypercall1(HC_SETUP_SBUF, sbuf_head); + } + ++inline long hcall_get_cpu_state(unsigned long cmd, unsigned long state_pa) ++{ ++ return acrn_hypercall2(HC_PM_GET_CPU_STATE, cmd, state_pa); ++} ++ + inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) + { + return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index e34970656b56..7a49d4d59744 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -212,4 +212,27 @@ struct acrn_vm_pci_msix_remap { + */ + #define GUEST_CFG_OFFSET 0xd0000 + ++struct cpu_px_data { ++ uint64_t core_frequency; /* megahertz */ ++ uint64_t power; /* milliWatts */ ++ uint64_t transition_latency; /* microseconds */ ++ uint64_t bus_master_latency; /* microseconds */ ++ uint64_t control; /* control value */ ++ uint64_t status; /* success indicator */ ++} __attribute__((aligned(8))); ++ ++#define PMCMD_VMID_MASK 0xff000000 ++#define PMCMD_VCPUID_MASK 0x00ff0000 ++#define PMCMD_STATE_NUM_MASK 0x0000ff00 ++#define PMCMD_TYPE_MASK 0x000000ff ++ ++#define PMCMD_VMID_SHIFT 24 ++#define PMCMD_VCPUID_SHIFT 16 ++#define PMCMD_STATE_NUM_SHIFT 8 ++ ++enum pm_cmd_type { ++ PMCMD_GET_PX_CNT, ++ PMCMD_GET_PX_DATA, ++}; ++ + #endif /* ACRN_COMMON_H */ +diff --git 
a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 411f197f7f3a..d2da1a760783 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -106,6 +106,10 @@ + #define HC_ID_DBG_BASE 0x60UL + #define HC_SETUP_SBUF _HC_ID(HC_ID, HC_ID_DBG_BASE + 0x00) + ++/* Power management */ ++#define HC_ID_PM_BASE 0x80UL ++#define HC_PM_GET_CPU_STATE _HC_ID(HC_ID, HC_ID_PM_BASE + 0x00) ++ + #define ACRN_DOM0_VMID (0UL) + #define ACRN_INVALID_VMID (-1) + #define ACRN_INVALID_HPA (-1UL) +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index e56a16c5518f..2372906946d6 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -144,6 +144,7 @@ inline long hcall_pause_vm(unsigned long vmid); + inline long hcall_destroy_vm(unsigned long vmid); + inline long hcall_query_vm_state(unsigned long vmid); + inline long hcall_setup_sbuf(unsigned long sbuf_head); ++inline long hcall_get_cpu_state(unsigned long cmd, unsigned long state_pa); + inline long hcall_set_memmap(unsigned long vmid, + unsigned long memmap); + inline long hcall_set_ioreq_buffer(unsigned long vmid, +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index eb8d0d08a89d..3b05d8228e53 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -101,6 +101,10 @@ + #define IC_SET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x03) + #define IC_RESET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x04) + ++/* Power management */ ++#define IC_ID_PM_BASE 0x60UL ++#define IC_PM_GET_CPU_STATE _IC_ID(IC_ID, IC_ID_PM_BASE + 0x00) ++ + /** + * struct vm_memseg - memory segment info for guest + * +-- +2.17.1 + diff --git a/patches/0050-drm-i915-tgl-Implement-TGL-DisplayPort-training-sequen.drm b/patches/0050-drm-i915-tgl-Implement-TGL-DisplayPort-training-sequen.drm new file mode 100644 index 0000000000..c490bb91a0 --- /dev/null +++ 
b/patches/0050-drm-i915-tgl-Implement-TGL-DisplayPort-training-sequen.drm @@ -0,0 +1,235 @@ +From 36aae98b51b0b4cd58ffb3881300f19790af9436 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= +Date: Fri, 23 Aug 2019 01:20:47 -0700 +Subject: [PATCH 050/690] drm/i915/tgl: Implement TGL DisplayPort training + sequence +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +On TGL some registers moved from DDI to transcoder and the +DisplayPort training sequence has a separate BSpec page. + +I started adding 'ifs' to the original intel_ddi_pre_enable_dp() but +it was becoming really hard to follow, so a new and cleaner function +for TGL was added with comments of all steps. It's similar to ICL, +but different enough to deserve a new function. + +The rest of DisplayPort enable and the whole disable sequences +remained the same. + +v2: FEC and DSC should be enabled on sink side before start link +training(Maarten reported and Manasi confirmed the DSC part) + +v3: Add call to enable FEC on step 7.l(Manasi) + +BSpec: 49190 +Cc: Maarten Lankhorst +Cc: Manasi Navare +Cc: Ville Syrjälä +Signed-off-by: José Roberto de Souza +Signed-off-by: Lucas De Marchi +Reviewed-by: Maarten Lankhorst +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-16-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/display/intel_ddi.c | 140 ++++++++++++++++++++++- + drivers/gpu/drm/i915/display/intel_dp.c | 8 +- + 2 files changed, 140 insertions(+), 8 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c +index 8eb2b3ec01ed..3180dacb5be4 100644 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c +@@ -1761,7 +1761,14 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); + } + +-void intel_ddi_enable_transcoder_func(const struct 
intel_crtc_state *crtc_state) ++/* ++ * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state. ++ * ++ * Only intended to be used by intel_ddi_enable_transcoder_func() and ++ * intel_ddi_config_transcoder_func(). ++ */ ++static u32 ++intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) + { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc); +@@ -1845,6 +1852,34 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) + temp |= DDI_PORT_WIDTH(crtc_state->lane_count); + } + ++ return temp; ++} ++ ++void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) ++{ ++ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); ++ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); ++ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; ++ u32 temp; ++ ++ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); ++ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); ++} ++ ++/* ++ * Same as intel_ddi_enable_transcoder_func(), but it does not set the enable ++ * bit. 
++ */ ++static void ++intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state) ++{ ++ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); ++ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); ++ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; ++ u32 temp; ++ ++ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); ++ temp &= ~TRANS_DDI_FUNC_ENABLE; + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); + } + +@@ -3160,9 +3195,94 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, + POSTING_READ(DP_TP_CTL(port)); + } + +-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, +- const struct intel_crtc_state *crtc_state, +- const struct drm_connector_state *conn_state) ++static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, ++ const struct intel_crtc_state *crtc_state, ++ const struct drm_connector_state *conn_state) ++{ ++ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); ++ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); ++ enum phy phy = intel_port_to_phy(dev_priv, encoder->port); ++ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); ++ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); ++ int level = intel_ddi_dp_level(intel_dp); ++ ++ intel_dp_set_link_params(intel_dp, crtc_state->port_clock, ++ crtc_state->lane_count, is_mst); ++ ++ /* 1.a got on intel_atomic_commit_tail() */ ++ ++ /* 2. */ ++ intel_edp_panel_on(intel_dp); ++ ++ /* ++ * 1.b, 3. and 4. is done before tgl_ddi_pre_enable_dp() by: ++ * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and ++ * haswell_crtc_enable()->intel_enable_shared_dpll() ++ */ ++ ++ /* 5. */ ++ if (!intel_phy_is_tc(dev_priv, phy) || ++ dig_port->tc_mode != TC_PORT_TBT_ALT) ++ intel_display_power_get(dev_priv, ++ dig_port->ddi_io_power_domain); ++ ++ /* 6. 
*/ ++ icl_program_mg_dp_mode(dig_port); ++ ++ /* ++ * 7.a - Steps in this function should only be executed over MST ++ * master, what will be taken in care by MST hook ++ * intel_mst_pre_enable_dp() ++ */ ++ intel_ddi_enable_pipe_clock(crtc_state); ++ ++ /* 7.b */ ++ intel_ddi_config_transcoder_func(crtc_state); ++ ++ /* 7.d */ ++ icl_disable_phy_clock_gating(dig_port); ++ ++ /* 7.e */ ++ icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, ++ encoder->type); ++ ++ /* 7.f */ ++ if (intel_phy_is_combo(dev_priv, phy)) { ++ bool lane_reversal = ++ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; ++ ++ intel_combo_phy_power_up_lanes(dev_priv, phy, false, ++ crtc_state->lane_count, ++ lane_reversal); ++ } ++ ++ /* 7.g */ ++ intel_ddi_init_dp_buf_reg(encoder); ++ ++ if (!is_mst) ++ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); ++ ++ intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); ++ /* ++ * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit ++ * in the FEC_CONFIGURATION register to 1 before initiating link ++ * training ++ */ ++ intel_dp_sink_set_fec_ready(intel_dp, crtc_state); ++ /* 7.c, 7.h, 7.i, 7.j */ ++ intel_dp_start_link_train(intel_dp); ++ ++ /* 7.k */ ++ intel_dp_stop_link_train(intel_dp); ++ ++ /* 7.l */ ++ intel_ddi_enable_fec(encoder, crtc_state); ++ intel_dsc_enable(encoder, crtc_state); ++} ++ ++static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, ++ const struct intel_crtc_state *crtc_state, ++ const struct drm_connector_state *conn_state) + { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); +@@ -3228,6 +3348,18 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, + intel_dsc_enable(encoder, crtc_state); + } + ++static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, ++ const struct intel_crtc_state *crtc_state, ++ const struct drm_connector_state *conn_state) ++{ ++ struct 
drm_i915_private *dev_priv = to_i915(encoder->base.dev); ++ ++ if (INTEL_GEN(dev_priv) >= 12) ++ tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state); ++ else ++ hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state); ++} ++ + static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c +index 0dee844ca08a..f6aefba822fb 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp.c ++++ b/drivers/gpu/drm/i915/display/intel_dp.c +@@ -4045,13 +4045,13 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) + I915_WRITE(DP_TP_CTL(port), val); + + /* +- * On PORT_A we can have only eDP in SST mode. There the only reason +- * we need to set idle transmission mode is to work around a HW issue +- * where we enable the pipe while not in idle link-training mode. ++ * Until TGL on PORT_A we can have only eDP in SST mode. There the only ++ * reason we need to set idle transmission mode is to work around a HW ++ * issue where we enable the pipe while not in idle link-training mode. + * In this case there is requirement to wait for a minimum number of + * idle patterns to be sent. 
+ */ +- if (port == PORT_A) ++ if (port == PORT_A && INTEL_GEN(dev_priv) < 12) + return; + + if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port), +-- +2.17.1 + diff --git a/patches/0050-mei-virtio-virtualization-frontend-driver.security b/patches/0050-mei-virtio-virtualization-frontend-driver.security new file mode 100644 index 0000000000..086f29260b --- /dev/null +++ b/patches/0050-mei-virtio-virtualization-frontend-driver.security @@ -0,0 +1,943 @@ +From 990fcf899983713ff261b8d978c327d3e6169ae5 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Tue, 16 Oct 2018 09:02:33 +0300 +Subject: [PATCH 50/65] mei: virtio: virtualization frontend driver + +This frontend driver implements MEI hw interface based on virtio +framework to let MEI driver work without changes under virtualization. +It needs a backend service in the device-model on service OS side +to make it work. The backend service will emulate most of the mei +device behavior to satisfy MEI driver requirements. + +The backend service is available in ACRN device-model at github. +For more information, please refer to https://projectacrn.org + +The ACRN virtio sub device id for MEI is is 0x8602. 
+ +Change-Id: I135e121e043e5181263faf11541399672ae28291 +Signed-off-by: Tomas Winkler +Signed-off-by: Alexander Usyskin +Signed-off-by: Wang Yu +Signed-off-by: Liu Shuo +--- + drivers/misc/mei/Kconfig | 10 + + drivers/misc/mei/Makefile | 3 + + drivers/misc/mei/hw-virtio.c | 872 +++++++++++++++++++++++++++++++++++ + 3 files changed, 885 insertions(+) + create mode 100644 drivers/misc/mei/hw-virtio.c + +diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig +index 241ab7acd742..60d8c0fe8ef5 100644 +--- a/drivers/misc/mei/Kconfig ++++ b/drivers/misc/mei/Kconfig +@@ -46,5 +46,15 @@ config INTEL_MEI_TXE + Supported SoCs: + Intel Bay Trail + ++config INTEL_MEI_VIRTIO ++ tristate "Intel MEI interface emulation with virtio framework" ++ select INTEL_MEI ++ depends on X86 && PCI && VIRTIO_PCI ++ help ++ This module implements mei hw emulation over virtio transport. ++ The module will be called mei_virtio. ++ Enable this if your virtual machine supports virtual mei ++ device over virtio. ++ + source "drivers/misc/mei/hdcp/Kconfig" + source "drivers/misc/mei/spd/Kconfig" +diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile +index f4721e8116e7..60ae4375f579 100644 +--- a/drivers/misc/mei/Makefile ++++ b/drivers/misc/mei/Makefile +@@ -22,6 +22,9 @@ obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o + mei-txe-objs := pci-txe.o + mei-txe-objs += hw-txe.o + ++obj-$(CONFIG_INTEL_MEI_VIRTIO) += mei-virtio.o ++mei-virtio-objs := hw-virtio.o ++ + mei-$(CONFIG_EVENT_TRACING) += mei-trace.o + CFLAGS_mei-trace.o = -I$(src) + +diff --git a/drivers/misc/mei/hw-virtio.c b/drivers/misc/mei/hw-virtio.c +new file mode 100644 +index 000000000000..2b72113bff3f +--- /dev/null ++++ b/drivers/misc/mei/hw-virtio.c +@@ -0,0 +1,872 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Intel Management Engine Interface (Intel MEI) Linux driver ++ * Copyright (c) 2018, Intel Corporation. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "mei_dev.h" ++#include "hbm.h" ++#include "client.h" ++ ++#define MEI_VIRTIO_RPM_TIMEOUT 500 ++/* ACRN virtio device types */ ++#ifndef VIRTIO_ID_MEI ++#define VIRTIO_ID_MEI 0xFFFE /* virtio mei */ ++#endif ++ ++/** ++ * struct mei_virtio_cfg - settings passed from the virtio backend ++ * @buf_depth: read buffer depth in slots (4bytes) ++ * @hw_ready: hw is ready for operation ++ * @host_reset: synchronize reset with virtio backend ++ * @reserved: reserved for alignment ++ * @fw_status: FW status ++ */ ++struct mei_virtio_cfg { ++ u32 buf_depth; ++ u8 hw_ready; ++ u8 host_reset; ++ u8 reserved[2]; ++ u32 fw_status[MEI_FW_STATUS_MAX]; ++} __packed; ++ ++struct mei_virtio_hw { ++ struct mei_device mdev; ++ char name[32]; ++ ++ struct virtqueue *in; ++ struct virtqueue *out; ++ ++ bool host_ready; ++ struct work_struct intr_handler; ++ ++ u32 *recv_buf; ++ u8 recv_rdy; ++ size_t recv_sz; ++ u32 recv_idx; ++ u32 recv_len; ++ ++ /* send buffer */ ++ atomic_t hbuf_ready; ++ const void *send_hdr; ++ const void *send_buf; ++ ++ struct mei_virtio_cfg cfg; ++}; ++ ++#define to_virtio_hw(_dev) container_of(_dev, struct mei_virtio_hw, mdev) ++ ++/** ++ * mei_virtio_fw_status() - read status register of mei ++ * @dev: mei device ++ * @fw_status: fw status register values ++ * ++ * Return: always 0 ++ */ ++static int mei_virtio_fw_status(struct mei_device *dev, ++ struct mei_fw_status *fw_status) ++{ ++ struct virtio_device *vdev = dev_to_virtio(dev->dev); ++ ++ fw_status->count = MEI_FW_STATUS_MAX; ++ virtio_cread_bytes(vdev, offsetof(struct mei_virtio_cfg, fw_status), ++ fw_status->status, sizeof(fw_status->status)); ++ return 0; ++} ++ ++/** ++ * mei_virtio_pg_state() - translate internal pg state ++ * to the mei power gating state ++ * @dev: mei device ++ * ++ * Return: ++ * * MEI_PG_OFF - if aliveness is on (always) ++ * * MEI_PG_ON - otherwise ++ */ 
++static inline enum mei_pg_state mei_virtio_pg_state(struct mei_device *dev) ++{ ++ /* TODO: not support power management in PV mode */ ++ return MEI_PG_OFF; ++} ++ ++/** ++ * mei_virtio_hw_config() - configure hw dependent settings ++ * ++ * @dev: mei device ++ */ ++static void mei_virtio_hw_config(struct mei_device *dev) ++{ ++ /* nop */ ++} ++ ++/** ++ * mei_virtio_hbuf_empty_slots() - counts write empty slots. ++ * @dev: the device structure ++ * ++ * Return: always return frontend buf size if buffer is ready, 0 otherwise ++ */ ++static int mei_virtio_hbuf_empty_slots(struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ ++ return (atomic_read(&hw->hbuf_ready) == 1) ? hw->cfg.buf_depth : 0; ++} ++ ++/** ++ * mei_virtio_hbuf_is_ready() - checks if write buffer is ready ++ * @dev: the device structure ++ * ++ * Return: true if hbuf is ready ++ */ ++static bool mei_virtio_hbuf_is_ready(struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ ++ return atomic_read(&hw->hbuf_ready) == 1; ++} ++ ++/** ++ * mei_virtio_hbuf_max_depth() - returns depth of FE write buffer. ++ * @dev: the device structure ++ * ++ * Return: size of frontend write buffer in bytes ++ */ ++static u32 mei_virtio_hbuf_depth(const struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ ++ return hw->cfg.buf_depth; ++} ++ ++/** ++ * mei_virtio_intr_clear() - clear and stop interrupts ++ * @dev: the device structure ++ */ ++static void mei_virtio_intr_clear(struct mei_device *dev) ++{ ++ /* ++ * In our virtio solution, there are two types of interrupts, ++ * vq interrupt and config change interrupt. ++ * 1) start/reset rely on virtio config changed interrupt; ++ * 2) send/recv rely on virtio virtqueue interrupts. ++ * They are all virtual interrupts. So, we don't have corresponding ++ * operation to do here. 
++ */ ++} ++ ++/** ++ * mei_virtio_intr_enable() - enables mei BE virtqueues callbacks ++ * @dev: the device structure ++ */ ++static void mei_virtio_intr_enable(struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ struct virtio_device *vdev = dev_to_virtio(dev->dev); ++ ++ virtio_config_enable(vdev); ++ ++ virtqueue_enable_cb(hw->in); ++ virtqueue_enable_cb(hw->out); ++} ++ ++/** ++ * mei_virtio_intr_disable() - disables mei BE virtqueues callbacks ++ * ++ * @dev: the device structure ++ */ ++static void mei_virtio_intr_disable(struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ struct virtio_device *vdev = dev_to_virtio(dev->dev); ++ ++ virtio_config_disable(vdev); ++ ++ virtqueue_disable_cb(hw->in); ++ virtqueue_disable_cb(hw->out); ++} ++ ++/** ++ * mei_virtio_synchronize_irq() - wait for pending IRQ handlers for all ++ * virtqueue ++ * @dev: the device structure ++ */ ++static void mei_virtio_synchronize_irq(struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ ++ /* ++ * Now, all IRQ handlers are converted to workqueue. ++ * Change synchronize irq to flush this work. ++ */ ++ flush_work(&hw->intr_handler); ++} ++ ++static void mei_virtio_free_outbufs(struct mei_virtio_hw *hw) ++{ ++ kfree(hw->send_hdr); ++ kfree(hw->send_buf); ++ hw->send_hdr = NULL; ++ hw->send_buf = NULL; ++} ++ ++/** ++ * mei_virtio_write_message() - writes a message to mei virtio back-end service. 
++ * @dev: the device structure ++ * @hdr: mei header of message ++ * @hdr_len: header length ++ * @data: message payload will be written ++ * @data_len: messag payload length ++ * ++ * Return: -EIO if write has failed ++ */ ++static int mei_virtio_write_message(struct mei_device *dev, ++ const void *hdr, size_t hdr_len, ++ const void *data, size_t data_len) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ struct scatterlist sg[2]; ++ const void *hbuf, *dbuf; ++ int ret; ++ ++ if (WARN_ON(!atomic_add_unless(&hw->hbuf_ready, -1, 0))) ++ return -EIO; ++ ++ hbuf = kmemdup(hdr, hdr_len, GFP_KERNEL); ++ hw->send_hdr = hbuf; ++ ++ dbuf = kmemdup(data, data_len, GFP_KERNEL); ++ hw->send_buf = dbuf; ++ ++ if (!hbuf || !dbuf) { ++ ret = -ENOMEM; ++ goto fail; ++ } ++ ++ sg_init_table(sg, 2); ++ sg_set_buf(&sg[0], hbuf, hdr_len); ++ sg_set_buf(&sg[1], dbuf, data_len); ++ ++ ret = virtqueue_add_outbuf(hw->out, sg, 2, hw, GFP_KERNEL); ++ if (ret) { ++ dev_err(dev->dev, "failed to add outbuf\n"); ++ goto fail; ++ } ++ ++ virtqueue_kick(hw->out); ++ return 0; ++fail: ++ ++ mei_virtio_free_outbufs(hw); ++ ++ return ret; ++} ++ ++/** ++ * mei_virtio_count_full_read_slots() - counts read full slots. 
++ * @dev: the device structure ++ * ++ * Return: -EOVERFLOW if overflow, otherwise filled slots count ++ */ ++static int mei_virtio_count_full_read_slots(struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ ++ if (hw->recv_idx > hw->recv_len) ++ return -EOVERFLOW; ++ ++ return hw->recv_len - hw->recv_idx; ++} ++ ++/** ++ * mei_virtio_read_hdr() - Reads 32bit dword from mei virtio receive buffer ++ * ++ * @dev: the device structure ++ * ++ * Return: 32bit dword of receive buffer (u32) ++ */ ++static inline u32 mei_virtio_read_hdr(const struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ ++ WARN_ON(hw->cfg.buf_depth < hw->recv_idx + 1); ++ ++ return hw->recv_buf[hw->recv_idx++]; ++} ++ ++static int mei_virtio_read(struct mei_device *dev, unsigned char *buffer, ++ unsigned long len) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ u32 slots = mei_data2slots(len); ++ ++ if (WARN_ON(hw->cfg.buf_depth < hw->recv_idx + slots)) ++ return -EOVERFLOW; ++ ++ /* ++ * Assumption: There is only one MEI message in recv_buf each time. ++ * Backend service need follow this rule too. 
++ * ++ * TODO: use double/triple buffers for recv_buf ++ */ ++ memcpy(buffer, hw->recv_buf + hw->recv_idx, len); ++ hw->recv_idx += slots; ++ ++ return 0; ++} ++ ++static bool mei_virtio_pg_is_enabled(struct mei_device *dev) ++{ ++ return false; ++} ++ ++static bool mei_virtio_pg_in_transition(struct mei_device *dev) ++{ ++ return false; ++} ++ ++static void mei_virtio_add_recv_buf(struct mei_virtio_hw *hw) ++{ ++ struct scatterlist sg; ++ ++ if (hw->recv_rdy) /* not needed */ ++ return; ++ ++ /* refill the recv_buf to IN virtqueue to get next message */ ++ sg_init_one(&sg, hw->recv_buf, mei_slots2data(hw->cfg.buf_depth)); ++ hw->recv_len = 0; ++ hw->recv_idx = 0; ++ hw->recv_rdy = 1; ++ virtqueue_add_inbuf(hw->in, &sg, 1, hw->recv_buf, GFP_KERNEL); ++ virtqueue_kick(hw->in); ++} ++ ++/** ++ * mei_virtio_hw_is_ready() - check whether the BE(hw) has turned ready ++ * @dev: mei device ++ * Return: bool ++ */ ++static bool mei_virtio_hw_is_ready(struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ struct virtio_device *vdev = dev_to_virtio(dev->dev); ++ ++ virtio_cread(vdev, struct mei_virtio_cfg, ++ hw_ready, &hw->cfg.hw_ready); ++ ++ dev_dbg(dev->dev, "hw ready %d\n", hw->cfg.hw_ready); ++ ++ return hw->cfg.hw_ready; ++} ++ ++/** ++ * mei_virtio_hw_reset - resets virtio hw. 
++ * ++ * @dev: the device structure ++ * @intr_enable: virtio use data/config callbacks ++ * ++ * Return: 0 on success an error code otherwise ++ */ ++static int mei_virtio_hw_reset(struct mei_device *dev, bool intr_enable) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ struct virtio_device *vdev = dev_to_virtio(dev->dev); ++ ++ dev_dbg(dev->dev, "hw reset\n"); ++ ++ dev->recvd_hw_ready = false; ++ hw->host_ready = false; ++ atomic_set(&hw->hbuf_ready, 0); ++ hw->recv_len = 0; ++ hw->recv_idx = 0; ++ ++ hw->cfg.host_reset = 1; ++ virtio_cwrite(vdev, struct mei_virtio_cfg, ++ host_reset, &hw->cfg.host_reset); ++ ++ mei_virtio_hw_is_ready(dev); ++ ++ if (intr_enable) ++ mei_virtio_intr_enable(dev); ++ ++ return 0; ++} ++ ++/** ++ * mei_virtio_hw_reset_release() - release device from the reset ++ * @dev: the device structure ++ */ ++static void mei_virtio_hw_reset_release(struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ struct virtio_device *vdev = dev_to_virtio(dev->dev); ++ ++ dev_dbg(dev->dev, "hw reset release\n"); ++ hw->cfg.host_reset = 0; ++ virtio_cwrite(vdev, struct mei_virtio_cfg, ++ host_reset, &hw->cfg.host_reset); ++} ++ ++/** ++ * mei_virtio_hw_ready_wait() - wait until the virtio(hw) has turned ready ++ * or timeout is reached ++ * @dev: mei device ++ * ++ * Return: 0 on success, error otherwise ++ */ ++static int mei_virtio_hw_ready_wait(struct mei_device *dev) ++{ ++ mutex_unlock(&dev->device_lock); ++ wait_event_timeout(dev->wait_hw_ready, ++ dev->recvd_hw_ready, ++ mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); ++ mutex_lock(&dev->device_lock); ++ if (!dev->recvd_hw_ready) { ++ dev_err(dev->dev, "wait hw ready failed\n"); ++ return -ETIMEDOUT; ++ } ++ ++ dev->recvd_hw_ready = false; ++ return 0; ++} ++ ++/** ++ * mei_virtio_hw_start() - hw start routine ++ * @dev: mei device ++ * ++ * Return: 0 on success, error otherwise ++ */ ++static int mei_virtio_hw_start(struct mei_device *dev) ++{ ++ struct mei_virtio_hw 
*hw = to_virtio_hw(dev); ++ int ret; ++ ++ dev_dbg(dev->dev, "hw start\n"); ++ mei_virtio_hw_reset_release(dev); ++ ++ ret = mei_virtio_hw_ready_wait(dev); ++ if (ret) ++ return ret; ++ ++ mei_virtio_add_recv_buf(hw); ++ atomic_set(&hw->hbuf_ready, 1); ++ dev_dbg(dev->dev, "hw is ready\n"); ++ hw->host_ready = true; ++ ++ return 0; ++} ++ ++/** ++ * mei_virtio_host_is_ready() - check whether the FE has turned ready ++ * @dev: mei device ++ * ++ * Return: bool ++ */ ++static bool mei_virtio_host_is_ready(struct mei_device *dev) ++{ ++ struct mei_virtio_hw *hw = to_virtio_hw(dev); ++ ++ dev_dbg(dev->dev, "host ready %d\n", hw->host_ready); ++ ++ return hw->host_ready; ++} ++ ++/** ++ * mei_virtio_data_in() - The callback of recv virtqueue of virtio mei ++ * @vq: receiving virtqueue ++ */ ++static void mei_virtio_data_in(struct virtqueue *vq) ++{ ++ struct mei_virtio_hw *hw = vq->vdev->priv; ++ ++ /* disable interrupts (enabled again from in the interrupt worker) */ ++ virtqueue_disable_cb(hw->in); ++ ++ schedule_work(&hw->intr_handler); ++} ++ ++/** ++ * mei_virtio_data_out() - The callback of send virtqueue of virtio mei ++ * @vq: transmiting virtqueue ++ */ ++static void mei_virtio_data_out(struct virtqueue *vq) ++{ ++ struct mei_virtio_hw *hw = vq->vdev->priv; ++ ++ schedule_work(&hw->intr_handler); ++} ++ ++static void mei_virtio_intr_handler(struct work_struct *work) ++{ ++ struct mei_virtio_hw *hw = ++ container_of(work, struct mei_virtio_hw, intr_handler); ++ struct mei_device *dev = &hw->mdev; ++ LIST_HEAD(complete_list); ++ s32 slots; ++ int rets = 0; ++ void *data; ++ unsigned int len; ++ ++ mutex_lock(&dev->device_lock); ++ ++ if (dev->dev_state == MEI_DEV_DISABLED) { ++ dev_warn(dev->dev, "Interrupt in disabled state.\n"); ++ mei_virtio_intr_disable(dev); ++ goto end; ++ } ++ ++ /* check if ME wants a reset */ ++ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { ++ dev_warn(dev->dev, "BE service not ready: resetting.\n"); ++ 
schedule_work(&dev->reset_work); ++ goto end; ++ } ++ ++ /* check if we need to start the dev */ ++ if (!mei_host_is_ready(dev)) { ++ if (mei_hw_is_ready(dev)) { ++ dev_dbg(dev->dev, "we need to start the dev.\n"); ++ dev->recvd_hw_ready = true; ++ wake_up(&dev->wait_hw_ready); ++ } else { ++ dev_warn(dev->dev, "Spurious Interrupt\n"); ++ } ++ goto end; ++ } ++ ++ /* read */ ++ if (hw->recv_rdy) { ++ data = virtqueue_get_buf(hw->in, &len); ++ if (!data || !len) { ++ dev_dbg(dev->dev, "No data %d", len); ++ } else { ++ dev_dbg(dev->dev, "data_in %d\n", len); ++ WARN_ON(data != hw->recv_buf); ++ hw->recv_len = mei_data2slots(len); ++ hw->recv_rdy = 0; ++ } ++ } ++ ++ /* write */ ++ if (!atomic_read(&hw->hbuf_ready)) { ++ if (!virtqueue_get_buf(hw->out, &len)) { ++ dev_warn(dev->dev, "Failed to getbuf\n"); ++ } else { ++ mei_virtio_free_outbufs(hw); ++ atomic_inc(&hw->hbuf_ready); ++ } ++ } ++ ++ /* check slots available for reading */ ++ slots = mei_count_full_read_slots(dev); ++ while (slots > 0) { ++ dev_dbg(dev->dev, "slots to read = %08x\n", slots); ++ rets = mei_irq_read_handler(dev, &complete_list, &slots); ++ ++ if (rets && ++ (dev->dev_state != MEI_DEV_RESETTING && ++ dev->dev_state != MEI_DEV_POWER_DOWN)) { ++ dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n", ++ rets); ++ schedule_work(&dev->reset_work); ++ goto end; ++ } ++ } ++ ++ dev->hbuf_is_ready = mei_hbuf_is_ready(dev); ++ ++ mei_irq_write_handler(dev, &complete_list); ++ ++ dev->hbuf_is_ready = mei_hbuf_is_ready(dev); ++ ++ mei_irq_compl_handler(dev, &complete_list); ++ ++ mei_virtio_add_recv_buf(hw); ++ ++end: ++ if (dev->dev_state != MEI_DEV_DISABLED) { ++ if (!virtqueue_enable_cb(hw->in)) { ++ dev_dbg(dev->dev, "IN queue pending 1\n"); ++ schedule_work(&hw->intr_handler); ++ } ++ } ++ ++ mutex_unlock(&dev->device_lock); ++} ++ ++static void mei_virtio_config_changed(struct virtio_device *vdev) ++{ ++ struct mei_virtio_hw *hw = vdev->priv; ++ struct mei_device *dev = &hw->mdev; ++ ++ 
virtio_cread(vdev, struct mei_virtio_cfg, ++ hw_ready, &hw->cfg.hw_ready); ++ ++ if (dev->dev_state == MEI_DEV_DISABLED) { ++ dev_dbg(dev->dev, "disabled state don't start\n"); ++ return; ++ } ++ ++ /* Run intr handler once to handle reset notify */ ++ schedule_work(&hw->intr_handler); ++} ++ ++static void mei_virtio_remove_vqs(struct virtio_device *vdev) ++{ ++ struct mei_virtio_hw *hw = vdev->priv; ++ ++ virtqueue_detach_unused_buf(hw->in); ++ hw->recv_len = 0; ++ hw->recv_idx = 0; ++ hw->recv_rdy = 0; ++ ++ virtqueue_detach_unused_buf(hw->out); ++ ++ mei_virtio_free_outbufs(hw); ++ ++ vdev->config->del_vqs(vdev); ++} ++ ++/* ++ * There are two virtqueues, one is for send and another is for recv. ++ */ ++static int mei_virtio_init_vqs(struct mei_virtio_hw *hw, ++ struct virtio_device *vdev) ++{ ++ struct virtqueue *vqs[2]; ++ ++ vq_callback_t *cbs[] = { ++ mei_virtio_data_in, ++ mei_virtio_data_out, ++ }; ++ static const char * const names[] = { ++ "in", ++ "out", ++ }; ++ int ret; ++ ++ ret = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL); ++ if (ret) ++ return ret; ++ ++ hw->in = vqs[0]; ++ hw->out = vqs[1]; ++ ++ return 0; ++} ++ ++static const struct mei_hw_ops mei_virtio_ops = { ++ .fw_status = mei_virtio_fw_status, ++ .pg_state = mei_virtio_pg_state, ++ ++ .host_is_ready = mei_virtio_host_is_ready, ++ ++ .hw_is_ready = mei_virtio_hw_is_ready, ++ .hw_reset = mei_virtio_hw_reset, ++ .hw_config = mei_virtio_hw_config, ++ .hw_start = mei_virtio_hw_start, ++ ++ .pg_in_transition = mei_virtio_pg_in_transition, ++ .pg_is_enabled = mei_virtio_pg_is_enabled, ++ ++ .intr_clear = mei_virtio_intr_clear, ++ .intr_enable = mei_virtio_intr_enable, ++ .intr_disable = mei_virtio_intr_disable, ++ .synchronize_irq = mei_virtio_synchronize_irq, ++ ++ .hbuf_free_slots = mei_virtio_hbuf_empty_slots, ++ .hbuf_is_ready = mei_virtio_hbuf_is_ready, ++ .hbuf_depth = mei_virtio_hbuf_depth, ++ ++ .write = mei_virtio_write_message, ++ ++ .rdbuf_full_slots = 
mei_virtio_count_full_read_slots, ++ .read_hdr = mei_virtio_read_hdr, ++ .read = mei_virtio_read, ++}; ++ ++static int mei_virtio_probe(struct virtio_device *vdev) ++{ ++ struct mei_virtio_hw *hw; ++ int ret; ++ ++ hw = devm_kzalloc(&vdev->dev, sizeof(*hw), GFP_KERNEL); ++ if (!hw) ++ return -ENOMEM; ++ ++ vdev->priv = hw; ++ ++ INIT_WORK(&hw->intr_handler, mei_virtio_intr_handler); ++ ++ ret = mei_virtio_init_vqs(hw, vdev); ++ if (ret) ++ goto vqs_failed; ++ ++ virtio_cread(vdev, struct mei_virtio_cfg, ++ buf_depth, &hw->cfg.buf_depth); ++ ++ hw->recv_buf = kzalloc(mei_slots2data(hw->cfg.buf_depth), GFP_KERNEL); ++ if (!hw->recv_buf) { ++ ret = -ENOMEM; ++ goto hbuf_failed; ++ } ++ atomic_set(&hw->hbuf_ready, 0); ++ ++ virtio_device_ready(vdev); ++ ++ mei_device_init(&hw->mdev, &vdev->dev, &mei_virtio_ops); ++ ++ pm_runtime_get_noresume(&vdev->dev); ++ pm_runtime_set_active(&vdev->dev); ++ pm_runtime_enable(&vdev->dev); ++ ++ ret = mei_start(&hw->mdev); ++ if (ret) ++ goto mei_start_failed; ++ ++ pm_runtime_set_autosuspend_delay(&vdev->dev, MEI_VIRTIO_RPM_TIMEOUT); ++ pm_runtime_use_autosuspend(&vdev->dev); ++ ++ ret = mei_register(&hw->mdev, &vdev->dev); ++ if (ret) ++ goto mei_failed; ++ ++ pm_runtime_put(&vdev->dev); ++ ++ return 0; ++ ++mei_failed: ++ mei_stop(&hw->mdev); ++mei_start_failed: ++ mei_cancel_work(&hw->mdev); ++ mei_disable_interrupts(&hw->mdev); ++ kfree(hw->recv_buf); ++hbuf_failed: ++ vdev->config->del_vqs(vdev); ++vqs_failed: ++ return ret; ++} ++ ++static int __maybe_unused mei_virtio_pm_runtime_idle(struct device *device) ++{ ++ struct virtio_device *vdev = dev_to_virtio(device); ++ struct mei_virtio_hw *hw = vdev->priv; ++ ++ dev_dbg(&vdev->dev, "rpm: mei_virtio : runtime_idle\n"); ++ ++ if (!hw) ++ return -ENODEV; ++ ++ if (mei_write_is_idle(&hw->mdev)) ++ pm_runtime_autosuspend(device); ++ ++ return -EBUSY; ++} ++ ++static int __maybe_unused mei_virtio_pm_runtime_suspend(struct device *device) ++{ ++ return 0; ++} ++ ++static int 
__maybe_unused mei_virtio_pm_runtime_resume(struct device *device) ++{ ++ return 0; ++} ++ ++static int __maybe_unused mei_virtio_freeze(struct virtio_device *vdev) ++{ ++ struct mei_virtio_hw *hw = vdev->priv; ++ ++ dev_dbg(&vdev->dev, "freeze\n"); ++ ++ if (!hw) ++ return -ENODEV; ++ ++ mei_stop(&hw->mdev); ++ mei_disable_interrupts(&hw->mdev); ++ cancel_work_sync(&hw->intr_handler); ++ vdev->config->reset(vdev); ++ mei_virtio_remove_vqs(vdev); ++ ++ return 0; ++} ++ ++static int __maybe_unused mei_virtio_restore(struct virtio_device *vdev) ++{ ++ struct mei_virtio_hw *hw = vdev->priv; ++ int ret; ++ ++ dev_dbg(&vdev->dev, "restore\n"); ++ ++ if (!hw) ++ return -ENODEV; ++ ++ ret = mei_virtio_init_vqs(hw, vdev); ++ if (ret) ++ return ret; ++ ++ virtio_device_ready(vdev); ++ ++ ret = mei_restart(&hw->mdev); ++ if (ret) ++ return ret; ++ ++ /* Start timer if stopped in suspend */ ++ schedule_delayed_work(&hw->mdev.timer_work, HZ); ++ ++ return 0; ++} ++ ++static const struct dev_pm_ops mei_virtio_pm_ops = { ++ SET_RUNTIME_PM_OPS(mei_virtio_pm_runtime_suspend, ++ mei_virtio_pm_runtime_resume, ++ mei_virtio_pm_runtime_idle) ++}; ++ ++static void mei_virtio_remove(struct virtio_device *vdev) ++{ ++ struct mei_virtio_hw *hw = vdev->priv; ++ ++ mei_stop(&hw->mdev); ++ mei_disable_interrupts(&hw->mdev); ++ cancel_work_sync(&hw->intr_handler); ++ mei_deregister(&hw->mdev); ++ vdev->config->reset(vdev); ++ mei_virtio_remove_vqs(vdev); ++ kfree(hw->recv_buf); ++ pm_runtime_disable(&vdev->dev); ++} ++ ++static struct virtio_device_id id_table[] = { ++ { VIRTIO_ID_MEI, VIRTIO_DEV_ANY_ID }, ++ { } ++}; ++ ++static struct virtio_driver mei_virtio_driver = { ++ .id_table = id_table, ++ .probe = mei_virtio_probe, ++ .remove = mei_virtio_remove, ++ .config_changed = mei_virtio_config_changed, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ .pm = &mei_virtio_pm_ops, ++ }, ++#ifdef CONFIG_PM_SLEEP ++ .freeze = mei_virtio_freeze, ++ .restore = 
mei_virtio_restore, ++#endif ++}; ++ ++module_virtio_driver(mei_virtio_driver); ++MODULE_DEVICE_TABLE(virtio, id_table); ++MODULE_DESCRIPTION("Virtio MEI frontend driver"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0050-net-stmmac-introduce-platform-data-for-config.connectivity b/patches/0050-net-stmmac-introduce-platform-data-for-config.connectivity new file mode 100644 index 0000000000..b03f4a0c8d --- /dev/null +++ b/patches/0050-net-stmmac-introduce-platform-data-for-config.connectivity @@ -0,0 +1,87 @@ +From d78d78fb481f38bca8ec50d8ed543d9f47128fd5 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Fri, 9 Aug 2019 16:27:04 +0800 +Subject: [PATCH 050/108] net: stmmac: introduce platform data for configuring + TBS tunables + +Time-Based Scheduling has several tunables as listed below. Since this +is IP specific and not a standardized parameters in TSN, we set these +tunables through platform data. + +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 23 +++++++++++++++++++ + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 6 +++++ + include/linux/stmmac.h | 6 +++++ + 3 files changed, 35 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 6dcd8392cad6..b1b33314ab79 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2743,6 +2743,29 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) + TSN_HWTUNA_TX_EST_TILS, + priv->plat->tils); + ++ if (priv->plat->estm) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_TBS_ESTM, ++ priv->plat->estm); ++ ++ if (priv->plat->leos) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_TBS_LEOS, ++ priv->plat->leos); ++ if (priv->plat->legos) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_TBS_LEGOS, ++ priv->plat->legos); ++ ++ if 
(priv->plat->ftos) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_TBS_FTOS, ++ priv->plat->ftos); ++ if (priv->plat->fgos) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_TBS_FGOS, ++ priv->plat->fgos); ++ + return 0; + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index d176404fdb02..c0258bf17fec 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -163,6 +163,12 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + plat->tso_en = 1; + plat->tsn_est_en = 1; + plat->tsn_tbs_en = 1; ++ /* TBS HW Tunable */ ++ plat->estm = 0; /* Absolute Mode */ ++ plat->leos = 0; /* Launch Expiry Offset */ ++ plat->legos = 0; /* Launch Expiry GSN Offset */ ++ plat->ftos = 0; /* Fetch Time Offset */ ++ plat->fgos = 0; /* Fetch GSN Offset */ + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 1f667368edfa..e47ec4ebc002 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -206,5 +206,11 @@ struct plat_stmmacenet_data { + u32 ptov; + u32 ctov; + u32 tils; ++ /* TBS */ ++ u32 estm; ++ u32 leos; ++ u32 legos; ++ u32 ftos; ++ u32 fgos; + }; + #endif +-- +2.17.1 + diff --git a/patches/0050-trusty-Update-dependency-of-trusty-module.trusty b/patches/0050-trusty-Update-dependency-of-trusty-module.trusty new file mode 100644 index 0000000000..a5f55c4bb2 --- /dev/null +++ b/patches/0050-trusty-Update-dependency-of-trusty-module.trusty @@ -0,0 +1,30 @@ +From 547e9255b9fab9cf0c140d3371efdf111ceee1d3 Mon Sep 17 00:00:00 2001 +From: "Qi, Yadong" +Date: Fri, 23 Feb 2018 14:12:07 +0800 +Subject: [PATCH 50/63] trusty: Update dependency of trusty module + +Trusty is supported only for x86_64 arch. +Modify Kconfig to make it depends on x86_64. 
+ +Change-Id: Ia52a8ba05f2de3d423e070a53e7368901b20ada7 +Signed-off-by: Qi, Yadong +--- + drivers/trusty/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index 7d26922ed84c..a230dad0434d 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -6,7 +6,7 @@ menu "Trusty" + + config TRUSTY + tristate "Trusty" +- depends on X86 ++ depends on X86_64 + default n + + config TRUSTY_FIQ +-- +2.17.1 + diff --git a/patches/0051-ASoC-Intel-Skylake-Probe-module-init-and-delete-inte.audio b/patches/0051-ASoC-Intel-Skylake-Probe-module-init-and-delete-inte.audio new file mode 100644 index 0000000000..d83d08bf55 --- /dev/null +++ b/patches/0051-ASoC-Intel-Skylake-Probe-module-init-and-delete-inte.audio @@ -0,0 +1,152 @@ +From 5f34c13e9e5f7c959ebe9596b44c49712b406daa Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 18:38:49 +0100 +Subject: [PATCH 051/193] ASoC: Intel: Skylake: Probe module init and delete + interface +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Probe is a loadable, standalone module. Being designed as standalone, it +has no parent pipeline assigned, thus its instance must be explicity +deleted by sending Delete Instance IPC request. 
+ +Change-Id: I15c45e282d051b2320cdce32c843b7e1e4383b52 +Signed-off-by: Cezary Rojewski +Signed-off-by: Amadeusz Sławiński +Reviewed-on: +Tested-by: gkblditp +Reviewed-by: Rojewski, Cezary +--- + sound/soc/intel/skylake/skl-messages.c | 44 ++++++++++++++++++++++++++ + sound/soc/intel/skylake/skl-sst-ipc.h | 15 +++++++++ + sound/soc/intel/skylake/skl.h | 4 +++ + 3 files changed, 63 insertions(+) + +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c +index d18d95eee130..f14a8aa3d316 100644 +--- a/sound/soc/intel/skylake/skl-messages.c ++++ b/sound/soc/intel/skylake/skl-messages.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include "skl-sst-dsp.h" + #include "cnl-sst-dsp.h" +@@ -22,6 +23,8 @@ + #include "../common/sst-dsp-priv.h" + #include "skl-topology.h" + ++#define INVALID_PIPELINE_ID 0xFF ++ + int skl_alloc_dma_buf(struct device *dev, + struct snd_dma_buffer *dmab, size_t size) + { +@@ -1233,6 +1236,47 @@ int skl_get_module_params(struct skl_dev *skl, u32 *params, int size, + return skl_ipc_get_large_config(&skl->ipc, &msg, ¶ms, &bytes); + } + ++int skl_probe_init_module(struct skl_dev *skl, size_t buffer_size) ++{ ++ struct skl_ipc_init_instance_msg msg = {0}; ++ struct skl_probe_mod_cfg cfg = {{0}}; ++ union skl_connector_node_id node_id = {0}; ++ struct skl_module_entry *m = skl_find_module(skl, &skl_probe_mod_uuid); ++ ++ if (skl->extractor) { ++ node_id.node.vindex = skl->extractor->hstream.stream_tag - 1; ++ node_id.node.dma_type = SKL_DMA_HDA_HOST_INPUT_CLASS; ++ } else { ++ node_id = INVALID_NODE_ID; ++ } ++ ++ /* ++ * Probe module uses no cycles, audio data format ++ * and input and output frame sizes are unused. 
++ */ ++ cfg.base_cfg.ibs = 1; ++ cfg.base_cfg.is_pages = ++ DIV_ROUND_UP(m->segments[2].flags.length, PAGE_SIZE); ++ cfg.gtw_cfg.node_id = node_id; ++ cfg.gtw_cfg.dma_buffer_size = buffer_size; ++ ++ msg.module_id = m->module_id; ++ msg.instance_id = 0; ++ msg.param_data_size = sizeof(cfg); ++ msg.ppl_instance_id = INVALID_PIPELINE_ID; ++ msg.core_id = 0; ++ msg.domain = 0; ++ ++ return skl_ipc_init_instance(&skl->ipc, &msg, &cfg); ++} ++ ++int skl_probe_delete_module(struct skl_dev *skl) ++{ ++ unsigned int module_id = skl_get_module_id(skl, &skl_probe_mod_uuid); ++ ++ return skl_ipc_delete_instance(&skl->ipc, module_id, 0); ++} ++ + int skl_probe_get_dma(struct skl_dev *skl, + struct skl_probe_dma **dma, size_t *num_dma) + { +diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h +index 205f93e6f207..76170fea9da7 100644 +--- a/sound/soc/intel/skylake/skl-sst-ipc.h ++++ b/sound/soc/intel/skylake/skl-sst-ipc.h +@@ -25,6 +25,9 @@ union skl_connector_node_id { + } node; + }; + ++#define INVALID_NODE_ID \ ++ ((union skl_connector_node_id) { UINT_MAX }) ++ + enum skl_channel_index { + SKL_CHANNEL_LEFT = 0, + SKL_CHANNEL_RIGHT = 1, +@@ -146,6 +149,16 @@ static const guid_t skl_probe_mod_uuid = + GUID_INIT(0x7CAD0808, 0xAB10, 0xCD23, 0xEF, 0x45, + 0x12, 0xAB, 0x34, 0xCD, 0x56, 0xEF); + ++struct skl_probe_gtw_cfg { ++ union skl_connector_node_id node_id; ++ u32 dma_buffer_size; ++} __packed; ++ ++struct skl_probe_mod_cfg { ++ struct skl_base_cfg base_cfg; ++ struct skl_probe_gtw_cfg gtw_cfg; ++} __packed; ++ + enum skl_probe_runtime_param { + SKL_PROBE_INJECTION_DMA = 1, + SKL_PROBE_INJECTION_DMA_DETACH, +@@ -459,6 +472,8 @@ void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data, + int skl_ipc_fw_cfg_get(struct sst_generic_ipc *ipc, struct skl_fw_cfg *cfg); + int skl_ipc_hw_cfg_get(struct sst_generic_ipc *ipc, struct skl_hw_cfg *cfg); + ++int skl_probe_init_module(struct skl_dev *skl, size_t buffer_size); ++int 
skl_probe_delete_module(struct skl_dev *skl); + int skl_probe_get_dma(struct skl_dev *skl, + struct skl_probe_dma **dma, size_t *num_dma); + int skl_probe_dma_attach(struct skl_dev *skl, +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index a7401d178183..e0fc736ad619 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -135,6 +135,10 @@ struct skl_dev { + + /* Callback to update dynamic clock and power gating registers */ + void (*clock_power_gating)(struct device *dev, bool enable); ++ ++ /* probe stream management */ ++ struct hdac_ext_stream *extractor; ++ unsigned int num_probe_streams; + }; + + #define skl_to_bus(s) (&(s)->hbus.core) +-- +2.17.1 + diff --git a/patches/0051-drm-i915-tgl-Do-not-apply-WaIncreaseDefaultTLBEntries-.drm b/patches/0051-drm-i915-tgl-Do-not-apply-WaIncreaseDefaultTLBEntries-.drm new file mode 100644 index 0000000000..c8a1a2e33b --- /dev/null +++ b/patches/0051-drm-i915-tgl-Do-not-apply-WaIncreaseDefaultTLBEntries-.drm @@ -0,0 +1,34 @@ +From 8909137fc1c866e9032edfd86e9bfb2e38f01c82 Mon Sep 17 00:00:00 2001 +From: Michel Thierry +Date: Fri, 23 Aug 2019 01:20:48 -0700 +Subject: [PATCH 051/690] drm/i915/tgl: Do not apply + WaIncreaseDefaultTLBEntries from GEN12 onwards + +Workaround no longer needed (plus L3_LRA_1_GPGPU doesn't exist). 
+ +Cc: Daniele Ceraolo Spurio +Cc: Michal Wajdeczko +Signed-off-by: Michel Thierry +Signed-off-by: Lucas De Marchi +Reviewed-by: Stuart Summers +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-17-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c +index 0db82921fb38..c94dfa562247 100644 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c +@@ -2029,7 +2029,7 @@ static void gtt_write_workarounds(struct intel_gt *gt) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); +- else if (INTEL_GEN(i915) >= 9) ++ else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); +-- +2.17.1 + diff --git a/patches/0051-mei-fix-queue-flush-with-non-empty-vtag-list.security b/patches/0051-mei-fix-queue-flush-with-non-empty-vtag-list.security new file mode 100644 index 0000000000..fd7f86a3d2 --- /dev/null +++ b/patches/0051-mei-fix-queue-flush-with-non-empty-vtag-list.security @@ -0,0 +1,32 @@ +From d1690c75c678533ecfe08dae7e8300e637bf3e31 Mon Sep 17 00:00:00 2001 +From: Aviad Nissel +Date: Sun, 16 Dec 2018 14:16:11 +0200 +Subject: [PATCH 51/65] mei: fix queue flush with non empty vtag list + +Change-Id: Ie2e930d4033ee17f1920dfc4619be0fe3cde4119 +Signed-off-by: Aviad Nissel +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/main.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index 888d804a90de..d6ed85b04b9b 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -125,6 +125,12 @@ static int mei_release(struct inode *inode, struct file *file) + } + + rets = mei_cl_disconnect(cl); ++ /* Check again: This is necessary since disconnect releases the lock. 
*/ ++ if (!list_empty(&cl->vtag_map)) { ++ cl_dbg(dev, cl, "not the last vtag after disconnect\n"); ++ mei_cl_flush_queues(cl, file); ++ goto out; ++ } + + mei_cl_flush_queues(cl, NULL); + cl_dbg(dev, cl, "removing\n"); +-- +2.17.1 + diff --git a/patches/0051-net-stmmac-introduce-IEEE-802.1Qbu-implementa.connectivity b/patches/0051-net-stmmac-introduce-IEEE-802.1Qbu-implementa.connectivity new file mode 100644 index 0000000000..3806ff9547 --- /dev/null +++ b/patches/0051-net-stmmac-introduce-IEEE-802.1Qbu-implementa.connectivity @@ -0,0 +1,646 @@ +From 6f1c597fc8b3ffd365b2ce1da75d8be0cdddcbb9 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Sat, 10 Aug 2019 06:54:21 +0800 +Subject: [PATCH 051/108] net: stmmac: introduce IEEE 802.1Qbu implementation + +The following functionalities are added:- + +PEC is meant to specify which TxQ is preemptible queue. By default, +TxQ0 must NOT be express queue. + +pMAC STS is to indicate whether preemptible MAC is in either Hold +or Release state. + +The newly added IEEE 802.1Qbu functionalities are also hooked into +the driver main logics: +a) stmmac_dvr_probe: to enable/disable FPE feature according to platform + data setting. +b) stmmac_hw_setup: to include FPRQ (frame preemption residue queue) + setting, i.e. default RxQ (none RxQ0 - Express Frame default RxQ) + to receive preemptible frame that is not steered to any other RxQs. 
+ +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 4 + + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 19 +++ + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 73 +++++++- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 41 ++++- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 7 +- + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 3 + + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 158 +++++++++++++++--- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 22 ++- + include/linux/stmmac.h | 3 + + 9 files changed, 306 insertions(+), 24 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 454ed0d1e31b..286c66883551 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -1193,6 +1193,10 @@ const struct stmmac_ops dwmac510_ops = { + .est_irq_status = tsn_est_irq_status, + .dump_tsn_mmc = tsn_mmc_dump, + .cbs_recal_idleslope = tsn_cbs_recal_idleslope, ++ .fpe_set_txqpec = tsn_fpe_set_txqpec, ++ .fpe_set_enable = tsn_fpe_set_enable, ++ .fpe_get_config = tsn_fpe_get_config, ++ .fpe_show_pmac_sts = tsn_fpe_show_pmac_sts, + }; + + static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index d222599b1896..f3dbb98027c9 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -88,6 +88,7 @@ + + /* MAC HW features3 bitmap */ + #define GMAC_HW_FEAT_TBSSEL BIT(27) ++#define GMAC_HW_FEAT_FPESEL BIT(26) + #define GMAC_HW_FEAT_ESTWID GENMASK(21, 20) + #define GMAC_HW_FEAT_ESTWID_SHIFT 20 + #define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17) +@@ -165,6 +166,24 @@ + /* CBS Global defines */ + #define CBS_IDLESLOPE_MAX 0x1fffff + ++/* FPRQ only available in EQoS ver5.00 MAC_RxQ_Ctrl1 */ ++#define GMAC_RXQCTRL_FPRQ_MASK GENMASK(26, 24) /* FPE Residue 
Queue */ ++#define GMAC_RXQCTRL_FPRQ_SHIFT 24 ++ ++/* MAC FPE control status */ ++#define MAC_FPE_CTRL_STS 0x00000234 ++#define MAC_FPE_CTRL_STS_EFPE BIT(0) ++ ++/* MTL FPE control status */ ++#define MTL_FPE_CTRL_STS 0x00000c90 ++#define MTL_FPE_CTRL_STS_HRS BIT(28) /* Hold/Release Status */ ++#define MTL_FPE_CTRL_STS_HRS_SHIFT 28 ++#define MTL_FPE_CTRL_STS_PEC GENMASK(15, 8) /* FPE Classification */ ++#define MTL_FPE_CTRL_STS_PEC_SHIFT 8 ++ ++/* FPE Global defines */ ++#define FPE_PMAC_BIT BIT(0) /* TxQ0 is always preemptible */ ++ + /* DMA Tx Channel X Control register TBS bits defines */ + #define DMA_CONTROL_EDSE BIT(28) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index 8bd5a3ee695f..826e623350bf 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -78,6 +78,8 @@ static bool dwmac5_has_tsn_cap(void __iomem *ioaddr, enum tsn_feat_id featid) + switch (featid) { + case TSN_FEAT_ID_EST: + return (hw_cap3 & GMAC_HW_FEAT_ESTSEL); ++ case TSN_FEAT_ID_FPE: ++ return (hw_cap3 & GMAC_HW_FEAT_FPESEL); + case TSN_FEAT_ID_TBS: + return (hw_cap3 & GMAC_HW_FEAT_TBSSEL); + default: +@@ -85,7 +87,8 @@ static bool dwmac5_has_tsn_cap(void __iomem *ioaddr, enum tsn_feat_id featid) + }; + } + +-static void dwmac5_hw_setup(void __iomem *ioaddr, enum tsn_feat_id featid) ++static void dwmac5_hw_setup(void __iomem *ioaddr, enum tsn_feat_id featid, ++ u32 fprq) + { + u32 value; + +@@ -97,6 +100,12 @@ static void dwmac5_hw_setup(void __iomem *ioaddr, enum tsn_feat_id featid) + MTL_EST_INT_EN_IECC); + writel(value, ioaddr + MTL_EST_INT_EN); + break; ++ case TSN_FEAT_ID_FPE: ++ /* Update FPRQ */ ++ value = readl(ioaddr + GMAC_RXQ_CTRL1); ++ value &= ~GMAC_RXQCTRL_FPRQ_MASK; ++ value |= fprq << GMAC_RXQCTRL_FPRQ_SHIFT; ++ writel(value, ioaddr + GMAC_RXQ_CTRL1); + default: + return; + }; +@@ -167,6 +176,13 @@ static u32 dwmac5_est_get_txqcnt(void 
__iomem *ioaddr) + return ((hw_cap2 & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; + } + ++static u32 dwmac5_est_get_rxqcnt(void __iomem *ioaddr) ++{ ++ u32 hw_cap2 = readl(ioaddr + GMAC_HW_FEATURE2); ++ ++ return (hw_cap2 & GMAC_HW_FEAT_RXQCNT) + 1; ++} ++ + static void dwmac5_est_get_max(u32 *ptov_max, + u32 *ctov_max, + u32 *cycle_max, +@@ -403,6 +419,55 @@ int dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + return status; + } + ++static void dwmac5_fpe_get_info(u32 *pmac_bit) ++{ ++ *pmac_bit = FPE_PMAC_BIT; ++} ++ ++static void dwmac5_fpe_set_txqpec(void *ioaddr, u32 txqpec, u32 txqmask) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_FPE_CTRL_STS); ++ value &= ~(txqmask << MTL_FPE_CTRL_STS_PEC_SHIFT); ++ value |= (txqpec << MTL_FPE_CTRL_STS_PEC_SHIFT); ++ ++ writel(value, ioaddr + MTL_FPE_CTRL_STS); ++} ++ ++static void dwmac5_fpe_set_enable(void *ioaddr, bool enable) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MAC_FPE_CTRL_STS); ++ if (enable) ++ value |= MAC_FPE_CTRL_STS_EFPE; ++ else ++ value &= ~MAC_FPE_CTRL_STS_EFPE; ++ ++ writel(value, ioaddr + MAC_FPE_CTRL_STS); ++} ++ ++void dwmac5_fpe_get_config(void *ioaddr, u32 *txqpec, bool *enable) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_FPE_CTRL_STS); ++ *txqpec = (value & MTL_FPE_CTRL_STS_PEC) >> ++ MTL_FPE_CTRL_STS_PEC_SHIFT; ++ ++ value = readl(ioaddr + MAC_FPE_CTRL_STS); ++ *enable = (bool)(value & MAC_FPE_CTRL_STS_EFPE); ++} ++ ++void dwmac5_fpe_get_pmac_sts(void *ioaddr, u32 *hrs) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_FPE_CTRL_STS); ++ *hrs = (value & MTL_FPE_CTRL_STS_HRS) >> MTL_FPE_CTRL_STS_HRS_SHIFT; ++} ++ + static void dwmac5_tbs_get_max(u32 *leos_max, + u32 *legos_max, + u32 *ftos_max, +@@ -521,6 +586,7 @@ const struct tsnif_ops dwmac510_tsnif_ops = { + .est_get_gcl_depth = dwmac5_est_get_gcl_depth, + .est_get_ti_width = dwmac5_est_get_ti_width, + .est_get_txqcnt = dwmac5_est_get_txqcnt, ++ .est_get_rxqcnt = dwmac5_est_get_rxqcnt, + .est_get_max = 
dwmac5_est_get_max, + .est_write_gcl_config = dwmac5_est_write_gcl_config, + .est_read_gcl_config = dwmac5_est_read_gcl_config, +@@ -533,6 +599,11 @@ const struct tsnif_ops dwmac510_tsnif_ops = { + .est_get_bank = dwmac5_est_get_bank, + .est_switch_swol = dwmac5_est_switch_swol, + .est_irq_status = dwmac5_est_irq_status, ++ .fpe_get_info = dwmac5_fpe_get_info, ++ .fpe_set_txqpec = dwmac5_fpe_set_txqpec, ++ .fpe_set_enable = dwmac5_fpe_set_enable, ++ .fpe_get_config = dwmac5_fpe_get_config, ++ .fpe_get_pmac_sts = dwmac5_fpe_get_pmac_sts, + .tbs_get_max = dwmac5_tbs_get_max, + .tbs_set_estm = dwmac5_tbs_set_estm, + .tbs_set_leos = dwmac5_tbs_set_leos, +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index a4714bcc2c50..2097aaca3fbd 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -405,7 +405,7 @@ struct stmmac_ops { + bool (*has_tsn_feat)(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid); + void (*setup_tsn_hw)(struct mac_device_info *hw, +- struct net_device *dev); ++ struct net_device *dev, u32 fprq); + int (*set_tsn_hwtunable)(struct mac_device_info *hw, + struct net_device *dev, + enum tsn_hwtunable_id id, +@@ -440,6 +440,15 @@ struct stmmac_ops { + struct net_device *dev, + u32 queue, + u32 *idle_slope); ++ int (*fpe_set_txqpec)(struct mac_device_info *hw, ++ struct net_device *dev, u32 txqpec); ++ int (*fpe_set_enable)(struct mac_device_info *hw, ++ struct net_device *dev, bool enable); ++ int (*fpe_get_config)(struct mac_device_info *hw, ++ struct net_device *dev, u32 *txqpec, ++ bool *enable); ++ int (*fpe_show_pmac_sts)(struct mac_device_info *hw, ++ struct net_device *dev); + }; + + #define stmmac_core_init(__priv, __args...) \ +@@ -570,6 +579,14 @@ struct stmmac_ops { + stmmac_do_callback(__priv, mac, dump_tsn_mmc, __args) + #define stmmac_cbs_recal_idleslope(__priv, __args...) 
\ + stmmac_do_callback(__priv, mac, cbs_recal_idleslope, __args) ++#define stmmac_fpe_set_txqpec(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, fpe_set_txqpec, __args) ++#define stmmac_fpe_set_enable(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, fpe_set_enable, __args) ++#define stmmac_fpe_get_config(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, fpe_get_config, __args) ++#define stmmac_fpe_show_pmac_sts(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, fpe_show_pmac_sts, __args) + + /* Helpers for serdes */ + struct stmmac_serdes_ops { +@@ -742,11 +759,13 @@ struct tsn_mmc_stat; + struct tsnif_ops { + u32 (*read_hwid)(void __iomem *ioaddr); + bool (*has_tsn_cap)(void __iomem *ioaddr, enum tsn_feat_id featid); +- void (*hw_setup)(void __iomem *ioaddr, enum tsn_feat_id featid); ++ void (*hw_setup)(void __iomem *ioaddr, enum tsn_feat_id featid, ++ u32 fprq); + /* IEEE 802.1Qbv Enhanced Scheduled Traffics (EST) */ + u32 (*est_get_gcl_depth)(void __iomem *ioaddr); + u32 (*est_get_ti_width)(void __iomem *ioaddr); + u32 (*est_get_txqcnt)(void __iomem *ioaddr); ++ u32 (*est_get_rxqcnt)(void __iomem *ioaddr); + void (*est_get_max)(u32 *ptov_max, u32 *ctov_max, u32 *ct_max, + u32 *idleslope_max); + int (*est_write_gcl_config)(void __iomem *ioaddr, u32 data, u32 addr, +@@ -769,6 +788,12 @@ struct tsnif_ops { + int (*est_irq_status)(void *ioaddr, struct net_device *dev, + struct tsn_mmc_stat *mmc_stat, + unsigned int txqcnt); ++ /* Frame Preemption (FPE) */ ++ void (*fpe_get_info)(u32 *pmac_bit); ++ void (*fpe_set_txqpec)(void *ioaddr, u32 txqpec, u32 txqmask); ++ void (*fpe_set_enable)(void *ioaddr, bool enable); ++ void (*fpe_get_config)(void *ioaddr, u32 *txqpec, bool *enable); ++ void (*fpe_get_pmac_sts)(void *ioaddr, u32 *hrs); + /* Time-Based Scheduling (TBS) */ + void (*tbs_get_max)(u32 *leos_max, u32 *legos_max, + u32 *ftos_max, u32 *fgos_max); +@@ -795,6 +820,8 @@ struct tsnif_ops { + tsnif_do_callback(__hw, est_get_ti_width, __args) 
+ #define tsnif_est_get_txqcnt(__hw, __args...) \ + tsnif_do_callback(__hw, est_get_txqcnt, __args) ++#define tsnif_est_get_rxqcnt(__hw, __args...) \ ++ tsnif_do_callback(__hw, est_get_rxqcnt, __args) + #define tsnif_est_get_max(__hw, __args...) \ + tsnif_do_void_callback(__hw, est_get_max, __args) + #define tsnif_est_write_gcl_config(__hw, __args...) \ +@@ -819,6 +846,16 @@ struct tsnif_ops { + tsnif_do_void_callback(__hw, est_switch_swol, __args) + #define tsnif_est_irq_status(__hw, __args...) \ + tsnif_do_callback(__hw, est_irq_status, __args) ++#define tsnif_fpe_get_info(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_get_info, __args) ++#define tsnif_fpe_set_txqpec(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_set_txqpec, __args) ++#define tsnif_fpe_set_enable(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_set_enable, __args) ++#define tsnif_fpe_get_config(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_get_config, __args) ++#define tsnif_fpe_get_pmac_sts(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_get_pmac_sts, __args) + #define tsnif_tbs_get_max(__hw, __args...) \ + tsnif_do_void_callback(__hw, tbs_get_max, __args) + #define tsnif_tbs_set_estm(__hw, __args...) 
\ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index b1b33314ab79..0dcf970ffd82 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2725,7 +2725,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) + /* Set HW VLAN stripping mode */ + stmmac_set_hw_vlan_mode(priv, priv->ioaddr, dev->features); + +- stmmac_tsn_hw_setup(priv, priv->hw, priv->dev); ++ stmmac_tsn_hw_setup(priv, priv->hw, priv->dev, priv->plat->fprq); + + /* Set TSN HW tunable */ + if (priv->plat->ptov) +@@ -5143,6 +5143,11 @@ int stmmac_dvr_probe(struct device *device, + true); + dev_info(priv->device, "EST feature enabled\n"); + } ++ if (priv->hw->tsn_info.cap.fpe_support && priv->plat->tsn_fpe_en) { ++ stmmac_set_tsn_feat(priv, priv->hw, ndev, TSN_FEAT_ID_FPE, ++ true); ++ dev_info(priv->device, "FPE feature enabled\n"); ++ } + if (priv->hw->tsn_info.cap.tbs_support && priv->plat->tsn_tbs_en) { + stmmac_set_tsn_feat(priv, priv->hw, ndev, TSN_FEAT_ID_TBS, + true); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index c0258bf17fec..eadf3359505d 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -162,7 +162,10 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + plat->force_sf_dma_mode = 0; + plat->tso_en = 1; + plat->tsn_est_en = 1; ++ plat->tsn_fpe_en = 1; + plat->tsn_tbs_en = 1; ++ /* FPE HW Tunable */ ++ plat->fprq = 1; + /* TBS HW Tunable */ + plat->estm = 0; /* Absolute Mode */ + plat->leos = 0; /* Launch Expiry Offset */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index a8f4aa430636..799face82466 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ 
-94,15 +94,7 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + if (!tsnif_has_tsn_cap(hw, ioaddr, TSN_FEAT_ID_EST)) { + dev_info(pdev, "EST NOT supported\n"); + cap->est_support = 0; +- return 0; +- } +- +- if (!tsnif_has_tsn_cap(hw, ioaddr, TSN_FEAT_ID_TBS)) { +- dev_info(pdev, "TBS NOT supported\n"); +- cap->tbs_support = 0; +- } else { +- dev_info(pdev, "TBS capable\n"); +- cap->tbs_support = 1; ++ goto check_fpe; + } + + gcl_depth = tsnif_est_get_gcl_depth(hw, ioaddr); +@@ -150,19 +142,42 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + + cap->est_support = 1; + +- tsnif_tbs_get_max(hw, &cap->leos_max, &cap->legos_max, +- &cap->ftos_max, &cap->fgos_max); +- + dev_info(pdev, "EST: depth=%u, ti_wid=%u, ter_max=%uns, tils_max=%u, tqcnt=%u\n", + gcl_depth, ti_wid, cap->ext_max, tils_max, cap->txqcnt); + +- if (cap->tbs_support) { +- dev_info(pdev, "TBS: leos_max=%u, legos_max=%u\n", +- cap->leos_max, cap->legos_max); +- dev_info(pdev, "TBS: ftos_max=%u, fgos_max=%u\n", +- cap->ftos_max, cap->fgos_max); ++check_fpe: ++ if (!tsnif_has_tsn_cap(hw, ioaddr, TSN_FEAT_ID_FPE)) { ++ dev_info(pdev, "FPE NOT supported\n"); ++ cap->fpe_support = 0; ++ goto check_tbs; ++ } ++ ++ tsnif_fpe_get_info(hw, &cap->pmac_bit); ++ cap->rxqcnt = tsnif_est_get_rxqcnt(hw, ioaddr); ++ cap->fpe_support = 1; ++ ++ dev_info(pdev, "FPE: pMAC Bit=0x%x\n", cap->pmac_bit); ++ ++check_tbs: ++ if (!tsnif_has_tsn_cap(hw, ioaddr, TSN_FEAT_ID_TBS)) { ++ dev_info(pdev, "TBS NOT supported\n"); ++ cap->tbs_support = 0; ++ goto scan_done; ++ } else { ++ dev_info(pdev, "TBS capable\n"); ++ cap->tbs_support = 1; + } + ++ tsnif_tbs_get_max(hw, &cap->leos_max, &cap->legos_max, ++ &cap->ftos_max, &cap->fgos_max); ++ ++ dev_info(pdev, "TBS: leos_max=%u, legos_max=%u\n", ++ cap->leos_max, cap->legos_max); ++ dev_info(pdev, "TBS: ftos_max=%u, fgos_max=%u\n", ++ cap->ftos_max, cap->fgos_max); ++ ++scan_done: ++ + return 0; + } + +@@ -194,12 +209,25 @@ bool 
tsn_has_feat(struct mac_device_info *hw, struct net_device *dev, + * stmmac_init_dma_engine() which resets MAC controller. + * This is so-that MAC registers are not cleared. + */ +-void tsn_hw_setup(struct mac_device_info *hw, struct net_device *dev) ++void tsn_hw_setup(struct mac_device_info *hw, struct net_device *dev, ++ u32 fprq) + { ++ struct tsnif_info *info = &hw->tsn_info; ++ struct tsn_hw_cap *cap = &info->cap; + void __iomem *ioaddr = hw->pcsr; + + if (tsn_has_feat(hw, dev, TSN_FEAT_ID_EST)) +- tsnif_hw_setup(hw, ioaddr, TSN_FEAT_ID_EST); ++ tsnif_hw_setup(hw, ioaddr, TSN_FEAT_ID_EST, 0); ++ ++ if (tsn_has_feat(hw, dev, TSN_FEAT_ID_FPE)) { ++ /* RxQ0 default to Express Frame, FPRQ != RxQ0 */ ++ if (fprq > 0 && fprq < cap->rxqcnt) { ++ netdev_info(dev, "FPE: Set FPRQ = %d\n", fprq); ++ tsnif_hw_setup(hw, ioaddr, TSN_FEAT_ID_FPE, fprq); ++ } else { ++ netdev_warn(dev, "FPE: FPRQ is out-of-bound.\n"); ++ } ++ } + } + + int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, +@@ -988,3 +1016,95 @@ int tsn_cbs_recal_idleslope(struct mac_device_info *hw, struct net_device *dev, + + return 0; + } ++ ++int tsn_fpe_set_txqpec(struct mac_device_info *hw, struct net_device *dev, ++ u32 txqpec) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ struct tsn_hw_cap *cap = &info->cap; ++ void __iomem *ioaddr = hw->pcsr; ++ u32 txqmask; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_FPE)) { ++ netdev_info(dev, "FPE: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ /* Check PEC is within TxQ range */ ++ txqmask = (1 << cap->txqcnt) - 1; ++ if (txqpec & ~txqmask) { ++ netdev_warn(dev, "FPE: Tx PEC is out-of-bound.\n"); ++ ++ return -EINVAL; ++ } ++ ++ /* When EST and FPE are both enabled, TxQ0 is always preemptible ++ * queue. If FPE is enabled, we expect at least lsb is set. ++ * If FPE is not enabled, we should allow PEC = 0. 
++ */ ++ if (txqpec && !(txqpec & cap->pmac_bit) && info->est_gcc.enable) { ++ netdev_warn(dev, "FPE: TxQ0 must not be express queue.\n"); ++ ++ return -EINVAL; ++ } ++ ++ tsnif_fpe_set_txqpec(hw, ioaddr, txqpec, txqmask); ++ info->fpe_cfg.txqpec = txqpec; ++ netdev_info(dev, "FPE: TxQ PEC = 0x%x\n", txqpec); ++ ++ return 0; ++} ++ ++int tsn_fpe_set_enable(struct mac_device_info *hw, struct net_device *dev, ++ bool enable) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ void __iomem *ioaddr = hw->pcsr; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_FPE)) { ++ netdev_info(dev, "FPE: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ if (info->fpe_cfg.enable != enable) { ++ tsnif_fpe_set_enable(hw, ioaddr, enable); ++ info->fpe_cfg.enable = enable; ++ } ++ ++ return 0; ++} ++ ++int tsn_fpe_get_config(struct mac_device_info *hw, struct net_device *dev, ++ u32 *txqpec, bool *enable) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_FPE)) { ++ netdev_info(dev, "FPE: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ tsnif_fpe_get_config(hw, ioaddr, txqpec, enable); ++ ++ return 0; ++} ++ ++int tsn_fpe_show_pmac_sts(struct mac_device_info *hw, struct net_device *dev) ++{ ++ void __iomem *ioaddr = hw->pcsr; ++ u32 hrs; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_FPE)) { ++ netdev_info(dev, "FPE: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ tsnif_fpe_get_pmac_sts(hw, ioaddr, &hrs); ++ ++ if (hrs) ++ netdev_info(dev, "FPE: pMAC is in Hold state.\n"); ++ else ++ netdev_info(dev, "FPE: pMAC is in Release state.\n"); ++ ++ return 0; ++} +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index dd366db0c8c8..5651633e20ed 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -27,6 +27,7 @@ enum tsn_hwtunable_id { + /* TSN Feature Enabled List */ + enum tsn_feat_id { + TSN_FEAT_ID_EST = 0, 
++ TSN_FEAT_ID_FPE = 1, + TSN_FEAT_ID_TBS = 2, + TSN_FEAT_ID_MAX, + }; +@@ -49,8 +50,10 @@ enum tsn_gcl_param_idx { + /* TSN HW Capabilities */ + struct tsn_hw_cap { + bool est_support; /* 1: supported */ ++ bool fpe_support; /* 1: supported */ + bool tbs_support; /* 1: supported */ + u32 txqcnt; /* Number of TxQ (control gate) */ ++ u32 rxqcnt; /* Number of RxQ (for FPRQ) */ + u32 gcl_depth; /* GCL depth. */ + u32 ti_wid; /* time interval width */ + u32 ext_max; /* Max time extension */ +@@ -59,6 +62,8 @@ struct tsn_hw_cap { + u32 ptov_max; /* Max PTP Offset */ + u32 ctov_max; /* Max Current Time Offset */ + u32 idleslope_max; /* Max idle slope */ ++ /* FPE */ ++ u32 pmac_bit; /* Preemptible MAC bit */ + u32 leos_max; /* Launch Expiry Offset */ + u32 legos_max; /* Launch Expiry GSN Offset */ + u32 ftos_max; /* Max Fetch Time Offset */ +@@ -95,6 +100,12 @@ struct est_gc_config { + bool enable; /* 1: enabled */ + }; + ++/* FPE Configuration */ ++struct fpe_config { ++ u32 txqpec; /* TxQ Preemption Classification */ ++ bool enable; /* 1: enabled */ ++}; ++ + /* TSN MMC Statistics */ + struct tsn_mmc_desc { + bool valid; +@@ -110,6 +121,7 @@ struct tsnif_info { + bool feat_en[TSN_FEAT_ID_MAX]; + u32 hwtunable[TSN_HWTUNA_MAX]; + struct est_gc_config est_gcc; ++ struct fpe_config fpe_cfg; + struct tsn_mmc_stat mmc_stat; + const struct tsn_mmc_desc *mmc_desc; + }; +@@ -122,7 +134,8 @@ int tsn_feat_set(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid, bool enable); + bool tsn_has_feat(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid); +-void tsn_hw_setup(struct mac_device_info *hw, struct net_device *dev); ++void tsn_hw_setup(struct mac_device_info *hw, struct net_device *dev, ++ u32 fprq); + int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + enum tsn_hwtunable_id id, const u32 data); + int tsn_hwtunable_get(struct mac_device_info *hw, struct net_device *dev, +@@ -149,5 +162,12 @@ int 
tsn_mmc_dump(struct mac_device_info *hw, + int index, unsigned long *count, const char **desc); + int tsn_cbs_recal_idleslope(struct mac_device_info *hw, struct net_device *dev, + u32 queue, u32 *idle_slope); ++int tsn_fpe_set_txqpec(struct mac_device_info *hw, struct net_device *dev, ++ u32 txqpec); ++int tsn_fpe_set_enable(struct mac_device_info *hw, struct net_device *dev, ++ bool enable); ++int tsn_fpe_get_config(struct mac_device_info *hw, struct net_device *dev, ++ u32 *txqpec, bool *enable); ++int tsn_fpe_show_pmac_sts(struct mac_device_info *hw, struct net_device *dev); + + #endif /* __STMMAC_TSN_H__ */ +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index e47ec4ebc002..01d23591bec7 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -187,6 +187,7 @@ struct plat_stmmacenet_data { + bool tso_en; + int rss_en; + bool tsn_est_en; ++ bool tsn_fpe_en; + bool tsn_tbs_en; + int mac_port_sel_speed; + bool en_tx_lpi_clockgating; +@@ -206,6 +207,8 @@ struct plat_stmmacenet_data { + u32 ptov; + u32 ctov; + u32 tils; ++ /*FPE */ ++ u32 fprq; + /* TBS */ + u32 estm; + u32 leos; +-- +2.17.1 + diff --git a/patches/0051-sos-sync-common-header-file.acrn b/patches/0051-sos-sync-common-header-file.acrn new file mode 100644 index 0000000000..440c9e4d24 --- /dev/null +++ b/patches/0051-sos-sync-common-header-file.acrn @@ -0,0 +1,252 @@ +From 2fe7c701fbf45246d61feeff961cb349190df46a Mon Sep 17 00:00:00 2001 +From: Mingqiang Chi +Date: Fri, 31 Aug 2018 10:59:00 +0800 +Subject: [PATCH 051/150] sos: sync common header file + +sync common header file (acrn_common.h) + +Change-Id: I5d236b89f0799c788dca652ac0ebeb729e20e40c +Signed-off-by: Mingqiang Chi +Reviewed-on: +--- + include/linux/vhm/acrn_common.h | 169 ++++++++++++++++++++++++++------ + 1 file changed, 138 insertions(+), 31 deletions(-) + +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 7a49d4d59744..3a87636bc824 100644 +--- 
a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -80,6 +80,16 @@ + #define REQUEST_READ 0 + #define REQUEST_WRITE 1 + ++/* Generic VM flags from guest OS */ ++#define SECURE_WORLD_ENABLED (1UL<<0) /* Whether secure world is enabled */ ++ ++/** ++ * @brief Hypercall ++ * ++ * @addtogroup acrn_hypercall ACRN Hypercall ++ * @{ ++ */ ++ + struct mmio_request { + uint32_t direction; + uint32_t reserved; +@@ -145,66 +155,149 @@ struct vhm_request_buffer { + }; + } __attribute__((aligned(4096))); + +-/* Common API params */ ++/** ++ * @brief Info to create a VM, the parameter for HC_CREATE_VM hypercall ++ */ + struct acrn_create_vm { +- int32_t vmid; /* OUT: return vmid to VHM. Keep it first field */ +- uint32_t vcpu_num; /* IN: VM vcpu number */ +- uint8_t GUID[16]; /* IN: GUID of this vm */ +- uint8_t secure_world_enabled;/* IN: whether Secure World is enabled */ +- uint8_t reserved[31]; /* Reserved for future use */ ++ /** created vmid return to VHM. Keep it first field */ ++ int32_t vmid; ++ ++ /** VCPU numbers this VM want to create */ ++ uint32_t vcpu_num; ++ ++ /** the GUID of this VM */ ++ uint8_t GUID[16]; ++ ++ /* VM flag bits from Guest OS, now used ++ * SECURE_WORLD_ENABLED (1UL<<0) ++ */ ++ uint64_t vm_flag; ++ ++ /** Reserved for future use*/ ++ uint8_t reserved[24]; + } __attribute__((aligned(8))); + ++/** ++ * @brief Info to create a VCPU ++ * ++ * the parameter for HC_CREATE_VCPU hypercall ++ */ + struct acrn_create_vcpu { +- uint32_t vcpu_id; /* IN: vcpu id */ +- uint32_t pcpu_id; /* IN: pcpu id */ ++ /** the virtual CPU ID for the VCPU created */ ++ uint32_t vcpu_id; ++ ++ /** the physical CPU ID for the VCPU created */ ++ uint32_t pcpu_id; + } __attribute__((aligned(8))); + ++/** ++ * @brief Info to set ioreq buffer for a created VM ++ * ++ * the parameter for HC_SET_IOREQ_BUFFER hypercall ++ */ + struct acrn_set_ioreq_buffer { +- uint64_t req_buf; /* IN: gpa of per VM request_buffer*/ ++ /** guest physical address of VM 
request_buffer */ ++ uint64_t req_buf; + } __attribute__((aligned(8))); + +-/* +- * intr type +- * IOAPIC: inject interrupt to IOAPIC +- * ISA: inject interrupt to both PIC and IOAPIC +- */ ++/** Interrupt type for acrn_irqline: inject interrupt to IOAPIC */ + #define ACRN_INTR_TYPE_ISA 0 ++ ++/** Interrupt type for acrn_irqline: inject interrupt to both PIC and IOAPIC */ + #define ACRN_INTR_TYPE_IOAPIC 1 + +-/* For ISA, PIC, IOAPIC etc */ ++/** ++ * @brief Info to assert/deassert/pulse a virtual IRQ line for a VM ++ * ++ * the parameter for HC_ASSERT_IRQLINE/HC_DEASSERT_IRQLINE/HC_PULSE_IRQLINE ++ * hypercall ++ */ + struct acrn_irqline { ++ /** interrupt type which could be IOAPIC or ISA */ + uint32_t intr_type; ++ ++ /** reserved for alignment padding */ + uint32_t reserved; +- uint64_t pic_irq; /* IN: for ISA type */ +- uint64_t ioapic_irq; /* IN: for IOAPIC type, -1 don't inject */ ++ ++ /** pic IRQ for ISA type */ ++ uint64_t pic_irq; ++ ++ /** ioapic IRQ for IOAPIC & ISA TYPE, ++ * if -1 then this IRQ will not be injected ++ */ ++ uint64_t ioapic_irq; + } __attribute__((aligned(8))); + +-/* For MSI type inject */ ++/** ++ * @brief Info to inject a MSI interrupt to VM ++ * ++ * the parameter for HC_INJECT_MSI hypercall ++ */ + struct acrn_msi_entry { +- uint64_t msi_addr; /* IN: addr[19:12] with dest vcpu id */ +- uint64_t msi_data; /* IN: data[7:0] with vector */ ++ /** MSI addr[19:12] with dest VCPU ID */ ++ uint64_t msi_addr; ++ ++ /** MSI data[7:0] with vector */ ++ uint64_t msi_data; + } __attribute__((aligned(8))); + +-/* For NMI inject */ ++/** ++ * @brief Info to inject a NMI interrupt for a VM ++ */ + struct acrn_nmi_entry { +- int64_t vcpuid; /* IN: -1 means vcpu0 */ ++ /** virtual CPU ID to inject */ ++ int64_t vcpu_id; + } __attribute__((aligned(8))); + ++/** ++ * @brief Info to remap pass-through PCI MSI for a VM ++ * ++ * the parameter for HC_VM_PCI_MSIX_REMAP hypercall ++ */ + struct acrn_vm_pci_msix_remap { +- uint16_t virt_bdf; /* IN: Device 
virtual BDF# */ +- uint16_t phys_bdf; /* IN: Device physical BDF# */ +- uint16_t msi_ctl; /* IN: PCI MSI/x cap control data */ ++ /** pass-through PCI device virtual BDF# */ ++ uint16_t virt_bdf; ++ ++ /** pass-through PCI device physical BDF# */ ++ uint16_t phys_bdf; ++ ++ /** pass-through PCI device MSI/MSI-X cap control data */ ++ uint16_t msi_ctl; ++ ++ /** reserved for alignment padding */ + uint16_t reserved; ++ ++ /** pass-through PCI device MSI address to remap, which will ++ * return the caller after remapping ++ */ + uint64_t msi_addr; /* IN/OUT: msi address to fix */ +- uint32_t msi_data; /* IN/OUT: msi data to fix */ +- int32_t msix; /* IN: 0 - MSI, 1 - MSI-X */ +- int32_t msix_entry_index; /* IN: MSI-X the entry table index */ +- /* IN: Vector Control for MSI-X Entry, field defined in MSIX spec */ ++ ++ /** pass-through PCI device MSI data to remap, which will ++ * return the caller after remapping ++ */ ++ uint32_t msi_data; ++ ++ /** pass-through PCI device is MSI or MSI-X ++ * 0 - MSI, 1 - MSI-X ++ */ ++ int32_t msix; ++ ++ /** if the pass-through PCI device is MSI-X, this field contains ++ * the MSI-X entry table index ++ */ ++ int32_t msix_entry_index; ++ ++ /** if the pass-through PCI device is MSI-X, this field contains ++ * Vector Control for MSI-X Entry, field defined in MSI-X spec ++ */ + uint32_t vector_ctl; + } __attribute__((aligned(8))); + +-/* It's designed to support passing DM config data pointer, based on it, +- * hypervisor would parse then pass DM defined configration to GUEST vcpu ++/** ++ * @brief The guest config pointer offset. ++ * ++ * It's designed to support passing DM config data pointer, based on it, ++ * hypervisor would parse then pass DM defined configuration to GUEST VCPU + * when booting guest VM. 
+ * the address 0xd0000 here is designed by DM, as it arranged all memory + * layout below 1M, DM should make sure there is no overlap for the address +@@ -212,6 +305,10 @@ struct acrn_vm_pci_msix_remap { + */ + #define GUEST_CFG_OFFSET 0xd0000 + ++/** ++ * @brief Info The power state data of a VCPU. ++ * ++ */ + struct cpu_px_data { + uint64_t core_frequency; /* megahertz */ + uint64_t power; /* milliWatts */ +@@ -221,6 +318,12 @@ struct cpu_px_data { + uint64_t status; /* success indicator */ + } __attribute__((aligned(8))); + ++/** ++ * @brief Info PM command from DM/VHM. ++ * ++ * The command would specify request type(i.e. get px count or data) for ++ * specific VM and specific VCPU with specific state number.like P(n). ++ */ + #define PMCMD_VMID_MASK 0xff000000 + #define PMCMD_VCPUID_MASK 0x00ff0000 + #define PMCMD_STATE_NUM_MASK 0x0000ff00 +@@ -235,4 +338,8 @@ enum pm_cmd_type { + PMCMD_GET_PX_DATA, + }; + ++/** ++ * @} ++ */ ++ + #endif /* ACRN_COMMON_H */ +-- +2.17.1 + diff --git a/patches/0051-trusty-Rename-CWP-with-ACRN.trusty b/patches/0051-trusty-Rename-CWP-with-ACRN.trusty new file mode 100644 index 0000000000..a27996540e --- /dev/null +++ b/patches/0051-trusty-Rename-CWP-with-ACRN.trusty @@ -0,0 +1,73 @@ +From d64f312e17b2f7078c7936df118890ecfc725dba Mon Sep 17 00:00:00 2001 +From: "Qi, Yadong" +Date: Mon, 26 Feb 2018 09:48:06 +0800 +Subject: [PATCH 51/63] trusty: Rename CWP with ACRN + +The CWP hypervisor has been renamed to ACRN. 
+ +Change-Id: I23bcff44954110fbc20148fd3266ac48864a3a1f +Signed-off-by: Qi, Yadong +--- + drivers/trusty/trusty.c | 10 +++++----- + include/linux/trusty/trusty.h | 4 ++-- + 2 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 98c866487a3e..e253ee498ab5 100755 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -27,7 +27,7 @@ + #include + + #define EVMM_SMC_HC_ID 0x74727500 +-#define CWP_SMC_HC_ID 0x80000071 ++#define ACRN_SMC_HC_ID 0x80000071 + + struct trusty_state; + +@@ -72,9 +72,9 @@ static inline ulong smc_evmm(ulong r0, ulong r1, ulong r2, ulong r3) + return r0; + } + +-static inline ulong smc_cwp(ulong r0, ulong r1, ulong r2, ulong r3) ++static inline ulong smc_acrn(ulong r0, ulong r1, ulong r2, ulong r3) + { +- register unsigned long smc_id asm("r8") = CWP_SMC_HC_ID; ++ register unsigned long smc_id asm("r8") = ACRN_SMC_HC_ID; + asm_smc_vmcall(smc_id, r0, r1, r2, r3); + return r0; + } +@@ -463,8 +463,8 @@ static void trusty_init_smc(int vmm_id) + { + if (vmm_id == VMM_ID_EVMM) { + smc = smc_evmm; +- } else if (vmm_id == VMM_ID_CWP) { +- smc = smc_cwp; ++ } else if (vmm_id == VMM_ID_ACRN) { ++ smc = smc_acrn; + } else { + pr_err("%s: No smc supports VMM[%d](sig:%s)!", + __func__, vmm_id, vmm_signature[vmm_id]); +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index 48e1ea716889..546e6db03498 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -93,13 +93,13 @@ void *trusty_wall_per_cpu_item_ptr(struct device *dev, unsigned int cpu, + + enum { + VMM_ID_EVMM = 0, +- VMM_ID_CWP, ++ VMM_ID_ACRN, + VMM_SUPPORTED_NUM + }; + + static const char *vmm_signature[] = { + [VMM_ID_EVMM] = "EVMMEVMMEVMM", +- [VMM_ID_CWP] = "CWPCWPCWP\0\0" ++ [VMM_ID_ACRN] = "ACRNACRNACRN" + }; + + /* Detect VMM and return vmm_id */ +-- +2.17.1 + diff --git a/patches/0052-ASoC-Intel-Skylake-Probe-compress-operations.audio 
b/patches/0052-ASoC-Intel-Skylake-Probe-compress-operations.audio new file mode 100644 index 0000000000..a6b8cd0d27 --- /dev/null +++ b/patches/0052-ASoC-Intel-Skylake-Probe-compress-operations.audio @@ -0,0 +1,342 @@ +From df39fe7046a75ea18e2d8ff4d7d9f1b4419c439a Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 18:54:06 +0100 +Subject: [PATCH 052/193] ASoC: Intel: Skylake: Probe compress operations + +Add SKL handlers for soc_compr_ops and snd_compr_ops which cover probe +related operations. Implementation supports all connection purposes. +As per firmware spec, maximum of one extraction stream is allowed, while +for injection, there can be plenty. + +Change-Id: Iffcfa593e4d16e5fbd17d06b8d5a6f781068e9fa +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/Kconfig | 1 + + sound/soc/intel/skylake/Makefile | 2 +- + sound/soc/intel/skylake/skl-compress.c | 247 +++++++++++++++++++++++++ + sound/soc/intel/skylake/skl-compress.h | 34 ++++ + 4 files changed, 283 insertions(+), 1 deletion(-) + create mode 100644 sound/soc/intel/skylake/skl-compress.c + create mode 100644 sound/soc/intel/skylake/skl-compress.h + +diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig +index ee7c44f1c4d0..f644254e1510 100644 +--- a/sound/soc/intel/Kconfig ++++ b/sound/soc/intel/Kconfig +@@ -218,6 +218,7 @@ config SND_SOC_INTEL_SKYLAKE_COMMON + select SND_SOC_HDAC_HDA if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC + select SND_SOC_ACPI_INTEL_MATCH + select SND_INTEL_NHLT if ACPI ++ select SND_SOC_COMPRESS + help + If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/ + GeminiLake or CannonLake platform with the DSP enabled in the BIOS +diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile +index 48544ff1a3e6..9766fb3c96e3 100644 +--- a/sound/soc/intel/skylake/Makefile ++++ b/sound/soc/intel/skylake/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + snd-soc-skl-objs := skl.o skl-pcm.o skl-nhlt.o skl-messages.o skl-topology.o \ 
+ skl-sst-ipc.o skl-sst-dsp.o cnl-sst-dsp.o skl-sst-cldma.o \ +- skl-sst.o bxt-sst.o cnl-sst.o skl-sst-utils.o ++ skl-sst.o bxt-sst.o cnl-sst.o skl-sst-utils.o skl-compress.o + + ifdef CONFIG_DEBUG_FS + snd-soc-skl-objs += skl-debug.o +diff --git a/sound/soc/intel/skylake/skl-compress.c b/sound/soc/intel/skylake/skl-compress.c +new file mode 100644 +index 000000000000..de6630046706 +--- /dev/null ++++ b/sound/soc/intel/skylake/skl-compress.c +@@ -0,0 +1,247 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++// ++// skl-compress.c -- ASoC Skylake compress operations ++// ++// Copyright (C) 2018 Intel Corp. ++// ++// Author: Cezary Rojewski ++ ++#include ++#include ++#include "skl-sst-dsp.h" ++#include "skl-sst-ipc.h" ++#include "skl-topology.h" ++#include "skl-compress.h" ++ ++int skl_probe_compr_open(struct snd_compr_stream *cstream, ++ struct snd_soc_dai *dai) ++{ ++ struct hdac_bus *bus = dev_get_drvdata(dai->dev); ++ struct hdac_ext_stream *stream; ++ struct skl_dev *skl = get_skl_ctx(dai->dev); ++ ++ if (cstream->direction == SND_COMPRESS_CAPTURE && skl->extractor) { ++ dev_err(dai->dev, ++ "Cannot open more than one extractor stream\n"); ++ return -EEXIST; ++ } ++ ++ stream = hdac_ext_host_stream_compr_assign(bus, cstream); ++ if (!stream) { ++ dev_err(dai->dev, "Failed to assign probe stream\n"); ++ return -EBUSY; ++ } ++ ++ if (cstream->direction == SND_COMPRESS_CAPTURE) ++ skl->extractor = stream; ++ hdac_stream(stream)->curr_pos = 0; ++ cstream->runtime->private_data = stream; ++ ++ return 0; ++} ++ ++int skl_probe_compr_free(struct snd_compr_stream *cstream, ++ struct snd_soc_dai *dai) ++{ ++ struct hdac_ext_stream *stream = skl_compr_get_stream(cstream); ++ struct skl_dev *skl = get_skl_ctx(dai->dev); ++ struct skl_probe_point_desc *desc; ++ struct skl_probe_dma *dma; ++ size_t num_desc, num_dma; ++ unsigned int vindex = INVALID_NODE_ID.node.vindex; ++ int i, ret; ++ ++ ret = skl_probe_get_points(skl, &desc, &num_desc); ++ if (ret < 0) { ++ dev_err(dai->dev, 
"Failed to get probe points, ret: %d\n", ret); ++ goto release_resources; ++ } ++ ++ if (cstream->direction == SND_COMPRESS_PLAYBACK) ++ vindex = hdac_stream(stream)->stream_tag - 1; ++ ++ for (i = 0; i < num_desc; i++) ++ if (desc[i].node_id.node.vindex == vindex) ++ skl_probe_points_disconnect(skl, &desc[i].id, 1); ++ kfree(desc); ++ ++ if (cstream->direction != SND_COMPRESS_PLAYBACK) ++ goto release_resources; ++ ++ ret = skl_probe_get_dma(skl, &dma, &num_dma); ++ if (ret < 0) { ++ dev_err(dai->dev, "Failed to get inject dma, ret: %d\n", ret); ++ goto release_resources; ++ } ++ ++ for (i = 0; i < num_dma; i++) ++ if (dma[i].node_id.node.vindex == vindex) ++ skl_probe_dma_detach(skl, &dma[i].node_id, 1); ++ kfree(dma); ++ ++release_resources: ++ snd_hdac_stream_cleanup(hdac_stream(stream)); ++ hdac_stream(stream)->prepared = 0; ++ snd_compr_free_pages(cstream); ++ ++ snd_hdac_ext_stream_release(stream, HDAC_EXT_STREAM_TYPE_HOST); ++ ++ if (skl->extractor == stream) ++ skl->extractor = NULL; ++ if (skl->num_probe_streams) { ++ skl->num_probe_streams--; ++ if (!skl->num_probe_streams) ++ ret = skl_probe_delete_module(skl); ++ } ++ ++ return ret; ++} ++ ++int skl_probe_compr_set_params(struct snd_compr_stream *cstream, ++ struct snd_compr_params *params, struct snd_soc_dai *dai) ++{ ++ struct hdac_ext_stream *stream = skl_compr_get_stream(cstream); ++ struct snd_compr_runtime *rtd = cstream->runtime; ++ struct skl_dev *skl = get_skl_ctx(dai->dev); ++ struct skl_probe_dma dma; ++ unsigned int format_val; ++ int bps, ret; ++ /* compr params do not store bit depth, default to S32_LE */ ++ snd_pcm_format_t format = SNDRV_PCM_FORMAT_S32_LE; ++ ++ hdac_stream(stream)->bufsize = 0; ++ hdac_stream(stream)->period_bytes = 0; ++ hdac_stream(stream)->format_val = 0; ++ cstream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV_SG; ++ cstream->dma_buffer.dev.dev = snd_dma_pci_data(skl->pci); ++ ++ ret = snd_compr_malloc_pages(cstream, rtd->buffer_size); ++ if (ret < 0) ++ return ret; ++ 
bps = snd_pcm_format_physical_width(format); ++ if (bps < 0) ++ return bps; ++ format_val = snd_hdac_calc_stream_format(params->codec.sample_rate, ++ params->codec.ch_out, format, bps, 0); ++ ret = snd_hdac_stream_set_params(hdac_stream(stream), format_val); ++ if (ret < 0) ++ return ret; ++ ret = snd_hdac_stream_setup(hdac_stream(stream)); ++ if (ret < 0) ++ return ret; ++ ++ hdac_stream(stream)->prepared = 1; ++ ++ if (!skl->num_probe_streams) { ++ ret = skl_probe_init_module(skl, rtd->dma_bytes); ++ if (ret < 0) ++ return ret; ++ } ++ ++ if (cstream->direction == SND_COMPRESS_PLAYBACK) { ++ dma.node_id.node.vindex = hdac_stream(stream)->stream_tag - 1; ++ dma.node_id.node.dma_type = SKL_DMA_HDA_HOST_OUTPUT_CLASS; ++ dma.node_id.node.rsvd = 0; ++ dma.dma_buffer_size = rtd->dma_bytes; ++ ++ ret = skl_probe_dma_attach(skl, &dma, 1); ++ if (ret < 0) ++ return ret; ++ } ++ ++ skl->num_probe_streams++; ++ return 0; ++} ++ ++int skl_probe_compr_trigger(struct snd_compr_stream *cstream, int cmd, ++ struct snd_soc_dai *dai) ++{ ++ struct hdac_bus *bus = dev_get_drvdata(dai->dev); ++ struct hdac_ext_stream *stream = skl_compr_get_stream(cstream); ++ unsigned long cookie; ++ ++ if (!hdac_stream(stream)->prepared) ++ return -EPIPE; ++ ++ switch (cmd) { ++ case SNDRV_PCM_TRIGGER_START: ++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ++ case SNDRV_PCM_TRIGGER_RESUME: ++ spin_lock_irqsave(&bus->reg_lock, cookie); ++ snd_hdac_stream_start(hdac_stream(stream), true); ++ spin_unlock_irqrestore(&bus->reg_lock, cookie); ++ break; ++ ++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ++ case SNDRV_PCM_TRIGGER_SUSPEND: ++ case SNDRV_PCM_TRIGGER_STOP: ++ spin_lock_irqsave(&bus->reg_lock, cookie); ++ snd_hdac_stream_stop(hdac_stream(stream)); ++ spin_unlock_irqrestore(&bus->reg_lock, cookie); ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++int skl_probe_compr_pointer(struct snd_compr_stream *cstream, ++ struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai) ++{ ++ struct 
hdac_ext_stream *stream = skl_compr_get_stream(cstream); ++ struct snd_soc_pcm_stream *pstream; ++ ++ if (cstream->direction == SND_COMPRESS_PLAYBACK) ++ pstream = &dai->driver->playback; ++ else ++ pstream = &dai->driver->capture; ++ ++ tstamp->copied_total = hdac_stream(stream)->curr_pos; ++ tstamp->sampling_rate = snd_pcm_rate_bit_to_rate(pstream->rates); ++ return 0; ++} ++ ++int skl_probe_compr_copy(struct snd_compr_stream *cstream, ++ char __user *buf, size_t count) ++{ ++ struct snd_compr_runtime *rtd = cstream->runtime; ++ unsigned int offset, n; ++ void *ptr; ++ int ret; ++ ++ if (count > rtd->buffer_size) ++ count = rtd->buffer_size; ++ ++ if (cstream->direction == SND_COMPRESS_CAPTURE) { ++ div_u64_rem(rtd->total_bytes_transferred, ++ rtd->buffer_size, &offset); ++ ptr = rtd->dma_area + offset; ++ n = rtd->buffer_size - offset; ++ ++ if (count < n) { ++ ret = copy_to_user(buf, ptr, count); ++ } else { ++ ret = copy_to_user(buf, ptr, n); ++ ret += copy_to_user(buf + n, rtd->dma_area, count - n); ++ } ++ } else { ++ div_u64_rem(rtd->total_bytes_available, ++ rtd->buffer_size, &offset); ++ ptr = rtd->dma_area + offset; ++ n = rtd->buffer_size - offset; ++ ++ if (count < n) { ++ ret = copy_from_user(ptr, buf, count); ++ } else { ++ ret = copy_from_user(ptr, buf, n); ++ ret += copy_from_user(rtd->dma_area, ++ buf + n, count - n); ++ } ++ } ++ ++ if (ret) ++ return count - ret; ++ return count; ++} +diff --git a/sound/soc/intel/skylake/skl-compress.h b/sound/soc/intel/skylake/skl-compress.h +new file mode 100644 +index 000000000000..bdb6c5a79a26 +--- /dev/null ++++ b/sound/soc/intel/skylake/skl-compress.h +@@ -0,0 +1,34 @@ ++/* SPDX-License-Identifier: GPL-2.0 ++ * ++ * skl-compress.h -- ASoC Skylake compress header file ++ * ++ * Copyright (C) 2018 Intel Corp. 
++ * ++ * Author: Cezary Rojewski ++ */ ++ ++#ifndef __SKL_COMPRESS_H__ ++#define __SKL_COMPRESS_H__ ++ ++#include ++ ++static inline ++struct hdac_ext_stream *skl_compr_get_stream(struct snd_compr_stream *cstream) ++{ ++ return cstream->runtime->private_data; ++} ++ ++int skl_probe_compr_open(struct snd_compr_stream *cstream, ++ struct snd_soc_dai *dai); ++int skl_probe_compr_free(struct snd_compr_stream *cstream, ++ struct snd_soc_dai *dai); ++int skl_probe_compr_set_params(struct snd_compr_stream *cstream, ++ struct snd_compr_params *params, struct snd_soc_dai *dai); ++int skl_probe_compr_trigger(struct snd_compr_stream *cstream, int cmd, ++ struct snd_soc_dai *dai); ++int skl_probe_compr_pointer(struct snd_compr_stream *cstream, ++ struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai); ++int skl_probe_compr_copy(struct snd_compr_stream *cstream, ++ char __user *buf, size_t count); ++ ++#endif /* __SKL_COMPRESS_H__*/ +-- +2.17.1 + diff --git a/patches/0052-drm-i915-tgl-perf-use-the-same-oa-ctx_id-format-as-icl.drm b/patches/0052-drm-i915-tgl-perf-use-the-same-oa-ctx_id-format-as-icl.drm new file mode 100644 index 0000000000..7e0a95d447 --- /dev/null +++ b/patches/0052-drm-i915-tgl-perf-use-the-same-oa-ctx_id-format-as-icl.drm @@ -0,0 +1,36 @@ +From 0b95cc9191fb4bbcdaab176543ffbc1d6666c264 Mon Sep 17 00:00:00 2001 +From: Michel Thierry +Date: Fri, 23 Aug 2019 01:20:50 -0700 +Subject: [PATCH 052/690] drm/i915/tgl/perf: use the same oa ctx_id format as + icl + +Compared to Icelake, Tigerlake's MAX_CONTEXT_HW_ID is smaller by one, but +since we just use the upper 32 bits of the lrc_desc, it's guaranteed OA +will use the correct one. 
+ +Cc: Lionel Landwerlin +Signed-off-by: Michel Thierry +Signed-off-by: Lucas De Marchi +Reviewed-by: Umesh Nerlige Ramappa +Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-19-lucas.demarchi@intel.com +--- + drivers/gpu/drm/i915/i915_perf.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c +index e42b86827d6b..2c9f46e12622 100644 +--- a/drivers/gpu/drm/i915/i915_perf.c ++++ b/drivers/gpu/drm/i915/i915_perf.c +@@ -1299,7 +1299,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) + } + break; + +- case 11: { ++ case 11: ++ case 12: { + stream->specific_ctx_id_mask = + ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) | + ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) | +-- +2.17.1 + diff --git a/patches/0052-mei-support-variable-extended-heci-header.security b/patches/0052-mei-support-variable-extended-heci-header.security new file mode 100644 index 0000000000..565e8dfa7f --- /dev/null +++ b/patches/0052-mei-support-variable-extended-heci-header.security @@ -0,0 +1,707 @@ +From 4d0f1c7f008e56513c6367e4d4354f99db3b14f7 Mon Sep 17 00:00:00 2001 +From: Aviad Nissel +Date: Thu, 8 Nov 2018 15:17:42 +0200 +Subject: [PATCH 52/65] mei: support variable extended heci header. + +The HECI header was adjusted to support variable numbers of +extensions. 
+ +Change-Id: I46b63034745286767c7bc022d4aaca9c3b5f52bb +Signed-off-by: Aviad Nissel +--- + drivers/misc/mei/client.c | 150 ++++++++++++++++++++++------------- + drivers/misc/mei/hw.h | 96 +++++++++++++++++----- + drivers/misc/mei/interrupt.c | 112 +++++++++++++++++++++----- + drivers/misc/mei/mei_dev.h | 4 +- + 4 files changed, 267 insertions(+), 95 deletions(-) + +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index fcf12edf6fc6..c40166af96a2 100644 +--- a/drivers/misc/mei/client.c ++++ b/drivers/misc/mei/client.c +@@ -1652,31 +1652,53 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) + /** + * mei_msg_hdr_init - initialize mei message header + * +- * @mei_hdr: mei message header + * @cb: message callback structure + * +- * Return: header length in bytes ++ * Return: initialized header + */ +-static size_t mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, +- struct mei_cl_cb *cb) ++static struct mei_msg_hdr *mei_msg_hdr_init(struct mei_cl_cb *cb) + { +- size_t hdr_len = sizeof(*mei_hdr); +- struct mei_msg_extd_hdr *ext_hdr; ++ size_t hdr_len; ++ struct mei_ext_meta_hdr *meta; ++ struct mei_ext_hdr *ext; ++ struct mei_msg_hdr *mei_hdr; ++ bool is_ext, is_vtag; ++ ++ is_ext = (cb->vtag && cb->buf_idx == 0); ++ is_vtag = is_ext; ++ ++ hdr_len = sizeof(*mei_hdr); ++ if (is_ext) ++ hdr_len += sizeof(*meta); ++ ++ if (is_vtag) ++ hdr_len += sizeof(*ext); ++ ++ mei_hdr = kzalloc(hdr_len, GFP_KERNEL); ++ if (!mei_hdr) ++ return ERR_PTR(-ENOMEM); + +- memset(mei_hdr, 0, sizeof(*mei_hdr)); + mei_hdr->host_addr = mei_cl_host_addr(cb->cl); + mei_hdr->me_addr = mei_cl_me_id(cb->cl); + mei_hdr->internal = cb->internal; ++ mei_hdr->extended = is_ext; + +- if (cb->vtag && cb->buf_idx == 0) { +- ext_hdr = (struct mei_msg_extd_hdr *)mei_hdr->extension; +- memset(ext_hdr, 0, sizeof(*ext_hdr)); +- mei_hdr->extended = 1; +- ext_hdr->vtag = cb->vtag; +- hdr_len += sizeof(*ext_hdr); +- } ++ if (!is_ext) ++ goto out; + +- return hdr_len; 
++ meta = (struct mei_ext_meta_hdr *)mei_hdr->extension; ++ if (is_vtag) { ++ meta->count++; ++ meta->size = mei_data2slots(sizeof(*ext)); ++ ++ ext = meta->hdrs; ++ ext->type = MEI_EXT_HDR_VTAG; ++ ext->ext_payload[0] = cb->vtag; ++ ext->length = mei_data2slots(sizeof(*ext)); ++ } ++out: ++ mei_hdr->length = hdr_len - sizeof(*mei_hdr); ++ return mei_hdr; + } + + /** +@@ -1694,11 +1716,11 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + { + struct mei_device *dev; + struct mei_msg_data *buf; +- u32 __hdr[MEI_MSG_HDR_MAX]; +- struct mei_msg_hdr *mei_hdr = (void *)__hdr; ++ struct mei_msg_hdr *mei_hdr = NULL; + size_t hdr_len; +- size_t len; + size_t hbuf_len, dr_len; ++ size_t buf_len; ++ size_t data_len; + int hbuf_slots; + u32 dr_slots; + u32 dma_len; +@@ -1724,7 +1746,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + return 0; + } + +- len = buf->size - cb->buf_idx; ++ buf_len = buf->size - cb->buf_idx; + data = buf->data + cb->buf_idx; + hbuf_slots = mei_hbuf_empty_slots(dev); + if (hbuf_slots < 0) { +@@ -1736,45 +1758,54 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + dr_slots = mei_dma_ring_empty_slots(dev); + dr_len = mei_slots2data(dr_slots); + +- hdr_len = mei_msg_hdr_init(mei_hdr, cb); ++ mei_hdr = mei_msg_hdr_init(cb); ++ if (IS_ERR(mei_hdr)) { ++ rets = PTR_ERR(mei_hdr); ++ mei_hdr = NULL; ++ goto err; ++ } + + cl_dbg(dev, cl, "Extend Header %d vtag = %d\n", + mei_hdr->extended, cb->vtag); + ++ hdr_len = sizeof(*mei_hdr) + mei_hdr->length; ++ + /** + * Split the message only if we can write the whole host buffer + * otherwise wait for next time the host buffer is empty. 
+ */ +- if (len + hdr_len <= hbuf_len) { +- mei_hdr->length = len; ++ if (hdr_len + buf_len <= hbuf_len) { ++ data_len = buf_len; + mei_hdr->msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { + mei_hdr->dma_ring = 1; +- if (len > dr_len) +- len = dr_len; ++ if (buf_len > dr_len) ++ buf_len = dr_len; + else + mei_hdr->msg_complete = 1; + +- mei_hdr->length = sizeof(dma_len); +- dma_len = len; ++ data_len = sizeof(dma_len); ++ dma_len = buf_len; + data = &dma_len; + } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) { +- len = hbuf_len - hdr_len; +- mei_hdr->length = len; ++ buf_len = hbuf_len - hdr_len; ++ data_len = buf_len; + } else { ++ kfree(mei_hdr); + return 0; + } ++ mei_hdr->length += data_len; + + if (mei_hdr->dma_ring) +- mei_dma_ring_write(dev, buf->data + cb->buf_idx, len); ++ mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len); ++ rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len); + +- rets = mei_write_message(dev, mei_hdr, hdr_len, data, mei_hdr->length); + if (rets) + goto err; + + cl->status = 0; + cl->writing_state = MEI_WRITING; +- cb->buf_idx += len; ++ cb->buf_idx += buf_len; + + if (first_chunk) { + if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { +@@ -1786,9 +1817,11 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + if (mei_hdr->msg_complete) + list_move_tail(&cb->list, &dev->write_waiting_list); + ++ kfree(mei_hdr); + return 0; + + err: ++ kfree(mei_hdr); + cl->status = rets; + list_move_tail(&cb->list, cmpl_list); + return rets; +@@ -1807,10 +1840,11 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + { + struct mei_device *dev; + struct mei_msg_data *buf; +- u32 __hdr[MEI_MSG_HDR_MAX]; +- struct mei_msg_hdr *mei_hdr = (void *)__hdr; ++ struct mei_msg_hdr *mei_hdr = NULL; + size_t hdr_len; +- size_t len, hbuf_len, dr_len; ++ size_t hbuf_len, dr_len; ++ size_t buf_len; ++ size_t data_len; + int hbuf_slots; + u32 dr_slots; + u32 dma_len; +@@ -1827,9 +1861,9 @@ 
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + dev = cl->dev; + + buf = &cb->buf; +- len = buf->size; ++ buf_len = buf->size; + +- cl_dbg(dev, cl, "len=%zd\n", len); ++ cl_dbg(dev, cl, "buf_len=%zd\n", buf_len); + + blocking = cb->blocking; + data = buf->data; +@@ -1849,20 +1883,27 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + if (rets < 0) + goto err; + +- hdr_len = mei_msg_hdr_init(mei_hdr, cb); ++ mei_hdr = mei_msg_hdr_init(cb); ++ if (IS_ERR(mei_hdr)) { ++ rets = -PTR_ERR(mei_hdr); ++ mei_hdr = NULL; ++ goto err; ++ } + + cl_dbg(dev, cl, "Extend Header %d vtag = %d\n", + mei_hdr->extended, cb->vtag); + ++ hdr_len = sizeof(*mei_hdr) + mei_hdr->length; ++ + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); +- rets = len; ++ rets = buf_len; + goto out; + } + + if (!mei_hbuf_acquire(dev)) { + cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); +- rets = len; ++ rets = buf_len; + goto out; + } + +@@ -1876,29 +1917,30 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + dr_slots = mei_dma_ring_empty_slots(dev); + dr_len = mei_slots2data(dr_slots); + +- if (len + hdr_len <= hbuf_len) { +- mei_hdr->length = len; ++ if (hdr_len + buf_len <= hbuf_len) { ++ data_len = buf_len; + mei_hdr->msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { + mei_hdr->dma_ring = 1; +- if (len > dr_len) +- len = dr_len; ++ if (buf_len > dr_len) ++ buf_len = dr_len; + else + mei_hdr->msg_complete = 1; + +- mei_hdr->length = sizeof(dma_len); +- dma_len = len; ++ data_len = sizeof(dma_len); ++ dma_len = buf_len; + data = &dma_len; + } else { +- len = hbuf_len - hdr_len; +- mei_hdr->length = len; ++ buf_len = hbuf_len - hdr_len; ++ data_len = buf_len; + } + ++ mei_hdr->length += data_len; ++ + if (mei_hdr->dma_ring) +- mei_dma_ring_write(dev, buf->data, len); ++ mei_dma_ring_write(dev, buf->data, buf_len); ++ rets = mei_write_message(dev, mei_hdr, hdr_len, data, 
data_len); + +- rets = mei_write_message(dev, mei_hdr, hdr_len, +- data, mei_hdr->length); + if (rets) + goto err; + +@@ -1907,9 +1949,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + goto err; + + cl->writing_state = MEI_WRITING; +- cb->buf_idx = len; ++ cb->buf_idx = buf_len; + /* restore return value */ +- len = buf->size; ++ buf_len = buf->size; + + out: + if (mei_hdr->msg_complete) +@@ -1937,7 +1979,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + } + } + +- rets = len; ++ rets = buf_len; + err: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); +@@ -1945,6 +1987,8 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) + free: + mei_io_cb_free(cb); + ++ kfree(mei_hdr); ++ + return rets; + } + +diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h +index efb9f9e019b0..6d07732aae69 100644 +--- a/drivers/misc/mei/hw.h ++++ b/drivers/misc/mei/hw.h +@@ -1,6 +1,6 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + /* +- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved ++ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +@@ -197,19 +197,79 @@ enum mei_cl_connect_status { + /* + * Client Disconnect Status + */ +-enum mei_cl_disconnect_status { ++enum mei_cl_disconnect_status { + MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS + }; + ++enum mei_ext_hdr_type { ++ MEI_EXT_HDR_NONE = 0, ++ MEI_EXT_HDR_VTAG = 1, ++ MEI_EXT_HDR_GSC = 2, ++}; ++ + /** +- * struct mei_msg_extd_hdr - mei extended header +- * +- * @vtag: virtual tag. +- * @reserved: reserved. 
++ * struct mei_ext_hdr - extend header descriptor (TLV) ++ * @type: enum mei_ext_hdr_type ++ * @length: length exluding descriptor ++ * @ext_payload: payload of the specific extended header ++ * @hdr: place holder for actuall header ++ */ ++struct mei_ext_hdr { ++ u8 type; ++ u8 length; ++ u8 ext_payload[2]; ++ u8 hdr[0]; ++}; ++ ++/** ++ * struct mei_ext_meta_hdr - extend header meta data ++ * @count: number of headers ++ * @size: total size of the extended header list excluding meta header ++ * @reserved: reserved + */ +-struct mei_msg_extd_hdr { +- u8 vtag; +- u8 reserved[3]; ++struct mei_ext_meta_hdr { ++ u8 count; ++ u8 size; ++ u8 reserved[2]; ++ struct mei_ext_hdr hdrs[0]; ++}; ++ ++static inline struct mei_ext_hdr *mei_ext_begin(struct mei_ext_meta_hdr *meta) ++{ ++ return meta->hdrs; ++} ++ ++static inline struct mei_ext_hdr *mei_ext_next(struct mei_ext_hdr *ext) ++{ ++ return (struct mei_ext_hdr *)(ext->hdr + (ext->length * 4)); ++} ++ ++static inline bool mei_ext_last(struct mei_ext_meta_hdr *meta, ++ struct mei_ext_hdr *ext) ++{ ++ return (u8 *)ext >= (u8 *)meta + sizeof(*meta) + (meta->size * 4); ++} ++ ++struct mei_gcs_sgl { ++ u32 low; ++ u32 high; ++ u32 length; ++} __packed; ++ ++struct mei_ext_hdr_gcs_h2f { ++ u32 fence_id; ++ u32 addr_type; ++ u32 input_address_count; ++ u32 output_address_count; ++ struct mei_gcs_sgl input_buffer[0]; ++ struct mei_gcs_sgl output_buffer[0]; ++} __packed; ++ ++struct mei_ext_hdr_gcs_f2h { ++ u8 client_id; ++ u8 reserved[3]; ++ u32 fence_id; ++ u32 total_bytes_written; + } __packed; + + /** +@@ -237,8 +297,6 @@ struct mei_msg_hdr { + u32 extension[0]; + } __packed; + +-#define MEI_MSG_HDR_MAX 3 +- + struct mei_bus_message { + u8 hbm_cmd; + u8 data[0]; +@@ -451,19 +509,17 @@ struct hbm_notification_request { + + /** + * struct hbm_notification_response - start/stop notification response +- * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME +- * @host_addr: - address of the client 
in the driver ++ * @host_addr: address of the client in the driver + * @status: (mei_hbm_status) response status for the request +- * - MEI_HBMS_SUCCESS: successful stop/start +- * - MEI_HBMS_CLIENT_NOT_FOUND: if the connection could not be found. +- * - MEI_HBMS_ALREADY_STARTED: for start requests for a previously +- * started notification. +- * - MEI_HBMS_NOT_STARTED: for stop request for a connected client for whom ++ * * MEI_HBMS_SUCCESS: successful stop/start ++ * * MEI_HBMS_CLIENT_NOT_FOUND: if the connection could not be found. ++ * * MEI_HBMS_ALREADY_STARTED: for start requests for a previously ++ * started notification. ++ * * MEI_HBMS_NOT_STARTED: for stop request for a connected client for whom + * asynchronous notifications are currently disabled. +- * +- * @start: start = 1 or stop = 0 asynchronous notifications ++ * @start: start = 1 or stop = 0 asynchronous notifications + * @reserved: reserved + */ + struct hbm_notification_response { +diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c +index 32757cd6529b..cd3ab9276cd8 100644 +--- a/drivers/misc/mei/interrupt.c ++++ b/drivers/misc/mei/interrupt.c +@@ -61,16 +61,21 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl, + * + * @dev: mei device + * @hdr: message header ++ * @discard_len: the length of the message to discard (excluding header) + */ +-static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) ++static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr, ++ size_t discard_len) + { +- if (hdr->dma_ring) +- mei_dma_ring_read(dev, NULL, hdr->extension[0]); ++ if (hdr->dma_ring) { ++ mei_dma_ring_read(dev, NULL, ++ hdr->extension[dev->rd_msg_hdr_count - 2]); ++ discard_len = 0; ++ } + /* + * no need to check for size as it is guarantied + * that length fits into rd_msg_buf + */ +- mei_read_slots(dev, dev->rd_msg_buf, hdr->length); ++ mei_read_slots(dev, dev->rd_msg_buf, discard_len); + dev_dbg(dev->dev, "discarding message " 
MEI_HDR_FMT "\n", + MEI_HDR_PRM(hdr)); + } +@@ -80,19 +85,29 @@ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) + * + * @cl: reading client + * @mei_hdr: header of mei client message ++ * @meta: extend meta header + * @cmpl_list: completion list + * + * Return: always 0 + */ + static int mei_cl_irq_read_msg(struct mei_cl *cl, + struct mei_msg_hdr *mei_hdr, ++ struct mei_ext_meta_hdr *meta, + struct list_head *cmpl_list) + { + struct mei_device *dev = cl->dev; + struct mei_cl_cb *cb; +- struct mei_msg_extd_hdr *ext_hdr = (void *)mei_hdr->extension; ++ + size_t buf_sz; + u32 length; ++ int ext_len; ++ ++ length = mei_hdr->length; ++ ext_len = 0; ++ if (mei_hdr->extended) { ++ ext_len = sizeof(*meta) + mei_slots2data(meta->size); ++ length -= ext_len; ++ } + + cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); + if (!cb) { +@@ -107,14 +122,40 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, + } + + if (mei_hdr->extended) { +- cl_dbg(dev, cl, "vtag: %d\n", ext_hdr->vtag); +- if (cb->vtag && cb->vtag != ext_hdr->vtag) { ++ struct mei_ext_hdr *ext; ++ struct mei_ext_hdr *vtag = NULL; ++ ++ ext = mei_ext_begin(meta); ++ do { ++ switch (ext->type) { ++ case MEI_EXT_HDR_VTAG: ++ vtag = ext; ++ break; ++ case MEI_EXT_HDR_GSC: ++ case MEI_EXT_HDR_NONE: ++ default: ++ cb->status = -EPROTO; ++ break; ++ } ++ ++ ext = mei_ext_next(ext); ++ } while (!mei_ext_last(meta, ext)); ++ ++ if (!vtag) { ++ cl_dbg(dev, cl, "vtag not found in extended header.\n"); ++ cb->status = -EPROTO; ++ goto discard; ++ } ++ ++ cl_dbg(dev, cl, "vtag: %d\n", vtag->ext_payload[0]); ++ if (cb->vtag && cb->vtag != vtag->ext_payload[0]) { + cl_err(dev, cl, "mismatched tag: %d != %d\n", +- cb->vtag, ext_hdr->vtag); ++ cb->vtag, vtag->ext_payload[0]); + cb->status = -EPROTO; + goto discard; + } +- cb->vtag = ext_hdr->vtag; ++ cb->vtag = vtag->ext_payload[0]; ++ + } + + if (!mei_cl_is_connected(cl)) { +@@ -123,7 +164,8 @@ static int 
mei_cl_irq_read_msg(struct mei_cl *cl, + goto discard; + } + +- length = mei_hdr->dma_ring ? mei_hdr->extension[1] : mei_hdr->length; ++ if (mei_hdr->dma_ring) ++ length = mei_hdr->extension[mei_data2slots(ext_len)]; + + buf_sz = length + cb->buf_idx; + /* catch for integer overflow */ +@@ -141,11 +183,13 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, + goto discard; + } + +- if (mei_hdr->dma_ring) ++ if (mei_hdr->dma_ring) { + mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length); +- +- /* for DMA read 0 length to generate an interrupt to the device */ +- mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length); ++ /* for DMA read 0 length to generate interrupt to the device */ ++ mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0); ++ } else { ++ mei_read_slots(dev, cb->buf.data + cb->buf_idx, length); ++ } + + cb->buf_idx += length; + +@@ -162,7 +206,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, + discard: + if (cb) + list_move_tail(&cb->list, cmpl_list); +- mei_irq_discard_msg(dev, mei_hdr); ++ mei_irq_discard_msg(dev, mei_hdr, length); + return 0; + } + +@@ -277,11 +321,16 @@ int mei_irq_read_handler(struct mei_device *dev, + struct list_head *cmpl_list, s32 *slots) + { + struct mei_msg_hdr *mei_hdr; ++ struct mei_ext_meta_hdr *meta_hdr = NULL; + struct mei_cl *cl; + int ret; ++ u32 ext_meta_hdr_u32; ++ int i; ++ int ext_hdr_end; + + if (!dev->rd_msg_hdr[0]) { + dev->rd_msg_hdr[0] = mei_read_hdr(dev); ++ dev->rd_msg_hdr_count = 1; + (*slots)--; + dev_dbg(dev->dev, "slots =%08x.\n", *slots); + +@@ -304,14 +353,34 @@ int mei_irq_read_handler(struct mei_device *dev, + goto end; + } + ++ ext_hdr_end = 1; ++ + if (mei_hdr->extended) { +- dev->rd_msg_hdr[1] = mei_read_hdr(dev); +- (*slots)--; ++ if (!dev->rd_msg_hdr[1]) { ++ ext_meta_hdr_u32 = mei_read_hdr(dev); ++ dev->rd_msg_hdr[1] = ext_meta_hdr_u32; ++ dev->rd_msg_hdr_count++; ++ (*slots)--; ++ dev_dbg(dev->dev, "extended header is %08x\n", ++ ext_meta_hdr_u32); ++ } ++ meta_hdr = 
((struct mei_ext_meta_hdr *) ++ dev->rd_msg_hdr + 1); ++ ext_hdr_end = meta_hdr->size + 2; ++ for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) { ++ dev->rd_msg_hdr[i] = mei_read_hdr(dev); ++ dev_dbg(dev->dev, "extended header %d is %08x\n", i, ++ dev->rd_msg_hdr[i]); ++ dev->rd_msg_hdr_count++; ++ (*slots)--; ++ } + } ++ + if (mei_hdr->dma_ring) { +- dev->rd_msg_hdr[2] = mei_read_hdr(dev); ++ dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev); ++ dev->rd_msg_hdr_count++; + (*slots)--; +- mei_hdr->length = 0; ++ mei_hdr->length -= sizeof(dev->rd_msg_hdr[ext_hdr_end]); + } + + /* HBM message */ +@@ -342,7 +411,7 @@ int mei_irq_read_handler(struct mei_device *dev, + */ + if (hdr_is_fixed(mei_hdr) || + dev->dev_state == MEI_DEV_POWER_DOWN) { +- mei_irq_discard_msg(dev, mei_hdr); ++ mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length); + ret = 0; + goto reset_slots; + } +@@ -352,12 +421,13 @@ int mei_irq_read_handler(struct mei_device *dev, + goto end; + } + +- ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list); ++ ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list); + + + reset_slots: + /* reset the number of slots and header */ + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); ++ dev->rd_msg_hdr_count = 0; + *slots = mei_count_full_read_slots(dev); + if (*slots == -EOVERFLOW) { + /* overflow - reset */ +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h +index edab5f98665a..6b234349cd44 100644 +--- a/drivers/misc/mei/mei_dev.h ++++ b/drivers/misc/mei/mei_dev.h +@@ -426,6 +426,7 @@ struct mei_fw_version { + * + * @rd_msg_buf : control messages buffer + * @rd_msg_hdr : read message header storage ++ * @rd_msg_hdr_count : how many dwords were already read from header + * + * @hbuf_is_ready : query if the host host/write buffer is ready + * @dr_dscr: DMA ring descriptors: TX, RX, and CTRL +@@ -507,7 +508,8 @@ struct mei_device { + #endif /* CONFIG_PM */ + + unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; +- u32 rd_msg_hdr[MEI_MSG_HDR_MAX]; ++ 
u32 rd_msg_hdr[MEI_RD_MSG_BUF_SIZE]; ++ int rd_msg_hdr_count; + + /* write buffer */ + bool hbuf_is_ready; +-- +2.17.1 + diff --git a/patches/0052-net-stmmac-add-FPE-HW-tunables-setting.connectivity b/patches/0052-net-stmmac-add-FPE-HW-tunables-setting.connectivity new file mode 100644 index 0000000000..163363a554 --- /dev/null +++ b/patches/0052-net-stmmac-add-FPE-HW-tunables-setting.connectivity @@ -0,0 +1,342 @@ +From fabc876646be9f21d3c3d2dd81e3325f9c4e11b6 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Sat, 10 Aug 2019 07:40:18 +0800 +Subject: [PATCH 052/108] net: stmmac: add FPE HW tunables setting + +AFSZ is meant to change the non-final fragment size of preempted +frame. A smaller value here means express frame will be transmitted by +express MAC as soon as preempted MAC (pMAC) has collected the configured +fragment size, default to 64-byte fragment. + +HADV is meant to specify the time (in nano-seconds) for pMAC to +switch from release to hold state in advance. + +RADV is meant to specify the time (in nano-seconds) for pMAC to +switch from hold to release state in advance. 
+ +Signed-off-by: Ong Boon Leong +Signed-off-by: Voon Weifeng +--- + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 10 +++ + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 41 +++++++++++- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 12 +++- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 20 ++++++ + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 3 + + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 66 ++++++++++++++++++- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 6 ++ + include/linux/stmmac.h | 3 + + 8 files changed, 157 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index f3dbb98027c9..013ebd6af865 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -180,9 +180,19 @@ + #define MTL_FPE_CTRL_STS_HRS_SHIFT 28 + #define MTL_FPE_CTRL_STS_PEC GENMASK(15, 8) /* FPE Classification */ + #define MTL_FPE_CTRL_STS_PEC_SHIFT 8 ++#define MTL_FPE_CTRL_STS_AFSZ GENMASK(1, 0) /* Extra Frag Size */ ++ ++/* MTL FPE Advance */ ++#define MTL_FPE_ADVANCE 0x00000c94 ++#define MTL_FPE_ADVANCE_RADV GENMASK(31, 16) /* Release Advance */ ++#define MTL_FPE_ADVANCE_RADV_SHIFT 16 ++#define MTL_FPE_ADVANCE_HADV GENMASK(15, 0) /* Hold Advance */ + + /* FPE Global defines */ + #define FPE_PMAC_BIT BIT(0) /* TxQ0 is always preemptible */ ++#define FPE_AFSZ_MAX 0x3 /* Max AFSZ */ ++#define FPE_RADV_MAX 0xFFFF /* Max Release advance */ ++#define FPE_HADV_MAX 0xFFFF /* Max Hold advance */ + + /* DMA Tx Channel X Control register TBS bits defines */ + #define DMA_CONTROL_EDSE BIT(28) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index 826e623350bf..a8156138a0a1 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -419,9 +419,13 @@ int dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + 
return status; + } + +-static void dwmac5_fpe_get_info(u32 *pmac_bit) ++static void dwmac5_fpe_get_info(u32 *pmac_bit, u32 *afsz_max, ++ u32 *hadv_max, u32 *radv_max) + { + *pmac_bit = FPE_PMAC_BIT; ++ *afsz_max = FPE_AFSZ_MAX; ++ *hadv_max = FPE_HADV_MAX; ++ *radv_max = FPE_RADV_MAX; + } + + static void dwmac5_fpe_set_txqpec(void *ioaddr, u32 txqpec, u32 txqmask) +@@ -468,6 +472,38 @@ void dwmac5_fpe_get_pmac_sts(void *ioaddr, u32 *hrs) + *hrs = (value & MTL_FPE_CTRL_STS_HRS) >> MTL_FPE_CTRL_STS_HRS_SHIFT; + } + ++static void dwmac5_fpe_set_afsz(void *ioaddr, const u32 afsz) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_FPE_CTRL_STS); ++ value &= ~MTL_FPE_CTRL_STS_AFSZ; ++ value |= afsz; ++ writel(value, ioaddr + MTL_FPE_CTRL_STS); ++} ++ ++static void dwmac5_fpe_set_hadv(void *ioaddr, const u32 hadv) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_FPE_ADVANCE); ++ value &= ~MTL_FPE_ADVANCE_HADV; ++ value |= hadv; ++ writel(value, ioaddr + MTL_FPE_ADVANCE); ++} ++ ++static void dwmac5_fpe_set_radv(void *ioaddr, const u32 radv) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MTL_FPE_ADVANCE); ++ value &= ~MTL_FPE_ADVANCE_RADV; ++ value |= MTL_FPE_ADVANCE_RADV & ++ (radv << MTL_FPE_ADVANCE_RADV_SHIFT); ++ value |= radv; ++ writel(value, ioaddr + MTL_FPE_ADVANCE); ++} ++ + static void dwmac5_tbs_get_max(u32 *leos_max, + u32 *legos_max, + u32 *ftos_max, +@@ -604,6 +640,9 @@ const struct tsnif_ops dwmac510_tsnif_ops = { + .fpe_set_enable = dwmac5_fpe_set_enable, + .fpe_get_config = dwmac5_fpe_get_config, + .fpe_get_pmac_sts = dwmac5_fpe_get_pmac_sts, ++ .fpe_set_afsz = dwmac5_fpe_set_afsz, ++ .fpe_set_hadv = dwmac5_fpe_set_hadv, ++ .fpe_set_radv = dwmac5_fpe_set_radv, + .tbs_get_max = dwmac5_tbs_get_max, + .tbs_set_estm = dwmac5_tbs_set_estm, + .tbs_set_leos = dwmac5_tbs_set_leos, +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 2097aaca3fbd..9d878b18dd72 100644 +--- 
a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -789,11 +789,15 @@ struct tsnif_ops { + struct tsn_mmc_stat *mmc_stat, + unsigned int txqcnt); + /* Frame Preemption (FPE) */ +- void (*fpe_get_info)(u32 *pmac_bit); ++ void (*fpe_get_info)(u32 *pmac_bit, u32 *afsz_max, ++ u32 *hadv_max, u32 *radv_max); + void (*fpe_set_txqpec)(void *ioaddr, u32 txqpec, u32 txqmask); + void (*fpe_set_enable)(void *ioaddr, bool enable); + void (*fpe_get_config)(void *ioaddr, u32 *txqpec, bool *enable); + void (*fpe_get_pmac_sts)(void *ioaddr, u32 *hrs); ++ void (*fpe_set_afsz)(void *ioaddr, const u32 afsz); ++ void (*fpe_set_hadv)(void *ioaddr, const u32 hadv); ++ void (*fpe_set_radv)(void *ioaddr, const u32 radv); + /* Time-Based Scheduling (TBS) */ + void (*tbs_get_max)(u32 *leos_max, u32 *legos_max, + u32 *ftos_max, u32 *fgos_max); +@@ -856,6 +860,12 @@ struct tsnif_ops { + tsnif_do_void_callback(__hw, fpe_get_config, __args) + #define tsnif_fpe_get_pmac_sts(__hw, __args...) \ + tsnif_do_void_callback(__hw, fpe_get_pmac_sts, __args) ++#define tsnif_fpe_set_afsz(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_set_afsz, __args) ++#define tsnif_fpe_set_hadv(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_set_hadv, __args) ++#define tsnif_fpe_set_radv(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_set_radv, __args) + #define tsnif_tbs_get_max(__hw, __args...) \ + tsnif_do_void_callback(__hw, tbs_get_max, __args) + #define tsnif_tbs_set_estm(__hw, __args...) 
\ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 0dcf970ffd82..c17e25758284 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2766,6 +2766,26 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) + TSN_HWTUNA_TX_TBS_FGOS, + priv->plat->fgos); + ++ if (priv->plat->afsz) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_FPE_AFSZ, ++ priv->plat->afsz); ++ ++ if (priv->plat->hadv) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_FPE_HADV, ++ priv->plat->hadv); ++ ++ if (priv->plat->radv) ++ stmmac_set_tsn_hwtunable(priv, priv->hw, priv->dev, ++ TSN_HWTUNA_TX_FPE_RADV, ++ priv->plat->radv); ++ ++ if (stmmac_has_tsn_feat(priv, priv->hw, dev, TSN_FEAT_ID_FPE)) { ++ if (priv->hw->cached_fpe_en) ++ stmmac_fpe_set_enable(priv, priv->hw, dev, true); ++ } ++ + return 0; + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index eadf3359505d..fca6c3392eb2 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -166,6 +166,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + plat->tsn_tbs_en = 1; + /* FPE HW Tunable */ + plat->fprq = 1; ++ plat->afsz = 0; /* Adjustable Fragment Size */ ++ plat->hadv = 0; /* Hold Advance */ ++ plat->radv = 0; /* Release Advance*/ + /* TBS HW Tunable */ + plat->estm = 0; /* Absolute Mode */ + plat->leos = 0; /* Launch Expiry Offset */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index 799face82466..6a82ac5f00b8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -152,11 +152,15 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + goto 
check_tbs; + } + +- tsnif_fpe_get_info(hw, &cap->pmac_bit); ++ tsnif_fpe_get_info(hw, &cap->pmac_bit, &cap->afsz_max, ++ &cap->hadv_max, &cap->radv_max); + cap->rxqcnt = tsnif_est_get_rxqcnt(hw, ioaddr); + cap->fpe_support = 1; + +- dev_info(pdev, "FPE: pMAC Bit=0x%x\n", cap->pmac_bit); ++ dev_info(pdev, "FPE: pMAC Bit=0x%x\n afsz_max=%d", cap->pmac_bit, ++ cap->afsz_max); ++ dev_info(pdev, "FPE: hadv_max=%d radv_max=%d", cap->hadv_max, ++ cap->radv_max); + + check_tbs: + if (!tsnif_has_tsn_cap(hw, ioaddr, TSN_FEAT_ID_TBS)) { +@@ -254,6 +258,14 @@ int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + return -ENOTSUPP; + } + break; ++ case TSN_HWTUNA_TX_FPE_AFSZ: ++ case TSN_HWTUNA_TX_FPE_HADV: ++ case TSN_HWTUNA_TX_FPE_RADV: ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_FPE)) { ++ netdev_info(dev, "FPE: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ break; + case TSN_HWTUNA_TX_TBS_ESTM: + case TSN_HWTUNA_TX_TBS_LEOS: + case TSN_HWTUNA_TX_TBS_LEGOS: +@@ -311,6 +323,48 @@ int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + netdev_info(dev, "EST: Set CTOV = %u\n", data); + } + break; ++ case TSN_HWTUNA_TX_FPE_AFSZ: ++ if (data > cap->afsz_max) { ++ netdev_warn(dev, ++ "EST: invalid AFSZ(%u), max=%u\n", ++ data, cap->afsz_max); ++ ++ return -EINVAL; ++ } ++ if (data != info->hwtunable[TSN_HWTUNA_TX_FPE_AFSZ]) { ++ tsnif_fpe_set_afsz(hw, ioaddr, data); ++ info->hwtunable[TSN_HWTUNA_TX_FPE_AFSZ] = data; ++ netdev_info(dev, "FPE: Set AFSZ = %u\n", data); ++ } ++ break; ++ case TSN_HWTUNA_TX_FPE_HADV: ++ if (data > cap->hadv_max) { ++ netdev_warn(dev, ++ "EST: invalid HADV(%u), max=%u\n", ++ data, cap->hadv_max); ++ ++ return -EINVAL; ++ } ++ if (data != info->hwtunable[TSN_HWTUNA_TX_FPE_HADV]) { ++ tsnif_fpe_set_hadv(hw, ioaddr, data); ++ info->hwtunable[TSN_HWTUNA_TX_FPE_HADV] = data; ++ netdev_info(dev, "FPE: Set HADV = %u\n", data); ++ } ++ break; ++ case TSN_HWTUNA_TX_FPE_RADV: ++ if (data > cap->radv_max) { ++ 
netdev_warn(dev, ++ "EST: invalid RADV(%u), max=%u\n", ++ data, cap->radv_max); ++ ++ return -EINVAL; ++ } ++ if (data != info->hwtunable[TSN_HWTUNA_TX_FPE_RADV]) { ++ tsnif_fpe_set_radv(hw, ioaddr, data); ++ info->hwtunable[TSN_HWTUNA_TX_FPE_RADV] = data; ++ netdev_info(dev, "FPE: Set RADV = %u\n", data); ++ } ++ break; + case TSN_HWTUNA_TX_TBS_ESTM: + if (!data && data != 1) { + netdev_warn(dev, +@@ -467,6 +521,14 @@ int tsn_hwtunable_get(struct mac_device_info *hw, struct net_device *dev, + return -ENOTSUPP; + } + break; ++ case TSN_HWTUNA_TX_FPE_AFSZ: ++ case TSN_HWTUNA_TX_FPE_HADV: ++ case TSN_HWTUNA_TX_FPE_RADV: ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_FPE)) { ++ netdev_info(dev, "FPE: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ break; + case TSN_HWTUNA_TX_TBS_ESTM: + case TSN_HWTUNA_TX_TBS_LEOS: + case TSN_HWTUNA_TX_TBS_LEGOS: +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index 5651633e20ed..ac4cfe6c7569 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -16,6 +16,9 @@ enum tsn_hwtunable_id { + TSN_HWTUNA_TX_EST_TILS = 0, + TSN_HWTUNA_TX_EST_PTOV, + TSN_HWTUNA_TX_EST_CTOV, ++ TSN_HWTUNA_TX_FPE_AFSZ, ++ TSN_HWTUNA_TX_FPE_HADV, ++ TSN_HWTUNA_TX_FPE_RADV, + TSN_HWTUNA_TX_TBS_ESTM, /* TBS Absolute or EST mode */ + TSN_HWTUNA_TX_TBS_LEOS, + TSN_HWTUNA_TX_TBS_LEGOS, +@@ -64,6 +67,9 @@ struct tsn_hw_cap { + u32 idleslope_max; /* Max idle slope */ + /* FPE */ + u32 pmac_bit; /* Preemptible MAC bit */ ++ u32 afsz_max; /* Adj Frag Size */ ++ u32 hadv_max; /* Max Hold Advance */ ++ u32 radv_max; /* Max Release Advance */ + u32 leos_max; /* Launch Expiry Offset */ + u32 legos_max; /* Launch Expiry GSN Offset */ + u32 ftos_max; /* Max Fetch Time Offset */ +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 01d23591bec7..525ea9686efa 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ 
-209,6 +209,9 @@ struct plat_stmmacenet_data { + u32 tils; + /*FPE */ + u32 fprq; ++ u32 afsz; ++ u32 hadv; ++ u32 radv; + /* TBS */ + u32 estm; + u32 leos; +-- +2.17.1 + diff --git a/patches/0052-sos_kernel-export-reset-vm-function-to-DM.acrn b/patches/0052-sos_kernel-export-reset-vm-function-to-DM.acrn new file mode 100644 index 0000000000..534cfb124c --- /dev/null +++ b/patches/0052-sos_kernel-export-reset-vm-function-to-DM.acrn @@ -0,0 +1,94 @@ +From d967e6be147a22dea1a5d414af1e45ea75adb76a Mon Sep 17 00:00:00 2001 +From: Yin Fengwei +Date: Fri, 31 Aug 2018 10:59:00 +0800 +Subject: [PATCH 052/150] sos_kernel: export reset vm function to DM. + +Two major changes: + - Add ioctl interface to invoke vm reset between DM and vhm + - Add hypercall interface to invoke vm reset between vhm and hv + +Change-Id: If5d1555b2fe7b6e3ef9dad2c471b67ff1ac888c9 +Signed-off-by: Yin Fengwei +--- + drivers/char/vhm/vhm_dev.c | 9 +++++++++ + drivers/vhm/vhm_hypercall.c | 5 +++++ + include/linux/vhm/acrn_hv_defs.h | 1 + + include/linux/vhm/vhm_hypercall.h | 1 + + include/linux/vhm/vhm_ioctl_defs.h | 1 + + 5 files changed, 17 insertions(+) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 4c9cdabc0028..11aea23d40ef 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -223,6 +223,15 @@ static long vhm_dev_ioctl(struct file *filep, + break; + } + ++ case IC_RESET_VM: { ++ ret = hcall_reset_vm(vm->vmid); ++ if (ret < 0) { ++ pr_err("vhm: failed to restart VM %ld!\n", vm->vmid); ++ return -EFAULT; ++ } ++ break; ++ } ++ + case IC_DESTROY_VM: { + ret = hcall_destroy_vm(vm->vmid); + if (ret < 0) { +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index df87febaf60d..5940022403c0 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -72,6 +72,11 @@ inline long hcall_pause_vm(unsigned long vmid) + return acrn_hypercall1(HC_PAUSE_VM, vmid); + } + ++inline long hcall_reset_vm(unsigned long 
vmid) ++{ ++ return acrn_hypercall1(HC_RESET_VM, vmid); ++} ++ + inline long hcall_destroy_vm(unsigned long vmid) + { + return acrn_hypercall1(HC_DESTROY_VM, vmid); +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index d2da1a760783..902312049970 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -76,6 +76,7 @@ + #define HC_START_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x02) + #define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03) + #define HC_CREATE_VCPU _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04) ++#define HC_RESET_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05) + + /* IRQ and Interrupts */ + #define HC_ID_IRQ_BASE 0x20UL +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index 2372906946d6..1cc47ffab1a9 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -142,6 +142,7 @@ inline long hcall_create_vm(unsigned long vminfo); + inline long hcall_start_vm(unsigned long vmid); + inline long hcall_pause_vm(unsigned long vmid); + inline long hcall_destroy_vm(unsigned long vmid); ++inline long hcall_reset_vm(unsigned long vmid); + inline long hcall_query_vm_state(unsigned long vmid); + inline long hcall_setup_sbuf(unsigned long sbuf_head); + inline long hcall_get_cpu_state(unsigned long cmd, unsigned long state_pa); +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 3b05d8228e53..822fa4305f44 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -72,6 +72,7 @@ + #define IC_START_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x02) + #define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03) + #define IC_CREATE_VCPU _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04) ++#define IC_RESET_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x05) + + /* IRQ and Interrupts */ + #define IC_ID_IRQ_BASE 0x20UL +-- +2.17.1 + diff --git a/patches/0052-trusty-add-RAX-into-clobber-list-of-inline-asm-for-.trusty 
b/patches/0052-trusty-add-RAX-into-clobber-list-of-inline-asm-for-.trusty new file mode 100644 index 0000000000..be13b228a4 --- /dev/null +++ b/patches/0052-trusty-add-RAX-into-clobber-list-of-inline-asm-for-.trusty @@ -0,0 +1,63 @@ +From 61fd9a89fee38982054df7e3197155bb348ed347 Mon Sep 17 00:00:00 2001 +From: "Qi, Yadong" +Date: Fri, 16 Mar 2018 15:42:07 +0800 +Subject: [PATCH 52/63] trusty: add RAX into clobber list of inline asm for + ACRN + +The RAX regiser will be modified when do "vmcall" for ACRN +hypervisor. So the RAX register should to be listed in asm +clobber list to inform compiler aware of such changes. + +Change-Id: I298c056c109e974d2a391ba7b3e8dfbb7f25ed4f +Signed-off-by: Qi, Yadong +--- + drivers/trusty/trusty.c | 24 +++++++++++++----------- + 1 file changed, 13 insertions(+), 11 deletions(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index e253ee498ab5..4d33f269851d 100755 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -56,26 +56,28 @@ struct trusty_smc_interface { + + static ulong (*smc)(ulong, ulong, ulong, ulong); + +-#define asm_smc_vmcall(smc_id, rdi, rsi, rdx, rbx) \ +-do { \ +- __asm__ __volatile__( \ +- "vmcall; \n" \ +- : "=D"(rdi) \ +- : "r"(smc_id), "D"(rdi), "S"(rsi), "d"(rdx), "b"(rbx) \ +- ); \ +-} while (0) +- + static inline ulong smc_evmm(ulong r0, ulong r1, ulong r2, ulong r3) + { + register unsigned long smc_id asm("rax") = EVMM_SMC_HC_ID; +- asm_smc_vmcall(smc_id, r0, r1, r2, r3); ++ __asm__ __volatile__( ++ "vmcall; \n" ++ : "=D"(r0) ++ : "r"(smc_id), "D"(r0), "S"(r1), "d"(r2), "b"(r3) ++ ); ++ + return r0; + } + + static inline ulong smc_acrn(ulong r0, ulong r1, ulong r2, ulong r3) + { + register unsigned long smc_id asm("r8") = ACRN_SMC_HC_ID; +- asm_smc_vmcall(smc_id, r0, r1, r2, r3); ++ __asm__ __volatile__( ++ "vmcall; \n" ++ : "=D"(r0) ++ : "r"(smc_id), "D"(r0), "S"(r1), "d"(r2), "b"(r3) ++ : "rax" ++ ); ++ + return r0; + } + +-- +2.17.1 + diff --git 
a/patches/0053-ASoC-Intel-Skylake-Define-strsplit_u32-input-parsing.audio b/patches/0053-ASoC-Intel-Skylake-Define-strsplit_u32-input-parsing.audio new file mode 100644 index 0000000000..ec2234e765 --- /dev/null +++ b/patches/0053-ASoC-Intel-Skylake-Define-strsplit_u32-input-parsing.audio @@ -0,0 +1,86 @@ +From 347ffe9d15d114bb0b6b0f3e0ecf15fbcb888c84 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 21:26:46 +0100 +Subject: [PATCH 053/193] ASoC: Intel: Skylake: Define strsplit_u32 input + parsing helper + +SKL provides functionality for interacting with DSP over debugfs. To +improve and streamline input to payload parsing, strsplit_u32 helper +method has been defined. Said method tokenizes specified input given the +delimiters into sequence of DWORDs for further handling. + +Change-Id: I5abc1c2faaa56430c21998bb27a2cff05ffca2cd +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-debug.c | 55 +++++++++++++++++++++++++++++ + 1 file changed, 55 insertions(+) + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index c9c6d40f7d1d..001c498659f6 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -28,6 +28,61 @@ struct skl_debug { + u8 fw_read_buff[FW_REG_BUF]; + }; + ++/** ++ * strsplit_u32 - Split string into sequence of u32 tokens ++ * @buf: String to split into tokens. ++ * @delim: String containing delimiter characters. ++ * @tkns: Returned u32 sequence pointer. ++ * @num_tkns: Returned number of tokens obtained. 
++ */ ++static int ++strsplit_u32(char **buf, const char *delim, u32 **tkns, size_t *num_tkns) ++{ ++ char *s; ++ u32 *data, *tmp; ++ size_t count = 0; ++ size_t max_count = 32; ++ int ret = 0; ++ ++ *tkns = NULL; ++ *num_tkns = 0; ++ data = kcalloc(max_count, sizeof(*data), GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ ++ while ((s = strsep(buf, delim)) != NULL) { ++ ret = kstrtouint(s, 0, (data + count)); ++ if (ret) ++ goto exit; ++ if (++count >= max_count) { ++ max_count *= 2; ++ tmp = kcalloc(max_count, sizeof(*data), GFP_KERNEL); ++ if (!tmp) { ++ ret = -ENOMEM; ++ goto exit; ++ } ++ ++ memcpy(tmp, data, count * sizeof(*data)); ++ kfree(data); ++ data = tmp; ++ } ++ } ++ ++ if (!count) ++ goto exit; ++ *tkns = kcalloc(count, sizeof(*data), GFP_KERNEL); ++ if (*tkns == NULL) { ++ ret = -ENOMEM; ++ goto exit; ++ } ++ memcpy(*tkns, data, count * sizeof(*data)); ++ *num_tkns = count; ++ ++exit: ++ kfree(data); ++ return ret; ++} ++ + static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf, + int max_pin, ssize_t size, bool direction) + { +-- +2.17.1 + diff --git a/patches/0053-VHM-add-service-to-support-cx-data-transition.acrn b/patches/0053-VHM-add-service-to-support-cx-data-transition.acrn new file mode 100644 index 0000000000..6a39142208 --- /dev/null +++ b/patches/0053-VHM-add-service-to-support-cx-data-transition.acrn @@ -0,0 +1,122 @@ +From ae2c1bfa716b21f755940394b56a2e22182a141e Mon Sep 17 00:00:00 2001 +From: Victor Sun +Date: Thu, 19 Apr 2018 00:15:43 +0800 +Subject: [PATCH 053/150] VHM: add service to support cx data transition + +Like Acrn px enabling, the cx data is also hard coded within HV, DM will +get hard coded cx data to build DSDT for UOS. With this DSDT, UOS would +have capability on Cx control if acpi-idle driver is enabled in kernel. + +Refine the field_name name of cpu_cx_data to avoid the redefinition conflict. 
+ +Change-Id: I34cf5d99a7458ced51a52789027b0451e40a20bb +Signed-off-by: Victor Sun +--- + drivers/char/vhm/vhm_dev.c | 21 +++++++++++++++++---- + include/linux/vhm/acrn_common.h | 32 ++++++++++++++++++++++++++++++-- + 2 files changed, 47 insertions(+), 6 deletions(-) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 11aea23d40ef..4eb5a1636d7a 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -454,15 +454,16 @@ static long vhm_dev_ioctl(struct file *filep, + return -EFAULT; + + switch (cmd & PMCMD_TYPE_MASK) { +- case PMCMD_GET_PX_CNT: { +- uint8_t px_cnt; ++ case PMCMD_GET_PX_CNT: ++ case PMCMD_GET_CX_CNT: { ++ uint64_t pm_info; + +- ret = hcall_get_cpu_state(cmd, virt_to_phys(&px_cnt)); ++ ret = hcall_get_cpu_state(cmd, virt_to_phys(&pm_info)); + if (ret < 0) + return -EFAULT; + + if (copy_to_user((void *)ioctl_param, +- &px_cnt, sizeof(px_cnt))) ++ &pm_info, sizeof(pm_info))) + ret = -EFAULT; + + break; +@@ -479,6 +480,18 @@ static long vhm_dev_ioctl(struct file *filep, + ret = -EFAULT; + break; + } ++ case PMCMD_GET_CX_DATA: { ++ struct cpu_cx_data cx_data; ++ ++ ret = hcall_get_cpu_state(cmd, virt_to_phys(&cx_data)); ++ if (ret < 0) ++ return -EFAULT; ++ ++ if (copy_to_user((void *)ioctl_param, ++ &cx_data, sizeof(cx_data))) ++ ret = -EFAULT; ++ break; ++ } + default: + ret = -EFAULT; + break; +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 3a87636bc824..0fa524ef2af1 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -309,6 +309,30 @@ struct acrn_vm_pci_msix_remap { + * @brief Info The power state data of a VCPU. 
+ * + */ ++ ++#define SPACE_SYSTEM_MEMORY 0 ++#define SPACE_SYSTEM_IO 1 ++#define SPACE_PCI_CONFIG 2 ++#define SPACE_Embedded_Control 3 ++#define SPACE_SMBUS 4 ++#define SPACE_PLATFORM_COMM 10 ++#define SPACE_FFixedHW 0x7F ++ ++struct acrn_generic_address { ++ uint8_t space_id; ++ uint8_t bit_width; ++ uint8_t bit_offset; ++ uint8_t access_size; ++ uint64_t address; ++} __attribute__((aligned(8))); ++ ++struct cpu_cx_data { ++ struct acrn_generic_address cx_reg; ++ uint8_t type; ++ uint32_t latency; ++ uint64_t power; ++} __attribute__((aligned(8))); ++ + struct cpu_px_data { + uint64_t core_frequency; /* megahertz */ + uint64_t power; /* milliWatts */ +@@ -321,8 +345,10 @@ struct cpu_px_data { + /** + * @brief Info PM command from DM/VHM. + * +- * The command would specify request type(i.e. get px count or data) for +- * specific VM and specific VCPU with specific state number.like P(n). ++ * The command would specify request type(e.g. get px count or data) for ++ * specific VM and specific VCPU with specific state number. ++ * For Px, PMCMD_STATE_NUM means Px number from 0 to (MAX_PSTATE - 1), ++ * For Cx, PMCMD_STATE_NUM means Cx entry index from 1 to MAX_CX_ENTRY. 
+ */ + #define PMCMD_VMID_MASK 0xff000000 + #define PMCMD_VCPUID_MASK 0x00ff0000 +@@ -336,6 +362,8 @@ struct cpu_px_data { + enum pm_cmd_type { + PMCMD_GET_PX_CNT, + PMCMD_GET_PX_DATA, ++ PMCMD_GET_CX_CNT, ++ PMCMD_GET_CX_DATA, + }; + + /** +-- +2.17.1 + diff --git a/patches/0053-drm-i915-use-a-separate-context-for-gpu-relocs.drm b/patches/0053-drm-i915-use-a-separate-context-for-gpu-relocs.drm new file mode 100644 index 0000000000..26fa3019bd --- /dev/null +++ b/patches/0053-drm-i915-use-a-separate-context-for-gpu-relocs.drm @@ -0,0 +1,134 @@ +From e079fbcb70e02995d5957655a30d1b6dd67eed8b Mon Sep 17 00:00:00 2001 +From: Daniele Ceraolo Spurio +Date: Tue, 27 Aug 2019 11:58:05 -0700 +Subject: [PATCH 053/690] drm/i915: use a separate context for gpu relocs + +The CS pre-parser can pre-fetch commands across memory sync points and +starting from gen12 it is able to pre-fetch across BB_START and BB_END +boundaries as well, so when we emit gpu relocs the pre-parser might +fetch the target location of the reloc before the memory write lands. + +The parser can't pre-fetch across the ctx switch, so we use a separate +context to guarantee that the memory is synchronized before the parser +can get to it. + +Note that there is no risk of the CS doing a lite restore from the reloc +context to the user context, even if the two have the same hw_id, +because since gen11 the CS also checks the LRCA when deciding if it can +lite-restore. + +v2: limit new context to gen12+, release in eb_destroy, add a comment + in emit_fini_breadcrumb (Chris). 
+ +Suggested-by: Chris Wilson +Signed-off-by: Daniele Ceraolo Spurio +Cc: Chris Wilson +Reviewed-by: Chris Wilson +Signed-off-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190827185805.21799-1-daniele.ceraolospurio@intel.com +--- + .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 30 ++++++++++++++++++- + drivers/gpu/drm/i915/gt/intel_lrc.c | 18 +++++++++++ + 2 files changed, 47 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +index 7b1d8c4e5ef5..27dbcb508055 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +@@ -252,6 +252,7 @@ struct i915_execbuffer { + bool has_fence : 1; + bool needs_unfenced : 1; + ++ struct intel_context *ce; + struct i915_request *rq; + u32 *rq_cmd; + unsigned int rq_size; +@@ -880,6 +881,9 @@ static void eb_destroy(const struct i915_execbuffer *eb) + { + GEM_BUG_ON(eb->reloc_cache.rq); + ++ if (eb->reloc_cache.ce) ++ intel_context_put(eb->reloc_cache.ce); ++ + if (eb->lut_size > 0) + kfree(eb->buckets); + } +@@ -903,6 +907,7 @@ static void reloc_cache_init(struct reloc_cache *cache, + cache->has_fence = cache->gen < 4; + cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; + cache->node.allocated = false; ++ cache->ce = NULL; + cache->rq = NULL; + cache->rq_size = 0; + } +@@ -1168,7 +1173,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, + if (err) + goto err_unmap; + +- rq = i915_request_create(eb->context); ++ rq = intel_context_create_request(cache->ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; +@@ -1239,6 +1244,29 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, + if (!intel_engine_can_store_dword(eb->engine)) + return ERR_PTR(-ENODEV); + ++ if (!cache->ce) { ++ struct intel_context *ce; ++ ++ /* ++ * The CS pre-parser can pre-fetch commands across ++ * memory sync points and starting gen12 it is able to ++ * 
pre-fetch across BB_START and BB_END boundaries ++ * (within the same context). We therefore use a ++ * separate context gen12+ to guarantee that the reloc ++ * writes land before the parser gets to the target ++ * memory location. ++ */ ++ if (cache->gen >= 12) ++ ce = intel_context_create(eb->context->gem_context, ++ eb->engine); ++ else ++ ce = intel_context_get(eb->context); ++ if (IS_ERR(ce)) ++ return ERR_CAST(ce); ++ ++ cache->ce = ce; ++ } ++ + err = __reloc_gpu_alloc(eb, vma, len); + if (unlikely(err)) + return ERR_PTR(err); +diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c +index 08da6e8662bc..4ef8b5fd5d0c 100644 +--- a/drivers/gpu/drm/i915/gt/intel_lrc.c ++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c +@@ -2957,6 +2957,24 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) + return gen8_emit_fini_breadcrumb_footer(request, cs); + } + ++/* ++ * Note that the CS instruction pre-parser will not stall on the breadcrumb ++ * flush and will continue pre-fetching the instructions after it before the ++ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at ++ * BB_START/END instructions, so, even though we might pre-fetch the pre-amble ++ * of the next request before the memory has been flushed, we're guaranteed that ++ * we won't access the batch itself too early. ++ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands, ++ * so, if the current request is modifying an instruction in the next request on ++ * the same intel_context, we might pre-fetch and then execute the pre-update ++ * instruction. To avoid this, the users of self-modifying code should either ++ * disable the parser around the code emitting the memory writes, via a new flag ++ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For ++ * the in-kernel use-cases we've opted to use a separate context, see ++ * reloc_gpu() as an example. 
++ * All the above applies only to the instructions themselves. Non-inline data ++ * used by the instructions is not pre-fetched. ++ */ + static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, + u32 *cs) + { +-- +2.17.1 + diff --git a/patches/0053-mei-bus-fix-vtag-management.security b/patches/0053-mei-bus-fix-vtag-management.security new file mode 100644 index 0000000000..107730fbbd --- /dev/null +++ b/patches/0053-mei-bus-fix-vtag-management.security @@ -0,0 +1,120 @@ +From d153500cf9e8e07ffaf7a13797a9db86750a3669 Mon Sep 17 00:00:00 2001 +From: Alexander Usyskin +Date: Mon, 13 May 2019 17:32:53 +0300 +Subject: [PATCH 53/65] mei: bus: fix vtag management + +Add service functions for vtag alloc and free. +Free vtag on connect failure. + +Change-Id: Ia0aa157a3a7746ab7b96647ed489c88026f7acee +Signed-off-by: Alexander Usyskin +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/bus.c | 60 +++++++++++++++++++++++++++--------------- + 1 file changed, 39 insertions(+), 21 deletions(-) + +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index be03b781625c..f5776135cf93 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -505,6 +505,38 @@ static int mei_cldev_vt_support_check(struct mei_cl_device *cldev) + return cldev->me_cl->props.vt_supported ? 
0 : -EOPNOTSUPP; + } + ++static inline int mei_cldev_vtag_alloc(struct mei_cl_device *cldev) ++{ ++ struct mei_cl *cl = cldev->cl; ++ struct mei_cl_vtag *cl_vtag; ++ ++ /* client supports virtualization and have not already allocated one */ ++ if (mei_cldev_vt_support_check(cldev) || ++ list_first_entry_or_null(&cl->vtag_map, struct mei_cl_vtag, list)) ++ return 0; ++ ++ cl_vtag = mei_cl_vtag_alloc(NULL, 0); ++ if (IS_ERR(cl_vtag)) ++ return -ENOMEM; ++ ++ list_add_tail(&cl_vtag->list, &cl->vtag_map); ++ return 0; ++} ++ ++static inline void mei_cldev_vtag_free(struct mei_cl_device *cldev) ++{ ++ struct mei_cl *cl = cldev->cl; ++ struct mei_cl_vtag *cl_vtag; ++ ++ cl_vtag = list_first_entry_or_null(&cl->vtag_map, ++ struct mei_cl_vtag, list); ++ if (!cl_vtag) ++ return; ++ ++ list_del(&cl_vtag->list); ++ kfree(cl_vtag); ++} ++ + /** + * mei_cldev_enable - enable me client device + * create connection with me client +@@ -517,7 +549,6 @@ int mei_cldev_enable(struct mei_cl_device *cldev) + { + struct mei_device *bus = cldev->bus; + struct mei_cl *cl; +- struct mei_cl_vtag *cl_vtag; + int ret; + + cl = cldev->cl; +@@ -542,22 +573,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev) + goto out; + } + +- if (!mei_cldev_vt_support_check(cldev)) { +- if (!list_first_entry_or_null(&cl->vtag_map, +- struct mei_cl_vtag, list)) { +- cl_vtag = mei_cl_vtag_alloc(NULL, 0); +- if (IS_ERR(cl_vtag)) { +- ret = -ENOMEM; +- goto out; +- } +- +- list_add_tail(&cl_vtag->list, &cl->vtag_map); +- } +- } ++ ret = mei_cldev_vtag_alloc(cldev); ++ if (ret) ++ goto out; + + ret = mei_cl_connect(cl, cldev->me_cl, NULL); +- if (ret < 0) ++ if (ret < 0) { + dev_err(&cldev->dev, "cannot connect\n"); ++ mei_cldev_vtag_free(cldev); ++ } + + out: + mutex_unlock(&bus->device_lock); +@@ -597,7 +621,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev) + { + struct mei_device *bus; + struct mei_cl *cl; +- struct mei_cl_vtag *cl_vtag; + int err; + + if (!cldev) +@@ -611,12 +634,7 @@ int 
mei_cldev_disable(struct mei_cl_device *cldev) + + mutex_lock(&bus->device_lock); + +- cl_vtag = list_first_entry_or_null(&cl->vtag_map, +- struct mei_cl_vtag, list); +- if (cl_vtag) { +- list_del(&cl_vtag->list); +- kfree(cl_vtag); +- } ++ mei_cldev_vtag_free(cldev); + + if (!mei_cl_is_connected(cl)) { + dev_dbg(bus->dev, "Already disconnected\n"); +-- +2.17.1 + diff --git a/patches/0053-net-stmmac-support-FPE-link-partner-hand-shak.connectivity b/patches/0053-net-stmmac-support-FPE-link-partner-hand-shak.connectivity new file mode 100644 index 0000000000..8a65ad0061 --- /dev/null +++ b/patches/0053-net-stmmac-support-FPE-link-partner-hand-shak.connectivity @@ -0,0 +1,630 @@ +From 31e640441fd7ac8f4df25ac2f79f45fc337701c9 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Sat, 10 Aug 2019 12:55:12 +0800 +Subject: [PATCH 053/108] net: stmmac: support FPE link partner hand-shaking + procedure + +In order to discover whether remote station supports frame preemption, +local station sends verify mPacket and expects response mPacket in return +from the remote station. + +So, we add the functions to send and handle event when verify mPacket and +response mPacket are exchanged between the networked stations. + +The mechanism to handle different FPE states between local and remote +station (link partner) is implemented using workqueue which starts a task +each time there is some sign of verify & response mPacket exchange as +check in FPE IRQ event. The task retries couple of times to try to spot +the states that both stations are ready to enter FPE ON. This allows +different end points to enable FPE at different time and verify-response +mPacket can happen asynchronously. Ultimately, the task will only turn FPE +ON when local station have both exchange response in both directions. + +Thanks to Voon Weifeng for implementing the core functions for detecting +FPE events and send mPacket and phylink related change. 
+ +Tested-by: Tan Tee Min +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 1 + + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 7 + + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 8 + + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 49 +++++ + drivers/net/ethernet/stmicro/stmmac/hwif.h | 27 +++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 17 ++ + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 178 +++++++++++++++++- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 42 +++++ + 8 files changed, 328 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index e37380f01275..357ea781b6ce 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -483,6 +483,7 @@ struct mac_device_info { + u8 vlan_fail_q; + const struct tsnif_ops *tsnif; + struct tsnif_info tsn_info; ++ bool cached_fpe_en; + }; + + struct stmmac_rx_routing { +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 286c66883551..16630df356c5 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -58,6 +58,9 @@ static void dwmac4_core_init(struct mac_device_info *hw, + if (hw->mdio_intr_en) + value |= GMAC_INT_MDIO_EN; + ++ if (hw->tsn_info.feat_en[TSN_FEAT_ID_FPE]) ++ value |= GMAC_INT_FPE_EN; ++ + writel(value, ioaddr + GMAC_INT_EN); + } + +@@ -1179,6 +1182,7 @@ const struct stmmac_ops dwmac510_ops = { + .tsnif_setup = dwmac510_tsnif_setup, + .init_tsn = tsn_init, + .setup_tsn_hw = tsn_hw_setup, ++ .unsetup_tsn_hw = tsn_hw_unsetup, + .set_tsn_feat = tsn_feat_set, + .has_tsn_feat = tsn_has_feat, + .set_tsn_hwtunable = tsn_hwtunable_set, +@@ -1197,6 +1201,9 @@ const struct stmmac_ops dwmac510_ops = { + .fpe_set_enable = tsn_fpe_set_enable, + .fpe_get_config = 
tsn_fpe_get_config, + .fpe_show_pmac_sts = tsn_fpe_show_pmac_sts, ++ .fpe_send_mpacket = tsn_fpe_send_mpacket, ++ .fpe_link_state_handle = tsn_fpe_link_state_handle, ++ .fpe_irq_status = tsn_fpe_irq_status, + }; + + static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index 013ebd6af865..1df32ac5d7df 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -75,6 +75,8 @@ + + /* MDIO interrupt enable in MAC_Interrupt_Enable register */ + #define GMAC_INT_MDIO_EN BIT(18) ++/* FPE enable in MAC_Interrupt_Enable register */ ++#define GMAC_INT_FPE_EN BIT(17) + + /* EQoS version 5.xx VLAN Tag Filter Fail Packets Queuing */ + #define GMAC_RXQ_CTRL4 0x00000094 +@@ -172,6 +174,12 @@ + + /* MAC FPE control status */ + #define MAC_FPE_CTRL_STS 0x00000234 ++#define MAC_FPE_CTRL_STS_TRSP BIT(19) /* Tx'ed Response mPacket */ ++#define MAC_FPE_CTRL_STS_TVER BIT(18) /* Tx'ed Verify mPacket */ ++#define MAC_FPE_CTRL_STS_RRSP BIT(17) /* Rcvd Response mPacket */ ++#define MAC_FPE_CTRL_STS_RVER BIT(16) /* Rcvd Verify mPacket */ ++#define MAC_FPE_CTRL_STS_SRSP BIT(2) /* Send Response mPacket */ ++#define MAC_FPE_CTRL_STS_SVER BIT(1) /* Send Verify mPacket */ + #define MAC_FPE_CTRL_STS_EFPE BIT(0) + + /* MTL FPE control status */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index a8156138a0a1..192af21d6815 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -504,6 +504,53 @@ static void dwmac5_fpe_set_radv(void *ioaddr, const u32 radv) + writel(value, ioaddr + MTL_FPE_ADVANCE); + } + ++void dwmac5_fpe_irq_status(void *ioaddr, struct net_device *dev, ++ enum fpe_event *event) ++{ ++ u32 value; ++ ++ *event = FPE_EVENT_UNKNOWN; ++ ++ value = readl(ioaddr + MAC_FPE_CTRL_STS); ++ ++ if 
(value & MAC_FPE_CTRL_STS_TRSP) { ++ *event |= FPE_EVENT_TRSP; ++ netdev_info(dev, "FPE: Respond mPacket is transmitted\n"); ++ } ++ ++ if (value & MAC_FPE_CTRL_STS_TVER) { ++ *event |= FPE_EVENT_TVER; ++ netdev_info(dev, "FPE: Verify mPacket is transmitted\n"); ++ } ++ ++ if (value & MAC_FPE_CTRL_STS_RRSP) { ++ *event |= FPE_EVENT_RRSP; ++ netdev_info(dev, "FPE: Respond mPacket is received\n"); ++ } ++ ++ if (value & MAC_FPE_CTRL_STS_RVER) { ++ *event |= FPE_EVENT_RVER; ++ netdev_info(dev, "FPE: Verify mPacket is received\n"); ++ } ++} ++ ++void dwmac5_fpe_send_mpacket(void *ioaddr, enum mpacket_type type) ++{ ++ u32 value; ++ ++ value = readl(ioaddr + MAC_FPE_CTRL_STS); ++ ++ if (type == MPACKET_VERIFY) { ++ value &= ~MAC_FPE_CTRL_STS_SRSP; ++ value |= MAC_FPE_CTRL_STS_SVER; ++ } else { ++ value &= ~MAC_FPE_CTRL_STS_SVER; ++ value |= MAC_FPE_CTRL_STS_SRSP; ++ } ++ ++ writel(value, ioaddr + MAC_FPE_CTRL_STS); ++} ++ + static void dwmac5_tbs_get_max(u32 *leos_max, + u32 *legos_max, + u32 *ftos_max, +@@ -643,6 +690,8 @@ const struct tsnif_ops dwmac510_tsnif_ops = { + .fpe_set_afsz = dwmac5_fpe_set_afsz, + .fpe_set_hadv = dwmac5_fpe_set_hadv, + .fpe_set_radv = dwmac5_fpe_set_radv, ++ .fpe_irq_status = dwmac5_fpe_irq_status, ++ .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .tbs_get_max = dwmac5_tbs_get_max, + .tbs_set_estm = dwmac5_tbs_set_estm, + .tbs_set_leos = dwmac5_tbs_set_leos, +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 9d878b18dd72..a1ef1f0e1322 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -287,6 +287,7 @@ enum tsn_hwtunable_id; + struct est_gc_entry; + struct est_gcrr; + struct est_gc_config; ++enum mpacket_type; + + /* Helpers to program the MAC core */ + struct stmmac_ops { +@@ -406,6 +407,8 @@ struct stmmac_ops { + enum tsn_feat_id featid); + void (*setup_tsn_hw)(struct mac_device_info *hw, + struct net_device *dev, u32 fprq); 
++ void (*unsetup_tsn_hw)(struct mac_device_info *hw, ++ struct net_device *dev); + int (*set_tsn_hwtunable)(struct mac_device_info *hw, + struct net_device *dev, + enum tsn_hwtunable_id id, +@@ -449,6 +452,12 @@ struct stmmac_ops { + bool *enable); + int (*fpe_show_pmac_sts)(struct mac_device_info *hw, + struct net_device *dev); ++ int (*fpe_send_mpacket)(struct mac_device_info *hw, ++ struct net_device *dev, enum mpacket_type type); ++ void (*fpe_link_state_handle)(struct mac_device_info *hw, ++ struct net_device *dev, bool is_up); ++ void (*fpe_irq_status)(struct mac_device_info *hw, ++ struct net_device *dev); + }; + + #define stmmac_core_init(__priv, __args...) \ +@@ -555,6 +564,8 @@ struct stmmac_ops { + stmmac_do_callback(__priv, mac, has_tsn_feat, __args) + #define stmmac_tsn_hw_setup(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, setup_tsn_hw, __args) ++#define stmmac_tsn_hw_unsetup(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, unsetup_tsn_hw, __args) + #define stmmac_set_tsn_hwtunable(__priv, __args...) \ + stmmac_do_callback(__priv, mac, set_tsn_hwtunable, __args) + #define stmmac_get_tsn_hwtunable(__priv, __args...) \ +@@ -587,6 +598,12 @@ struct stmmac_ops { + stmmac_do_callback(__priv, mac, fpe_get_config, __args) + #define stmmac_fpe_show_pmac_sts(__priv, __args...) \ + stmmac_do_callback(__priv, mac, fpe_show_pmac_sts, __args) ++#define stmmac_fpe_send_mpacket(__priv, __args...) \ ++ stmmac_do_callback(__priv, mac, fpe_send_mpacket, __args) ++#define stmmac_fpe_link_state_handle(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, fpe_link_state_handle, __args) ++#define stmmac_fpe_irq_status(__priv, __args...) 
\ ++ stmmac_do_void_callback(__priv, mac, fpe_irq_status, __args) + + /* Helpers for serdes */ + struct stmmac_serdes_ops { +@@ -755,6 +772,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv); + }) + + struct tsn_mmc_stat; ++enum fpe_event; + + struct tsnif_ops { + u32 (*read_hwid)(void __iomem *ioaddr); +@@ -798,6 +816,9 @@ struct tsnif_ops { + void (*fpe_set_afsz)(void *ioaddr, const u32 afsz); + void (*fpe_set_hadv)(void *ioaddr, const u32 hadv); + void (*fpe_set_radv)(void *ioaddr, const u32 radv); ++ void (*fpe_send_mpacket)(void *ioaddr, enum mpacket_type type); ++ void (*fpe_irq_status)(void *ioaddr, struct net_device *dev, ++ enum fpe_event *fpe_event); + /* Time-Based Scheduling (TBS) */ + void (*tbs_get_max)(u32 *leos_max, u32 *legos_max, + u32 *ftos_max, u32 *fgos_max); +@@ -866,6 +887,12 @@ struct tsnif_ops { + tsnif_do_void_callback(__hw, fpe_set_hadv, __args) + #define tsnif_fpe_set_radv(__hw, __args...) \ + tsnif_do_void_callback(__hw, fpe_set_radv, __args) ++#define tsnif_fpe_set_radv(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_set_radv, __args) ++#define tsnif_fpe_send_mpacket(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_send_mpacket, __args) ++#define tsnif_fpe_irq_status(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_irq_status, __args) + #define tsnif_tbs_get_max(__hw, __args...) \ + tsnif_do_void_callback(__hw, tbs_get_max, __args) + #define tsnif_tbs_set_estm(__hw, __args...) 
\ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index c17e25758284..58da7a5955cf 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -972,6 +972,8 @@ static void stmmac_mac_link_down(struct phylink_config *config, + priv->eee_active = false; + stmmac_eee_init(priv); + stmmac_set_eee_pls(priv, priv->hw, false); ++ stmmac_fpe_link_state_handle(priv, priv->hw, priv->dev, false); ++ + } + + static void stmmac_mac_link_up(struct phylink_config *config, +@@ -986,6 +988,7 @@ static void stmmac_mac_link_up(struct phylink_config *config, + priv->eee_enabled = stmmac_eee_init(priv); + stmmac_set_eee_pls(priv, priv->hw, true); + } ++ stmmac_fpe_link_state_handle(priv, priv->hw, priv->dev, true); + } + + static const struct phylink_mac_ops stmmac_phylink_mac_ops = { +@@ -3180,6 +3183,8 @@ static int stmmac_release(struct net_device *dev) + + stmmac_release_ptp(priv); + ++ stmmac_tsn_hw_unsetup(priv, priv->hw, dev); ++ + return 0; + } + +@@ -4285,6 +4290,9 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) + if (priv->hw->tsn_info.feat_en[TSN_FEAT_ID_EST]) + stmmac_est_irq_status(priv, priv->hw, priv->dev); + ++ if (priv->hw->tsn_info.feat_en[TSN_FEAT_ID_FPE]) ++ stmmac_fpe_irq_status(priv, priv->hw, priv->dev); ++ + /* To handle GMAC own interrupts */ + if ((priv->plat->has_gmac) || xmac) { + int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); +@@ -5401,6 +5409,15 @@ int stmmac_suspend(struct device *dev) + clk_disable_unprepare(priv->plat->pclk); + clk_disable_unprepare(priv->plat->stmmac_clk); + } ++ ++ if (stmmac_has_tsn_feat(priv, priv->hw, ndev, TSN_FEAT_ID_FPE)) { ++ /* Keep the FPE enable/disable state before suspend */ ++ priv->hw->cached_fpe_en = priv->hw->tsn_info.fpe_cfg.enable; ++ netdev_info(ndev, "FPE: cached Enable %d", ++ priv->hw->cached_fpe_en); ++ stmmac_fpe_set_enable(priv, priv->hw, 
ndev, false); ++ } ++ + mutex_unlock(&priv->lock); + + priv->speed = SPEED_UNKNOWN; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index 6a82ac5f00b8..9b7ad371d4ba 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -3,6 +3,7 @@ + * TSN General APIs + */ + #include ++#include + #include + #include + #include "stmmac_ptp.h" +@@ -66,6 +67,83 @@ static u64 est_get_all_open_time(struct est_gc_config *est_gcc, + return total; + } + ++static void fpe_lp_task(struct work_struct *work) ++{ ++ struct mac_device_info *hw; ++ enum fpe_state *lo_state; ++ enum fpe_state *lp_state; ++ struct tsnif_info *info; ++ void __iomem *ioaddr; ++ bool *enable; ++ int retries; ++ ++ info = container_of(work, struct tsnif_info, fpe_task); ++ lo_state = &info->fpe_cfg.lo_fpe_state; ++ lp_state = &info->fpe_cfg.lp_fpe_state; ++ enable = &info->fpe_cfg.enable; ++ ++ hw = container_of(info, struct mac_device_info, tsn_info); ++ ioaddr = hw->pcsr; ++ ++ retries = 20; ++ ++ while (retries-- > 0) { ++ /* Bail out immediately if FPE is OFF */ ++ if (*lo_state == FPE_STATE_OFF || !*enable) ++ break; ++ ++ if (*lo_state == FPE_STATE_ENTERING_ON && ++ *lp_state == FPE_STATE_ENTERING_ON) { ++ tsnif_fpe_set_enable(hw, ioaddr, true); ++ *lo_state = FPE_STATE_ON; ++ *lp_state = FPE_STATE_ON; ++ break; ++ } ++ ++ if ((*lo_state == FPE_STATE_CAPABLE || ++ *lo_state == FPE_STATE_ENTERING_ON) && ++ *lp_state != FPE_STATE_ON) ++ tsnif_fpe_send_mpacket(hw, ioaddr, MPACKET_VERIFY); ++ ++ /* Sleep then retry */ ++ msleep(500); ++ } ++ ++ clear_bit(__FPE_TASK_SCHED, &info->task_state); ++} ++ ++static int fpe_start_wq(struct mac_device_info *hw, struct net_device *dev) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ char *name; ++ ++ clear_bit(__FPE_TASK_SCHED, &info->task_state); ++ ++ name = info->wq_name; ++ sprintf(name, "%s-fpe", dev->name); ++ ++ info->fpe_wq = 
create_singlethread_workqueue(name); ++ if (!info->fpe_wq) { ++ netdev_err(dev, "%s: Failed to create workqueue\n", name); ++ ++ return -ENOMEM; ++ } ++ netdev_info(dev, "FPE workqueue start"); ++ ++ return 0; ++} ++ ++static void fpe_stop_wq(struct mac_device_info *hw, struct net_device *dev) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ ++ set_bit(__FPE_REMOVING, &info->task_state); ++ ++ if (info->fpe_wq) ++ destroy_workqueue(info->fpe_wq); ++ ++ netdev_info(dev, "FPE workqueue stop"); ++} + int tsn_init(struct mac_device_info *hw, struct net_device *dev) + { + struct tsnif_info *info = &hw->tsn_info; +@@ -152,6 +230,7 @@ int tsn_init(struct mac_device_info *hw, struct net_device *dev) + goto check_tbs; + } + ++ INIT_WORK(&info->fpe_task, fpe_lp_task); + tsnif_fpe_get_info(hw, &cap->pmac_bit, &cap->afsz_max, + &cap->hadv_max, &cap->radv_max); + cap->rxqcnt = tsnif_est_get_rxqcnt(hw, ioaddr); +@@ -231,9 +310,17 @@ void tsn_hw_setup(struct mac_device_info *hw, struct net_device *dev, + } else { + netdev_warn(dev, "FPE: FPRQ is out-of-bound.\n"); + } ++ ++ fpe_start_wq(hw, dev); + } + } + ++void tsn_hw_unsetup(struct mac_device_info *hw, struct net_device *dev) ++{ ++ if (tsn_has_feat(hw, dev, TSN_FEAT_ID_FPE)) ++ fpe_stop_wq(hw, dev); ++} ++ + int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + enum tsn_hwtunable_id id, + const u32 data) +@@ -1129,7 +1216,11 @@ int tsn_fpe_set_enable(struct mac_device_info *hw, struct net_device *dev, + } + + if (info->fpe_cfg.enable != enable) { +- tsnif_fpe_set_enable(hw, ioaddr, enable); ++ if (enable) ++ tsnif_fpe_send_mpacket(hw, ioaddr, MPACKET_VERIFY); ++ else ++ info->fpe_cfg.lo_fpe_state = FPE_STATE_OFF; ++ + info->fpe_cfg.enable = enable; + } + +@@ -1170,3 +1261,88 @@ int tsn_fpe_show_pmac_sts(struct mac_device_info *hw, struct net_device *dev) + + return 0; + } ++ ++int tsn_fpe_send_mpacket(struct mac_device_info *hw, struct net_device *dev, ++ enum mpacket_type type) ++{ ++ void __iomem 
*ioaddr = hw->pcsr; ++ ++ if (!tsn_has_feat(hw, dev, TSN_FEAT_ID_FPE)) { ++ netdev_info(dev, "FPE: feature unsupported\n"); ++ return -ENOTSUPP; ++ } ++ ++ tsnif_fpe_send_mpacket(hw, ioaddr, type); ++ ++ return 0; ++} ++ ++void tsn_fpe_link_state_handle(struct mac_device_info *hw, ++ struct net_device *dev, bool is_up) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ void __iomem *ioaddr = hw->pcsr; ++ enum fpe_state *lo_state; ++ enum fpe_state *lp_state; ++ bool *enable; ++ ++ lo_state = &info->fpe_cfg.lo_fpe_state; ++ lp_state = &info->fpe_cfg.lp_fpe_state; ++ enable = &info->fpe_cfg.enable; ++ ++ if (is_up && *enable) { ++ tsnif_fpe_send_mpacket(hw, ioaddr, MPACKET_VERIFY); ++ } else { ++ *lo_state = FPE_EVENT_UNKNOWN; ++ *lp_state = FPE_EVENT_UNKNOWN; ++ } ++} ++ ++void tsn_fpe_irq_status(struct mac_device_info *hw, struct net_device *dev) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ void __iomem *ioaddr = hw->pcsr; ++ enum fpe_event *event; ++ enum fpe_state *lo_state; ++ enum fpe_state *lp_state; ++ bool *enable; ++ ++ event = &info->fpe_cfg.fpe_event; ++ lo_state = &info->fpe_cfg.lo_fpe_state; ++ lp_state = &info->fpe_cfg.lp_fpe_state; ++ enable = &info->fpe_cfg.enable; ++ ++ tsnif_fpe_irq_status(hw, ioaddr, dev, event); ++ ++ if (*event == FPE_EVENT_UNKNOWN || !*enable) ++ return; ++ ++ /* If LP has sent verify mPacket, LP is FPE capable */ ++ if ((*event & FPE_EVENT_RVER) == FPE_EVENT_RVER) { ++ if (*lp_state < FPE_STATE_CAPABLE) ++ *lp_state = FPE_STATE_CAPABLE; ++ ++ /* If user has requested FPE enable, quickly response */ ++ if (*enable) ++ tsnif_fpe_send_mpacket(hw, ioaddr, MPACKET_RESPONSE); ++ } ++ ++ /* If Local has sent verify mPacket, Local is FPE capable */ ++ if ((*event & FPE_EVENT_TVER) == FPE_EVENT_TVER) { ++ if (*lo_state < FPE_STATE_CAPABLE) ++ *lo_state = FPE_STATE_CAPABLE; ++ } ++ ++ /* If LP has sent response mPacket, LP is entering FPE ON */ ++ if ((*event & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) ++ *lp_state = 
FPE_STATE_ENTERING_ON; ++ ++ /* If Local has sent response mPacket, Local is entering FPE ON */ ++ if ((*event & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) ++ *lo_state = FPE_STATE_ENTERING_ON; ++ ++ if (!test_bit(__FPE_REMOVING, &info->task_state) && ++ !test_and_set_bit(__FPE_TASK_SCHED, &info->task_state) && ++ info->fpe_wq) ++ queue_work(info->fpe_wq, &info->fpe_task); ++} +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index ac4cfe6c7569..2a27e2b86a60 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -106,10 +106,36 @@ struct est_gc_config { + bool enable; /* 1: enabled */ + }; + ++/* FPE hand-shaking event */ ++enum fpe_event { ++ FPE_EVENT_UNKNOWN = 0, ++ FPE_EVENT_TRSP = 1, /* Tx'ed Response mPacket */ ++ FPE_EVENT_TVER = 2, /* Tx'ed Verify mPacket */ ++ FPE_EVENT_RRSP = 4, /* Rcvd Response mPacket */ ++ FPE_EVENT_RVER = 8, /* Rcvd Verify mPacket */ ++}; ++ ++/* FPE link state */ ++enum fpe_state { ++ FPE_STATE_OFF = 0, ++ FPE_STATE_CAPABLE = 1, ++ FPE_STATE_ENTERING_ON = 2, ++ FPE_STATE_ON = 3, ++}; ++ ++/* FPE link-partner hand-shaking mPacket type */ ++enum mpacket_type { ++ MPACKET_VERIFY = 0, ++ MPACKET_RESPONSE = 1, ++}; ++ + /* FPE Configuration */ + struct fpe_config { + u32 txqpec; /* TxQ Preemption Classification */ + bool enable; /* 1: enabled */ ++ enum fpe_state lp_fpe_state; /* Link Partner FPE state */ ++ enum fpe_state lo_fpe_state; /* Local station FPE state */ ++ enum fpe_event fpe_event; /* FPE Hand-shake state */ + }; + + /* TSN MMC Statistics */ +@@ -122,6 +148,11 @@ struct tsn_mmc_stat { + unsigned long count[STMMAC_TSN_STAT_SIZE]; + }; + ++enum fpe_task_state_t { ++ __FPE_REMOVING, ++ __FPE_TASK_SCHED, ++}; ++ + struct tsnif_info { + struct tsn_hw_cap cap; + bool feat_en[TSN_FEAT_ID_MAX]; +@@ -130,6 +161,11 @@ struct tsnif_info { + struct fpe_config fpe_cfg; + struct tsn_mmc_stat mmc_stat; + const struct 
tsn_mmc_desc *mmc_desc; ++ /* Workqueue for handling FPE hand-shaking */ ++ unsigned long task_state; ++ struct workqueue_struct *fpe_wq; ++ struct work_struct fpe_task; ++ char wq_name[IFNAMSIZ]; + }; + + struct mac_device_info; +@@ -142,6 +178,7 @@ bool tsn_has_feat(struct mac_device_info *hw, struct net_device *dev, + enum tsn_feat_id featid); + void tsn_hw_setup(struct mac_device_info *hw, struct net_device *dev, + u32 fprq); ++void tsn_hw_unsetup(struct mac_device_info *hw, struct net_device *dev); + int tsn_hwtunable_set(struct mac_device_info *hw, struct net_device *dev, + enum tsn_hwtunable_id id, const u32 data); + int tsn_hwtunable_get(struct mac_device_info *hw, struct net_device *dev, +@@ -175,5 +212,10 @@ int tsn_fpe_set_enable(struct mac_device_info *hw, struct net_device *dev, + int tsn_fpe_get_config(struct mac_device_info *hw, struct net_device *dev, + u32 *txqpec, bool *enable); + int tsn_fpe_show_pmac_sts(struct mac_device_info *hw, struct net_device *dev); ++int tsn_fpe_send_mpacket(struct mac_device_info *hw, struct net_device *dev, ++ enum mpacket_type type); ++void tsn_fpe_link_state_handle(struct mac_device_info *hw, ++ struct net_device *dev, bool is_up); ++void tsn_fpe_irq_status(struct mac_device_info *hw, struct net_device *dev); + + #endif /* __STMMAC_TSN_H__ */ +-- +2.17.1 + diff --git a/patches/0053-trusty-Update-macro-SMC_FC_GET_WALL_SIZE-from-12-to.trusty b/patches/0053-trusty-Update-macro-SMC_FC_GET_WALL_SIZE-from-12-to.trusty new file mode 100644 index 0000000000..716136bd92 --- /dev/null +++ b/patches/0053-trusty-Update-macro-SMC_FC_GET_WALL_SIZE-from-12-to.trusty @@ -0,0 +1,30 @@ +From 778673ddf9f917d219ddb6d9fd0bf5e589fa21f3 Mon Sep 17 00:00:00 2001 +From: "Ding,XinX" +Date: Wed, 21 Mar 2018 11:09:27 +0800 +Subject: [PATCH 53/63] trusty: Update macro SMC_FC_GET_WALL_SIZE from 12 to 20 + +Keep this macro synced with that of Trusty OS because we rebased + trusty OS with Google's and this smc id was increased. 
+ +Change-Id: I09d68971de6d8f3d099525c21f99fe7ed2fdcb9d +Signed-off-by: Ding,XinX +--- + include/linux/trusty/smcall.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index ee5dda2560b6..3ab2f688cb33 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -139,7 +139,7 @@ + * SMC_SC_DESTROY_WALL - notifies secure side that previously specifies SM Wall + * object should be released usually as part of normal shutdown sequence. + */ +-#define SMC_FC_GET_WALL_SIZE SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 12) ++#define SMC_FC_GET_WALL_SIZE SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 20) + #define SMC_SC_SETUP_WALL SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 12) + #define SMC_SC_DESTROY_WALL SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 13) + +-- +2.17.1 + diff --git a/patches/0054-ASoC-Intel-Skylake-Provide-probe-IPC-iface-debugfs-s.audio b/patches/0054-ASoC-Intel-Skylake-Provide-probe-IPC-iface-debugfs-s.audio new file mode 100644 index 0000000000..5bb4e38a52 --- /dev/null +++ b/patches/0054-ASoC-Intel-Skylake-Provide-probe-IPC-iface-debugfs-s.audio @@ -0,0 +1,249 @@ +From e295c8efdc0ef67eb83e4ec84173cd035dc74de1 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 21:41:03 +0100 +Subject: [PATCH 054/193] ASoC: Intel: Skylake: Provide probe IPC iface debugfs + support + +Define debugfs subdirectory delegated for IPC communitation with DSP. +Input format: uint,uint,(...) which are later translated into DWORDS +sequence and further into struct of interest instances given IPC type. + +For Probe, following have been enabled: +- injection dma: LARGE_CONFIG_GET +- probe points: LARGE_CONFIG_SET, LARGE_CONFIG_GET +- probe points disconnect: LARGE_CONFIG_SET + +Injection dma attach and detach (LARGE_CONFIG_SET) are unsupported +in case these should only be send by assigned compress stream. 
+ +Change-Id: Iba6742429f11a0dd9b2897606fac00ede9c2771d +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-debug.c | 197 ++++++++++++++++++++++++++++ + 1 file changed, 197 insertions(+) + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index 001c498659f6..e2884b66891a 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -24,6 +24,7 @@ struct skl_debug { + struct device *dev; + + struct dentry *fs; ++ struct dentry *ipc; + struct dentry *modules; + u8 fw_read_buff[FW_REG_BUF]; + }; +@@ -268,6 +269,196 @@ static const struct file_operations soft_regs_ctrl_fops = { + .llseek = default_llseek, + }; + ++static ssize_t injection_dma_read(struct file *file, ++ char __user *to, size_t count, loff_t *ppos) ++{ ++ struct skl_debug *d = file->private_data; ++ struct skl_probe_dma *dma; ++ size_t num_dma, len = 0; ++ char *buf; ++ int i, ret; ++ ++ buf = kzalloc(PAGE_SIZE, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = skl_probe_get_dma(d->skl, &dma, &num_dma); ++ if (ret < 0) ++ goto exit; ++ ++ for (i = 0; i < num_dma; i++) { ++ ret = snprintf(buf + len, PAGE_SIZE - len, ++ "Node id: %#x DMA buffer size: %d\n", ++ dma[i].node_id.val, dma[i].dma_buffer_size); ++ if (ret < 0) ++ goto free_dma; ++ len += ret; ++ } ++ ++ ret = simple_read_from_buffer(to, count, ppos, buf, len); ++free_dma: ++ kfree(dma); ++exit: ++ kfree(buf); ++ return ret; ++} ++ ++static const struct file_operations injection_dma_fops = { ++ .open = simple_open, ++ .read = injection_dma_read, ++ .llseek = default_llseek, ++}; ++ ++static ssize_t ppoints_read(struct file *file, ++ char __user *to, size_t count, loff_t *ppos) ++{ ++ struct skl_debug *d = file->private_data; ++ struct skl_probe_point_desc *desc; ++ size_t num_desc, len = 0; ++ char *buf; ++ int i, ret; ++ ++ buf = kzalloc(PAGE_SIZE, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = skl_probe_get_points(d->skl, &desc, &num_desc); 
++ if (ret < 0) ++ goto exit; ++ ++ for (i = 0; i < num_desc; i++) { ++ ret = snprintf(buf + len, PAGE_SIZE - len, ++ "Id: %#010x Purpose: %d Node id: %#x\n", ++ desc[i].id.value, desc[i].purpose, desc[i].node_id.val); ++ if (ret < 0) ++ goto free_desc; ++ len += ret; ++ } ++ ++ ret = simple_read_from_buffer(to, count, ppos, buf, len); ++free_desc: ++ kfree(desc); ++exit: ++ kfree(buf); ++ return ret; ++} ++ ++static ssize_t ppoints_write(struct file *file, ++ const char __user *from, size_t count, loff_t *ppos) ++{ ++ struct skl_debug *d = file->private_data; ++ struct skl_probe_point_desc *desc; ++ char *buf; ++ u32 *tkns; ++ size_t num_tkns; ++ int ret; ++ ++ buf = kmalloc(count + 1, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = simple_write_to_buffer(buf, count, ppos, from, count); ++ if (ret != count) { ++ ret = ret >= 0 ? -EIO : ret; ++ goto exit; ++ } ++ ++ buf[count] = '\0'; ++ ret = strsplit_u32((char **)&buf, ",", &tkns, &num_tkns); ++ if (ret < 0) ++ goto exit; ++ num_tkns *= sizeof(*tkns); ++ if (!num_tkns || (num_tkns % sizeof(*desc))) { ++ ret = -EINVAL; ++ goto free_tkns; ++ } ++ ++ desc = (struct skl_probe_point_desc *)tkns; ++ ret = skl_probe_points_connect(d->skl, desc, ++ num_tkns / sizeof(*desc)); ++ if (ret < 0) ++ goto free_tkns; ++ ++ ret = count; ++free_tkns: ++ kfree(tkns); ++exit: ++ kfree(buf); ++ return ret; ++} ++ ++static const struct file_operations ppoints_fops = { ++ .open = simple_open, ++ .read = ppoints_read, ++ .write = ppoints_write, ++ .llseek = default_llseek, ++}; ++ ++static ssize_t ppoints_discnt_write(struct file *file, ++ const char __user *from, size_t count, loff_t *ppos) ++{ ++ struct skl_debug *d = file->private_data; ++ union skl_probe_point_id *id; ++ char *buf; ++ u32 *tkns; ++ size_t num_tkns; ++ int ret; ++ ++ buf = kmalloc(count + 1, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = simple_write_to_buffer(buf, count, ppos, from, count); ++ if (ret != count) { ++ ret = ret >= 0 ? 
-EIO : ret; ++ goto exit; ++ } ++ ++ buf[count] = '\0'; ++ ret = strsplit_u32((char **)&buf, ",", &tkns, &num_tkns); ++ if (ret < 0) ++ goto exit; ++ num_tkns *= sizeof(*tkns); ++ if (!num_tkns || (num_tkns % sizeof(*id))) { ++ ret = -EINVAL; ++ goto free_tkns; ++ } ++ ++ id = (union skl_probe_point_id *)tkns; ++ ret = skl_probe_points_disconnect(d->skl, id, ++ num_tkns / sizeof(*id)); ++ if (ret < 0) ++ goto free_tkns; ++ ++ ret = count; ++free_tkns: ++ kfree(tkns); ++exit: ++ kfree(buf); ++ return ret; ++} ++ ++static const struct file_operations ppoints_discnt_fops = { ++ .open = simple_open, ++ .write = ppoints_discnt_write, ++ .llseek = default_llseek, ++}; ++ ++static int skl_debugfs_init_ipc(struct skl_debug *d) ++{ ++ if (!debugfs_create_file("injection_dma", 0444, ++ d->ipc, d, &injection_dma_fops)) ++ return -EIO; ++ if (!debugfs_create_file("probe_points", 0644, ++ d->ipc, d, &ppoints_fops)) ++ return -EIO; ++ if (!debugfs_create_file("probe_points_disconnect", 0200, ++ d->ipc, d, &ppoints_discnt_fops)) ++ return -EIO; ++ ++ return 0; ++} ++ + struct skl_debug *skl_debugfs_init(struct skl_dev *skl) + { + struct skl_debug *d; +@@ -282,6 +473,12 @@ struct skl_debug *skl_debugfs_init(struct skl_dev *skl) + d->skl = skl; + d->dev = &skl->pci->dev; + ++ d->ipc = debugfs_create_dir("ipc", d->fs); ++ if (IS_ERR_OR_NULL(d->ipc)) ++ return NULL; ++ if (skl_debugfs_init_ipc(d)) ++ return NULL; ++ + /* now create the module dir */ + d->modules = debugfs_create_dir("modules", d->fs); + +-- +2.17.1 + diff --git a/patches/0054-drm-virtio-add-plane-check.drm b/patches/0054-drm-virtio-add-plane-check.drm new file mode 100644 index 0000000000..1ed69b21d8 --- /dev/null +++ b/patches/0054-drm-virtio-add-plane-check.drm @@ -0,0 +1,46 @@ +From b6b81e203ae1c8c0b83f1ee151d3b69f5f0a6047 Mon Sep 17 00:00:00 2001 +From: Gerd Hoffmann +Date: Thu, 22 Aug 2019 11:46:57 +0200 +Subject: [PATCH 054/690] drm/virtio: add plane check + +Use drm_atomic_helper_check_plane_state() +to sanity 
check the plane state. + +Signed-off-by: Gerd Hoffmann +Acked-by: Chia-I Wu +Link: http://patchwork.freedesktop.org/patch/msgid/20190822094657.27483-1-kraxel@redhat.com +--- + drivers/gpu/drm/virtio/virtgpu_plane.c | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c +index a492ac3f4a7e..fe5efb2de90d 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_plane.c ++++ b/drivers/gpu/drm/virtio/virtgpu_plane.c +@@ -84,7 +84,22 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = { + static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) + { +- return 0; ++ bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR; ++ struct drm_crtc_state *crtc_state; ++ int ret; ++ ++ if (!state->fb || !state->crtc) ++ return 0; ++ ++ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); ++ if (IS_ERR(crtc_state)) ++ return PTR_ERR(crtc_state); ++ ++ ret = drm_atomic_helper_check_plane_state(state, crtc_state, ++ DRM_PLANE_HELPER_NO_SCALING, ++ DRM_PLANE_HELPER_NO_SCALING, ++ is_cursor, true); ++ return ret; + } + + static void virtio_gpu_primary_plane_update(struct drm_plane *plane, +-- +2.17.1 + diff --git a/patches/0054-mei-dal-add-dal-module-stub.security b/patches/0054-mei-dal-add-dal-module-stub.security new file mode 100644 index 0000000000..dd1cf001da --- /dev/null +++ b/patches/0054-mei-dal-add-dal-module-stub.security @@ -0,0 +1,133 @@ +From 7f19756d40774d9e87d61fc3be68690d9accd8a2 Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Sun, 26 May 2019 13:28:06 +0300 +Subject: [PATCH 54/65] mei: dal: add dal module stub + +DAL stands for Dynamic Application Loader, it provides the ability +to run Java applets in a secured environment inside of Intel ME security +engine (ME). The Java applets are also named as trusted applications TAs. 
+ +This is an empty module to enable first stacking supporting +code and than actual Linux kernel module. + +Change-Id: Iee645dd02cc1231fd7e117d3c17a9ebae48965b9 +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/Kconfig | 2 ++ + drivers/misc/mei/Makefile | 2 ++ + drivers/misc/mei/dal/Kconfig | 9 ++++++ + drivers/misc/mei/dal/Makefile | 7 +++++ + drivers/misc/mei/dal/dal_class.c | 51 ++++++++++++++++++++++++++++++++ + 5 files changed, 71 insertions(+) + create mode 100644 drivers/misc/mei/dal/Kconfig + create mode 100644 drivers/misc/mei/dal/Makefile + create mode 100644 drivers/misc/mei/dal/dal_class.c + +diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig +index 60d8c0fe8ef5..d21335a64a63 100644 +--- a/drivers/misc/mei/Kconfig ++++ b/drivers/misc/mei/Kconfig +@@ -58,3 +58,5 @@ config INTEL_MEI_VIRTIO + + source "drivers/misc/mei/hdcp/Kconfig" + source "drivers/misc/mei/spd/Kconfig" ++source "drivers/misc/mei/dal/Kconfig" ++ +diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile +index 60ae4375f579..75d05314eaa8 100644 +--- a/drivers/misc/mei/Makefile ++++ b/drivers/misc/mei/Makefile +@@ -30,3 +30,5 @@ CFLAGS_mei-trace.o = -I$(src) + + obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/ + obj-$(CONFIG_INTEL_MEI_SPD) += spd/ ++obj-$(CONFIG_INTEL_MEI_DAL) += dal/ ++ +diff --git a/drivers/misc/mei/dal/Kconfig b/drivers/misc/mei/dal/Kconfig +new file mode 100644 +index 000000000000..1d9e2dd3b95b +--- /dev/null ++++ b/drivers/misc/mei/dal/Kconfig +@@ -0,0 +1,9 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# Copyright (c) 2016-2019, Intel Corporation. ++config INTEL_MEI_DAL ++ tristate "Dynamic Application Loader for ME" ++ depends on INTEL_MEI ++ help ++ Dynamic Application Loader enables downloading java applets ++ to DAL FW and run it in a secure environment. ++ The DAL module exposes both user space api and kernel space api. 
+diff --git a/drivers/misc/mei/dal/Makefile b/drivers/misc/mei/dal/Makefile +new file mode 100644 +index 000000000000..13791b6c858e +--- /dev/null ++++ b/drivers/misc/mei/dal/Makefile +@@ -0,0 +1,7 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# Copyright (c) 2016-2019, Intel Corporation ++ ++ccflags-y += -D__CHECK_ENDIAN__ ++ ++obj-$(CONFIG_INTEL_MEI_DAL) += mei_dal.o ++mei_dal-objs += dal_class.o +diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c +new file mode 100644 +index 000000000000..6990132dc5e2 +--- /dev/null ++++ b/drivers/misc/mei/dal/dal_class.c +@@ -0,0 +1,51 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2016-2019 Intel Corporation. ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++#include ++#include ++ ++/* ++ * this class contains the 3 mei_cl_device, ivm, sdm, rtm. ++ * it is initialized during dal_probe and is used by the kernel space kdi ++ * to send/recv data to/from mei. ++ * ++ * this class must be initialized before the kernel space kdi uses it. 
++ */ ++static struct class *dal_class; ++ ++/** ++ * mei_dal_exit - module exit function ++ */ ++static void __exit mei_dal_exit(void) ++{ ++ class_destroy(dal_class); ++} ++ ++/** ++ * mei_dal_init - module init function ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int __init mei_dal_init(void) ++{ ++ dal_class = class_create(THIS_MODULE, "dal"); ++ if (IS_ERR(dal_class)) { ++ pr_err("couldn't create class\n"); ++ return PTR_ERR(dal_class); ++ } ++ ++ return 0; ++} ++ ++module_init(mei_dal_init); ++module_exit(mei_dal_exit); ++ ++MODULE_AUTHOR("Intel Corporation"); ++MODULE_DESCRIPTION("Intel(R) MEI Dynamic Application Loader (DAL)"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/patches/0054-tc-Add-support-for-configuring-the-frame-pree.connectivity b/patches/0054-tc-Add-support-for-configuring-the-frame-pree.connectivity new file mode 100644 index 0000000000..95d8cc4c77 --- /dev/null +++ b/patches/0054-tc-Add-support-for-configuring-the-frame-pree.connectivity @@ -0,0 +1,101 @@ +From 79f9d879e5e069951b0fb2658e343968a5d926b9 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Tue, 13 Aug 2019 12:53:43 +0800 +Subject: [PATCH 054/108] tc: Add support for configuring the frame preemption + through taprio + +For current implementation, we allow user to enable IEEE 802.1Qbu/ +IEEE 802.3br Frame Preemption through taprio hardware offload only. + +In future, we will enable user to independent enable/disable Frame +Preemption. + +Thanks to Vinicius Costa Gomes for laying down the foundations for +supporting frame preemption in taprio qdisc. 
+ +Signed-off-by: Ong Boon Leong +--- + include/net/pkt_sched.h | 2 ++ + include/uapi/linux/pkt_sched.h | 1 + + net/sched/sch_taprio.c | 19 +++++++++++++++++++ + 3 files changed, 22 insertions(+) + +diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h +index 6a70845bd9ab..a87c02c008df 100644 +--- a/include/net/pkt_sched.h ++++ b/include/net/pkt_sched.h +@@ -180,6 +180,8 @@ struct tc_taprio_qopt_offload { + u64 cycle_time; + u64 cycle_time_extension; + ++ /* bit nth being set means that the nth queue is preemptible */ ++ u32 frame_preemption_queue_mask; + size_t num_entries; + struct tc_taprio_sched_entry entries[0]; + }; +diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h +index 5011259b8f67..57e10400dae3 100644 +--- a/include/uapi/linux/pkt_sched.h ++++ b/include/uapi/linux/pkt_sched.h +@@ -1176,6 +1176,7 @@ enum { + TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */ + TCA_TAPRIO_ATTR_FLAGS, /* u32 */ + TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */ ++ TCA_TAPRIO_ATTR_FPE_QMASK, /* u32 */ + __TCA_TAPRIO_ATTR_MAX, + }; + +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c +index 65563e6acdae..ab08186f2880 100644 +--- a/net/sched/sch_taprio.c ++++ b/net/sched/sch_taprio.c +@@ -78,6 +78,7 @@ struct taprio_sched { + struct sk_buff *(*dequeue)(struct Qdisc *sch); + struct sk_buff *(*peek)(struct Qdisc *sch); + u32 txtime_delay; ++ u32 fpe_q_mask; + }; + + struct __tc_taprio_qopt_offload { +@@ -766,6 +767,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { + [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, + [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 }, + [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, ++ [TCA_TAPRIO_ATTR_FPE_QMASK] = { .type = NLA_S32 }, + }; + + static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, +@@ -1217,6 +1219,8 @@ static int taprio_enable_offload(struct net_device *dev, + offload->enable = 1; + 
taprio_sched_to_offload(q, sched, mqprio, offload); + ++ offload->frame_preemption_queue_mask = q->fpe_q_mask; ++ + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); + if (err < 0) { + NL_SET_ERR_MSG(extack, +@@ -1384,6 +1388,21 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, + q->flags = taprio_flags; + } + ++ if (tb[TCA_TAPRIO_ATTR_FPE_QMASK]) { ++ q->fpe_q_mask = nla_get_u32(tb[TCA_TAPRIO_ATTR_FPE_QMASK]); ++ if (FULL_OFFLOAD_IS_ENABLED(q->flags) && !q->fpe_q_mask) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Invalid FPE Queue Mask - all 0s"); ++ ++ return -EINVAL; ++ } ++ } else { ++ /* If 'fpe-qmask' is not set, mark fpe_q_mask=0 to indicate ++ * FPE is disabled. ++ */ ++ q->fpe_q_mask = 0; ++ } ++ + err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags); + if (err < 0) + return err; +-- +2.17.1 + diff --git a/patches/0054-unify-trusty-driver.trusty b/patches/0054-unify-trusty-driver.trusty new file mode 100644 index 0000000000..aa24b7766e --- /dev/null +++ b/patches/0054-unify-trusty-driver.trusty @@ -0,0 +1,127 @@ +From d1cf7585e0bcfe7dd1b317189e63ccababc5f9e1 Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Wed, 7 Mar 2018 15:45:48 +0800 +Subject: [PATCH 54/63] unify trusty driver + +Keep One Trusty driver accross different version kernel as we have One Trusty OS + +Change-Id: Ie81201bb543ffdf6050bfab7560bd275f3a92eb0 +Signed-off-by: Zhang, Qi +--- + drivers/trusty/trusty-ipc.c | 7 +++++++ + drivers/trusty/trusty-mem.c | 6 +++--- + drivers/trusty/trusty-virtio.c | 14 ++++++++++++++ + 3 files changed, 24 insertions(+), 3 deletions(-) + mode change 100755 => 100644 drivers/trusty/trusty-mem.c + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index a2bc3fcba29a..7df0972ddd05 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -21,7 +21,10 @@ + #include + #include + #include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + #include ++#endif + #include + 
#include + #include +@@ -1558,7 +1561,11 @@ static int tipc_virtio_probe(struct virtio_device *vdev) + vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0'; + + /* find tx virtqueues (rx and tx and in this order) */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL, NULL); ++#else ++ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names); ++#endif + if (err) + goto err_find_vqs; + +diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c +old mode 100755 +new mode 100644 +index fc299e348581..470df8823d3a +--- a/drivers/trusty/trusty-mem.c ++++ b/drivers/trusty/trusty-mem.c +@@ -77,7 +77,7 @@ static int get_mem_attr(struct page *page, pgprot_t pgprot) + return -EINVAL; + } + #elif defined(CONFIG_X86) +- #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + /* The porting to CHT kernel (3.14.55) is in the #else clause. + ** For BXT kernel (4.1.0), the function get_page_memtype() is static. + ** +@@ -93,7 +93,7 @@ static int get_mem_attr(struct page *page, pgprot_t pgprot) + ** with SMP, which only allow UNCACHED. 
+ */ + return NS_MAIR_NORMAL_UNCACHED; +- #else ++#else + unsigned long type; + int ret_mem_attr = 0; + +@@ -124,7 +124,7 @@ static int get_mem_attr(struct page *page, pgprot_t pgprot) + ret_mem_attr = -EINVAL; + } + return ret_mem_attr; +- #endif ++#endif + #else + return 0; + #endif +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index 743a4789772f..66b4ee7caf0d 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -337,9 +338,15 @@ static struct virtqueue *_find_vq(struct virtio_device *vdev, + dev_info(&vdev->dev, "vring%d: va(pa) %p(%llx) qsz %d notifyid %d\n", + id, tvr->vaddr, (u64)tvr->paddr, tvr->elem_num, tvr->notifyid); + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align, + vdev, true, true, tvr->vaddr, + trusty_virtio_notify, callback, name); ++#else ++ tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align, ++ vdev, true, tvr->vaddr, ++ trusty_virtio_notify, callback, name); ++#endif + if (!tvr->vq) { + dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n", + name); +@@ -356,12 +363,19 @@ static struct virtqueue *_find_vq(struct virtio_device *vdev, + return ERR_PTR(-ENOMEM); + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], + const bool *ctx, + struct irq_affinity *desc) ++#else ++static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, ++ struct virtqueue *vqs[], ++ vq_callback_t *callbacks[], ++ const char * const names[]) ++#endif + { + uint i; + int ret; +-- +2.17.1 + diff --git a/patches/0054-vhm-add-set_memmaps-hypercall-support.acrn b/patches/0054-vhm-add-set_memmaps-hypercall-support.acrn new file mode 100644 index 
0000000000..8857bd04ec --- /dev/null +++ b/patches/0054-vhm-add-set_memmaps-hypercall-support.acrn @@ -0,0 +1,164 @@ +From 4a5d827abdc3de036df0598dd344ad9e07888abb Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:59:00 +0800 +Subject: [PATCH 054/150] vhm: add set_memmaps hypercall support + +This new added hypercall is to support multi regions memmap in one time, +which improve the performance. + +1 API is added to support set_memmaps hypercall: +- int set_memmaps(struct set_memmaps *memmaps) + +struct set_memmaps is added to present multi region memmap info, which +include a page buffer to fill the memmaps array. + +Signed-off-by: Jason Chen CJ +--- + drivers/vhm/vhm_hypercall.c | 5 +++++ + drivers/vhm/vhm_mm.c | 14 ++++++++++++++ + include/linux/vhm/acrn_hv_defs.h | 32 +++++++++++++++++++++++++++++++ + include/linux/vhm/acrn_vhm_mm.h | 12 +++++++++++- + include/linux/vhm/vhm_hypercall.h | 1 + + 5 files changed, 63 insertions(+), 1 deletion(-) + +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index 5940022403c0..9819ab95beaa 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -97,6 +97,11 @@ inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) + return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); + } + ++inline long hcall_set_memmaps(unsigned long pa_memmaps) ++{ ++ return acrn_hypercall1(HC_VM_SET_MEMMAPS, pa_memmaps); ++} ++ + inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) + { + return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index 070327e616d6..75ccd3f09a4e 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -199,6 +199,20 @@ int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, + 0, 0, MAP_UNMAP); + } + ++int set_memmaps(struct set_memmaps *memmaps) ++{ ++ if (memmaps == NULL) ++ return -EINVAL; ++ if (memmaps->memmaps_num > 0) { ++ if 
(hcall_set_memmaps(virt_to_phys(memmaps)) < 0) { ++ pr_err("vhm: failed to set memmaps!\n"); ++ return -EFAULT; ++ } ++ } ++ ++ return 0; ++} ++ + int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned int mem_access_right) +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 902312049970..161523dca1db 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -94,6 +94,7 @@ + #define HC_ID_MEM_BASE 0x40UL + #define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00) + #define HC_VM_GPA2HPA _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01) ++#define HC_VM_SET_MEMMAPS _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02) + + /* PCI assignment*/ + #define HC_ID_PCI_BASE 0x50UL +@@ -149,6 +150,37 @@ struct vm_set_memmap { + uint32_t prot; + } __attribute__((aligned(8))); + ++struct memory_map { ++ uint32_t type; ++ ++ /* IN: mem attr */ ++ uint32_t prot; ++ ++ /* IN: beginning guest GPA to map */ ++ uint64_t remote_gpa; ++ ++ /* IN: VM0's GPA which foreign gpa will be mapped to */ ++ uint64_t vm0_gpa; ++ ++ /* IN: length of the range */ ++ uint64_t length; ++} __attribute__((aligned(8))); ++ ++struct set_memmaps { ++ /*IN: vmid for this hypercall */ ++ uint64_t vmid; ++ ++ /* IN: multi memmaps numbers */ ++ uint32_t memmaps_num; ++ ++ /* IN: ++ * the gpa of memmaps buffer, point to the memmaps array: ++ * struct memory_map memmap_array[memmaps_num] ++ * the max buffer size is one page. 
++ */ ++ uint64_t memmaps_gpa; ++} __attribute__((aligned(8))); ++ + struct sbuf_setup_param { + uint32_t pcpu_id; + uint32_t sbuf_id; +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index 9be6749d12e2..712860b5f5af 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -62,6 +62,7 @@ + + #include + #include ++#include + + /** + * acrn_hpa2gpa - physical address conversion +@@ -189,7 +190,7 @@ void free_guest_mem(struct vhm_vm *vm); + int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg); + + /** +- * map_guest_memseg - map EPT mmapping of memory of guest according to ++ * map_guest_memseg - set guest mmapping of memory according to + * pre-defined memory mapping info + * + * @vm: pointer to guest vm +@@ -207,4 +208,13 @@ int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap); + void hugepage_free_guest(struct vhm_vm *vm); + void *hugepage_map_guest_phys(struct vhm_vm *vm, u64 guest_phys, size_t size); + int hugepage_unmap_guest_phys(struct vhm_vm *vm, u64 guest_phys); ++ ++/** ++ * set_memmaps - set guest mapping for multi regions ++ * ++ * @memmaps: pointer to set_memmaps ++ * ++ * Return: 0 on success, <0 for error. 
++ */ ++int set_memmaps(struct set_memmaps *memmaps); + #endif +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index 1cc47ffab1a9..be60df1c6f66 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -148,6 +148,7 @@ inline long hcall_setup_sbuf(unsigned long sbuf_head); + inline long hcall_get_cpu_state(unsigned long cmd, unsigned long state_pa); + inline long hcall_set_memmap(unsigned long vmid, + unsigned long memmap); ++inline long hcall_set_memmaps(unsigned long pa_memmaps); + inline long hcall_set_ioreq_buffer(unsigned long vmid, + unsigned long buffer); + inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu); +-- +2.17.1 + diff --git a/patches/0055-ASoC-Intel-Skylake-Declare-Probe-compress-CPU-DAIs.audio b/patches/0055-ASoC-Intel-Skylake-Declare-Probe-compress-CPU-DAIs.audio new file mode 100644 index 0000000000..b792e0c726 --- /dev/null +++ b/patches/0055-ASoC-Intel-Skylake-Declare-Probe-compress-CPU-DAIs.audio @@ -0,0 +1,90 @@ +From 005d716b5433e9093aa8e9b740fbf8438c2f16e1 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Wed, 6 Mar 2019 22:23:13 +0100 +Subject: [PATCH 055/193] ASoC: Intel: Skylake: Declare Probe compress CPU DAIs + +Declare extraction and injection CPU DAIs as well as +skl_probe_compr_ops. FE DAIs can link against these new CPU DAIs to +create new compress devices. 
+ +Change-Id: I2407e73ab53de724b59ba2d8fdf4dd28f9b85777 +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/skylake/skl-pcm.c | 40 +++++++++++++++++++++++++++++++ + 1 file changed, 40 insertions(+) + +diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c +index ac87d177229a..9660038499bb 100644 +--- a/sound/soc/intel/skylake/skl-pcm.c ++++ b/sound/soc/intel/skylake/skl-pcm.c +@@ -19,6 +19,7 @@ + #include "skl-topology.h" + #include "skl-sst-dsp.h" + #include "skl-sst-ipc.h" ++#include "skl-compress.h" + + #define HDA_MONO 1 + #define HDA_STEREO 2 +@@ -671,6 +672,18 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream, + return 0; + } + ++static struct snd_compr_ops skl_platform_compr_ops = { ++ .copy = skl_probe_compr_copy, ++}; ++ ++static struct snd_soc_cdai_ops skl_probe_compr_ops = { ++ .startup = skl_probe_compr_open, ++ .shutdown = skl_probe_compr_free, ++ .set_params = skl_probe_compr_set_params, ++ .trigger = skl_probe_compr_trigger, ++ .pointer = skl_probe_compr_pointer, ++}; ++ + static const struct snd_soc_dai_ops skl_pcm_dai_ops = { + .startup = skl_pcm_open, + .shutdown = skl_pcm_close, +@@ -1071,6 +1084,32 @@ static struct snd_soc_dai_driver skl_platform_dai[] = { + SNDRV_PCM_FMTBIT_S32_LE, + }, + }, ++{ ++ .name = "Probe Injection0 CPU DAI", ++ .compress_new = snd_soc_new_compress, ++ .cops = &skl_probe_compr_ops, ++ .playback = { ++ .stream_name = "Probe Injection0", ++ .channels_min = 1, ++ .channels_max = 8, ++ .rates = SNDRV_PCM_RATE_48000, ++ .rate_min = 48000, ++ .rate_max = 48000, ++ }, ++}, ++{ ++ .name = "Probe Extraction CPU DAI", ++ .compress_new = snd_soc_new_compress, ++ .cops = &skl_probe_compr_ops, ++ .capture = { ++ .stream_name = "Probe Extraction", ++ .channels_min = 1, ++ .channels_max = 8, ++ .rates = SNDRV_PCM_RATE_48000, ++ .rate_min = 48000, ++ .rate_max = 48000, ++ }, ++}, + }; + + int skl_dai_load(struct snd_soc_component *cmp, int index, +@@ -1529,6 +1568,7 @@ static const struct 
snd_soc_component_driver skl_component = { + .probe = skl_platform_soc_probe, + .remove = skl_pcm_remove, + .ops = &skl_platform_ops, ++ .compr_ops = &skl_platform_compr_ops, + .pcm_new = skl_pcm_new, + .pcm_free = skl_pcm_free, + .module_get_upon_open = 1, /* increment refcount when a pcm is opened */ +-- +2.17.1 + diff --git a/patches/0055-Revert-trusty-ipc-change-DEFAULT_MSG_BUF_SIZE-to-68.trusty b/patches/0055-Revert-trusty-ipc-change-DEFAULT_MSG_BUF_SIZE-to-68.trusty new file mode 100644 index 0000000000..13ae1bb0ad --- /dev/null +++ b/patches/0055-Revert-trusty-ipc-change-DEFAULT_MSG_BUF_SIZE-to-68.trusty @@ -0,0 +1,30 @@ +From c9eece6d8bfb6ded206129c686ccd1fc8efee351 Mon Sep 17 00:00:00 2001 +From: "Yan, Shaopu" +Date: Thu, 12 Apr 2018 09:06:04 +0800 +Subject: [PATCH 55/63] Revert "trusty-ipc: change DEFAULT_MSG_BUF_SIZE to 68K" + +This reverts commit f3e776a486937859e6cd67ab558544544fae7004. + +Change-Id: I26fd8a9e5b0206bce757f30dbbe5b13d59d0819c +Signed-off-by: Yan, Shaopu +--- + drivers/trusty/trusty-ipc.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index 7df0972ddd05..f0b6b1bb444a 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -49,8 +49,7 @@ + #define MAX_SRV_NAME_LEN 256 + #define MAX_DEV_NAME_LEN 32 + +-#define DEFAULT_MSG_BUF_SIZE (68*1024) +- ++#define DEFAULT_MSG_BUF_SIZE PAGE_SIZE + #define DEFAULT_MSG_BUF_ALIGN PAGE_SIZE + + #define TIPC_CTRL_ADDR 53 +-- +2.17.1 + diff --git a/patches/0055-drm-virtio-cleanup-queue-functions.drm b/patches/0055-drm-virtio-cleanup-queue-functions.drm new file mode 100644 index 0000000000..357c801bf5 --- /dev/null +++ b/patches/0055-drm-virtio-cleanup-queue-functions.drm @@ -0,0 +1,117 @@ +From c620fadab6cac99f3327ea4cf68b1d1115e060f0 Mon Sep 17 00:00:00 2001 +From: Gerd Hoffmann +Date: Tue, 13 Aug 2019 10:25:08 +0200 +Subject: [PATCH 055/690] drm/virtio: cleanup queue functions + +Make the queue 
functions return void, none of +the call sites checks the return value. + +Signed-off-by: Gerd Hoffmann +Reviewed-by: Chia-I Wu +Link: http://patchwork.freedesktop.org/patch/msgid/20190813082509.29324-2-kraxel@redhat.com +--- + drivers/gpu/drm/virtio/virtgpu_vq.c | 41 ++++++++++------------------- + 1 file changed, 14 insertions(+), 27 deletions(-) + +diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c +index 7ac20490e1b4..ca91e83ffaef 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_vq.c ++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c +@@ -252,8 +252,8 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) + wake_up(&vgdev->cursorq.ack_queue); + } + +-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, +- struct virtio_gpu_vbuffer *vbuf) ++static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, ++ struct virtio_gpu_vbuffer *vbuf) + __releases(&vgdev->ctrlq.qlock) + __acquires(&vgdev->ctrlq.qlock) + { +@@ -263,7 +263,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, + int ret; + + if (!vgdev->vqs_ready) +- return -ENODEV; ++ return; + + sg_init_one(&vcmd, vbuf->buf, vbuf->size); + sgs[outcnt + incnt] = &vcmd; +@@ -294,30 +294,22 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, + + virtqueue_kick(vq); + } +- +- if (!ret) +- ret = vq->num_free; +- return ret; + } + +-static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, +- struct virtio_gpu_vbuffer *vbuf) ++static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, ++ struct virtio_gpu_vbuffer *vbuf) + { +- int rc; +- + spin_lock(&vgdev->ctrlq.qlock); +- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); ++ virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); + spin_unlock(&vgdev->ctrlq.qlock); +- return rc; + } + +-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, +- struct virtio_gpu_vbuffer 
*vbuf, +- struct virtio_gpu_ctrl_hdr *hdr, +- struct virtio_gpu_fence *fence) ++static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, ++ struct virtio_gpu_vbuffer *vbuf, ++ struct virtio_gpu_ctrl_hdr *hdr, ++ struct virtio_gpu_fence *fence) + { + struct virtqueue *vq = vgdev->ctrlq.vq; +- int rc; + + again: + spin_lock(&vgdev->ctrlq.qlock); +@@ -338,13 +330,12 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + + if (fence) + virtio_gpu_fence_emit(vgdev, hdr, fence); +- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); ++ virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); + spin_unlock(&vgdev->ctrlq.qlock); +- return rc; + } + +-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, +- struct virtio_gpu_vbuffer *vbuf) ++static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, ++ struct virtio_gpu_vbuffer *vbuf) + { + struct virtqueue *vq = vgdev->cursorq.vq; + struct scatterlist *sgs[1], ccmd; +@@ -352,7 +343,7 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, + int outcnt; + + if (!vgdev->vqs_ready) +- return -ENODEV; ++ return; + + sg_init_one(&ccmd, vbuf->buf, vbuf->size); + sgs[0] = &ccmd; +@@ -374,10 +365,6 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, + } + + spin_unlock(&vgdev->cursorq.qlock); +- +- if (!ret) +- ret = vq->num_free; +- return ret; + } + + /* just create gem objects for userspace and long lived objects, +-- +2.17.1 + diff --git a/patches/0055-mei-dal-add-acp-parser-code.security b/patches/0055-mei-dal-add-acp-parser-code.security new file mode 100644 index 0000000000..12fb5de55b --- /dev/null +++ b/patches/0055-mei-dal-add-acp-parser-code.security @@ -0,0 +1,796 @@ +From bb8f06a958036bde87d7787bbff8383b0199f11f Mon Sep 17 00:00:00 2001 +From: Yael Samet +Date: Mon, 14 Aug 2017 11:23:38 +0300 +Subject: [PATCH 55/65] mei: dal: add acp parser code + +The ACP (Admin Command Pack) file represents a Java trusted 
application +(JTA) image to be downloaded to the DAL firmware. +This patch adds ACP file parser. + +Change-Id: Ibc71f438b248cc9fc696db22289ad7d4962ea41c +Signed-off-by: Yael Samet +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/dal/Makefile | 1 + + drivers/misc/mei/dal/acp_format.h | 198 ++++++++++++ + drivers/misc/mei/dal/acp_parser.c | 507 ++++++++++++++++++++++++++++++ + drivers/misc/mei/dal/acp_parser.h | 38 +++ + 4 files changed, 744 insertions(+) + create mode 100644 drivers/misc/mei/dal/acp_format.h + create mode 100644 drivers/misc/mei/dal/acp_parser.c + create mode 100644 drivers/misc/mei/dal/acp_parser.h + +diff --git a/drivers/misc/mei/dal/Makefile b/drivers/misc/mei/dal/Makefile +index 13791b6c858e..bd86590a46c4 100644 +--- a/drivers/misc/mei/dal/Makefile ++++ b/drivers/misc/mei/dal/Makefile +@@ -4,4 +4,5 @@ + ccflags-y += -D__CHECK_ENDIAN__ + + obj-$(CONFIG_INTEL_MEI_DAL) += mei_dal.o ++mei_dal-objs += acp_parser.o + mei_dal-objs += dal_class.o +diff --git a/drivers/misc/mei/dal/acp_format.h b/drivers/misc/mei/dal/acp_format.h +new file mode 100644 +index 000000000000..c9b70600bb20 +--- /dev/null ++++ b/drivers/misc/mei/dal/acp_format.h +@@ -0,0 +1,198 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++ ++#ifndef _ACP_FORMAT_H ++#define _ACP_FORMAT_H ++ ++#include ++ ++#define AC_MAX_INS_REASONS_LENGTH 1024 ++#define AC_MAX_USED_SERVICES 20 ++#define AC_MAX_PROPS_LENGTH 2048 ++#define AC_MAX_PACK_HASH_LEN 32 ++ ++/** ++ * enum ac_cmd_id - acp file command (acp type) ++ * ++ * @AC_CMD_INVALID: invalid command ++ * @AC_INSTALL_SD: install new sub security domain ++ * @AC_UNINSTALL_SD: uninstall sub security domain ++ * @AC_INSTALL_JTA: install java ta ++ * @AC_UNINSTALL_JTA: uninstall java ta ++ * @AC_INSTALL_NTA: install native ta (currently NOT SUPPORTED) ++ * @AC_UNINSTALL_NTA: uninstall native ta (currently NOT SUPPORTED) ++ * @AC_UPDATE_SVL: update the security version list ++ * @AC_INSTALL_JTA_PROP: ta properties for installation ++ * @AC_CMD_NUM: number of acp commands ++ */ ++enum ac_cmd_id { ++ AC_CMD_INVALID, ++ AC_INSTALL_SD, ++ AC_UNINSTALL_SD, ++ AC_INSTALL_JTA, ++ AC_UNINSTALL_JTA, ++ AC_INSTALL_NTA, ++ AC_UNINSTALL_NTA, ++ AC_UPDATE_SVL, ++ AC_INSTALL_JTA_PROP, ++ AC_CMD_NUM ++}; ++ ++/** ++ * struct ac_pack_hash - ta pack hash ++ * ++ * @data: ta hash ++ */ ++struct ac_pack_hash { ++ u8 data[AC_MAX_PACK_HASH_LEN]; ++} __packed; ++ ++/** ++ * struct ac_pack_header - admin comman pack header ++ * ++ * @magic: magic string which represents an ACP ++ * @version: package format version ++ * @byte_order: byte order of package (0 big endian, 1 little endian) ++ * @reserved: reserved bytes ++ * @size: total package size ++ * @cmd_id: acp command (acp file type) ++ * @svn: security version number ++ * ++ * @idx_num: the number of the indexed sections ++ * @idx_condition: condition section offset ++ * @idx_data: data section offset ++ */ ++struct ac_pack_header { ++ /*ACP Header*/ ++ u8 magic[4]; ++ u8 version; ++ u8 byte_order; ++ u16 reserved; ++ u32 size; ++ u32 cmd_id; ++ u32 svn; ++ ++ /* Index Section */ ++ u32 idx_num; ++ u32 idx_condition; ++ u32 idx_data; ++} __packed; ++ ++/** ++ * struct ac_ta_id_list - A list of ta ids which the ta ++ * is 
allowed to communicate with. ++ * ++ * @num: ta ids count ++ * @list: ta ids list ++ */ ++struct ac_ta_id_list { ++ u32 num; ++ uuid_t list[0]; ++} __packed; ++ ++/** ++ * struct ac_prop_list - TLV list of acp properties ++ * ++ * @num: number of properties ++ * @len: size of all properties ++ * @data: acp properties. TLV format is "type\0key\0value\0" ++ * (e.g. string\0name\0Tom\0int\0Age\013\0) ++ */ ++struct ac_prop_list { ++ u32 num; ++ u32 len; ++ s8 data[0]; ++} __packed; ++ ++/** ++ * struct ac_ins_reasons - list of event codes that can be ++ * received or posted by ta ++ * ++ * @len: event codes count ++ * @data: event codes list ++ */ ++struct ac_ins_reasons { ++ u32 len; ++ u32 data[0]; ++} __packed; ++ ++/** ++ * struct ac_pack - general struct to hold parsed acp content ++ * ++ * @head: acp pack header ++ * @data: acp parsed content ++ */ ++struct ac_pack { ++ struct ac_pack_header *head; ++ char data[0]; ++} __packed; ++ ++/** ++ * struct ac_ins_ta_header - ta installation header ++ * ++ * @ta_id: ta id ++ * @ta_svn: ta security version number ++ * @hash_alg_type: ta hash algorithm type ++ * @ta_reserved: reserved bytes ++ * @hash: ta pack hash ++ */ ++struct ac_ins_ta_header { ++ uuid_t ta_id; ++ u32 ta_svn; ++ u8 hash_alg_type; ++ u8 ta_reserved[3]; ++ struct ac_pack_hash hash; ++} __packed; ++ ++/** ++ * struct ac_ins_jta_pack - ta installation information ++ * ++ * @ins_cond: ta install conditions (contains some of the manifest data, ++ * including security.version, applet.version, applet.platform, ++ * applet.api.level) ++ * @head: ta installation header ++ */ ++struct ac_ins_jta_pack { ++ struct ac_prop_list *ins_cond; ++ struct ac_ins_ta_header *head; ++} __packed; ++ ++/** ++ * struct ac_ins_jta_prop_header - ta manifest header ++ * ++ * @mem_quota: ta heap size ++ * @ta_encrypted: ta encrypted by provider flag ++ * @padding: padding ++ * @allowed_inter_session_num: allowed internal session count ++ * @ac_groups: ta permission groups ++ * 
@timeout: ta timeout in milliseconds ++ */ ++struct ac_ins_jta_prop_header { ++ u32 mem_quota; ++ u8 ta_encrypted; ++ u8 padding; ++ u16 allowed_inter_session_num; ++ u64 ac_groups; ++ u32 timeout; ++} __packed; ++ ++/** ++ * struct ac_ins_jta_prop - ta manifest ++ * ++ * @head: manifest header ++ * @post_reasons: list of event codes that can be posted by ta ++ * @reg_reasons: list of event codes that can be received by ta ++ * @prop: all other manifest fields (acp properties) ++ * @used_service_list: list of ta ids which ta is allowed to communicate with ++ */ ++struct ac_ins_jta_prop { ++ struct ac_ins_jta_prop_header *head; ++ struct ac_ins_reasons *post_reasons; ++ struct ac_ins_reasons *reg_reasons; ++ struct ac_prop_list *prop; ++ struct ac_ta_id_list *used_service_list; ++} __packed; ++ ++#endif /* _ACP_FORMAT_H */ +diff --git a/drivers/misc/mei/dal/acp_parser.c b/drivers/misc/mei/dal/acp_parser.c +new file mode 100644 +index 000000000000..3898444dbbe1 +--- /dev/null ++++ b/drivers/misc/mei/dal/acp_parser.c +@@ -0,0 +1,507 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++ ++#include ++#include ++ ++#include "acp_format.h" ++#include "acp_parser.h" ++ ++/* CSS Header + CSS Crypto Block ++ * Prefixes each signed ACP package ++ */ ++#define AC_CSS_HEADER_LENGTH (128 + 520) ++ ++/** ++ * struct ac_pr_state - admin command pack reader state ++ * ++ * @cur : current read position ++ * @head : acp file head ++ * @total : size of acp file ++ */ ++struct ac_pr_state { ++ const char *cur; ++ const char *head; ++ unsigned int total; ++}; ++ ++/** ++ * ac_pr_init - init pack reader ++ * ++ * @pr: pack reader ++ * @data: acp file content (without CSS header) ++ * @n: acp file size (without CSS header) ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int ac_pr_init(struct ac_pr_state *pr, const char *data, ++ unsigned int n) ++{ ++ /* check integer overflow */ ++ if ((size_t)data > SIZE_MAX - n) ++ return -EINVAL; ++ ++ pr->cur = data; ++ pr->head = data; ++ pr->total = n; ++ return 0; ++} ++ ++/** ++ * ac_pr_8b_align_move - update pack reader cur pointer after reading n_move ++ * bytes. Leave cur aligned to 8 bytes. ++ * (e.g. when n_move is 3, increase cur by 8) ++ * ++ * @pr: pack reader ++ * @n_move: number of bytes to move cur pointer ahead ++ * will be rounded up to keep cur 8 bytes aligned ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int ac_pr_8b_align_move(struct ac_pr_state *pr, size_t n_move) ++{ ++ unsigned long offset; ++ const char *new_cur = pr->cur + n_move; ++ size_t len_from_head = new_cur - pr->head; ++ ++ if ((size_t)pr->cur > SIZE_MAX - n_move || new_cur < pr->head) ++ return -EINVAL; ++ ++ offset = ((8 - (len_from_head & 7)) & 7); ++ if ((size_t)new_cur > SIZE_MAX - offset) ++ return -EINVAL; ++ ++ new_cur = new_cur + offset; ++ if (new_cur > pr->head + pr->total) ++ return -EINVAL; ++ ++ pr->cur = new_cur; ++ return 0; ++} ++ ++/** ++ * ac_pr_align_move - update pack reader cur pointer after reading n_move bytes ++ * Leave cur aligned to 4 bytes. 
++ * (e.g. when n_move is 1, increase cur by 4) ++ * ++ * @pr: pack reader ++ * @n_move: number of bytes to move cur pointer ahead ++ * will be rounded up to keep cur 4 bytes aligned ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int ac_pr_align_move(struct ac_pr_state *pr, size_t n_move) ++{ ++ const char *new_cur = pr->cur + n_move; ++ size_t len_from_head = new_cur - pr->head; ++ size_t offset; ++ ++ if ((size_t)pr->cur > SIZE_MAX - n_move || new_cur < pr->head) ++ return -EINVAL; ++ ++ offset = ((4 - (len_from_head & 3)) & 3); ++ if ((size_t)new_cur > SIZE_MAX - offset) ++ return -EINVAL; ++ ++ new_cur = new_cur + offset; ++ if (new_cur > pr->head + pr->total) ++ return -EINVAL; ++ ++ pr->cur = new_cur; ++ return 0; ++} ++ ++/** ++ * ac_pr_move - update pack reader cur pointer after reading n_move bytes ++ * ++ * @pr: pack reader ++ * @n_move: number of bytes to move cur pointer ahead ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int ac_pr_move(struct ac_pr_state *pr, size_t n_move) ++{ ++ const char *new_cur = pr->cur + n_move; ++ ++ /* integer overflow or out of acp pkg size */ ++ if ((size_t)pr->cur > SIZE_MAX - n_move || ++ new_cur > pr->head + pr->total) ++ return -EINVAL; ++ ++ pr->cur = new_cur; ++ ++ return 0; ++} ++ ++/** ++ * ac_pr_is_safe_to_read - check whether it is safe to read more n_move ++ * bytes from the acp file ++ * ++ * @pr: pack reader ++ * @n_move: number of bytes to check if it is safe to read ++ * ++ * Return: true when it is safe to read more n_move bytes ++ * false otherwise ++ */ ++static bool ac_pr_is_safe_to_read(const struct ac_pr_state *pr, size_t n_move) ++{ ++ /* pointer overflow */ ++ if ((size_t)pr->cur > SIZE_MAX - n_move) ++ return false; ++ ++ if (pr->cur + n_move > pr->head + pr->total) ++ return false; ++ ++ return true; ++} ++ ++/** ++ * ac_pr_is_end - check if cur is at the end of the acp file ++ * ++ * @pr: pack reader ++ * ++ * Return: true when 
cur is at the end of the acp ++ * false otherwise ++ */ ++static bool ac_pr_is_end(const struct ac_pr_state *pr) ++{ ++ return (pr->cur == pr->head + pr->total); ++} ++ ++/** ++ * acp_load_reasons - load list of event codes that can be ++ * received or posted by ta ++ * ++ * @pr: pack reader ++ * @reasons: out param to hold the list of event codes ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_reasons(struct ac_pr_state *pr, ++ struct ac_ins_reasons **reasons) ++{ ++ size_t len; ++ struct ac_ins_reasons *r; ++ ++ if (!ac_pr_is_safe_to_read(pr, sizeof(*r))) ++ return -EINVAL; ++ ++ r = (struct ac_ins_reasons *)pr->cur; ++ ++ if (r->len > AC_MAX_INS_REASONS_LENGTH) ++ return -EINVAL; ++ ++ len = sizeof(*r) + r->len * sizeof(r->data[0]); ++ if (!ac_pr_is_safe_to_read(pr, len)) ++ return -EINVAL; ++ ++ *reasons = r; ++ return ac_pr_align_move(pr, len); ++} ++ ++/** ++ * acp_load_taid_list - load list of ta ids which ta is allowed ++ * to communicate with ++ * ++ * @pr: pack reader ++ * @taid_list: out param to hold the loaded ta ids ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_taid_list(struct ac_pr_state *pr, ++ struct ac_ta_id_list **taid_list) ++{ ++ size_t len; ++ struct ac_ta_id_list *t; ++ ++ if (!ac_pr_is_safe_to_read(pr, sizeof(*t))) ++ return -EINVAL; ++ ++ t = (struct ac_ta_id_list *)pr->cur; ++ if (t->num > AC_MAX_USED_SERVICES) ++ return -EINVAL; ++ ++ len = sizeof(*t) + t->num * sizeof(t->list[0]); ++ ++ if (!ac_pr_is_safe_to_read(pr, len)) ++ return -EINVAL; ++ ++ *taid_list = t; ++ return ac_pr_align_move(pr, len); ++} ++ ++/** ++ * acp_load_prop - load property from acp ++ * ++ * @pr: pack reader ++ * @prop: out param to hold the loaded property ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_prop(struct ac_pr_state *pr, struct ac_prop_list **prop) ++{ ++ size_t len; ++ struct ac_prop_list *p; ++ ++ if 
(!ac_pr_is_safe_to_read(pr, sizeof(*p))) ++ return -EINVAL; ++ ++ p = (struct ac_prop_list *)pr->cur; ++ if (p->len > AC_MAX_PROPS_LENGTH) ++ return -EINVAL; ++ ++ len = sizeof(*p) + p->len * sizeof(p->data[0]); ++ ++ if (!ac_pr_is_safe_to_read(pr, len)) ++ return -EINVAL; ++ ++ *prop = p; ++ return ac_pr_align_move(pr, len); ++} ++ ++/** ++ * acp_load_ta_pack - load ta pack from acp ++ * ++ * @pr: pack reader ++ * @ta_pack: out param to hold the ta pack ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_ta_pack(struct ac_pr_state *pr, char **ta_pack) ++{ ++ size_t len; ++ char *t; ++ ++ /*8 byte align to obey jeff rule*/ ++ if (ac_pr_8b_align_move(pr, 0)) ++ return -EINVAL; ++ ++ t = (char *)pr->cur; ++ ++ /* ++ *assume ta pack is the last item of one package, ++ *move cursor to the end directly ++ */ ++ if (pr->cur > pr->head + pr->total) ++ return -EINVAL; ++ ++ len = pr->head + pr->total - pr->cur; ++ if (!ac_pr_is_safe_to_read(pr, len)) ++ return -EINVAL; ++ ++ *ta_pack = t; ++ return ac_pr_move(pr, len); ++} ++ ++/** ++ * acp_load_ins_jta_prop_head - load ta manifest header ++ * ++ * @pr: pack reader ++ * @head: out param to hold manifest header ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_ins_jta_prop_head(struct ac_pr_state *pr, ++ struct ac_ins_jta_prop_header **head) ++{ ++ if (!ac_pr_is_safe_to_read(pr, sizeof(**head))) ++ return -EINVAL; ++ ++ *head = (struct ac_ins_jta_prop_header *)pr->cur; ++ return ac_pr_align_move(pr, sizeof(**head)); ++} ++ ++/** ++ * acp_load_ins_jta_prop - load ta properties information (ta manifest) ++ * ++ * @pr: pack reader ++ * @pack: out param to hold ta manifest ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_ins_jta_prop(struct ac_pr_state *pr, ++ struct ac_ins_jta_prop *pack) ++{ ++ int ret; ++ ++ ret = acp_load_ins_jta_prop_head(pr, &pack->head); ++ if (ret) ++ return ret; ++ ++ ret = 
acp_load_reasons(pr, &pack->post_reasons); ++ if (ret) ++ return ret; ++ ++ ret = acp_load_reasons(pr, &pack->reg_reasons); ++ if (ret) ++ return ret; ++ ++ ret = acp_load_prop(pr, &pack->prop); ++ if (ret) ++ return ret; ++ ++ ret = acp_load_taid_list(pr, &pack->used_service_list); ++ ++ return ret; ++} ++ ++/** ++ * acp_load_ins_jta_head - load ta installation header ++ * ++ * @pr: pack reader ++ * @head: out param to hold the installation header ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_ins_jta_head(struct ac_pr_state *pr, ++ struct ac_ins_ta_header **head) ++{ ++ if (!ac_pr_is_safe_to_read(pr, sizeof(**head))) ++ return -EINVAL; ++ ++ *head = (struct ac_ins_ta_header *)pr->cur; ++ return ac_pr_align_move(pr, sizeof(**head)); ++} ++ ++/** ++ * acp_load_ins_jta - load ta installation information from acp ++ * ++ * @pr: pack reader ++ * @pack: out param to hold install information ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_ins_jta(struct ac_pr_state *pr, ++ struct ac_ins_jta_pack *pack) ++{ ++ int ret; ++ ++ ret = acp_load_prop(pr, &pack->ins_cond); ++ if (ret) ++ return ret; ++ ++ ret = acp_load_ins_jta_head(pr, &pack->head); ++ ++ return ret; ++} ++ ++/** ++ * acp_load_pack_head - load acp pack header ++ * ++ * @pr: pack reader ++ * @head: out param to hold the acp header ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_pack_head(struct ac_pr_state *pr, ++ struct ac_pack_header **head) ++{ ++ if (!ac_pr_is_safe_to_read(pr, sizeof(**head))) ++ return -EINVAL; ++ ++ *head = (struct ac_pack_header *)pr->cur; ++ return ac_pr_align_move(pr, sizeof(**head)); ++} ++ ++/** ++ * acp_load_pack - load and parse pack from acp file ++ * ++ * @raw_pack: acp file content, without the acp CSS header ++ * @size: acp file size (without CSS header) ++ * @cmd_id: command id ++ * @pack: out param to hold the loaded pack ++ * ++ * Return: 
0 on success ++ * -EINVAL on invalid parameters ++ */ ++static int acp_load_pack(const char *raw_pack, unsigned int size, ++ unsigned int cmd_id, struct ac_pack *pack) ++{ ++ int ret; ++ struct ac_pr_state pr; ++ struct ac_ins_jta_pack_ext *pack_ext; ++ struct ac_ins_jta_prop_ext *prop_ext; ++ ++ ret = ac_pr_init(&pr, raw_pack, size); ++ if (ret) ++ return ret; ++ ++ if (cmd_id != AC_INSTALL_JTA_PROP) { ++ ret = acp_load_pack_head(&pr, &pack->head); ++ if (ret) ++ return ret; ++ } ++ ++ if (cmd_id != AC_INSTALL_JTA_PROP && cmd_id != pack->head->cmd_id) ++ return -EINVAL; ++ ++ switch (cmd_id) { ++ case AC_INSTALL_JTA: ++ pack_ext = (struct ac_ins_jta_pack_ext *)pack; ++ ret = acp_load_ins_jta(&pr, &pack_ext->cmd_pack); ++ if (ret) ++ break; ++ ret = acp_load_ta_pack(&pr, &pack_ext->ta_pack); ++ break; ++ case AC_INSTALL_JTA_PROP: ++ prop_ext = (struct ac_ins_jta_prop_ext *)pack; ++ ret = acp_load_ins_jta_prop(&pr, &prop_ext->cmd_pack); ++ if (ret) ++ break; ++ /* Note: the next section is JEFF file, ++ * and not ta_pack(JTA_properties+JEFF file), ++ * but we could reuse the ACP_load_ta_pack() here. 
++ */ ++ ret = acp_load_ta_pack(&pr, &prop_ext->jeff_pack); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (!ac_pr_is_end(&pr)) ++ return -EINVAL; ++ ++ return ret; ++} ++ ++/** ++ * acp_pload_ins_jta - load and parse ta pack from acp file ++ * ++ * Exported function in acp parser API ++ * ++ * @raw_data: acp file content ++ * @size: acp file size ++ * @pack: out param to hold the ta pack ++ * ++ * Return: 0 on success ++ * -EINVAL on invalid parameters ++ */ ++int acp_pload_ins_jta(const void *raw_data, unsigned int size, ++ struct ac_ins_jta_pack_ext *pack) ++{ ++ int ret; ++ ++ if (!raw_data || size <= AC_CSS_HEADER_LENGTH || !pack) ++ return -EINVAL; ++ ++ ret = acp_load_pack((const char *)raw_data + AC_CSS_HEADER_LENGTH, ++ size - AC_CSS_HEADER_LENGTH, ++ AC_INSTALL_JTA, (struct ac_pack *)pack); ++ ++ return ret; ++} +diff --git a/drivers/misc/mei/dal/acp_parser.h b/drivers/misc/mei/dal/acp_parser.h +new file mode 100644 +index 000000000000..9d413c144aa7 +--- /dev/null ++++ b/drivers/misc/mei/dal/acp_parser.h +@@ -0,0 +1,38 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++#ifndef _ACP_PARSER_H ++#define _ACP_PARSER_H ++ ++#include "acp_format.h" ++ ++/** ++ * struct ac_ins_jta_pack_ext - parsed ta pack from acp file ++ * ++ * @head: acp pack header ++ * @cmd_pack: ta installation information pack ++ * @ta_pack: raw ta pack ++ */ ++struct ac_ins_jta_pack_ext { ++ struct ac_pack_header *head; ++ struct ac_ins_jta_pack cmd_pack; ++ char *ta_pack; ++} __packed; ++ ++/** ++ * struct ac_ins_jta_prop_ext - parsed ta properties information ++ * from acp file ++ * ++ * @cmd_pack: ta installation properties pack ++ * @jeff_pack: ta jeff pack ++ */ ++struct ac_ins_jta_prop_ext { ++ struct ac_ins_jta_prop cmd_pack; ++ char *jeff_pack; ++} __packed; ++ ++int acp_pload_ins_jta(const void *raw_data, unsigned int size, ++ struct ac_ins_jta_pack_ext *pack); ++ ++#endif /* _ACP_PARSER_H */ +-- +2.17.1 + diff --git a/patches/0055-net-stmmac-add-Frame-Preemption-FPE-queue-mas.connectivity b/patches/0055-net-stmmac-add-Frame-Preemption-FPE-queue-mas.connectivity new file mode 100644 index 0000000000..221d89a528 --- /dev/null +++ b/patches/0055-net-stmmac-add-Frame-Preemption-FPE-queue-mas.connectivity @@ -0,0 +1,59 @@ +From 05bf43f26c8010d5883921f5b33029eaebb0d8cd Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Tue, 13 Aug 2019 12:57:30 +0800 +Subject: [PATCH 055/108] net: stmmac: add Frame Preemption (FPE) queue masking + setting + +Make taprio hardware offload mode to allow user to set frame preemption +queue mask that specifies which TxQ is mapped to preemptible MAC. + +For FPE enabling, it is expected that FPE Queue Mask has at least one +TxQ be selected as preemptible, i.e. 'fpe-qmask' option cannot be all 0s. 
+ +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +index 4a2cfa35675f..0c6efa5a8c8e 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +@@ -596,6 +596,7 @@ static int tc_setup_cls(struct stmmac_priv *priv, + static int tc_setup_taprio(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) + { ++ u32 fpe_q_mask = qopt->frame_preemption_queue_mask; + u64 time_extension = qopt->cycle_time_extension; + u64 base_time = ktime_to_ns(qopt->base_time); + u64 cycle_time = qopt->cycle_time; +@@ -615,9 +616,26 @@ static int tc_setup_taprio(struct stmmac_priv *priv, + } else { + stmmac_set_est_enable(priv, priv->hw, priv->dev, false); + dev_info(priv->device, "taprio: EST disabled\n"); ++ stmmac_fpe_set_enable(priv, priv->hw, priv->dev, false); ++ dev_info(priv->device, "taprio: FPE disabled\n"); + return 0; + } + ++ if (stmmac_has_tsn_feat(priv, priv->hw, priv->dev, TSN_FEAT_ID_FPE) && ++ fpe_q_mask) { ++ ret = stmmac_fpe_set_txqpec(priv, priv->hw, priv->dev, ++ fpe_q_mask); ++ if (ret) ++ return ret; ++ ++ ret = stmmac_fpe_set_enable(priv, priv->hw, priv->dev, true); ++ if (ret) ++ return ret; ++ ++ dev_info(priv->device, "taprio: FPE enabled (qmask=0x%x)\n", ++ fpe_q_mask); ++ } ++ + dev_dbg(priv->device, + "EST: base_time %llu, cycle_time %llu, cycle_extension %llu\n", + qopt->base_time, qopt->cycle_time, +-- +2.17.1 + diff --git a/patches/0055-vhm-use-set-memmaps-hypercall-for-hugetlb.acrn b/patches/0055-vhm-use-set-memmaps-hypercall-for-hugetlb.acrn new file mode 100644 index 0000000000..544ce18cc3 --- /dev/null +++ b/patches/0055-vhm-use-set-memmaps-hypercall-for-hugetlb.acrn @@ -0,0 +1,124 @@ +From 34442679f30ace35c138170a7e60e974c68b2dd5 Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: 
Fri, 31 Aug 2018 10:59:00 +0800 +Subject: [PATCH 055/150] vhm: use set memmaps hypercall for hugetlb + +If hugetlb is using 2M pages, there may be too many memmap hypercall for +ept mapping. To avoid such kind of performance drop, this patch enabled +set memmaps hypercall for hugetlb to handle multi memmap hypercall in +one time. + +Signed-off-by: Jason Chen CJ +--- + drivers/vhm/vhm_hugetlb.c | 62 +++++++++++++++++++++++++++++---------- + 1 file changed, 47 insertions(+), 15 deletions(-) + +diff --git a/drivers/vhm/vhm_hugetlb.c b/drivers/vhm/vhm_hugetlb.c +index afab8ab52567..9c39f9167f77 100644 +--- a/drivers/vhm/vhm_hugetlb.c ++++ b/drivers/vhm/vhm_hugetlb.c +@@ -120,10 +120,11 @@ static int add_guest_map(struct vhm_vm *vm, unsigned long vm0_gpa, + + int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap) + { +- struct page *page; ++ struct page *page = NULL, *memmaps_buf_pg = NULL; + unsigned long len, guest_gpa, vma; +- unsigned int type; +- unsigned int mem_type, mem_access_right; ++ struct memory_map *memmap_array; ++ struct set_memmaps memmaps; ++ int max_size = PAGE_SIZE/sizeof(struct memory_map); + int ret; + + if (vm == NULL || memmap == NULL) +@@ -133,13 +134,23 @@ int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap) + vma = memmap->vma_base; + guest_gpa = memmap->gpa; + ++ /* prepare set_memmaps info */ ++ memmaps_buf_pg = alloc_page(GFP_KERNEL); ++ if (memmaps_buf_pg == NULL) ++ return -ENOMEM; ++ memmaps.memmaps_num = 0; ++ memmaps.vmid = vm->vmid; ++ memmaps.memmaps_gpa = page_to_phys(memmaps_buf_pg); ++ memmap_array = page_to_virt(memmaps_buf_pg); ++ + while (len > 0) { + unsigned long vm0_gpa, pagesize; + + ret = get_user_pages_fast(vma, 1, 1, &page); + if (unlikely(ret != 1) || (page == NULL)) { + pr_err("failed to pin huge page!\n"); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err; + } + + vm0_gpa = page_to_phys(page); +@@ -148,19 +159,27 @@ int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap) + ret = 
add_guest_map(vm, vm0_gpa, guest_gpa, pagesize); + if (ret < 0) { + pr_err("failed to add memseg for huge page!\n"); +- put_page(page); +- return ret; ++ goto err; + } + +- /* TODO: do batch hypercall for multi ept mapping */ +- mem_type = MEM_TYPE_WB; +- mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); +- type = MAP_MEM; +- if (_mem_set_memmap(vm->vmid, guest_gpa, vm0_gpa, pagesize, +- mem_type, mem_access_right, type) < 0) { +- pr_err("vhm: failed to set memmap %ld!\n", vm->vmid); +- put_page(page); +- return -EFAULT; ++ /* fill each memmap region into memmap_array */ ++ memmap_array[memmaps.memmaps_num].type = MAP_MEM; ++ memmap_array[memmaps.memmaps_num].remote_gpa = guest_gpa; ++ memmap_array[memmaps.memmaps_num].vm0_gpa = vm0_gpa; ++ memmap_array[memmaps.memmaps_num].length = pagesize; ++ memmap_array[memmaps.memmaps_num].prot = ++ MEM_TYPE_WB & MEM_TYPE_MASK; ++ memmap_array[memmaps.memmaps_num].prot |= ++ memmap->prot & MEM_ACCESS_RIGHT_MASK; ++ memmaps.memmaps_num++; ++ if (memmaps.memmaps_num == max_size) { ++ pr_info("region buffer full, set & renew memmaps!\n"); ++ ret = set_memmaps(&memmaps); ++ if (ret < 0) { ++ pr_err("failed to set memmaps,ret=%d!\n", ret); ++ goto err; ++ } ++ memmaps.memmaps_num = 0; + } + + len -= pagesize; +@@ -168,9 +187,22 @@ int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap) + guest_gpa += pagesize; + } + ++ ret = set_memmaps(&memmaps); ++ if (ret < 0) { ++ pr_err("failed to set memmaps, ret=%d!\n", ret); ++ goto err; ++ } ++ ++ __free_page(memmaps_buf_pg); + vm->hugetlb_enabled = 1; + + return 0; ++err: ++ if (memmaps_buf_pg) ++ __free_page(memmaps_buf_pg); ++ if (page) ++ put_page(page); ++ return ret; + } + + void hugepage_free_guest(struct vhm_vm *vm) +-- +2.17.1 + diff --git a/patches/0056-ASoC-Intel-bxt_rt298-Add-compress-probe-DAI-links.audio b/patches/0056-ASoC-Intel-bxt_rt298-Add-compress-probe-DAI-links.audio new file mode 100644 index 0000000000..ab640fe366 --- /dev/null +++ 
b/patches/0056-ASoC-Intel-bxt_rt298-Add-compress-probe-DAI-links.audio @@ -0,0 +1,55 @@ +From 5ef59d1473cfd39271597b512c106e205a27e59c Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Thu, 28 Mar 2019 12:12:23 +0100 +Subject: [PATCH 056/193] ASoC: Intel: bxt_rt298: Add compress probe DAI links + +Assign probe DAI link to actively used Skylake machine boards. +For current upstream, it is only bxt_rt298. + +Change-Id: I39c23199688e0b698cc19fc571dc5f0789981f76 +Signed-off-by: Cezary Rojewski +--- + sound/soc/intel/boards/bxt_rt298.c | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + +diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c +index adf416a49b48..32840601f960 100644 +--- a/sound/soc/intel/boards/bxt_rt298.c ++++ b/sound/soc/intel/boards/bxt_rt298.c +@@ -378,6 +378,11 @@ SND_SOC_DAILINK_DEF(idisp3_codec, + DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", + "intel-hdmi-hifi3"))); + ++SND_SOC_DAILINK_DEF(probe_pb, ++ DAILINK_COMP_ARRAY(COMP_CPU("Probe Injection0 CPU DAI"))); ++SND_SOC_DAILINK_DEF(probe_cp, ++ DAILINK_COMP_ARRAY(COMP_CPU("Probe Extraction CPU DAI"))); ++ + SND_SOC_DAILINK_DEF(platform, + DAILINK_COMP_ARRAY(COMP_PLATFORM("0000:00:0e.0"))); + +@@ -516,6 +521,21 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = { + .no_pcm = 1, + SND_SOC_DAILINK_REG(idisp3_pin, idisp3_codec, platform), + }, ++ /* Probe DAI links */ ++ { ++ .name = "Compress Probe Playback", ++ .init = NULL, ++ .ignore_suspend = 1, ++ .nonatomic = 1, ++ SND_SOC_DAILINK_REG(probe_pb, dummy, platform), ++ }, ++ { ++ .name = "Compress Probe Capture", ++ .init = NULL, ++ .ignore_suspend = 1, ++ .nonatomic = 1, ++ SND_SOC_DAILINK_REG(probe_cp, dummy, platform), ++ }, + }; + + #define NAME_SIZE 32 +-- +2.17.1 + diff --git a/patches/0056-drm-virtio-notify-virtqueues-without-holding-spinlock.drm b/patches/0056-drm-virtio-notify-virtqueues-without-holding-spinlock.drm new file mode 100644 index 0000000000..e2448d4ac3 --- /dev/null +++ 
b/patches/0056-drm-virtio-notify-virtqueues-without-holding-spinlock.drm @@ -0,0 +1,116 @@ +From 3b6bc96b8bd0ccf80023ceda2b0267c50fbb9087 Mon Sep 17 00:00:00 2001 +From: Gerd Hoffmann +Date: Tue, 13 Aug 2019 10:25:09 +0200 +Subject: [PATCH 056/690] drm/virtio: notify virtqueues without holding + spinlock + +Split virtqueue_kick() call into virtqueue_kick_prepare(), which +requires serialization, and virtqueue_notify(), which does not. Move +the virtqueue_notify() call out of the critical section protected by the +queue lock. This avoids triggering a vmexit while holding the lock and +thereby fixes a rather bad spinlock contention. + +Suggested-by: Chia-I Wu +Signed-off-by: Gerd Hoffmann +Reviewed-by: Chia-I Wu +Link: http://patchwork.freedesktop.org/patch/msgid/20190813082509.29324-3-kraxel@redhat.com +--- + drivers/gpu/drm/virtio/virtgpu_vq.c | 25 +++++++++++++++++++------ + 1 file changed, 19 insertions(+), 6 deletions(-) + +diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c +index ca91e83ffaef..e41c96143342 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_vq.c ++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c +@@ -252,7 +252,7 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) + wake_up(&vgdev->cursorq.ack_queue); + } + +-static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, ++static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) + __releases(&vgdev->ctrlq.qlock) + __acquires(&vgdev->ctrlq.qlock) +@@ -260,10 +260,11 @@ static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, + struct virtqueue *vq = vgdev->ctrlq.vq; + struct scatterlist *sgs[3], vcmd, vout, vresp; + int outcnt = 0, incnt = 0; ++ bool notify = false; + int ret; + + if (!vgdev->vqs_ready) +- return; ++ return notify; + + sg_init_one(&vcmd, vbuf->buf, vbuf->size); + sgs[outcnt + incnt] = &vcmd; +@@ -292,16 +293,21 @@ static void 
virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, + trace_virtio_gpu_cmd_queue(vq, + (struct virtio_gpu_ctrl_hdr *)vbuf->buf); + +- virtqueue_kick(vq); ++ notify = virtqueue_kick_prepare(vq); + } ++ return notify; + } + + static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) + { ++ bool notify; ++ + spin_lock(&vgdev->ctrlq.qlock); +- virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); ++ notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); + spin_unlock(&vgdev->ctrlq.qlock); ++ if (notify) ++ virtqueue_notify(vgdev->ctrlq.vq); + } + + static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, +@@ -310,6 +316,7 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_fence *fence) + { + struct virtqueue *vq = vgdev->ctrlq.vq; ++ bool notify; + + again: + spin_lock(&vgdev->ctrlq.qlock); +@@ -330,8 +337,10 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + + if (fence) + virtio_gpu_fence_emit(vgdev, hdr, fence); +- virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); ++ notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); + spin_unlock(&vgdev->ctrlq.qlock); ++ if (notify) ++ virtqueue_notify(vgdev->ctrlq.vq); + } + + static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, +@@ -339,6 +348,7 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, + { + struct virtqueue *vq = vgdev->cursorq.vq; + struct scatterlist *sgs[1], ccmd; ++ bool notify; + int ret; + int outcnt; + +@@ -361,10 +371,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, + trace_virtio_gpu_cmd_queue(vq, + (struct virtio_gpu_ctrl_hdr *)vbuf->buf); + +- virtqueue_kick(vq); ++ notify = virtqueue_kick_prepare(vq); + } + + spin_unlock(&vgdev->cursorq.qlock); ++ ++ if (notify) ++ virtqueue_notify(vq); + } + + /* just create gem objects for userspace and long lived objects, 
+-- +2.17.1 + diff --git a/patches/0056-mei-dal-add-bh-plugin-code.security b/patches/0056-mei-dal-add-bh-plugin-code.security new file mode 100644 index 0000000000..84cdd17ab8 --- /dev/null +++ b/patches/0056-mei-dal-add-bh-plugin-code.security @@ -0,0 +1,1913 @@ +From 6d20b86d4c20e0bf67983095934d1357fe777b32 Mon Sep 17 00:00:00 2001 +From: Yael Samet +Date: Mon, 14 Aug 2017 11:27:26 +0300 +Subject: [PATCH 56/65] mei: dal: add bh plugin code + +bh plugin defines and implements the protocol of managing applets in DAL. +(view to bh_command_id enum for all options) +This patch adds the code of the bh plugin + +Change-Id: Ia09cbb903d65e8c1f58becb7ed00238f6f226f12 +Signed-off-by: Yael Samet +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/dal/Makefile | 2 + + drivers/misc/mei/dal/bh_cmd_defs.h | 238 ++++++++ + drivers/misc/mei/dal/bh_errcode.h | 153 ++++++ + drivers/misc/mei/dal/bh_external.c | 475 ++++++++++++++++ + drivers/misc/mei/dal/bh_external.h | 46 ++ + drivers/misc/mei/dal/bh_internal.c | 845 +++++++++++++++++++++++++++++ + drivers/misc/mei/dal/bh_internal.h | 78 +++ + 7 files changed, 1837 insertions(+) + create mode 100644 drivers/misc/mei/dal/bh_cmd_defs.h + create mode 100644 drivers/misc/mei/dal/bh_errcode.h + create mode 100644 drivers/misc/mei/dal/bh_external.c + create mode 100644 drivers/misc/mei/dal/bh_external.h + create mode 100644 drivers/misc/mei/dal/bh_internal.c + create mode 100644 drivers/misc/mei/dal/bh_internal.h + +diff --git a/drivers/misc/mei/dal/Makefile b/drivers/misc/mei/dal/Makefile +index bd86590a46c4..d43a0f599956 100644 +--- a/drivers/misc/mei/dal/Makefile ++++ b/drivers/misc/mei/dal/Makefile +@@ -5,4 +5,6 @@ ccflags-y += -D__CHECK_ENDIAN__ + + obj-$(CONFIG_INTEL_MEI_DAL) += mei_dal.o + mei_dal-objs += acp_parser.o ++mei_dal-objs += bh_external.o ++mei_dal-objs += bh_internal.o + mei_dal-objs += dal_class.o +diff --git a/drivers/misc/mei/dal/bh_cmd_defs.h b/drivers/misc/mei/dal/bh_cmd_defs.h +new file mode 100644 +index 
000000000000..6cbc3e0209e1 +--- /dev/null ++++ b/drivers/misc/mei/dal/bh_cmd_defs.h +@@ -0,0 +1,238 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. ++ */ ++ ++#ifndef __BH_DAL_H_ ++#define __BH_DAL_H_ ++ ++#include ++#include ++ ++/** ++ * enum bh_command_id - bh command ids ++ * ++ * @BHP_CMD_INIT: init command ++ * @BHP_CMD_DEINIT: deinit command ++ * @BHP_CMD_VERIFY_JAVATA: verify ta ++ * @BHP_CMD_DOWNLOAD_JAVATA: download ta to DAL ++ * @BHP_CMD_OPEN_JTASESSION: open session to ta ++ * @BHP_CMD_CLOSE_JTASESSION: close session with ta ++ * @BHP_CMD_FORCECLOSE_JTASESSION: force close session ++ * @BHP_CMD_SENDANDRECV: send and receive massages to ta ++ * @BHP_CMD_SENDANDRECV_INTERNAL: internal send and receive ++ * @BHP_CMD_RUN_NATIVETA: run native trusted application ++ * (currently NOT SUPPORTED) ++ * @BHP_CMD_STOP_NATIVETA: stop running native ta (currently NOT SUPPORTED) ++ * @BHP_CMD_OPEN_SDSESSION: open security domain session ++ * @BHP_CMD_CLOSE_SDSESSION: close security domain session ++ * @BHP_CMD_INSTALL_SD: install new sub security domain ++ * @BHP_CMD_UNINSTALL_SD: uninstall sub security domain ++ * @BHP_CMD_INSTALL_JAVATA: install java ta ++ * @BHP_CMD_UNINSTALL_JAVATA: uninstall java ta ++ * @BHP_CMD_INSTALL_NATIVETA: install native ta (currently NOT SUPPORTED) ++ * @BHP_CMD_UNINSTALL_NATIVETA: uninstall native ta (currently NOT SUPPORTED) ++ * @BHP_CMD_LIST_SD: get list of all security domains ++ * @BHP_CMD_LIST_TA: get list of all installed trusted applications ++ * @BHP_CMD_RESET: reset command ++ * @BHP_CMD_LIST_TA_PROPERTIES: get list of all ta properties (ta manifest) ++ * @BHP_CMD_QUERY_TA_PROPERTY: query specified ta property ++ * @BHP_CMD_LIST_JTA_SESSIONS: get list of all opened ta sessions ++ * @BHP_CMD_LIST_TA_PACKAGES: get list of all ta packages in DAL ++ * @BHP_CMD_GET_ISD: get Intel security domain uuid ++ * @BHP_CMD_GET_SD_BY_TA: get security domain id of ta ++ * @BHP_CMD_LAUNCH_VM: 
lunch IVM ++ * @BHP_CMD_CLOSE_VM: close IVM ++ * @BHP_CMD_QUERY_NATIVETA_STATUS: query specified native ta status ++ * (currently NOT SUPPORTED) ++ * @BHP_CMD_QUERY_SD_STATUS: query specified security domain status ++ * @BHP_CMD_LIST_DOWNLOADED_NTA: get list of all native trusted applications ++ * (currently NOT SUPPORTED) ++ * @BHP_CMD_UPDATE_SVL: update security version list ++ * @BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE: check if ta security version is blocked ++ * @BHP_CMD_QUERY_TEE_METADATA: get DAL metadata (including api_level, ++ * library_version, dal_key_hash and more) ++ * ++ * @BHP_CMD_MAX: max command id ++ */ ++ ++enum bh_command_id { ++ BHP_CMD_INIT = 0, ++ BHP_CMD_DEINIT, ++ BHP_CMD_VERIFY_JAVATA, ++ BHP_CMD_DOWNLOAD_JAVATA, ++ BHP_CMD_OPEN_JTASESSION, ++ BHP_CMD_CLOSE_JTASESSION, ++ BHP_CMD_FORCECLOSE_JTASESSION, ++ BHP_CMD_SENDANDRECV, ++ BHP_CMD_SENDANDRECV_INTERNAL, ++ BHP_CMD_RUN_NATIVETA, ++ BHP_CMD_STOP_NATIVETA, ++ BHP_CMD_OPEN_SDSESSION, ++ BHP_CMD_CLOSE_SDSESSION, ++ BHP_CMD_INSTALL_SD, ++ BHP_CMD_UNINSTALL_SD, ++ BHP_CMD_INSTALL_JAVATA, ++ BHP_CMD_UNINSTALL_JAVATA, ++ BHP_CMD_INSTALL_NATIVETA, ++ BHP_CMD_UNINSTALL_NATIVETA, ++ BHP_CMD_LIST_SD, ++ BHP_CMD_LIST_TA, ++ BHP_CMD_RESET, ++ BHP_CMD_LIST_TA_PROPERTIES, ++ BHP_CMD_QUERY_TA_PROPERTY, ++ BHP_CMD_LIST_JTA_SESSIONS, ++ BHP_CMD_LIST_TA_PACKAGES, ++ BHP_CMD_GET_ISD, ++ BHP_CMD_GET_SD_BY_TA, ++ BHP_CMD_LAUNCH_VM, ++ BHP_CMD_CLOSE_VM, ++ BHP_CMD_QUERY_NATIVETA_STATUS, ++ BHP_CMD_QUERY_SD_STATUS, ++ BHP_CMD_LIST_DOWNLOADED_NTA, ++ BHP_CMD_UPDATE_SVL, ++ BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE, ++ BHP_CMD_QUERY_TEE_METADATA, ++ BHP_CMD_MAX ++}; ++ ++#define BH_MSG_RESP_MAGIC 0x55aaa5ff ++#define BH_MSG_CMD_MAGIC 0x55aaa3ff ++ ++/** ++ * struct bh_msg_header - transport header ++ * ++ * @magic: BH_MSG_RESP/CMD_MAGIC ++ * @length: overall message length ++ */ ++struct bh_msg_header { ++ u32 magic; ++ u32 length; ++}; ++ ++/** ++ * struct bh_command_header - bh command header ++ * ++ * @h: transport 
header ++ * @seq: message sequence number ++ * @id: the command id (enum bh_command_id) ++ * @pad: padded for 64 bit ++ * @cmd: command buffer ++ */ ++struct bh_command_header { ++ struct bh_msg_header h; ++ u64 seq; ++ u32 id; ++ u8 pad[4]; ++ s8 cmd[0]; ++} __packed; ++ ++/** ++ * struct bh_response_header - response header (from the DAL) ++ * ++ * @h: transport header ++ * @seq: message sequence number ++ * @ta_session_id: session id (DAL firmware address) ++ * @code: response code ++ * @pad: padded for 64 bit ++ * @data: response buffer ++ */ ++struct bh_response_header { ++ struct bh_msg_header h; ++ u64 seq; ++ u64 ta_session_id; ++ s32 code; ++ u8 pad[4]; ++ s8 data[0]; ++} __packed; ++ ++/** ++ * struct bh_download_jta_cmd - download java trusted application. ++ * ++ * @ta_id: trusted application (ta) id ++ * @ta_blob: trusted application blob ++ */ ++struct bh_download_jta_cmd { ++ uuid_t ta_id; ++ s8 ta_blob[0]; ++} __packed; ++ ++/** ++ * struct bh_open_jta_session_cmd - open session to TA command ++ * ++ * @ta_id: trusted application (ta) id ++ * @buffer: session initial parameters (optional) ++ */ ++struct bh_open_jta_session_cmd { ++ uuid_t ta_id; ++ s8 buffer[0]; ++} __packed; ++ ++/** ++ * struct bh_close_jta_session_cmd - close session to TA command ++ * ++ * @ta_session_id: session id ++ */ ++struct bh_close_jta_session_cmd { ++ u64 ta_session_id; ++} __packed; ++ ++/** ++ * struct bh_cmd - bh command ++ * ++ * @ta_session_id: session id ++ * @command: command id to ta ++ * @outlen: length of output buffer ++ * @buffer: data to send ++ */ ++struct bh_cmd { ++ u64 ta_session_id; ++ s32 command; ++ u32 outlen; ++ s8 buffer[0]; ++} __packed; ++ ++/** ++ * struct bh_check_svl_ta_blocked_state_cmd - command to check if ++ * the trusted application security version is blocked ++ * ++ * @ta_id: trusted application id ++ */ ++struct bh_check_svl_jta_blocked_state_cmd { ++ uuid_t ta_id; ++} __packed; ++ ++/** ++ * struct bh_resp - bh response ++ * ++ * 
@response: response code. Originated from java in big endian format ++ * @buffer: response buffer ++ */ ++struct bh_resp { ++ __be32 response; ++ s8 buffer[0]; ++} __packed; ++ ++/** ++ * struct bh_resp_bof - response when output buffer is too small ++ * ++ * @response: response code. Originated from java in big endian format ++ * @request_length: the needed output buffer length ++ */ ++struct bh_resp_bof { ++ __be32 response; ++ __be32 request_length; ++} __packed; ++ ++/** ++ * struct bh_resp_list_ta_packages - list of ta packages from DAL ++ * ++ * @count: count of ta packages ++ * @ta_ids: ta packages ids ++ */ ++struct bh_resp_list_ta_packages { ++ u32 count; ++ uuid_t ta_ids[0]; ++} __packed; ++ ++#endif /* __BH_DAL_H_*/ +diff --git a/drivers/misc/mei/dal/bh_errcode.h b/drivers/misc/mei/dal/bh_errcode.h +new file mode 100644 +index 000000000000..5695c4bde226 +--- /dev/null ++++ b/drivers/misc/mei/dal/bh_errcode.h +@@ -0,0 +1,153 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019 Intel Corporation. ++ */ ++ ++#ifndef __BH_ERRCODE_H ++#define __BH_ERRCODE_H ++ ++/* ++ * BH Error codes numbers across Beihai Host and Firmware. 
++ */ ++ ++#define BH_SUCCESS 0x000 ++ ++/* BHP specific error code section */ ++ ++#define BPE_NOT_INIT 0x001 ++#define BPE_SERVICE_UNAVAILABLE 0x002 ++#define BPE_INTERNAL_ERROR 0x003 ++#define BPE_COMMS_ERROR 0x004 ++#define BPE_OUT_OF_MEMORY 0x005 ++#define BPE_INVALID_PARAMS 0x006 ++#define BPE_MESSAGE_TOO_SHORT 0x007 ++#define BPE_MESSAGE_ILLEGAL 0x008 ++#define BPE_NO_CONNECTION_TO_FIRMWARE 0x009 ++#define BPE_NOT_IMPLEMENT 0x00A ++#define BPE_OUT_OF_RESOURCE 0x00B ++#define BPE_INITIALIZED_ALREADY 0x00C ++#define BPE_CONNECT_FAILED 0x00D ++ ++/* General error code section for Beihai on FW: 0x100 */ ++ ++#define BHE_OUT_OF_MEMORY 0x101 ++#define BHE_BAD_PARAMETER 0x102 ++#define BHE_INSUFFICIENT_BUFFER 0x103 ++#define BHE_MUTEX_INIT_FAIL 0x104 ++#define BHE_COND_INIT_FAIL 0x105 ++#define BHE_WD_TIMEOUT 0x106 ++#define BHE_FAILED 0x107 ++#define BHE_INVALID_HANDLE 0x108 ++#define BHE_IPC_ERR_DEFAULT 0x109 ++#define BHE_IPC_ERR_PLATFORM 0x10A ++#define BHE_IPC_SRV_INIT_FAIL 0x10B ++ ++/* VM communication error code section: 0x200 */ ++ ++#define BHE_MAILBOX_NOT_FOUND 0x201 ++#define BHE_APPLET_CRASHED BHE_MAILBOX_NOT_FOUND ++#define BHE_MSG_QUEUE_IS_FULL 0x202 ++#define BHE_MAILBOX_DENIED 0x203 ++ ++/* VM InternalAppletCommunication error 0x240 */ ++ ++#define BHE_IAC_INTERNAL_SESSION_NUM_EXCEED 0x241 ++#define BHE_IAC_CLIENT_SLOT_FULL 0x242 ++#define BHE_IAC_SERVICETA_EXITED 0x243 ++#define BHE_IAC_EXIST_INTERNAL_SESSION 0x244 ++#define BHE_IAC_SERVICETA_UNCAUGHT_EXCEPTION 0x245 ++#define BHE_IAC_SERVICE_SESSION_NOT_FOUND 0x246 ++#define BHE_IAC_SERVICE_HOST_SESSION_NUM_EXCEED 0x247 ++ ++/* Firmware thread/mutex error code section: 0x280 */ ++#define BHE_THREAD_ERROR 0x281 ++#define BHE_THREAD_TIMED_OUT 0x282 ++ ++/* Applet manager error code section: 0x300 */ ++ ++#define BHE_LOAD_JEFF_FAIL 0x303 ++#define BHE_PACKAGE_NOT_FOUND 0x304 ++#define BHE_EXIST_LIVE_SESSION 0x305 ++#define BHE_VM_INSTANCE_INIT_FAIL 0x306 ++#define BHE_QUERY_PROP_NOT_SUPPORT 0x307 
++#define BHE_INVALID_BPK_FILE 0x308 ++#define BHE_PACKAGE_EXIST 0x309 ++#define BHE_VM_INSTNACE_NOT_FOUND 0x312 ++#define BHE_STARTING_JDWP_FAIL 0x313 ++#define BHE_GROUP_CHECK_FAIL 0x314 ++#define BHE_SDID_UNMATCH 0x315 ++#define BHE_APPPACK_UNINITED 0x316 ++#define BHE_SESSION_NUM_EXCEED 0x317 ++#define BHE_TA_PACKAGE_HASH_VERIFY_FAIL 0x318 ++#define BHE_SWITCH_ISD 0x319 ++#define BHE_OPERATION_NOT_PERMITTED 0x31A ++ ++/* VM Applet instance error code section: 0x400 */ ++#define BHE_APPLET_GENERIC 0x400 ++#define BHE_UNCAUGHT_EXCEPTION 0x401 ++/* Bad parameters to applet */ ++#define BHE_APPLET_BAD_PARAMETER 0x402 ++/* Small response buffer */ ++#define BHE_APPLET_SMALL_BUFFER 0x403 ++/* Bad state */ ++#define BHE_BAD_STATE 0x404 ++ ++/*TODO: Should be removed these UI error code when integrate with ME 9 */ ++#define BHE_UI_EXCEPTION 0x501 ++#define BHE_UI_ILLEGAL_USE 0x502 ++#define BHE_UI_ILLEGAL_PARAMETER 0x503 ++#define BHE_UI_NOT_INITIALIZED 0x504 ++#define BHE_UI_NOT_SUPPORTED 0x505 ++#define BHE_UI_OUT_OF_RESOURCES 0x506 ++ ++/* BeiHai VMInternalError code section: 0x600 */ ++#define BHE_UNKNOWN 0x602 ++#define BHE_MAGIC_UNMATCH 0x603 ++#define BHE_UNIMPLEMENTED 0x604 ++#define BHE_INTR 0x605 ++#define BHE_CLOSED 0x606 ++/* TODO: no used error, should remove*/ ++#define BHE_BUFFER_OVERFLOW 0x607 ++#define BHE_NOT_SUPPORTED 0x608 ++#define BHE_WEAR_OUT_VIOLATION 0x609 ++#define BHE_NOT_FOUND 0x610 ++#define BHE_INVALID_PARAMS 0x611 ++#define BHE_ACCESS_DENIED 0x612 ++#define BHE_INVALID 0x614 ++#define BHE_TIMEOUT 0x615 ++ ++/* SDM specific error code section: 0x800 */ ++#define BHE_SDM_FAILED 0x800 ++#define BHE_SDM_NOT_FOUND 0x801 ++#define BHE_SDM_ALREADY_EXIST 0x803 ++#define BHE_SDM_TATYPE_MISMATCH 0x804 ++#define BHE_SDM_TA_NUMBER_LIMIT 0x805 ++#define BHE_SDM_SIGNAGURE_VERIFY_FAIL 0x806 ++#define BHE_SDM_PERMGROUP_CHECK_FAIL 0x807 ++#define BHE_SDM_INSTALL_CONDITION_FAIL 0x808 ++#define BHE_SDM_SVN_CHECK_FAIL 0x809 ++#define 
BHE_SDM_TA_DB_NO_FREE_SLOT 0x80A ++#define BHE_SDM_SD_DB_NO_FREE_SLOT 0x80B ++#define BHE_SDM_SVL_DB_NO_FREE_SLOT 0x80C ++#define BHE_SDM_SVL_CHECK_FAIL 0x80D ++#define BHE_SDM_DB_READ_FAIL 0x80E ++#define BHE_SDM_DB_WRITE_FAIL 0x80F ++ ++/* Launcher specific error code section: 0x900 */ ++#define BHE_LAUNCHER_INIT_FAILED 0x901 ++#define BHE_SD_NOT_INSTALLED 0x902 ++#define BHE_NTA_NOT_INSTALLED 0x903 ++#define BHE_PROCESS_SPAWN_FAILED 0x904 ++#define BHE_PROCESS_KILL_FAILED 0x905 ++#define BHE_PROCESS_ALREADY_RUNNING 0x906 ++#define BHE_PROCESS_IN_TERMINATING 0x907 ++#define BHE_PROCESS_NOT_EXIST 0x908 ++#define BHE_PLATFORM_API_ERR 0x909 ++#define BHE_PROCESS_NUM_EXCEED 0x09A ++ ++/* ++ * BeihaiHAL Layer error code section: ++ * 0x1000,0x2000 reserved here, defined in CSG BeihaiStatusHAL.h ++ */ ++ ++#endif /* __BH_ERRCODE_H */ +diff --git a/drivers/misc/mei/dal/bh_external.c b/drivers/misc/mei/dal/bh_external.c +new file mode 100644 +index 000000000000..e403d3348d8b +--- /dev/null ++++ b/drivers/misc/mei/dal/bh_external.c +@@ -0,0 +1,475 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++ ++#include ++#include ++#include ++ ++#include "bh_errcode.h" ++#include "bh_external.h" ++#include "bh_internal.h" ++ ++/** ++ * uuid_is_valid_hyphenless - check if uuid is valid in hyphenless format ++ * ++ * @uuid_str: uuid string ++ * ++ * Return: true when uuid is valid in hyphenless format ++ * false when uuid is invalid ++ */ ++static bool uuid_is_valid_hyphenless(const char *uuid_str) ++{ ++ unsigned int i; ++ ++ /* exclude (i == 8 || i == 13 || i == 18 || i == 23) */ ++ for (i = 0; i < UUID_STRING_LEN - 4; i++) ++ if (!isxdigit(uuid_str[i])) ++ return false; ++ ++ return true; ++} ++ ++/** ++ * uuid_normalize_hyphenless - convert uuid from hyphenless format ++ * to standard format ++ * ++ * @uuid_hl: uuid string in hyphenless format ++ * @uuid_str: output param to hold uuid string in standard format ++ */ ++static void uuid_normalize_hyphenless(const char *uuid_hl, char *uuid_str) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < UUID_STRING_LEN; i++) { ++ if (i == 8 || i == 13 || i == 18 || i == 23) ++ uuid_str[i] = '-'; ++ else ++ uuid_str[i] = *uuid_hl++; ++ } ++ uuid_str[i] = '\0'; ++} ++ ++/** ++ * dal_uuid_parse - convert uuid string to binary form ++ * ++ * Input uuid is in either hyphenless or standard format ++ * ++ * @uuid_str: uuid string ++ * @uuid: output param to hold uuid bin ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int dal_uuid_parse(const char *uuid_str, uuid_t *uuid) ++{ ++ char __uuid_str[UUID_STRING_LEN + 1]; ++ ++ if (!uuid_str || !uuid) ++ return -EINVAL; ++ ++ if (uuid_is_valid_hyphenless(uuid_str)) { ++ uuid_normalize_hyphenless(uuid_str, __uuid_str); ++ uuid_str = __uuid_str; ++ } ++ ++ return uuid_parse(uuid_str, uuid); ++} ++ ++/** ++ * bh_msg_is_response - check if message is response ++ * ++ * @msg: message ++ * @len: message length ++ * ++ * Return: true when message is response ++ * false otherwise ++ */ ++bool bh_msg_is_response(const void *msg, size_t len) ++{ ++ const struct bh_response_header 
*r = msg; ++ ++ return (len >= sizeof(*r) && r->h.magic == BH_MSG_RESP_MAGIC); ++} ++ ++/** ++ * bh_msg_is_cmd - check if message is command ++ * ++ * @msg: message ++ * @len: message length ++ * ++ * Return: true when message is command ++ * false otherwise ++ */ ++bool bh_msg_is_cmd(const void *msg, size_t len) ++{ ++ const struct bh_command_header *c = msg; ++ ++ return (len >= sizeof(*c) && c->h.magic == BH_MSG_CMD_MAGIC); ++} ++ ++/** ++ * bh_msg_cmd_hdr - get the command header if message is command ++ * ++ * @msg: message ++ * @len: message length ++ * ++ * Return: pointer to the command header when message is command ++ * NULL otherwise ++ */ ++const struct bh_command_header *bh_msg_cmd_hdr(const void *msg, size_t len) ++{ ++ if (!bh_msg_is_cmd(msg, len)) ++ return NULL; ++ ++ return msg; ++} ++ ++/** ++ * bh_msg_is_cmd_open_session - check if command is open session command ++ * ++ * @hdr: message header ++ * ++ * Return: true when command is open session command ++ * false otherwise ++ */ ++bool bh_msg_is_cmd_open_session(const struct bh_command_header *hdr) ++{ ++ return hdr->id == BHP_CMD_OPEN_JTASESSION; ++} ++ ++/** ++ * bh_open_session_ta_id - get ta id from open session command ++ * ++ * @hdr: message header ++ * @count: message size ++ * ++ * Return: pointer to ta id when command is valid ++ * NULL otherwise ++ */ ++const uuid_t *bh_open_session_ta_id(const struct bh_command_header *hdr, ++ size_t count) ++{ ++ struct bh_open_jta_session_cmd *open_cmd; ++ ++ if (count < sizeof(*hdr) + sizeof(*open_cmd)) ++ return NULL; ++ ++ open_cmd = (struct bh_open_jta_session_cmd *)hdr->cmd; ++ ++ return &open_cmd->ta_id; ++} ++ ++/** ++ * bh_session_is_killed - check if session is killed ++ * ++ * @code: the session return code ++ * ++ * Return: true when the session is killed ++ * false otherwise ++ */ ++static bool bh_session_is_killed(int code) ++{ ++ return (code == BHE_WD_TIMEOUT || ++ code == BHE_UNCAUGHT_EXCEPTION || ++ code == BHE_APPLET_CRASHED); ++} 
++ ++/** ++ * bh_ta_session_open - open session to ta ++ * ++ * This function will block until VM replied the response ++ * ++ * @host_id: out param to hold the session host_id ++ * @ta_id: trusted application (ta) id ++ * @ta_pkg: ta binary package ++ * @pkg_len: ta binary package length ++ * @init_param: init parameters to the session (optional) ++ * @init_len: length of the init parameters ++ * ++ * Return: 0 on success ++ * <0 on system failure ++ * >0 on DAL FW failure ++ */ ++int bh_ta_session_open(u64 *host_id, const char *ta_id, ++ const u8 *ta_pkg, size_t pkg_len, ++ const u8 *init_param, size_t init_len) ++{ ++ int ret; ++ uuid_t bin_ta_id; ++ unsigned int conn_idx; ++ unsigned int count; ++ bool found; ++ uuid_t *ta_ids = NULL; ++ unsigned int i; ++ ++ if (!ta_id || !host_id) ++ return -EINVAL; ++ ++ if (!ta_pkg || !pkg_len) ++ return -EINVAL; ++ ++ if (!init_param && init_len != 0) ++ return -EINVAL; ++ ++ if (dal_uuid_parse(ta_id, &bin_ta_id)) ++ return -EINVAL; ++ ++ *host_id = 0; ++ ++ ret = bh_proxy_check_svl_jta_blocked_state(&bin_ta_id); ++ if (ret) ++ return ret; ++ ++ /* 1: vm conn_idx is IVM dal FW client */ ++ conn_idx = BH_CONN_IDX_IVM; ++ ++ /* 2.1: check whether the ta pkg existed in VM or not */ ++ count = 0; ++ ret = bh_proxy_list_jta_packages(conn_idx, &count, &ta_ids); ++ if (ret) ++ return ret; ++ ++ found = false; ++ for (i = 0; i < count; i++) { ++ if (uuid_equal(&bin_ta_id, &ta_ids[i])) { ++ found = true; ++ break; ++ } ++ } ++ kfree(ta_ids); ++ ++ /* 2.2: download ta pkg if not already present. 
*/ ++ if (!found) { ++ ret = bh_proxy_dnload_jta(conn_idx, &bin_ta_id, ++ ta_pkg, pkg_len); ++ if (ret && ret != BHE_PACKAGE_EXIST) ++ return ret; ++ } ++ ++ /* 3: send open session command to VM */ ++ ret = bh_proxy_open_jta_session(conn_idx, &bin_ta_id, ++ init_param, init_len, ++ host_id, ta_pkg, pkg_len); ++ return ret; ++} ++ ++/** ++ * bh_ta_session_command - send and receive data to/from ta ++ * ++ * This function will block until VM replied the response ++ * ++ * @host_id: session host id ++ * @command_id: command id ++ * @input: message to be sent ++ * @length: sent message size ++ * @output: output param to hold pointer to the buffer which ++ * will contain received message. ++ * This buffer is allocated by Beihai and freed by the user. ++ * @output_length: input and output param - ++ * - input: the expected maximum length of the received message ++ * - output: size of the received message ++ * @response_code: An optional output param to hold the return value ++ * from the applet. Can be NULL. 
++ * ++ * Return: 0 on success ++ * < 0 on system failure ++ * > 0 on DAL FW failure ++ */ ++int bh_ta_session_command(u64 host_id, int command_id, ++ const void *input, size_t length, ++ void **output, size_t *output_length, ++ int *response_code) ++{ ++ int ret; ++ struct bh_command_header *h; ++ struct bh_cmd *cmd; ++ char cmdbuf[CMD_BUF_SIZE(*cmd)]; ++ struct bh_response_header *resp_hdr; ++ unsigned int resp_len; ++ struct bh_session_record *session; ++ struct bh_resp *resp; ++ unsigned int conn_idx = BH_CONN_IDX_IVM; ++ unsigned int len; ++ ++ memset(cmdbuf, 0, sizeof(cmdbuf)); ++ resp_hdr = NULL; ++ ++ if (!bh_is_initialized()) ++ return -EFAULT; ++ ++ if (!input && length != 0) ++ return -EINVAL; ++ ++ if (!output_length) ++ return -EINVAL; ++ ++ if (output) ++ *output = NULL; ++ ++ session = bh_session_find(conn_idx, host_id); ++ if (!session) ++ return -EINVAL; ++ ++ h = (struct bh_command_header *)cmdbuf; ++ cmd = (struct bh_cmd *)h->cmd; ++ h->id = BHP_CMD_SENDANDRECV; ++ cmd->ta_session_id = session->ta_session_id; ++ cmd->command = command_id; ++ cmd->outlen = *output_length; ++ ++ ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), input, length, ++ host_id, (void **)&resp_hdr); ++ if (!resp_hdr) ++ return ret ? 
ret : -EFAULT; ++ ++ if (!ret) ++ ret = resp_hdr->code; ++ ++ session->ta_session_id = resp_hdr->ta_session_id; ++ resp_len = resp_hdr->h.length - sizeof(*resp_hdr); ++ ++ if (ret == BHE_APPLET_SMALL_BUFFER && ++ resp_len == sizeof(struct bh_resp_bof)) { ++ struct bh_resp_bof *bof = ++ (struct bh_resp_bof *)resp_hdr->data; ++ ++ if (response_code) ++ *response_code = be32_to_cpu(bof->response); ++ ++ *output_length = be32_to_cpu(bof->request_length); ++ } ++ ++ if (ret) ++ goto out; ++ ++ if (resp_len < sizeof(struct bh_resp)) { ++ ret = -EBADMSG; ++ goto out; ++ } ++ ++ resp = (struct bh_resp *)resp_hdr->data; ++ ++ if (response_code) ++ *response_code = be32_to_cpu(resp->response); ++ ++ len = resp_len - sizeof(*resp); ++ ++ if (*output_length < len) { ++ ret = -EMSGSIZE; ++ goto out; ++ } ++ ++ if (len && output) { ++ *output = kmemdup(resp->buffer, len, GFP_KERNEL); ++ if (!*output) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ } ++ ++ *output_length = len; ++ ++out: ++ if (bh_session_is_killed(resp_hdr->code)) ++ bh_session_remove(conn_idx, session->host_id); ++ ++ kfree(resp_hdr); ++ ++ return ret; ++} ++ ++/** ++ * bh_ta_session_close - close ta session ++ * ++ * This function will block until VM replied the response ++ * ++ * @host_id: session host id ++ * ++ * Return: 0 on success ++ * <0 on system failure ++ * >0 on DAL FW failure ++ */ ++int bh_ta_session_close(u64 host_id) ++{ ++ int ret; ++ char cmdbuf[CMD_BUF_SIZE(struct bh_close_jta_session_cmd)]; ++ struct bh_response_header *resp_hdr; ++ struct bh_session_record *session; ++ unsigned int conn_idx = BH_CONN_IDX_IVM; ++ ++ memset(cmdbuf, 0, sizeof(cmdbuf)); ++ resp_hdr = NULL; ++ ++ session = bh_session_find(conn_idx, host_id); ++ if (!session) ++ return -EINVAL; ++ ++ bh_prep_session_close_cmd(cmdbuf, session->ta_session_id); ++ ++ ret = bh_request(conn_idx, cmdbuf, sizeof(cmdbuf), NULL, 0, host_id, ++ (void **)&resp_hdr); ++ ++ if (!ret) ++ ret = resp_hdr->code; ++ ++ kfree(resp_hdr); ++ /* ++ * An 
internal session exists, so we should not close the session. ++ * It means that host app should call this API at appropriate time. ++ */ ++ if (ret != BHE_IAC_EXIST_INTERNAL_SESSION) ++ bh_session_remove(conn_idx, host_id); ++ ++ return ret; ++} ++ ++/** ++ * bh_filter_hdr - filter the sent message ++ * ++ * Allow to send valid messages only. ++ * The filtering is done using given filter functions table ++ * ++ * @hdr: message header ++ * @count: message size ++ * @ctx: context to send to the filter functions ++ * @tbl: filter functions table ++ * ++ * Return: 0 when message is valid ++ * <0 on otherwise ++ */ ++int bh_filter_hdr(const struct bh_command_header *hdr, size_t count, void *ctx, ++ const bh_filter_func tbl[]) ++{ ++ int i; ++ int ret; ++ ++ for (i = 0; tbl[i]; i++) { ++ ret = tbl[i](hdr, count, ctx); ++ if (ret < 0) ++ return ret; ++ } ++ return 0; ++} ++ ++/** ++ * bh_prep_access_denied_response - prepare package with 'access denied' ++ * response code. ++ * ++ * This function is used to send in band error to user who trying to send ++ * message when he lacks the needed permissions ++ * ++ * @cmd: the invalid command message ++ * @res: out param to hold the response header ++ */ ++void bh_prep_access_denied_response(const char *cmd, ++ struct bh_response_header *res) ++{ ++ struct bh_command_header *cmd_hdr = (struct bh_command_header *)cmd; ++ ++ res->h.magic = BH_MSG_RESP_MAGIC; ++ res->h.length = sizeof(*res); ++ res->code = BHE_OPERATION_NOT_PERMITTED; ++ res->seq = cmd_hdr->seq; ++} +diff --git a/drivers/misc/mei/dal/bh_external.h b/drivers/misc/mei/dal/bh_external.h +new file mode 100644 +index 000000000000..ea8b3c87b0e0 +--- /dev/null ++++ b/drivers/misc/mei/dal/bh_external.h +@@ -0,0 +1,46 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++ ++#ifndef __BH_EXTERNAL_H ++#define __BH_EXTERNAL_H ++ ++#include ++#include "bh_cmd_defs.h" ++ ++# define MSG_SEQ_START_NUMBER BIT_ULL(32) ++ ++bool bh_is_initialized(void); ++void bh_init_internal(void); ++void bh_deinit_internal(void); ++ ++int bh_ta_session_open(u64 *host_id, const char *ta_id, const u8 *ta_pkg, ++ size_t pkg_len, const u8 *init_param, size_t init_len); ++ ++int bh_ta_session_close(u64 host_id); ++ ++int bh_ta_session_command(u64 host_id, int command_id, const void *input, ++ size_t length, void **output, size_t *output_length, ++ int *response_code); ++ ++const struct bh_command_header *bh_msg_cmd_hdr(const void *msg, size_t len); ++ ++typedef int (*bh_filter_func)(const struct bh_command_header *hdr, ++ size_t count, void *ctx); ++ ++int bh_filter_hdr(const struct bh_command_header *hdr, size_t count, void *ctx, ++ const bh_filter_func tbl[]); ++ ++bool bh_msg_is_cmd_open_session(const struct bh_command_header *hdr); ++ ++const uuid_t *bh_open_session_ta_id(const struct bh_command_header *hdr, ++ size_t count); ++ ++void bh_prep_access_denied_response(const char *cmd, ++ struct bh_response_header *res); ++ ++bool bh_msg_is_cmd(const void *msg, size_t len); ++bool bh_msg_is_response(const void *msg, size_t len); ++ ++#endif /* __BH_EXTERNAL_H */ +diff --git a/drivers/misc/mei/dal/bh_internal.c b/drivers/misc/mei/dal/bh_internal.c +new file mode 100644 +index 000000000000..498e4f269381 +--- /dev/null ++++ b/drivers/misc/mei/dal/bh_internal.c +@@ -0,0 +1,845 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include "bh_errcode.h" ++#include "bh_external.h" ++#include "bh_internal.h" ++ ++/* BH initialization state */ ++static atomic_t bh_state = ATOMIC_INIT(0); ++static u64 bh_host_id_number = MSG_SEQ_START_NUMBER; ++ ++/** ++ * struct bh_request_cmd - bh request command ++ * ++ * @link: link in the request list of bh service ++ * @cmd: command header and data ++ * @cmd_len: command buffer length ++ * @conn_idx: connection index ++ * @host_id: session host id ++ * @response: response buffer ++ * @complete: request completion ++ * @ret: return value of the request ++ */ ++struct bh_request_cmd { ++ struct list_head link; ++ u8 *cmd; ++ unsigned int cmd_len; ++ unsigned int conn_idx; ++ u64 host_id; ++ void *response; ++ struct completion complete; ++ int ret; ++}; ++ ++struct bh_service { ++ struct work_struct work; ++ struct mutex request_lock; /* request lock */ ++ struct list_head request_list; ++}; ++ ++static struct bh_service bh_srvc; ++ ++/* ++ * dal device session records list (array of list per dal device) ++ * represents opened sessions to dal fw client ++ */ ++static struct list_head dal_dev_session_list[BH_CONN_MAX]; ++ ++/** ++ * bh_get_msg_host_id - increase the shared variable bh_host_id_number by 1 ++ * and wrap around if needed ++ * ++ * Return: the updated host id number ++ */ ++u64 bh_get_msg_host_id(void) ++{ ++ bh_host_id_number++; ++ /* wrap around. 
sequence_number must ++ * not be 0, as required by Firmware VM ++ */ ++ if (bh_host_id_number == 0) ++ bh_host_id_number = MSG_SEQ_START_NUMBER; ++ ++ return bh_host_id_number; ++} ++ ++/** ++ * bh_session_find - find session record by handle ++ * ++ * @conn_idx: DAL client connection idx ++ * @host_id: session host id ++ * ++ * Return: pointer to bh_session_record if found ++ * NULL if the session wasn't found ++ */ ++struct bh_session_record *bh_session_find(unsigned int conn_idx, u64 host_id) ++{ ++ struct bh_session_record *pos; ++ struct list_head *session_list = &dal_dev_session_list[conn_idx]; ++ ++ list_for_each_entry(pos, session_list, link) { ++ if (pos->host_id == host_id) ++ return pos; ++ } ++ ++ return NULL; ++} ++ ++/** ++ * bh_session_add - add session record to list ++ * ++ * @conn_idx: fw client connection idx ++ * @session: session record ++ */ ++void bh_session_add(unsigned int conn_idx, struct bh_session_record *session) ++{ ++ list_add_tail(&session->link, &dal_dev_session_list[conn_idx]); ++} ++ ++/** ++ * bh_session_remove - remove session record from list, and release its memory ++ * ++ * @conn_idx: fw client connection idx ++ * @host_id: session host id ++ */ ++void bh_session_remove(unsigned int conn_idx, u64 host_id) ++{ ++ struct bh_session_record *session; ++ ++ session = bh_session_find(conn_idx, host_id); ++ ++ if (session) { ++ list_del(&session->link); ++ kfree(session); ++ } ++} ++ ++static void bh_request_free(struct bh_request_cmd *request) ++{ ++ if (!request) ++ return; ++ kfree(request->cmd); ++ kfree(request->response); ++ kfree(request); ++ request = NULL; ++} ++ ++static struct bh_request_cmd *bh_request_alloc(const void *hdr, ++ size_t hdr_len, ++ const void *data, ++ size_t data_len, ++ unsigned int conn_idx, ++ u64 host_id) ++{ ++ struct bh_request_cmd *request; ++ size_t buf_len; ++ ++ if (!hdr || hdr_len < sizeof(struct bh_command_header)) ++ return ERR_PTR(-EINVAL); ++ ++ if (!data && data_len) ++ return
ERR_PTR(-EINVAL); ++ ++ if (check_add_overflow(hdr_len, data_len, &buf_len)) ++ return ERR_PTR(-EOVERFLOW); ++ ++ request = kzalloc(sizeof(*request), GFP_KERNEL); ++ if (!request) ++ return ERR_PTR(-ENOMEM); ++ ++ request->cmd = kmalloc(buf_len, GFP_KERNEL); ++ if (!request->cmd) { ++ kfree(request); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ memcpy(request->cmd, hdr, hdr_len); ++ request->cmd_len = hdr_len; ++ ++ if (data_len) { ++ memcpy(request->cmd + hdr_len, data, data_len); ++ request->cmd_len += data_len; ++ } ++ ++ request->conn_idx = conn_idx; ++ request->host_id = host_id; ++ ++ init_completion(&request->complete); ++ ++ return request; ++} ++ ++/** ++ * bh_transport_recv - receive message from DAL FW. ++ * ++ * @conn_idx: fw client connection idx ++ * @buffer: output buffer to hold the received message ++ * @size: output buffer size ++ * ++ * Return: 0 on success ++ * < 0 on failure ++ */ ++static int bh_transport_recv(unsigned int conn_idx, void *buffer, size_t size) ++{ ++ return 0; ++} ++ ++/** ++ * bh_recv_message_try - try to receive and process message from DAL ++ * ++ * @conn_idx: fw client connection idx ++ * @response: output param to hold the response ++ * @out_host_id: output param to hold the received message host id ++ * it should be identical to the sent message host id ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int bh_recv_message_try(unsigned int conn_idx, void **response, ++ u64 *out_host_id) ++{ ++ int ret; ++ char *data; ++ struct bh_response_header hdr; ++ ++ if (!response) ++ return -EINVAL; ++ ++ *response = NULL; ++ ++ memset(&hdr, 0, sizeof(hdr)); ++ ret = bh_transport_recv(conn_idx, &hdr, sizeof(hdr)); ++ if (ret) ++ return ret; ++ ++ if (hdr.h.length < sizeof(hdr)) ++ return -EBADMSG; ++ ++ /* check magic */ ++ if (hdr.h.magic != BH_MSG_RESP_MAGIC) ++ return -EBADMSG; ++ ++ data = kzalloc(hdr.h.length, GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ ++ memcpy(data, &hdr, sizeof(hdr)); ++ ++ /* message contains
hdr only */ ++ if (hdr.h.length == sizeof(hdr)) ++ goto out; ++ ++ ret = bh_transport_recv(conn_idx, data + sizeof(hdr), ++ hdr.h.length - sizeof(hdr)); ++out: ++ if (out_host_id) ++ *out_host_id = hdr.seq; ++ ++ *response = data; ++ ++ return ret; ++} ++ ++#define MAX_RETRY_COUNT 3 ++static int bh_recv_message(struct bh_request_cmd *request) ++{ ++ u32 retry; ++ u64 res_host_id; ++ void *resp; ++ int ret; ++ ++ for (resp = NULL, retry = 0; retry < MAX_RETRY_COUNT; retry++) { ++ kfree(resp); ++ resp = NULL; ++ ++ res_host_id = 0; ++ ret = bh_recv_message_try(request->conn_idx, ++ &resp, &res_host_id); ++ if (ret) { ++ pr_debug("failed to recv msg = %d\n", ret); ++ continue; ++ } ++ ++ if (res_host_id != request->host_id) { ++ pr_debug("recv message with host_id=%llu != sent host_id=%llu\n", ++ res_host_id, request->host_id); ++ continue; ++ } ++ ++ pr_debug("recv message with try=%d host_id=%llu\n", ++ retry, request->host_id); ++ break; ++ } ++ ++ if (retry == MAX_RETRY_COUNT) { ++ pr_err("out of retry attempts\n"); ++ ret = -EFAULT; ++ } ++ ++ if (ret) { ++ kfree(resp); ++ resp = NULL; ++ } ++ ++ request->response = resp; ++ return ret; ++} ++ ++/** ++ * bh_transport_send - send message to the DAL FW. ++ * ++ * @conn_idx: fw client connection idx ++ * @buffer: message to send ++ * @size: message size ++ * @host_id: message host id ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int bh_transport_send(unsigned int conn_idx, const void *buffer, ++ unsigned int size, u64 host_id) ++{ ++ return 0; ++} ++ ++/** ++ * bh_send_message - build and send command message to DAL FW. 
++ * ++ * @request: all request details ++ * ++ * Return: 0 on success ++ * < 0 on failure ++ */ ++static int bh_send_message(const struct bh_request_cmd *request) ++{ ++ struct bh_command_header *h; ++ ++ if (!request) ++ return -EINVAL; ++ ++ if (request->cmd_len < sizeof(*h) || !request->cmd) ++ return -EINVAL; ++ ++ if (request->conn_idx > BH_CONN_MAX) ++ return -ENODEV; ++ ++ h = (struct bh_command_header *)request->cmd; ++ h->h.magic = BH_MSG_CMD_MAGIC; ++ h->h.length = request->cmd_len; ++ h->seq = request->host_id; ++ ++ return bh_transport_send(request->conn_idx, ++ request->cmd, request->cmd_len, ++ request->host_id); ++} ++ ++void bh_prep_session_close_cmd(void *cmdbuf, u64 ta_session_id) ++{ ++ struct bh_command_header *h = cmdbuf; ++ struct bh_close_jta_session_cmd *cmd; ++ ++ cmd = (struct bh_close_jta_session_cmd *)h->cmd; ++ h->id = BHP_CMD_CLOSE_JTASESSION; ++ cmd->ta_session_id = ta_session_id; ++} ++ ++static int bh_send_recv_message(struct bh_request_cmd *request) ++{ ++ int ret; ++ ++ ret = bh_send_message(request); ++ if (ret) ++ return ret; ++ ++ return bh_recv_message(request); ++} ++ ++static void bh_request_work(struct work_struct *work) ++{ ++ struct bh_service *bh_srv; ++ struct bh_request_cmd *request; ++ struct bh_command_header *h; ++ struct bh_response_header *resp_hdr; ++ int ret; ++ ++ bh_srv = container_of(work, struct bh_service, work); ++ ++ mutex_lock(&bh_srv->request_lock); ++ request = list_first_entry_or_null(&bh_srv->request_list, ++ struct bh_request_cmd, link); ++ if (!request) { ++ ret = -EINVAL; ++ goto out_free; ++ } ++ ++ list_del_init(&request->link); ++ ++ if (!request->cmd_len || !request->cmd) { ++ ret = -EINVAL; ++ goto out_free; ++ } ++ ++ ret = bh_send_recv_message(request); ++ request->ret = ret; ++ ++ if (wq_has_sleeper(&request->complete.wait)) { ++ mutex_unlock(&bh_srv->request_lock); ++ complete(&request->complete); ++ return; ++ } ++ ++ /* no one waits for the response - clean up is needed */ ++ 
pr_debug("no waiter - clean up is needed\n"); ++ resp_hdr = (struct bh_response_header *)request->response; ++ /* ++ * if the command was open_session and ++ * it was succeeded then close the session ++ */ ++ if (ret || resp_hdr->code) ++ goto out_free; ++ ++ h = (struct bh_command_header *)request->cmd; ++ if (bh_msg_is_cmd_open_session(h)) { ++ char cmdbuf[CMD_BUF_SIZE(struct bh_close_jta_session_cmd)]; ++ u64 host_id = request->host_id; ++ ++ bh_request_free(request); ++ ++ bh_prep_session_close_cmd(cmdbuf, resp_hdr->ta_session_id); ++ request = bh_request_alloc(cmdbuf, sizeof(cmdbuf), NULL, 0, ++ BH_CONN_IDX_IVM, host_id); ++ if (!IS_ERR(request)) ++ bh_send_recv_message(request); ++ } ++ ++out_free: ++ bh_request_free(request); ++ mutex_unlock(&bh_srv->request_lock); ++} ++ ++/** ++ * bh_request - send request to DAL FW and receive response back ++ * ++ * @conn_idx: fw client connection idx ++ * @cmd_hdr: command header ++ * @cmd_hdr_len: command header length ++ * @cmd_data: command data (message content) ++ * @cmd_data_len: data length ++ * @host_id: message host id ++ * @response: output param to hold the response ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++int bh_request(unsigned int conn_idx, void *cmd_hdr, unsigned int cmd_hdr_len, ++ const void *cmd_data, unsigned int cmd_data_len, ++ u64 host_id, void **response) ++{ ++ int ret; ++ struct bh_request_cmd *request; ++ ++ mutex_lock(&bh_srvc.request_lock); ++ request = bh_request_alloc(cmd_hdr, cmd_hdr_len, cmd_data, cmd_data_len, ++ conn_idx, host_id); ++ if (IS_ERR(request)) { ++ mutex_unlock(&bh_srvc.request_lock); ++ return PTR_ERR(request); ++ } ++ ++ list_add_tail(&request->link, &bh_srvc.request_list); ++ mutex_unlock(&bh_srvc.request_lock); ++ ++ schedule_work(&bh_srvc.work); ++ ret = wait_for_completion_interruptible(&request->complete); ++ /* ++ * if wait was interrupted than do not free allocated memory. 
++ * it is used by the worker ++ */ ++ if (ret) ++ return ret; ++ ++ mutex_lock(&bh_srvc.request_lock); ++ ++ /* detach response buffer */ ++ *response = request->response; ++ request->response = NULL; ++ ++ ret = request->ret; ++ ++ bh_request_free(request); ++ ++ mutex_unlock(&bh_srvc.request_lock); ++ ++ return ret; ++} ++ ++/** ++ * bh_session_list_free - free session list of given dal fw client ++ * ++ * @conn_idx: fw client connection idx ++ */ ++static void bh_session_list_free(unsigned int conn_idx) ++{ ++ struct bh_session_record *pos, *next; ++ struct list_head *session_list = &dal_dev_session_list[conn_idx]; ++ ++ list_for_each_entry_safe(pos, next, session_list, link) { ++ list_del(&pos->link); ++ kfree(pos); ++ } ++ ++ INIT_LIST_HEAD(session_list); ++} ++ ++/** ++ * bh_session_list_init - initialize session list of given dal fw client ++ * ++ * @conn_idx: fw client connection idx ++ */ ++static void bh_session_list_init(unsigned int conn_idx) ++{ ++ INIT_LIST_HEAD(&dal_dev_session_list[conn_idx]); ++} ++ ++/** ++ * bh_proxy_check_svl_jta_blocked_state - check if ta security version ++ * is blocked ++ * ++ * When installing a ta, a minimum security version is given, ++ * so DAL will block installation of this ta from lower version.
++ * (even after the ta will be uninstalled) ++ * ++ * @ta_id: trusted application (ta) id ++ * ++ * Return: 0 when ta security version isn't blocked ++ * <0 on system failure ++ * >0 on DAL FW failure ++ */ ++int bh_proxy_check_svl_jta_blocked_state(uuid_t *ta_id) ++{ ++ int ret; ++ struct bh_command_header *h; ++ struct bh_check_svl_jta_blocked_state_cmd *cmd; ++ char cmdbuf[CMD_BUF_SIZE(*cmd)]; ++ struct bh_response_header *resp_hdr; ++ u64 host_id; ++ ++ if (!ta_id) ++ return -EINVAL; ++ ++ memset(cmdbuf, 0, sizeof(cmdbuf)); ++ resp_hdr = NULL; ++ ++ h = (struct bh_command_header *)cmdbuf; ++ cmd = (struct bh_check_svl_jta_blocked_state_cmd *)h->cmd; ++ h->id = BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE; ++ cmd->ta_id = *ta_id; ++ ++ host_id = bh_get_msg_host_id(); ++ ret = bh_request(BH_CONN_IDX_SDM, h, CMD_BUF_SIZE(*cmd), NULL, 0, ++ host_id, (void **)&resp_hdr); ++ ++ if (!ret) ++ ret = resp_hdr->code; ++ ++ kfree(resp_hdr); ++ ++ return ret; ++} ++ ++/** ++ * bh_proxy_list_jta_packages - get list of ta packages in DAL ++ * ++ * @conn_idx: fw client connection idx ++ * @count: out param to hold the count of ta packages in DAL ++ * @ta_ids: out param to hold pointer to the ids of ta packages in DAL ++ * The buffer which holds the ids is allocated in this function ++ * and freed by the caller ++ * ++ * Return: 0 when ta security version isn't blocked ++ * <0 on system failure ++ * >0 on DAL FW failure ++ */ ++int bh_proxy_list_jta_packages(unsigned int conn_idx, unsigned int *count, ++ uuid_t **ta_ids) ++{ ++ int ret; ++ struct bh_command_header h; ++ struct bh_response_header *resp_hdr; ++ unsigned int resp_len; ++ struct bh_resp_list_ta_packages *resp; ++ uuid_t *outbuf; ++ unsigned int i; ++ u64 host_id; ++ ++ memset(&h, 0, sizeof(h)); ++ resp_hdr = NULL; ++ ++ if (!bh_is_initialized()) ++ return -EFAULT; ++ ++ if (!count || !ta_ids) ++ return -EINVAL; ++ ++ *ta_ids = NULL; ++ *count = 0; ++ ++ h.id = BHP_CMD_LIST_TA_PACKAGES; ++ ++ host_id = bh_get_msg_host_id(); 
++ ret = bh_request(conn_idx, &h, sizeof(h), NULL, 0, host_id, ++ (void **)&resp_hdr); ++ ++ if (!ret) ++ ret = resp_hdr->code; ++ if (ret) ++ goto out; ++ ++ resp_len = resp_hdr->h.length - sizeof(*resp_hdr); ++ if (resp_len < sizeof(*resp)) { ++ ret = -EBADMSG; ++ goto out; ++ } ++ ++ resp = (struct bh_resp_list_ta_packages *)resp_hdr->data; ++ if (!resp->count) { ++ /* return success, there are no ta packages loaded in DAL FW */ ++ ret = 0; ++ goto out; ++ } ++ ++ if (resp_len != sizeof(uuid_t) * resp->count + sizeof(*resp)) { ++ ret = -EBADMSG; ++ goto out; ++ } ++ ++ outbuf = kcalloc(resp->count, sizeof(uuid_t), GFP_KERNEL); ++ ++ if (!outbuf) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ for (i = 0; i < resp->count; i++) ++ outbuf[i] = resp->ta_ids[i]; ++ ++ *ta_ids = outbuf; ++ *count = resp->count; ++ ++out: ++ kfree(resp_hdr); ++ return ret; ++} ++ ++/** ++ * bh_proxy_dnload_jta - download ta package to DAL ++ * ++ * @conn_idx: fw client connection idx ++ * @ta_id: trusted application (ta) id ++ * @ta_pkg: ta binary package ++ * @pkg_len: ta binary package length ++ * ++ * Return: 0 on success ++ * <0 on system failure ++ * >0 on DAL FW failure ++ */ ++int bh_proxy_dnload_jta(unsigned int conn_idx, uuid_t *ta_id, ++ const char *ta_pkg, unsigned int pkg_len) ++{ ++ struct bh_command_header *h; ++ struct bh_download_jta_cmd *cmd; ++ char cmdbuf[CMD_BUF_SIZE(*cmd)]; ++ struct bh_response_header *resp_hdr; ++ u64 host_id; ++ int ret; ++ ++ if (!ta_pkg || !pkg_len || !ta_id) ++ return -EINVAL; ++ ++ memset(cmdbuf, 0, sizeof(cmdbuf)); ++ resp_hdr = NULL; ++ ++ h = (struct bh_command_header *)cmdbuf; ++ cmd = (struct bh_download_jta_cmd *)h->cmd; ++ h->id = BHP_CMD_DOWNLOAD_JAVATA; ++ cmd->ta_id = *ta_id; ++ ++ host_id = bh_get_msg_host_id(); ++ ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), ta_pkg, pkg_len, ++ host_id, (void **)&resp_hdr); ++ ++ if (!ret) ++ ret = resp_hdr->code; ++ ++ kfree(resp_hdr); ++ ++ return ret; ++} ++ ++/** ++ * 
bh_proxy_open_jta_session - send open session command ++ * ++ * @conn_idx: fw client connection idx ++ * @ta_id: trusted application (ta) id ++ * @init_buffer: init parameters to the session (optional) ++ * @init_len: length of the init parameters ++ * @host_id: out param to hold the session host id ++ * @ta_pkg: ta binary package ++ * @pkg_len: ta binary package length ++ * ++ * Return: 0 on success ++ * <0 on system failure ++ * >0 on DAL FW failure ++ */ ++int bh_proxy_open_jta_session(unsigned int conn_idx, ++ uuid_t *ta_id, ++ const char *init_buffer, ++ unsigned int init_len, ++ u64 *host_id, ++ const char *ta_pkg, ++ unsigned int pkg_len) ++{ ++ int ret; ++ struct bh_command_header *h; ++ struct bh_open_jta_session_cmd *cmd; ++ char cmdbuf[CMD_BUF_SIZE(*cmd)]; ++ struct bh_response_header *resp_hdr; ++ struct bh_session_record *session; ++ ++ if (!host_id || !ta_id) ++ return -EINVAL; ++ ++ if (!init_buffer && init_len > 0) ++ return -EINVAL; ++ ++ memset(cmdbuf, 0, sizeof(cmdbuf)); ++ resp_hdr = NULL; ++ ++ h = (struct bh_command_header *)cmdbuf; ++ cmd = (struct bh_open_jta_session_cmd *)h->cmd; ++ ++ session = kzalloc(sizeof(*session), GFP_KERNEL); ++ if (!session) ++ return -ENOMEM; ++ ++ session->host_id = bh_get_msg_host_id(); ++ bh_session_add(conn_idx, session); ++ ++ h->id = BHP_CMD_OPEN_JTASESSION; ++ cmd->ta_id = *ta_id; ++ ++ ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), init_buffer, ++ init_len, session->host_id, (void **)&resp_hdr); ++ ++ if (!ret && resp_hdr) ++ ret = resp_hdr->code; ++ ++ if (ret == BHE_PACKAGE_NOT_FOUND) { ++ /* ++ * VM might delete the TA pkg when no live session. 
++ * Download the TA pkg and open session again ++ */ ++ ret = bh_proxy_dnload_jta(conn_idx, ta_id, ta_pkg, pkg_len); ++ if (ret) ++ goto out; ++ ++ kfree(resp_hdr); ++ resp_hdr = NULL; ++ ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), init_buffer, ++ init_len, session->host_id, ++ (void **)&resp_hdr); ++ ++ if (!ret && resp_hdr) ++ ret = resp_hdr->code; ++ } ++ ++ if (resp_hdr) ++ session->ta_session_id = resp_hdr->ta_session_id; ++ *host_id = session->host_id; ++ ++out: ++ if (ret) ++ bh_session_remove(conn_idx, session->host_id); ++ ++ kfree(resp_hdr); ++ ++ return ret; ++} ++ ++/** ++ * bh_request_list_free - free request list of bh_service ++ * ++ * @request_list: request list ++ */ ++static void bh_request_list_free(struct list_head *request_list) ++{ ++ struct bh_request_cmd *pos, *next; ++ ++ list_for_each_entry_safe(pos, next, request_list, link) { ++ list_del(&pos->link); ++ bh_request_free(pos); ++ } ++ ++ INIT_LIST_HEAD(request_list); ++} ++ ++/** ++ * bh_is_initialized - check if bh is initialized ++ * ++ * Return: true when bh is initialized and false otherwise ++ */ ++bool bh_is_initialized(void) ++{ ++ return atomic_read(&bh_state) == 1; ++} ++ ++/** ++ * bh_init_internal - BH initialization function ++ * ++ * The BH initialization creates the session lists for all ++ * dal devices (dal fw clients) ++ * ++ * Return: 0 ++ */ ++void bh_init_internal(void) ++{ ++ unsigned int i; ++ ++ if (!atomic_add_unless(&bh_state, 1, 1)) ++ return; ++ ++ for (i = BH_CONN_IDX_START; i < BH_CONN_MAX; i++) ++ bh_session_list_init(i); ++ ++ INIT_LIST_HEAD(&bh_srvc.request_list); ++ mutex_init(&bh_srvc.request_lock); ++ INIT_WORK(&bh_srvc.work, bh_request_work); ++} ++ ++/** ++ * bh_deinit_internal - BH deinit function ++ * ++ * The deinitialization frees the session lists of all ++ * dal devices (dal fw clients) ++ */ ++void bh_deinit_internal(void) ++{ ++ unsigned int i; ++ ++ if (!atomic_add_unless(&bh_state, -1, 0)) ++ return; ++ ++ for (i = BH_CONN_IDX_START; i 
< BH_CONN_MAX; i++) ++ bh_session_list_free(i); ++ ++ cancel_work_sync(&bh_srvc.work); ++ bh_request_list_free(&bh_srvc.request_list); ++} +diff --git a/drivers/misc/mei/dal/bh_internal.h b/drivers/misc/mei/dal/bh_internal.h +new file mode 100644 +index 000000000000..68c4e1a435cf +--- /dev/null ++++ b/drivers/misc/mei/dal/bh_internal.h +@@ -0,0 +1,78 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. ++ */ ++ ++#ifndef __BH_INTERNAL_H ++#define __BH_INTERNAL_H ++ ++#include ++#include ++#include ++#include ++ ++#include "bh_cmd_defs.h" ++ ++/** ++ * struct bh_session_record - session record ++ * ++ * @link: link in dal_dev_session_list of dal fw client ++ * @host_id: message/session host id ++ * @ta_session_id: session id ++ */ ++struct bh_session_record { ++ struct list_head link; ++ u64 host_id; ++ u64 ta_session_id; ++}; ++ ++/* command buffer size */ ++#define CMD_BUF_SIZE(cmd) (sizeof(struct bh_command_header) + sizeof(cmd)) ++ ++/** ++ * enum bh_connection_index - connection index to dal fw clients ++ * ++ * @BH_CONN_IDX_START: start idx ++ * ++ * @BH_CONN_IDX_IVM: Intel/Issuer Virtual Machine ++ * @BH_CONN_IDX_SDM: Security Domain Manager ++ * @BH_CONN_IDX_LAUNCHER: Run Time Manager (Launcher) ++ * ++ * @BH_CONN_MAX : max connection idx ++ */ ++enum bh_connection_index { ++ BH_CONN_IDX_START = 0, ++ ++ BH_CONN_IDX_IVM = 0, ++ BH_CONN_IDX_SDM = 1, ++ BH_CONN_IDX_LAUNCHER = 2, ++ ++ BH_CONN_MAX ++}; ++ ++u64 bh_get_msg_host_id(void); ++ ++struct bh_session_record *bh_session_find(unsigned int conn_idx, u64 host_id); ++void bh_session_add(unsigned int conn_idx, struct bh_session_record *session); ++void bh_session_remove(unsigned int conn_idx, u64 host_id); ++ ++int bh_request(unsigned int conn_idx, ++ void *hdr, unsigned int hdr_len, ++ const void *data, unsigned int data_len, ++ u64 host_id, void **response); ++ ++int bh_proxy_check_svl_jta_blocked_state(uuid_t *ta_id); ++ ++int bh_proxy_list_jta_packages(unsigned 
int conn_idx, ++ unsigned int *count, uuid_t **ta_ids); ++ ++int bh_proxy_dnload_jta(unsigned int conn_idx, uuid_t *ta_id, ++ const char *ta_pkg, unsigned int pkg_len); ++ ++int bh_proxy_open_jta_session(unsigned int conn_idx, uuid_t *ta_id, ++ const char *init_buffer, unsigned int init_len, ++ u64 *host_id, const char *ta_pkg, ++ unsigned int pkg_len); ++ ++void bh_prep_session_close_cmd(void *cmdbuf, u64 ta_session_id); ++#endif /* __BH_INTERNAL_H */ +-- +2.17.1 + diff --git a/patches/0056-net-stmmac-support-gate-control-command-suppo.connectivity b/patches/0056-net-stmmac-support-gate-control-command-suppo.connectivity new file mode 100644 index 0000000000..fde0e1e070 --- /dev/null +++ b/patches/0056-net-stmmac-support-gate-control-command-suppo.connectivity @@ -0,0 +1,99 @@ +From f190ef18e86dc8a9aa9fce20d76b443caf63c58e Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Tue, 13 Aug 2019 22:38:15 +0800 +Subject: [PATCH 056/108] net: stmmac: support gate control command support for + Frame Preemption + +When both IEEE 802.1Qbv and IEEE 802.1Qbu are enabled, the gate control +command supports two additional command types for Frame Preemption: + +a) Set-And-Hold-MAC: Set Gates Mask and put preemptible MAC (pMAC) to hold + state, i.e., no traffic frames are allowed to be transmitte from pMAC. + Express frame is allowed to be transmitted without any interruption. + +b) Set-And-Release-MAC: Set Gates Mask and put pMAC to release state, i.e., + traffic frames are allowed to be transmitted from pMAC and if Express + frame is available for transmission, the preemptible frame will be + preempted. + +In EQoS v5.xx, when both EST & FPE are enabled, TxQ0 is always preemptible. 
+The bit field corresponding to TxQ0 is used to indicate which of the above +commands has been programmed:- + +1b : Set-And-Hold-MAC +0b : Set-And-Release-MAC + +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 25 +++++++++++++++++++ + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 1 + + 2 files changed, 26 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +index 0c6efa5a8c8e..bb490ec71e6f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +@@ -600,6 +600,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv, + u64 time_extension = qopt->cycle_time_extension; + u64 base_time = ktime_to_ns(qopt->base_time); + u64 cycle_time = qopt->cycle_time; ++ struct tsn_hw_cap *cap; + struct est_gcrr egcrr; + u32 extension_ns; + u32 extension_s; +@@ -641,9 +642,12 @@ static int tc_setup_taprio(struct stmmac_priv *priv, + qopt->base_time, qopt->cycle_time, + qopt->cycle_time_extension); + ++ cap = &priv->hw->tsn_info.cap; ++ + for (i = 0; i < qopt->num_entries; i++) { + struct est_gc_entry sgce; + ++ sgce.command = qopt->entries[i].command; + sgce.gates = qopt->entries[i].gate_mask; + sgce.ti_nsec = qopt->entries[i].interval; + +@@ -658,6 +662,27 @@ static int tc_setup_taprio(struct stmmac_priv *priv, + "EST: gates 0x%x, ti_ns %u, cycle_ns %llu\n", + sgce.gates, sgce.ti_nsec, cycle_time); + ++ if ((sgce.command == TC_TAPRIO_CMD_SET_AND_HOLD || ++ sgce.command == TC_TAPRIO_CMD_SET_AND_RELEASE) && ++ !fpe_q_mask) { ++ dev_err(priv->device, ++ "FPE: FPE QMask must not be all 0s!\n"); ++ return -EINVAL; ++ } ++ ++ /* If FPE is enabled together with EST, the GCL bit for TxQ0 ++ * marks if Set-And-Hold-MAC(1) or Set-And-Release-MAC(0) ++ * operation. Under such condition, any TxQ that is marked as ++ * preemptible in txqpec, the GCL bit is ignored. 
As this is ++ * DWMAC specific definition, we clear 'gates' bit corresponds ++ * to TxQ0 up-front to prevent incorrectly hold pMAC. ++ */ ++ if (fpe_q_mask) { ++ sgce.gates &= ~cap->pmac_bit; ++ if (sgce.command == TC_TAPRIO_CMD_SET_AND_HOLD) ++ sgce.gates |= cap->pmac_bit; ++ } ++ + ret = stmmac_set_est_gce(priv, priv->hw, priv->dev, + &sgce, i, 0, 0); + if (ret) { +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index 2a27e2b86a60..e0770d6b6c7f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -78,6 +78,7 @@ struct tsn_hw_cap { + + /* EST Gate Control Entry */ + struct est_gc_entry { ++ u8 command; /* Gate command */ + u32 gates; /* gate control: 0: closed, + * 1: open. + */ +-- +2.17.1 + diff --git a/patches/0056-refine-work-queue-in-trusty-driver.trusty b/patches/0056-refine-work-queue-in-trusty-driver.trusty new file mode 100644 index 0000000000..3cee744040 --- /dev/null +++ b/patches/0056-refine-work-queue-in-trusty-driver.trusty @@ -0,0 +1,111 @@ +From d74bc689bd67833aa9b9547deaff81c9680f9f6f Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Tue, 17 Jul 2018 21:16:53 +0800 +Subject: [PATCH 56/63] refine work queue in trusty driver + +Change-Id: I049497485f87d2c90e23be11893696513602800b +Tracked-On: OAM-66823 +Signed-off-by: Zhang, Qi +--- + drivers/trusty/trusty-timer.c | 7 +------ + drivers/trusty/trusty-virtio.c | 4 ++-- + drivers/trusty/trusty.c | 9 ++++++--- + include/linux/trusty/smcall.h | 3 +++ + 4 files changed, 12 insertions(+), 11 deletions(-) + +diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c +index 18e315c25067..6783a30b4a11 100644 +--- a/drivers/trusty/trusty-timer.c ++++ b/drivers/trusty/trusty-timer.c +@@ -36,11 +36,6 @@ struct trusty_timer_dev_state { + struct workqueue_struct *workqueue; + }; + +-/* Max entity defined as SMC_NUM_ENTITIES(64) */ +-#define SMC_ENTITY_SMC_X86 63 
/* Used for customized SMC calls */ +- +-#define SMC_SC_LK_TIMER SMC_STDCALL_NR(SMC_ENTITY_SMC_X86, 0) +- + static void timer_work_func(struct work_struct *work) + { + int ret; +@@ -59,7 +54,7 @@ static enum hrtimer_restart trusty_timer_cb(struct hrtimer *tm) + + s = container_of(tm, struct trusty_timer_dev_state, timer.tm); + +- queue_work(s->workqueue, &s->timer.work); ++ queue_work_on(0, s->workqueue, &s->timer.work); + + return HRTIMER_NORESTART; + } +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index 66b4ee7caf0d..df066dda80d3 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -150,7 +150,7 @@ static bool trusty_virtio_notify(struct virtqueue *vq) + + if (api_ver < TRUSTY_API_VERSION_SMP_NOP) { + atomic_set(&tvr->needs_kick, 1); +- queue_work(tctx->kick_wq, &tctx->kick_vqs); ++ queue_work_on(0, tctx->kick_wq, &tctx->kick_vqs); + } else { + trusty_enqueue_nop(tctx->dev->parent, &tvr->kick_nop); + } +@@ -685,7 +685,7 @@ static int trusty_virtio_probe(struct platform_device *pdev) + } + + tctx->kick_wq = alloc_workqueue("trusty-kick-wq", +- WQ_UNBOUND | WQ_CPU_INTENSIVE, 0); ++ WQ_CPU_INTENSIVE, 0); + if (!tctx->kick_wq) { + ret = -ENODEV; + dev_err(&pdev->dev, "Failed create trusty-kick-wq\n"); +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 4d33f269851d..8f80f9b84772 100755 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -270,7 +270,6 @@ static long trusty_std_call32_work(void *args) + + s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + { +- const int cpu = 0; + struct trusty_std_call32_args args = { + .dev = dev, + .smcnr = smcnr, +@@ -280,7 +279,11 @@ s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + }; + + /* bind cpu 0 for now since trusty OS is running on physical cpu #0*/ +- return work_on_cpu(cpu, trusty_std_call32_work, (void *) &args); ++ if((smcnr == SMC_SC_VDEV_KICK_VQ) || (smcnr == 
SMC_SC_LK_TIMER) ++ || (smcnr == SMC_SC_LOCKED_NOP) || (smcnr == SMC_SC_NOP)) ++ return trusty_std_call32_work((void *) &args); ++ else ++ return work_on_cpu(0, trusty_std_call32_work, (void *) &args); + } + + EXPORT_SYMBOL(trusty_std_call32); +@@ -490,7 +493,7 @@ void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop) + list_add_tail(&nop->node, &s->nop_queue); + spin_unlock_irqrestore(&s->nop_lock, flags); + } +- queue_work(s->nop_wq, &tw->work); ++ queue_work_on(0, s->nop_wq, &tw->work); + preempt_enable(); + } + EXPORT_SYMBOL(trusty_enqueue_nop); +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index 3ab2f688cb33..55d25dddc4a8 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -152,4 +152,7 @@ + #define SMC_SC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24) + #define SMC_NC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 25) + ++/* Max entity defined as SMC_NUM_ENTITIES(64) */ ++#define SMC_ENTITY_SMC_X86 63 /* Used for customized SMC calls */ ++#define SMC_SC_LK_TIMER SMC_STDCALL_NR(SMC_ENTITY_SMC_X86, 0) + #endif /* __LINUX_TRUSTY_SMCALL_H */ +-- +2.17.1 + diff --git a/patches/0056-vhm-prepare-future-update-for-struct-vm_set_memmap.acrn b/patches/0056-vhm-prepare-future-update-for-struct-vm_set_memmap.acrn new file mode 100644 index 0000000000..5b6d9d2372 --- /dev/null +++ b/patches/0056-vhm-prepare-future-update-for-struct-vm_set_memmap.acrn @@ -0,0 +1,60 @@ +From 3f787e1aa2bff477f2350da1987691030b461b1a Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:59:01 +0800 +Subject: [PATCH 056/150] vhm: prepare future update for struct vm_set_memmap + +for back compatible, there is a uint32_t reserved field in struct +vm_set_memmap, which will be removed in the future - finally change +to struct set_memmap. 
+ +this patch is preparing such change by change reserved field to prot +, prot field to prot_2, and updating both prot & prot_2 during +vm_set_memmap setting. + +Signed-off-by: Jason Chen CJ +--- + drivers/vhm/vhm_mm.c | 2 +- + include/linux/vhm/acrn_hv_defs.h | 7 ++++--- + 2 files changed, 5 insertions(+), 4 deletions(-) + +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index 75ccd3f09a4e..fb09ed2f994f 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -167,7 +167,7 @@ int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + set_memmap.remote_gpa = guest_gpa; + set_memmap.vm0_gpa = host_gpa; + set_memmap.length = len; +- set_memmap.prot = ((mem_type & MEM_TYPE_MASK) | ++ set_memmap.prot = set_memmap.prot_2 = ((mem_type & MEM_TYPE_MASK) | + (mem_access_right & MEM_ACCESS_RIGHT_MASK)); + + /* hypercall to notify hv the guest EPT setting*/ +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 161523dca1db..8873f67dac40 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -135,7 +135,9 @@ struct vm_set_memmap { + #define MAP_MMIO 1 + #define MAP_UNMAP 2 + uint32_t type; +- uint32_t reserved; ++ ++ /* IN: mem attr */ ++ uint32_t prot; + + /* IN: beginning guest GPA to map */ + uint64_t remote_gpa; +@@ -146,8 +148,7 @@ struct vm_set_memmap { + /* IN: length of the range */ + uint64_t length; + +- /* IN: mem attr */ +- uint32_t prot; ++ uint32_t prot_2; + } __attribute__((aligned(8))); + + struct memory_map { +-- +2.17.1 + diff --git a/patches/0057-ASoC-doc-Add-probing-documentation.audio b/patches/0057-ASoC-doc-Add-probing-documentation.audio new file mode 100644 index 0000000000..ad1a94cfa0 --- /dev/null +++ b/patches/0057-ASoC-doc-Add-probing-documentation.audio @@ -0,0 +1,306 @@ +From 69bddcb6214457362737701de0e0fb7d4c8367f7 Mon Sep 17 00:00:00 2001 +From: Cezary Rojewski +Date: Thu, 14 Mar 2019 12:51:34 +0100 +Subject: [PATCH 057/193] ASoC: doc: Add probing 
documentation + +Provide description of Probe module, its interface and adaptation for +userspace via debugfs. + +Change-Id: I9797cac3d809063aa3d3f7329452377f4e2aed9b +Signed-off-by: Cezary Rojewski +--- + Documentation/sound/soc/index.rst | 2 + + Documentation/sound/soc/sst/index.rst | 8 + + Documentation/sound/soc/sst/probe.rst | 252 ++++++++++++++++++++++++++ + 3 files changed, 262 insertions(+) + create mode 100644 Documentation/sound/soc/sst/index.rst + create mode 100644 Documentation/sound/soc/sst/probe.rst + +diff --git a/Documentation/sound/soc/index.rst b/Documentation/sound/soc/index.rst +index e57df2dab2fd..fb4935d7a9a8 100644 +--- a/Documentation/sound/soc/index.rst ++++ b/Documentation/sound/soc/index.rst +@@ -7,6 +7,8 @@ The documentation is spilt into the following sections:- + .. toctree:: + :maxdepth: 2 + ++ sst/index ++ + overview + codec + dai +diff --git a/Documentation/sound/soc/sst/index.rst b/Documentation/sound/soc/sst/index.rst +new file mode 100644 +index 000000000000..50ebd97346cc +--- /dev/null ++++ b/Documentation/sound/soc/sst/index.rst +@@ -0,0 +1,8 @@ ++============================ ++Intel Smart Sound Technology ++============================ ++ ++.. toctree:: ++ :maxdepth: 2 ++ ++ probe +diff --git a/Documentation/sound/soc/sst/probe.rst b/Documentation/sound/soc/sst/probe.rst +new file mode 100644 +index 000000000000..8c7352c17a3c +--- /dev/null ++++ b/Documentation/sound/soc/sst/probe.rst +@@ -0,0 +1,252 @@ ++================================ ++Data probing using Probe module ++================================ ++ ++Pipeline is a living organism made of several pieces called modules, each ++contributing in overall processing of audio data. When encountering distortions, ++is it paramount to bisect pipeline in order to isolate segments which may or may ++not be responsible for said issues. This idea has been embodied in data probing ++and requires enlisting Probe module. 
++ ++Probing allows for direct data extraction from or injection to target module, ++providing opportunity to verify if processing done by the module is correct. ++Note: parsing of extracted data is not part of this document and is considered ++Intel internal only. ++ ++Probe is a loadable, standalone module i.e. there is no parent pipeline ++assigned. By being assigned to no pipeline, it must be explicitly deleted by ++sending Delete Instance IPC request once module is no longer needed. No more ++than one Probe module may be initialized. Probe module by itself serves only as ++a mediator - dockyard for dispatching all probing related IPC request. ++ ++**Requirements** ++ ++* DEBUG_FS enabled ++* tinycompress. Please see tinycompress install and readme for its ++ installation and setup of crecord and cplay tools ++ ++ ++Instance initialization ++----------------------- ++ ++UUID: 7CAD0808-AB10-CD23-EF45-12AB34CD56EF ++ ++Set ppl_instance_id to INVALID_PIPELINE_ID (-1) in Init Instance IPC request. ++There is no dedicated execution context, thus core_id and proc_domain should be ++set to 0. Module uses no cycles and input/ output frame sizes are unused. ++ ++struct skl_probe_mod_cfg describes the module configuration. Apart from base ++configuration, it contains struct skl_probe_gtw_cfg field, which specifies ++node_id and dma_buffer_size for extraction gateway. ++Driver may choose to skip setting extraction gateway configuration by assigning ++INVALID_NODE_ID (-1) instead. However, extraction will not be supported. ++ ++ ++Connection purposes ++------------------- ++:: ++ ++ ************** ++ * * -----Out0-----> ++ ------In0-----> * * ++ * Module * -----Out1-----> ++ ------In1-----> * * ++ * * -----Out2-----> ++ ************** ^ each queue is a possible ++ probe connection point ++ ++Each module within pipeline can be described by its UUID, module_id and ++instance_id attributes. 
Modules expose a number of pins for in and out ++connections with other modules, effectively creating a path. ++``InX(s)`` and ``OutY(s)`` denote pins (or queues as they are also called). ++ ++SKL_CONNECTION_PURPOSE_EXTRACT ++ Extract data from module, given the module_id, instance_id and queue. ++ ++SKL_CONNECTION_PURPOSE_INJECT ++ Inject data to module, given the module_id, instance_id and queue. ++ ++SKL_CONNECTION_PURPOSE_INJECT_REEXTRACT ++ Sometimes we want to do both - extract from and inject data to the exact ++ same queue for given module. This is not possible with previous two, thus ++ INJECT_REEXTRACT has been designed as a solution to this limitation. It ++ combines the two, starting with inject operation which is then followed by ++ data extraction. ++ ++ ++Limitations ++----------- ++ ++There can be at most one extraction stream associated with Probe module, which ++is done during its initialization and cannot be modified during the entire ++lifetime of module. ++ ++Maximum number of probe points connected for extraction may vary between ++platforms and firmware versions but should be no less than 10. In case of ++injectors, number of active probe points is limited by count of available host ++output streams. Currently, for most Intel platforms this number equals 9. ++Maximum of one probe points connected per one injector stream. 
++ ++ ++IPC interface ++------------- ++ ++Implementation offers six IPC requests: ++ ++LARGE_CONFIG_GET ++ ++- INJECTION_PROBE_DMA ++ Retrieve list of host output DMAs associated with Probe module ++- PROBE_POINTS ++ Obtain the list of currently active probe points ++ ++LARGE_CONFIG_SET ++ ++- INJECTION_PROBE_DMA ++ Associate host output DMA with Probe module for data injection ++- INJECTION_PROBE_DMA_DETACH ++ Detach one or more host DMAs from Probe module ++- PROBE_POINTS ++ Create one or more probe points ++- PROBE_POINTS_DISCONNECT ++ Disconnect one or more probe points ++ ++ ++Compress adaptation ++------------------- ++ ++open: ++ ++1. Assign required resources, that is streams, given the compress direction for ++ later use ++ ++We do not want to occupy any other resources at this point until probing is ++confirmed by set_params. No Probe module is initialized because no probe points ++can be connected. ++ ++free: ++ ++1. Retrieve and disconnect all currently connected probe points for given DMA ++2. If stream is of PLAYBACK direction, also retrieve and detach host output DMA ++ assigned to this stream ++3. Cleanup and free stream resources ++4. If stream direction is CAPTURE (max one) ensure extractor is invalided before ++ leaving ++5. If no probing streams are left, send Delete Instance IPC request ++ ++set_params: ++ ++1. Allocate required stream resources ++2. Calculate and set stream format ++3. If it is the very first probing stream, send Init Instance IPC request for ++ Probe module ++4. If stream direction equals PLAYBACK, associate this stream DMA with Probe ++ module ++ ++For the rest, source is pretty self explanatory. ++ ++Key thing to note is the probe operation ordering, which goes as follows: ++ ++1. Init Probe module ++2. Attach host DMA to Probe module if direction of type PLAYBACK ++3. Connect probe points ++4. Disconnect probe points ++5. Detach host DMA from Probe module if direction of type PLAYBACK ++6. 
Delete probe module ++ ++One cannot proceed with connection of injection probe points until given ++stream's DMA is associated with Probe module. Consequently, before detaching ++injector DMA, all probe points for that stream should be disconnected. ++ ++ ++User space ++---------- ++ ++Skylake driver exposes three debugfs entries designed to support four out of six ++available IPC requests. These are: ++ ++General input format: ++ u32,u32,u32,(...) ++ ++probe_injection_dma: ++ ++* dump list of assigned host output DMAs for injection ++ ++:: ++ ++ cat probe_injection_dma ++ ++probe_points: ++ ++* connect new probe points ++* dump currently connected probe points ++ ++:: ++ ++ echo 0x10000004,0,0 > probe_points ++ cat probe_points ++ ++struct skl_probe_point_desc consists of 3 u32 fields, having size of 12 bytes ++total. To connect probe point using debugfs, simply write to probe_points ++sequence of 3 u32s separated with ','. You can, however, enter numerous trios ++causing several probe points connection in the process. ++ ++probe_points_disconnect: ++ ++* disconnect existing probe points ++ ++:: ++ ++ echo 0x10000004 > probe_points_disconnect ++ ++INJECTION_PROBE_DMA (SET) and INJECTION_PROBE_DMA_DETACH (SET) are unsupported ++as they are directly tied to strict initialization process of injection probe ++where host stream becomes paired with said probe. Under no circumstances can ++user interfere with these settings. ++ ++Using debugfs alone will not yield the expected result. It only allows to ++configure the probes yet no processing is executed. In order to actually start ++extracting or injecting data, compress stream must be started. Skylake provides ++separate compress entries for extraction (CAPTURE) and injection (PLAYBACK). ++ ++Despite having no purpose without a separate PCM stream to validate modules for, ++probe compress implementation allows for opening, running and disposing of ++compress streams freely. 
Direction of stream of interest should be opposite to ++probing direction, that is, data extraction (CAPTURE) targets running playback ++stream and vice versa. ++ ++The most common use case scenario is data extraction: ++ ++1. Start the crecord to initialize and prepare for extraction ++2. Start playback PCM stream using aplay tool ++3. Pause PCM stream ++4. Use probe debugfs ``probe_points`` entry found in ``/dsp/ipc/`` to connect ++ probe points to target module within pipeline ++5. Unpause PCM stream ++6. Once finished, simply close aplay ++7. Close crecord last, so no data from PCM stream is lost ++8. Parse data from output file ++ ++Things get more complicated in case of INJECT_REEXTRACT purpose - we need the ++power of cplay and crecord combined: ++ ++1. Start the crecord to initialize and prepare for extraction ++2. Start cplay to associate host output DMA with Probe module ++3. Start capture PCM stream using arecord tool ++4. Pause PCM capture stream ++5. Start playback PCM stream using aplay tool ++6. Pause PCM playback stream ++7. Use probe debugfs ``probe_points`` entry found in ``/dsp/ipc/`` to connect ++ probe points to target module when connecting, specify same queue for ++ extraction and injection ++8. Unpause playback PCM stream ++9. Unpause capture PCM stream ++10. Once finished, simply close aplay and then arecord ++11. Close cplay and then crecord, ensuring no data from PCM streams is lost ++12. Parse data from output file ++ ++Note: DMA attach, detach, as well as probe points connections and disconnection ++can be done either in bulk or one-by-one. Firmware offers no fallback mechanism ++in failure scenario, thus if it happens to be, modify your test to sent requests ++using one-by-one method rather than bulk. It will be easier to navigate which ++probe point exactly is involved in the failure. 
+-- +2.17.1 + diff --git a/patches/0057-VHM-bug-fix-on-operating-multi-thread-synchronization.acrn b/patches/0057-VHM-bug-fix-on-operating-multi-thread-synchronization.acrn new file mode 100644 index 0000000000..157cba3ba7 --- /dev/null +++ b/patches/0057-VHM-bug-fix-on-operating-multi-thread-synchronization.acrn @@ -0,0 +1,89 @@ +From b48becaa982c7dba3d16ea7fe0ad755d3c52c86c Mon Sep 17 00:00:00 2001 +From: "Zheng, Gen" +Date: Fri, 31 Aug 2018 10:59:01 +0800 +Subject: [PATCH 057/150] VHM: bug fix on operating multi-thread + synchronization + +With current code, the ioreq client based on VHM kthread may access +client->wq after the client got freed. +The acrn_ioreq_destroy_client_pervm should wait for the client thread +exit then free its client. + +So do the following fixes: +Make the client threads for vcpu and hyper-dma mark kthread_exit +flag as true before exit. +Make the task that triggered to destroy the client thread, explicitly +waits for the kthread_exit flag turnning to true. + +Signed-off-by: Zheng, Gen +Reviewed-by: Chen, Jason CJ +Reviewed-by: Zhao, Yakui +--- + drivers/vhm/vhm_ioreq.c | 26 +++++++++++++++++--------- + 1 file changed, 17 insertions(+), 9 deletions(-) + +diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c +index 08826c575780..b570b826be95 100644 +--- a/drivers/vhm/vhm_ioreq.c ++++ b/drivers/vhm/vhm_ioreq.c +@@ -91,8 +91,8 @@ struct ioreq_client { + */ + bool fallback; + +- bool destroying; +- bool kthread_exit; ++ volatile bool destroying; ++ volatile bool kthread_exit; + + /* client covered io ranges - N/A for fallback client */ + struct list_head range_list; +@@ -260,15 +260,15 @@ static void acrn_ioreq_destroy_client_pervm(struct ioreq_client *client, + struct list_head *pos, *tmp; + unsigned long flags; + +- /* blocking operation: notify client for cleanup +- * if waitqueue not active, it means client is handling request, +- * at that time, we need wait client finish its handling. 
+- */ +- while (!waitqueue_active(&client->wq) && !client->kthread_exit) +- msleep(10); + client->destroying = true; + acrn_ioreq_notify_client(client); + ++ /* the client thread will mark kthread_exit flag as true before exit, ++ * so wait for it exited. ++ */ ++ while (!client->kthread_exit) ++ msleep(10); ++ + spin_lock_irqsave(&client->range_lock, flags); + list_for_each_safe(pos, tmp, &client->range_list) { + struct ioreq_range *range = +@@ -495,6 +495,10 @@ static int ioreq_client_thread(void *data) + is_destroying(client))); + } + ++ /* the client thread such as for hyper-dma will exit from here, ++ * so mark kthread_exit as true before exit */ ++ client->kthread_exit = true; ++ + return 0; + } + +@@ -543,8 +547,12 @@ int acrn_ioreq_attach_client(int client_id, bool check_kthread_stop) + is_destroying(client))); + } + +- if (is_destroying(client)) ++ if (is_destroying(client)) { ++ /* the client thread for vcpu will exit from here, ++ * so mark kthread_exit as true before exit */ ++ client->kthread_exit = true; + return 1; ++ } + } + + return 0; +-- +2.17.1 + diff --git a/patches/0057-drm-i915-selftests-Remove-accidental-serialization-bet.drm b/patches/0057-drm-i915-selftests-Remove-accidental-serialization-bet.drm new file mode 100644 index 0000000000..4967015642 --- /dev/null +++ b/patches/0057-drm-i915-selftests-Remove-accidental-serialization-bet.drm @@ -0,0 +1,212 @@ +From 3997b0d6488bcea621fda001570a06eedfdcc5c1 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Tue, 27 Aug 2019 17:17:25 +0100 +Subject: [PATCH 057/690] drm/i915/selftests: Remove accidental serialization + between gpu_fill + +Upon object creation for live_gem_contexts, we fill the object with +known scratch and flush it out of the CPU cache. Before performing the +GPU fill, we don't need to flush it again and so avoid serialising with +previous fills. + +However, we do need some throttling on the internal interfaces if we do +not want to run out of memory! 
+ +Signed-off-by: Chris Wilson +Reviewed-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190827161726.3640-1-chris@chris-wilson.co.uk +--- + .../drm/i915/gem/selftests/i915_gem_context.c | 83 ++++++++++++++++--- + 1 file changed, 72 insertions(+), 11 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +index 37a177e37665..63116c4fa8ba 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c ++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +@@ -180,12 +180,6 @@ static int gpu_fill(struct intel_context *ce, + if (IS_ERR(vma)) + return PTR_ERR(vma); + +- i915_gem_object_lock(obj); +- err = i915_gem_object_set_to_gtt_domain(obj, true); +- i915_gem_object_unlock(obj); +- if (err) +- return err; +- + err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER); + if (err) + return err; +@@ -343,6 +337,45 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj) + return npages / DW_PER_PAGE; + } + ++static void throttle_release(struct i915_request **q, int count) ++{ ++ int i; ++ ++ for (i = 0; i < count; i++) { ++ if (IS_ERR_OR_NULL(q[i])) ++ continue; ++ ++ i915_request_put(fetch_and_zero(&q[i])); ++ } ++} ++ ++static int throttle(struct intel_context *ce, ++ struct i915_request **q, int count) ++{ ++ int i; ++ ++ if (!IS_ERR_OR_NULL(q[0])) { ++ if (i915_request_wait(q[0], ++ I915_WAIT_INTERRUPTIBLE, ++ MAX_SCHEDULE_TIMEOUT) < 0) ++ return -EINTR; ++ ++ i915_request_put(q[0]); ++ } ++ ++ for (i = 0; i < count - 1; i++) ++ q[i] = q[i + 1]; ++ ++ q[i] = intel_context_create_request(ce); ++ if (IS_ERR(q[i])) ++ return PTR_ERR(q[i]); ++ ++ i915_request_get(q[i]); ++ i915_request_add(q[i]); ++ ++ return 0; ++} ++ + static int igt_ctx_exec(void *arg) + { + struct drm_i915_private *i915 = arg; +@@ -362,6 +395,7 @@ static int igt_ctx_exec(void *arg) + for_each_engine(engine, i915, id) { + struct drm_i915_gem_object *obj = NULL; + unsigned long 
ncontexts, ndwords, dw; ++ struct i915_request *tq[5] = {}; + struct igt_live_test t; + struct drm_file *file; + IGT_TIMEOUT(end_time); +@@ -409,13 +443,18 @@ static int igt_ctx_exec(void *arg) + } + + err = gpu_fill(ce, obj, dw); +- intel_context_put(ce); +- + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->vm), err); ++ intel_context_put(ce); ++ goto out_unlock; ++ } ++ ++ err = throttle(ce, tq, ARRAY_SIZE(tq)); ++ if (err) { ++ intel_context_put(ce); + goto out_unlock; + } + +@@ -426,6 +465,8 @@ static int igt_ctx_exec(void *arg) + + ndwords++; + ncontexts++; ++ ++ intel_context_put(ce); + } + + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", +@@ -444,6 +485,7 @@ static int igt_ctx_exec(void *arg) + } + + out_unlock: ++ throttle_release(tq, ARRAY_SIZE(tq)); + if (igt_live_test_end(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); +@@ -461,6 +503,7 @@ static int igt_ctx_exec(void *arg) + static int igt_shared_ctx_exec(void *arg) + { + struct drm_i915_private *i915 = arg; ++ struct i915_request *tq[5] = {}; + struct i915_gem_context *parent; + struct intel_engine_cs *engine; + enum intel_engine_id id; +@@ -535,14 +578,20 @@ static int igt_shared_ctx_exec(void *arg) + } + + err = gpu_fill(ce, obj, dw); +- intel_context_put(ce); +- kernel_context_close(ctx); +- + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->vm), err); ++ intel_context_put(ce); ++ kernel_context_close(ctx); ++ goto out_test; ++ } ++ ++ err = throttle(ce, tq, ARRAY_SIZE(tq)); ++ if (err) { ++ intel_context_put(ce); ++ kernel_context_close(ctx); + goto out_test; + } + +@@ -553,6 +602,9 @@ static int igt_shared_ctx_exec(void *arg) + + ndwords++; + ncontexts++; ++ ++ intel_context_put(ce); ++ kernel_context_close(ctx); + } + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", + ncontexts, engine->name, ndwords); +@@ -574,6 +626,7 @@ static int igt_shared_ctx_exec(void *arg) + mutex_lock(&i915->drm.struct_mutex); + } + out_test: ++ throttle_release(tq, ARRAY_SIZE(tq)); + if (igt_live_test_end(&t)) + err = -EIO; + out_unlock: +@@ -1050,6 +1103,7 @@ static int igt_ctx_readonly(void *arg) + { + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj = NULL; ++ struct i915_request *tq[5] = {}; + struct i915_address_space *vm; + struct i915_gem_context *ctx; + unsigned long idx, ndwords, dw; +@@ -1121,6 +1175,12 @@ static int igt_ctx_readonly(void *arg) + goto out_unlock; + } + ++ err = throttle(ce, tq, ARRAY_SIZE(tq)); ++ if (err) { ++ i915_gem_context_unlock_engines(ctx); ++ goto out_unlock; ++ } ++ + if (++dw == max_dwords(obj)) { + obj = NULL; + dw = 0; +@@ -1151,6 +1211,7 @@ static int igt_ctx_readonly(void *arg) + } + + out_unlock: ++ throttle_release(tq, ARRAY_SIZE(tq)); + if (igt_live_test_end(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); +-- +2.17.1 + diff --git a/patches/0057-mei-dal-dynamic-application-loader.security b/patches/0057-mei-dal-dynamic-application-loader.security new file mode 100644 index 0000000000..05588f3944 --- /dev/null +++ b/patches/0057-mei-dal-dynamic-application-loader.security @@ -0,0 +1,951 @@ +From c128b2b660de63ec6db675c7e5918178beb13897 Mon Sep 17 00:00:00 2001 +From: Yael Samet +Date: Tue, 5 Sep 2017 12:21:49 +0300 +Subject: [PATCH 57/65] mei: 
dal: dynamic application loader + +DAL stands for Dynamic Application Loader, it provides the ability +to run Java applets in a secured environment inside of Intel ME security +engine (ME). The Java applets are also named as trusted applications TAs. + +The DAL driver exposes API for both user-space and kernel-space clients. +Both clients can download a trusted application/applet to the +DAL FW and communicate with it. + +This patch adds the core of the DAL driver, the lowest level +of communication with DAL firmware. + +Change-Id: I0d6d9af6039e888c8575c2f21dc37afbadc676da +Signed-off-by: Yael Samet +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/dal/dal_class.c | 730 ++++++++++++++++++++++++++++++- + drivers/misc/mei/dal/dal_dev.h | 149 +++++++ + 2 files changed, 878 insertions(+), 1 deletion(-) + create mode 100644 drivers/misc/mei/dal/dal_dev.h + +diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c +index 6990132dc5e2..1c8e429ef0c6 100644 +--- a/drivers/misc/mei/dal/dal_class.c ++++ b/drivers/misc/mei/dal/dal_class.c +@@ -8,6 +8,21 @@ + #include + #include + #include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "bh_external.h" ++#include "bh_cmd_defs.h" ++#include "bh_errcode.h" ++#include "dal_dev.h" + + /* + * this class contains the 3 mei_cl_device, ivm, sdm, rtm. +@@ -16,13 +31,714 @@ + * + * this class must be initialized before the kernel space kdi uses it. + */ +-static struct class *dal_class; ++struct class *dal_class; ++ ++/** ++ * dal_dc_print - print client data for debug purpose ++ * ++ * @dev: device structure ++ * @dc: dal client ++ */ ++void dal_dc_print(struct device *dev, struct dal_client *dc) ++{ ++ if (!dc) { ++ dev_dbg(dev, "dc is null\n"); ++ return; ++ } ++ ++ dev_dbg(dev, "dc: intf = %d. expected to send: %d, sent: %d. 
expected to receive: %d, received: %d\n", ++ dc->intf, ++ dc->expected_msg_size_to_fw, ++ dc->bytes_sent_to_fw, ++ dc->expected_msg_size_from_fw, ++ dc->bytes_rcvd_from_fw); ++} ++ ++/** ++ * dal_dc_update_read_state - update client read state ++ * ++ * @dc : dal client ++ * @len: received message length ++ * ++ * Locking: called under "ddev->context_lock" lock ++ */ ++static void dal_dc_update_read_state(struct dal_client *dc, ssize_t len) ++{ ++ struct dal_device *ddev = dc->ddev; ++ ++ /* check BH msg magic, if it exists this is the header */ ++ if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) { ++ struct bh_response_header *hdr = ++ (struct bh_response_header *)dc->ddev->bh_fw_msg.msg; ++ ++ dc->expected_msg_size_from_fw = hdr->h.length; ++ dev_dbg(&ddev->dev, "expected_msg_size_from_fw = %d bytes read = %zd\n", ++ dc->expected_msg_size_from_fw, len); ++ ++ /* clear data from the past. */ ++ dc->bytes_rcvd_from_fw = 0; ++ } ++ ++ /* update number of bytes rcvd */ ++ dc->bytes_rcvd_from_fw += len; ++} ++ ++/** ++ * dal_get_client_by_squence_number - find the client interface which ++ * the received message is sent to ++ * ++ * @ddev : dal device ++ * ++ * Return: kernel space interface or user space interface ++ */ ++static enum dal_intf dal_get_client_by_squence_number(struct dal_device *ddev) ++{ ++ struct bh_response_header *head; ++ ++ if (!ddev->clients[DAL_INTF_KDI]) ++ return DAL_INTF_CDEV; ++ ++ head = (struct bh_response_header *)ddev->bh_fw_msg.msg; ++ ++ dev_dbg(&ddev->dev, "msg seq = %llu\n", head->seq); ++ ++ if (head->seq == ddev->clients[DAL_INTF_KDI]->seq) ++ return DAL_INTF_KDI; ++ ++ return DAL_INTF_CDEV; ++} ++ ++/** ++ * dal_recv_cb - callback to receive message from DAL FW over mei ++ * ++ * @cldev : mei client device ++ */ ++static void dal_recv_cb(struct mei_cl_device *cldev) ++{ ++ struct dal_device *ddev; ++ struct dal_client *dc; ++ enum dal_intf intf; ++ ssize_t len; ++ size_t ret; ++ bool is_unexpected_msg = false; ++ ++ ddev = 
mei_cldev_get_drvdata(cldev); ++ ++ /* ++ * read the msg from MEI ++ */ ++ len = mei_cldev_recv(cldev, ddev->bh_fw_msg.msg, DAL_MAX_BUFFER_SIZE); ++ if (len < 0) { ++ dev_err(&cldev->dev, "recv failed %zd\n", len); ++ return; ++ } ++ ++ /* ++ * lock to prevent read from MEI while writing to MEI and to ++ * deal with just one msg at the same time ++ */ ++ mutex_lock(&ddev->context_lock); ++ ++ /* save msg len */ ++ ddev->bh_fw_msg.len = len; ++ ++ /* set to which interface the msg should be sent */ ++ if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) { ++ intf = dal_get_client_by_squence_number(ddev); ++ dev_dbg(&ddev->dev, "recv_cb(): Client set by sequence number\n"); ++ dc = ddev->clients[intf]; ++ } else if (!ddev->current_read_client) { ++ intf = DAL_INTF_CDEV; ++ dev_dbg(&ddev->dev, "recv_cb(): EXTRA msg received - curr == NULL\n"); ++ dc = ddev->clients[intf]; ++ is_unexpected_msg = true; ++ } else { ++ dc = ddev->current_read_client; ++ dev_dbg(&ddev->dev, "recv_cb(): FRAGMENT msg received - curr != NULL\n"); ++ } ++ ++ /* save the current read client */ ++ ddev->current_read_client = dc; ++ /* In case of a client is not connected, dc might be NULL */ ++ if (!dc) ++ goto out; ++ ++ dev_dbg(&cldev->dev, "read client type %d data from mei client seq = %llu\n", ++ dc->intf, dc->seq); ++ ++ /* ++ * save new msg in queue, ++ * if the queue is full all new messages will be thrown ++ */ ++ ret = kfifo_in(&dc->read_queue, &ddev->bh_fw_msg.len, sizeof(len)); ++ ret += kfifo_in(&dc->read_queue, ddev->bh_fw_msg.msg, len); ++ if (ret < len + sizeof(len)) ++ dev_dbg(&ddev->dev, "queue is full - MSG THROWN\n"); ++ ++ dal_dc_update_read_state(dc, len); ++ ++ /* ++ * To clear current client we check if the whole msg received ++ * for the current client ++ */ ++ if (is_unexpected_msg || ++ dc->bytes_rcvd_from_fw == dc->expected_msg_size_from_fw) { ++ dev_dbg(&ddev->dev, "recv_cb(): setting CURRENT_READER to NULL\n"); ++ ddev->current_read_client = NULL; ++ } ++out: ++ /* 
wake up all clients waiting for read or write */ ++ if (wq_has_sleeper(&ddev->wq)) ++ wake_up_interruptible(&ddev->wq); ++ ++ mutex_unlock(&ddev->context_lock); ++} ++ ++/** ++ * dal_mei_enable - enable mei cldev ++ * ++ * @ddev: dal device ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int dal_mei_enable(struct dal_device *ddev) ++{ ++ int ret; ++ ++ ret = mei_cldev_enable(ddev->cldev); ++ if (ret < 0) { ++ dev_err(&ddev->cldev->dev, "mei_cldev_enable_device() failed with ret = %d\n", ++ ret); ++ return ret; ++ } ++ ++ /* register to mei bus callbacks */ ++ ret = mei_cldev_register_rx_cb(ddev->cldev, dal_recv_cb); ++ if (ret) { ++ dev_err(&ddev->cldev->dev, "mei_cldev_register_event_cb() failed ret = %d\n", ++ ret); ++ goto err; ++ } ++ ++ /* save pointer to the context in the device */ ++ mei_cldev_set_drvdata(ddev->cldev, ddev); ++ ++ return 0; ++err: ++ mei_cldev_disable(ddev->cldev); ++ return ret; ++} ++ ++/** ++ * dal_wait_for_write - wait until the dal client is the first writer ++ * in writers queue ++ * ++ * @ddev: dal device ++ * @dc: dal client ++ * ++ * Return: 0 on success ++ * -ERESTARTSYS when wait was interrupted ++ * -ENODEV when the device was removed ++ */ ++static int dal_wait_for_write(struct dal_device *ddev, struct dal_client *dc) ++{ ++ if (wait_event_interruptible(ddev->wq, ++ list_first_entry(&ddev->writers, ++ struct dal_client, ++ wrlink) == dc || ++ ddev->is_device_removed)) { ++ return -ERESTARTSYS; ++ } ++ ++ /* if the device was removed indicate that to the caller */ ++ if (ddev->is_device_removed) ++ return -ENODEV; ++ ++ return 0; ++} ++ ++/** ++ * dal_send_error_access_denied - put 'access denied' message ++ * into the client read queue. In-band error message. 
++ * ++ * @dc: dal client ++ * @cmd: rejected message header ++ * ++ * Return: 0 on success ++ * -ENOMEM when client read queue is full ++ * ++ * Locking: called under "ddev->write_lock" lock ++ */ ++static int dal_send_error_access_denied(struct dal_client *dc, const void *cmd) ++{ ++ struct dal_device *ddev = dc->ddev; ++ struct bh_response_header res; ++ size_t len; ++ int ret; ++ ++ mutex_lock(&ddev->context_lock); ++ ++ bh_prep_access_denied_response(cmd, &res); ++ len = sizeof(res); ++ ++ if (kfifo_in(&dc->read_queue, &len, sizeof(len)) != sizeof(len)) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ if (kfifo_in(&dc->read_queue, &res, len) != len) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ret = 0; ++ ++out: ++ mutex_unlock(&ddev->context_lock); ++ return ret; ++} ++ ++/** ++ * dal_is_kdi_msg - check if sequence is in kernel space sequence range ++ * ++ * Each interface (kernel space and user space) has different range of ++ * sequence number. This function checks if given number is in kernel space ++ * sequence range ++ * ++ * @hdr: command header ++ * ++ * Return: true when seq fits kernel space intf ++ * false when seq fits user space intf ++ */ ++static bool dal_is_kdi_msg(const struct bh_command_header *hdr) ++{ ++ return hdr->seq >= MSG_SEQ_START_NUMBER; ++} ++ ++/** ++ * dal_validate_seq - validate that message sequence fits client interface, ++ * prevent user space client to use kernel space sequence ++ * ++ * @hdr: command header ++ * @count: message size ++ * @ctx: context - dal client ++ * ++ * Return: 0 when sequence match ++ * -EPERM when user space client uses kernel space sequence ++ * ++ * Locking: called under "ddev->write_lock" lock ++ */ ++static int dal_validate_seq(const struct bh_command_header *hdr, ++ size_t count, void *ctx) ++{ ++ struct dal_client *dc = ctx; ++ ++ if (dc->intf != DAL_INTF_KDI && dal_is_kdi_msg(hdr)) ++ return -EPERM; ++ ++ return 0; ++} ++ ++/* ++ * dal_write_filter_tbl - filter functions to validate that the message ++ * 
is being sent is valid, and the user client ++ * has the permissions to send it ++ */ ++static const bh_filter_func dal_write_filter_tbl[] = { ++ dal_validate_seq, ++ NULL, ++}; ++ ++/** ++ * dal_write - write message to DAL FW over mei ++ * ++ * @dc: dal client ++ * @buf: the message. ++ * @count: message size ++ * @seq: message sequence (if client is kernel space client) ++ * ++ * Return: >=0 data length on success ++ * <0 on failure ++ */ ++ssize_t dal_write(struct dal_client *dc, const void *buf, size_t count, u64 seq) ++{ ++ struct dal_device *ddev = dc->ddev; ++ struct device *dev; ++ ssize_t wr; ++ ssize_t ret; ++ enum dal_intf intf = dc->intf; ++ ++ dev = &ddev->dev; ++ ++ dev_dbg(dev, "client interface %d\n", intf); ++ dal_dc_print(dev, dc); ++ ++ /* lock for adding new client that want to write to fifo */ ++ mutex_lock(&ddev->write_lock); ++ /* update client on latest msg seq number*/ ++ dc->seq = seq; ++ dev_dbg(dev, "current_write_client seq = %llu\n", dc->seq); ++ ++ /* put dc in the writers queue if not already set */ ++ if (list_first_entry_or_null(&ddev->writers, ++ struct dal_client, wrlink) != dc) { ++ /* adding client to write queue - this is the first fragment */ ++ const struct bh_command_header *hdr; ++ ++ hdr = bh_msg_cmd_hdr(buf, count); ++ if (!hdr) { ++ dev_dbg(dev, "expected cmd hdr at first fragment\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ret = bh_filter_hdr(hdr, count, dc, dal_write_filter_tbl); ++ if (ret == -EPERM) { ++ ret = dal_send_error_access_denied(dc, buf); ++ ret = ret ? 
ret : count; ++ } ++ if (ret) ++ goto out; ++ ++ dc->bytes_sent_to_fw = 0; ++ dc->expected_msg_size_to_fw = hdr->h.length; ++ ++ list_add_tail(&dc->wrlink, &ddev->writers); ++ } ++ ++ /* wait for current writer to finish his write session */ ++ mutex_unlock(&ddev->write_lock); ++ ret = dal_wait_for_write(ddev, dc); ++ mutex_lock(&ddev->write_lock); ++ if (ret < 0) ++ goto out; ++ ++ dev_dbg(dev, "before mei_cldev_send - client type %d\n", intf); ++ ++ /* send msg via MEI */ ++ wr = mei_cldev_send(ddev->cldev, (void *)buf, count); ++ if (wr != count) { ++ /* ENODEV can be issued upon internal reset */ ++ if (wr != -ENODEV) { ++ dev_err(dev, "mei_cl_send() failed, write_bytes != count (%zd != %zu)\n", ++ wr, count); ++ ret = -EFAULT; ++ goto out; ++ } ++ /* if DAL FW client is disconnected, try to reconnect */ ++ dev_dbg(dev, "try to reconnect to DAL FW cl\n"); ++ ret = mei_cldev_disable(ddev->cldev); ++ if (ret < 0) { ++ dev_err(&ddev->cldev->dev, "failed to disable mei cl [%zd]\n", ++ ret); ++ goto out; ++ } ++ ret = dal_mei_enable(ddev); ++ if (ret < 0) ++ dev_err(&ddev->cldev->dev, "failed to reconnect to DAL FW client [%zd]\n", ++ ret); ++ else ++ ret = -EAGAIN; ++ ++ goto out; ++ } ++ ++ dev_dbg(dev, "wrote %zu bytes to fw - client type %d\n", wr, intf); ++ ++ /* update client byte sent */ ++ dc->bytes_sent_to_fw += count; ++ ret = wr; ++ ++ if (dc->bytes_sent_to_fw != dc->expected_msg_size_to_fw) { ++ dev_dbg(dev, "expecting to write more data to DAL FW - client type %d\n", ++ intf); ++ goto write_more; ++ } ++out: ++ /* remove current dc from the queue */ ++ list_del_init(&dc->wrlink); ++ if (list_empty(&ddev->writers)) ++ wake_up_interruptible(&ddev->wq); ++ ++write_more: ++ mutex_unlock(&ddev->write_lock); ++ return ret; ++} ++ ++/** ++ * dal_wait_for_read - wait until the client (dc) will have data ++ * in his read queue ++ * ++ * @dc: dal client ++ * ++ * Return: 0 on success ++ * -ENODEV when the device was removed ++ * -ERESTARTSYS: when interrupted. 
++ */ ++int dal_wait_for_read(struct dal_client *dc) ++{ ++ struct dal_device *ddev = dc->ddev; ++ struct device *dev = &ddev->dev; ++ int ret; ++ ++ dal_dc_print(dev, dc); ++ ++ dev_dbg(dev, "%s - client type %d kfifo status %d\n", __func__, ++ dc->intf, kfifo_is_empty(&dc->read_queue)); ++ ++ /* wait until there is data in the read_queue */ ++ ret = wait_event_interruptible(ddev->wq, ++ !kfifo_is_empty(&dc->read_queue) || ++ ddev->is_device_removed); ++ ++ dev_dbg(dev, "%s - client type %d status %d\n", __func__, ++ dc->intf, ret); ++ ++ /* FIXME: use reference counter */ ++ if (ddev->is_device_removed) { ++ dev_dbg(dev, "woke up, device was removed\n"); ++ return -ENODEV; ++ } ++ ++ return ret; ++} ++ ++/** ++ * dal_dc_destroy - destroy dal client ++ * ++ * @ddev: dal device ++ * @intf: device interface ++ * ++ * Locking: called under "ddev->context_lock" lock ++ */ ++void dal_dc_destroy(struct dal_device *ddev, enum dal_intf intf) ++{ ++ struct dal_client *dc; ++ ++ dc = ddev->clients[intf]; ++ if (!dc) ++ return; ++ ++ kfifo_free(&dc->read_queue); ++ kfree(dc); ++ ddev->clients[intf] = NULL; ++} ++ ++/** ++ * dal_dc_setup - initialize dal client ++ * ++ * @ddev: dal device ++ * @intf: device interface ++ * ++ * Return: 0 on success ++ * -EINVAL when client is already initialized ++ * -ENOMEM on memory allocation failure ++ */ ++int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf) ++{ ++ int ret; ++ struct dal_client *dc; ++ size_t readq_sz; ++ ++ if (ddev->clients[intf]) { ++ dev_err(&ddev->dev, "client already set\n"); ++ return -EINVAL; ++ } ++ ++ dc = kzalloc(sizeof(*dc), GFP_KERNEL); ++ if (!dc) ++ return -ENOMEM; ++ ++ /* each buffer contains data and length */ ++ readq_sz = (DAL_MAX_BUFFER_SIZE + sizeof(ddev->bh_fw_msg.len)) * ++ DAL_BUFFERS_PER_CLIENT; ++ ret = kfifo_alloc(&dc->read_queue, readq_sz, GFP_KERNEL); ++ if (ret) { ++ kfree(dc); ++ return ret; ++ } ++ ++ dc->intf = intf; ++ dc->ddev = ddev; ++ INIT_LIST_HEAD(&dc->wrlink); ++ 
ddev->clients[intf] = dc; ++ return 0; ++} ++ ++/** ++ * dal_dev_match - match function to find dal device ++ * ++ * Used to get dal device from dal_class by device id ++ * ++ * @dev: device structure ++ * @data: the device id ++ * ++ * Return: 1 on match ++ * 0 on mismatch ++ */ ++static int dal_dev_match(struct device *dev, const void *data) ++{ ++ struct dal_device *ddev; ++ const enum dal_dev_type *device_id = ++ (enum dal_dev_type *)data; ++ ++ ddev = container_of(dev, struct dal_device, dev); ++ ++ return ddev->device_id == *device_id; ++} ++ ++/** ++ * dal_find_dev - get dal device from dal_class by device id ++ * ++ * @device_id: device id ++ * ++ * Return: pointer to the requested device ++ * NULL if the device wasn't found ++ */ ++struct device *dal_find_dev(enum dal_dev_type device_id) ++{ ++ return class_find_device(dal_class, NULL, &device_id, dal_dev_match); ++} ++ ++/** ++ * dal_remove - dal remove callback in mei_cl_driver ++ * ++ * @cldev: mei client device ++ * ++ * Return: 0 ++ */ ++static int dal_remove(struct mei_cl_device *cldev) ++{ ++ struct dal_device *ddev = mei_cldev_get_drvdata(cldev); ++ ++ if (!ddev) ++ return 0; ++ ++ ddev->is_device_removed = 1; ++ /* make sure the above is set */ ++ smp_mb(); ++ /* wakeup write waiters so we can unload */ ++ if (waitqueue_active(&ddev->wq)) ++ wake_up_interruptible(&ddev->wq); ++ ++ mei_cldev_set_drvdata(cldev, NULL); ++ ++ device_unregister(&ddev->dev); ++ ++ mei_cldev_disable(cldev); ++ ++ return 0; ++} ++ ++/** ++ * dal_device_release - dal release callback in dev structure ++ * ++ * @dev: device structure ++ */ ++static void dal_device_release(struct device *dev) ++{ ++ struct dal_device *ddev = to_dal_device(dev); ++ ++ kfree(ddev->bh_fw_msg.msg); ++ kfree(ddev); ++} ++ ++/** ++ * dal_probe - dal probe callback in mei_cl_driver ++ * ++ * @cldev: mei client device ++ * @id: mei client device id ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int dal_probe(struct mei_cl_device 
*cldev, ++ const struct mei_cl_device_id *id) ++{ ++ struct dal_device *ddev; ++ struct device *pdev = &cldev->dev; ++ int ret; ++ ++ ddev = kzalloc(sizeof(*ddev), GFP_KERNEL); ++ if (!ddev) ++ return -ENOMEM; ++ ++ /* initialize the mutex and wait queue */ ++ mutex_init(&ddev->context_lock); ++ mutex_init(&ddev->write_lock); ++ init_waitqueue_head(&ddev->wq); ++ INIT_LIST_HEAD(&ddev->writers); ++ ddev->cldev = cldev; ++ ddev->device_id = id->driver_info; ++ ++ ddev->dev.parent = pdev; ++ ddev->dev.class = dal_class; ++ ddev->dev.release = dal_device_release; ++ dev_set_name(&ddev->dev, "dal%d", ddev->device_id); ++ ++ ret = device_register(&ddev->dev); ++ if (ret) { ++ dev_err(pdev, "unable to register device\n"); ++ goto err; ++ } ++ ++ ddev->bh_fw_msg.msg = kzalloc(DAL_MAX_BUFFER_SIZE, GFP_KERNEL); ++ if (!ddev->bh_fw_msg.msg) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ ret = dal_mei_enable(ddev); ++ if (ret < 0) ++ goto err; ++ ++ return 0; ++ ++err: ++ device_unregister(&ddev->dev); ++ return ret; ++} ++ ++/* DAL FW HECI client GUIDs */ ++#define IVM_UUID UUID_LE(0x3c4852d6, 0xd47b, 0x4f46, \ ++ 0xb0, 0x5e, 0xb5, 0xed, 0xc1, 0xaa, 0x44, 0x0e) ++#define SDM_UUID UUID_LE(0xdba4d603, 0xd7ed, 0x4931, \ ++ 0x88, 0x23, 0x17, 0xad, 0x58, 0x57, 0x05, 0xd5) ++#define RTM_UUID UUID_LE(0x5565a099, 0x7fe2, 0x45c1, \ ++ 0xa2, 0x2b, 0xd7, 0xe9, 0xdf, 0xea, 0x9a, 0x2e) ++ ++#define DAL_DEV_ID(__uuid, __device_type) \ ++ {.uuid = __uuid, \ ++ .version = MEI_CL_VERSION_ANY, \ ++ .driver_info = __device_type} ++ ++/* ++ * dal_device_id - ids of dal FW devices, ++ * for all 3 dal FW clients (IVM, SDM and RTM) ++ */ ++static const struct mei_cl_device_id dal_device_id[] = { ++ DAL_DEV_ID(IVM_UUID, DAL_MEI_DEVICE_IVM), ++ DAL_DEV_ID(SDM_UUID, DAL_MEI_DEVICE_SDM), ++ DAL_DEV_ID(RTM_UUID, DAL_MEI_DEVICE_RTM), ++ /* required last entry */ ++ { } ++}; ++MODULE_DEVICE_TABLE(mei, dal_device_id); ++ ++static struct mei_cl_driver dal_driver = { ++ .id_table = dal_device_id, ++ .name = 
KBUILD_MODNAME, ++ ++ .probe = dal_probe, ++ .remove = dal_remove, ++}; + + /** + * mei_dal_exit - module exit function + */ + static void __exit mei_dal_exit(void) + { ++ mei_cldev_driver_unregister(&dal_driver); ++ + class_destroy(dal_class); + } + +@@ -34,13 +750,25 @@ static void __exit mei_dal_exit(void) + */ + static int __init mei_dal_init(void) + { ++ int ret; ++ + dal_class = class_create(THIS_MODULE, "dal"); + if (IS_ERR(dal_class)) { + pr_err("couldn't create class\n"); + return PTR_ERR(dal_class); + } + ++ ret = mei_cldev_driver_register(&dal_driver); ++ if (ret < 0) { ++ pr_err("mei_cl_driver_register failed with status = %d\n", ret); ++ goto err_class; ++ } ++ + return 0; ++ ++err_class: ++ class_destroy(dal_class); ++ return ret; + } + + module_init(mei_dal_init); +diff --git a/drivers/misc/mei/dal/dal_dev.h b/drivers/misc/mei/dal/dal_dev.h +new file mode 100644 +index 000000000000..30779c91b854 +--- /dev/null ++++ b/drivers/misc/mei/dal/dal_dev.h +@@ -0,0 +1,149 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++ ++#ifndef _DAL_KDI_H_ ++#define _DAL_KDI_H_ ++ ++#include ++#include ++#include ++#include ++ ++#define DAL_MAX_BUFFER_SIZE 4096 ++#define DAL_BUFFERS_PER_CLIENT 10 ++ ++#define DAL_CLIENTS_PER_DEVICE 2 ++ ++extern struct class *dal_class; ++ ++/** ++ * enum dal_intf - dal interface type ++ * ++ * @DAL_INTF_KDI: (kdi) kernel space interface ++ * @DAL_INTF_CDEV: char device interface ++ */ ++enum dal_intf { ++ DAL_INTF_KDI, ++ DAL_INTF_CDEV, ++}; ++ ++/** ++ * enum dal_dev_type - devices that are exposed to userspace ++ * ++ * @DAL_MEI_DEVICE_IVM: IVM - Intel/Issuer Virtual Machine ++ * @DAL_MEI_DEVICE_SDM: SDM - Security Domain Manager ++ * @DAL_MEI_DEVICE_RTM: RTM - Run Time Manager (Launcher) ++ * ++ * @DAL_MEI_DEVICE_MAX: max dal device type ++ */ ++enum dal_dev_type { ++ DAL_MEI_DEVICE_IVM, ++ DAL_MEI_DEVICE_SDM, ++ DAL_MEI_DEVICE_RTM, ++ ++ DAL_MEI_DEVICE_MAX ++}; ++ ++/** ++ * struct dal_client - host client ++ * ++ * @ddev: dal parent device ++ * @wrlink: link in the writers list ++ * @read_queue: queue of received messages from DAL FW ++ * @intf: client interface - user space or kernel space ++ * ++ * @seq: the sequence number of the last message sent (in kernel space API only) ++ * When a message is received from DAL FW, we use this sequence number ++ * to decide which client should get the message. If the sequence ++ * number of the message is equals to the kernel space sequence number, ++ * the kernel space client should get the message. ++ * Otherwise the user space client will get it. 
++ * @expected_msg_size_from_fw: the expected msg size from DALFW ++ * @expected_msg_size_to_fw: the expected msg size that will be sent to DAL FW ++ * @bytes_rcvd_from_fw: number of bytes that were received from DAL FW ++ * @bytes_sent_to_fw: number of bytes that were sent to DAL FW ++ */ ++struct dal_client { ++ struct dal_device *ddev; ++ struct list_head wrlink; ++ struct kfifo read_queue; ++ enum dal_intf intf; ++ ++ u64 seq; ++ u32 expected_msg_size_from_fw; ++ u32 expected_msg_size_to_fw; ++ u32 bytes_rcvd_from_fw; ++ u32 bytes_sent_to_fw; ++}; ++ ++/** ++ * struct dal_bh_msg - msg received from DAL FW. ++ * ++ * @len: message length ++ * @msg: message buffer ++ */ ++struct dal_bh_msg { ++ size_t len; ++ char *msg; ++}; ++ ++/** ++ * struct dal_device - DAL private device struct. ++ * each DAL device has a context (i.e IVM, SDM, RTM) ++ * ++ * @dev: device on a bus ++ * @cdev: character device ++ * @status: dal device status ++ * ++ * @context_lock: big device lock ++ * @write_lock: lock over write list ++ * @wq: dal clients wait queue. 
When client wants to send or receive message, ++ * he waits in this queue until he is ready ++ * @writers: write pending list ++ * @clients: clients on this device (userspace and kernel space) ++ * @bh_fw_msg: message which was received from DAL FW ++ * @current_read_client: current reading client (which receives message from ++ * DAL FW) ++ * ++ * @cldev: the MEI CL device which corresponds to a single DAL FW HECI client ++ * ++ * @is_device_removed: device removed flag ++ * ++ * @device_id: DAL device type ++ */ ++struct dal_device { ++ struct device dev; ++ struct cdev cdev; ++#define DAL_DEV_OPENED 0 ++ unsigned long status; ++ ++ struct mutex context_lock; /* device lock */ ++ struct mutex write_lock; /* write lock */ ++ wait_queue_head_t wq; ++ struct list_head writers; ++ struct dal_client *clients[DAL_CLIENTS_PER_DEVICE]; ++ struct dal_bh_msg bh_fw_msg; ++ struct dal_client *current_read_client; ++ ++ struct mei_cl_device *cldev; ++ ++ unsigned int is_device_removed :1; ++ ++ unsigned int device_id; ++}; ++ ++#define to_dal_device(d) container_of(d, struct dal_device, dev) ++ ++ssize_t dal_write(struct dal_client *dc, ++ const void *buf, size_t count, u64 seq); ++int dal_wait_for_read(struct dal_client *dc); ++ ++struct device *dal_find_dev(enum dal_dev_type device_id); ++ ++void dal_dc_print(struct device *dev, struct dal_client *dc); ++int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf); ++void dal_dc_destroy(struct dal_device *ddev, enum dal_intf intf); ++ ++#endif /* _DAL_KDI_H_ */ +-- +2.17.1 + diff --git a/patches/0057-net-stmmac-add-Frame-Preemption-mmc-statistic.connectivity b/patches/0057-net-stmmac-add-Frame-Preemption-mmc-statistic.connectivity new file mode 100644 index 0000000000..6c99c9f8f2 --- /dev/null +++ b/patches/0057-net-stmmac-add-Frame-Preemption-mmc-statistic.connectivity @@ -0,0 +1,347 @@ +From 88400461ae1dc8eee0dbfc1154bb650bff146ceb Mon Sep 17 00:00:00 2001 +From: Voon Weifeng +Date: Wed, 21 Aug 2019 15:18:17 +0800 
+Subject: [PATCH 057/108] net: stmmac: add Frame Preemption mmc statistics + reporting + +The following MMC statistics are added:- +a) TXFFC (Tx FPE Fragment Counter) + = Number of additional mPackets transmitted due to preemption. +b) TXHRC (Tx Hold Request Counter) + = Number of hold request is given to MAC. +c) PAEC (Rx Packet Assembly Error Counter) + = Number of MAC frames with reassembly errors on the Rx due to + mismatch in the Fragment Count value. +d) PSEC (Rx Packet SMD Error Counter) + = Number of MAC frames rejected due to incorrect SMD value or + unexpected SMD-C received when there is no preceding preempted + frame. +e) PAOC (Rx Packet Assembly OK Counter) + = Number of MAC frames that were successfully reassembled. +f) FFC (RX FPE Fragment Counter) + = Number of additional mPackets received due to preemption. + +Thanks to Ong Boon Leong who implemented the handling of FPE MMC +interrupt handler and hook into ethtool. + +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/dwmac4_core.c | 1 + + drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 25 +++++ + .../net/ethernet/stmicro/stmmac/dwmac5_tsn.c | 97 +++++++++++++++++-- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 12 +++ + .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 12 ++- + .../net/ethernet/stmicro/stmmac/stmmac_tsn.c | 11 +++ + .../net/ethernet/stmicro/stmmac/stmmac_tsn.h | 1 + + 7 files changed, 150 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index 16630df356c5..3a65c5b906c0 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -1195,6 +1195,7 @@ const struct stmmac_ops dwmac510_ops = { + .set_est_gcrr_times = tsn_est_gcrr_times_set, + .get_est_gcc = tsn_est_gcc_get, + .est_irq_status = tsn_est_irq_status, ++ .update_tsn_mmc_stat = tsn_mmc_stat_update, + .dump_tsn_mmc = 
tsn_mmc_dump, + .cbs_recal_idleslope = tsn_cbs_recal_idleslope, + .fpe_set_txqpec = tsn_fpe_set_txqpec, +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +index 1df32ac5d7df..f448cd26e8ab 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +@@ -196,6 +196,31 @@ + #define MTL_FPE_ADVANCE_RADV_SHIFT 16 + #define MTL_FPE_ADVANCE_HADV GENMASK(15, 0) /* Hold Advance */ + ++/* MMC related registers for FPE */ ++#define MMC_FPE_TX_INTR 0x8a0 /* MMC FPE Tx Int */ ++#define MMC_FPE_TX_INTR_MASK 0x8a4 /* MMC FPE Tx Int Mask */ ++#define MMC_FPE_TX_INTR_MASK_HRCIM BIT(1) /* Hold Request cnt Int En */ ++#define MMC_FPE_TX_INTR_MASK_FCIM BIT(0) /* Tx Fragments cnt Int En */ ++#define MMC_FPE_TX_INTR_MASK_DEFAULT (MMC_FPE_TX_INTR_MASK_HRCIM |\ ++ MMC_FPE_TX_INTR_MASK_FCIM) ++#define MMC_TX_FPE_FRAGMENT 0x8a8 /* Tx FPE Fragment cnt Int En */ ++#define MMC_TX_HOLD_REQ 0x8ac /* Tx Hold Request cnt Int En */ ++ ++#define MMC_FPE_RX_INTR 0x8c0 /* MMC FPE Rx Int */ ++#define MMC_FPE_RX_INTR_MASK 0x8c4 /* MMC FPE Rx Int Mask */ ++#define MMC_FPE_RX_INTR_MASK_FCIM BIT(3) /* Rx Fragments cnt Int En */ ++#define MMC_FPE_RX_INTR_MASK_PAOCIM BIT(2) /* Rx Assembly OK Int En */ ++#define MMC_FPE_RX_INTR_MASK_PSECIM BIT(1) /* Rx SMD Error cnt Int En */ ++#define MMC_FPE_RX_INTR_MASK_PAECIM BIT(0) /* Rx Assembly Err cnt Int En */ ++#define MMC_FPE_RX_INTR_MASK_DEFAULT (MMC_FPE_RX_INTR_MASK_FCIM |\ ++ MMC_FPE_RX_INTR_MASK_PAOCIM |\ ++ MMC_FPE_RX_INTR_MASK_PSECIM |\ ++ MMC_FPE_RX_INTR_MASK_PAECIM) ++#define MMC_RX_PACKET_ASSEMBLY_ERR 0x8c8 /* Rx Pkt Assembly Error */ ++#define MMC_RX_PACKET_SMD_ERR 0x8cc /* Rx frame with SMD errors */ ++#define MMC_RX_PACKET_ASSEMBLY_OK 0x8d0 /* Rx Pkt Assembly OK */ ++#define MMC_RX_FPE_FRAGMENT 0x8d4 /* Rx Fragments count in FPE */ ++ + /* FPE Global defines */ + #define FPE_PMAC_BIT BIT(0) /* TxQ0 is always preemptible */ + #define FPE_AFSZ_MAX 
0x3 /* Max AFSZ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +index 192af21d6815..60d36b7a6daf 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5_tsn.c +@@ -14,6 +14,12 @@ enum tsn_mmc_idx { + EST_MMC_HLBF = 2, + EST_MMC_HLBS = 3, + EST_MMC_CGCE = 4, ++ FPE_MMC_TXFFC = 5, ++ FPE_MMC_TXHRC = 6, ++ FPE_MMC_PAEC = 7, ++ FPE_MMC_PSEC = 8, ++ FPE_MMC_PAOC = 9, ++ FPE_MMC_FFC = 10, + }; + + const struct tsn_mmc_desc dwmac5_tsn_mmc_desc[STMMAC_TSN_STAT_SIZE] = { +@@ -22,12 +28,12 @@ const struct tsn_mmc_desc dwmac5_tsn_mmc_desc[STMMAC_TSN_STAT_SIZE] = { + { true, "HLBF" }, /* Head-of-Line Blocking due to Frame Size */ + { true, "HLBS" }, /* Head-of-Line Blocking due to Scheduling */ + { true, "CGCE" }, /* Constant Gate Control Error */ +- { false, "RESV" }, +- { false, "RESV" }, +- { false, "RESV" }, +- { false, "RESV" }, +- { false, "RESV" }, +- { false, "RESV" }, ++ { true, "TXFFC" }, /* Tx FPE Fragment Counter */ ++ { true, "TXHRC" }, /* Tx Hold Request Counter */ ++ { true, "PAEC" }, /* Rx Packet Assembly Error Counter */ ++ { true, "PSEC" }, /* Rx Packet SMD Error Counter */ ++ { true, "PAOC" }, /* Rx Packet Assembly OK Counter */ ++ { true, "FFC" }, /* Rx FPE Fragment Counter */ + { false, "RESV" }, + { false, "RESV" }, + { false, "RESV" }, +@@ -106,6 +112,13 @@ static void dwmac5_hw_setup(void __iomem *ioaddr, enum tsn_feat_id featid, + value &= ~GMAC_RXQCTRL_FPRQ_MASK; + value |= fprq << GMAC_RXQCTRL_FPRQ_SHIFT; + writel(value, ioaddr + GMAC_RXQ_CTRL1); ++ ++ /* Unmask all FPE Tx & Rx MMC interrupts */ ++ value = (u32)~MMC_FPE_TX_INTR_MASK_DEFAULT; ++ writel(value, ioaddr + MMC_FPE_TX_INTR_MASK); ++ value = (u32)~MMC_FPE_RX_INTR_MASK_DEFAULT; ++ writel(value, ioaddr + MMC_FPE_RX_INTR_MASK); ++ break; + default: + return; + }; +@@ -551,6 +564,76 @@ void dwmac5_fpe_send_mpacket(void *ioaddr, enum mpacket_type type) + writel(value, 
ioaddr + MAC_FPE_CTRL_STS); + } + ++static void dwmac5_fpe_mmc_irq_status(void __iomem *ioaddr, ++ struct net_device *dev) ++{ ++ u32 tx_stat; ++ u32 rx_stat; ++ u32 value; ++ ++ tx_stat = readl(ioaddr + MMC_FPE_TX_INTR); ++ rx_stat = readl(ioaddr + MMC_FPE_RX_INTR); ++ ++ if (tx_stat & MMC_FPE_TX_INTR_MASK_DEFAULT) { ++ /* Read TXHRC to clear HRCIS bit */ ++ if ((tx_stat & MMC_FPE_TX_INTR_MASK_HRCIM) == ++ MMC_FPE_TX_INTR_MASK_HRCIM) { ++ value = readl(ioaddr + MMC_TX_HOLD_REQ); ++ netdev_info(dev, "FPE IRQ: TXHRC = %d\n", value); ++ } ++ ++ /* Read TXFFC to clear FCIS bit */ ++ if ((tx_stat & MMC_FPE_TX_INTR_MASK_FCIM) == ++ MMC_FPE_TX_INTR_MASK_FCIM) { ++ value = readl(ioaddr + MMC_TX_FPE_FRAGMENT); ++ netdev_info(dev, "FPE IRQ: TXFFC = %d\n", value); ++ } ++ } ++ ++ if (rx_stat & MMC_FPE_RX_INTR_MASK_DEFAULT) { ++ /* Read PAEC to clear PAECIS bit */ ++ if ((rx_stat & MMC_FPE_RX_INTR_MASK_PAECIM) == ++ MMC_FPE_RX_INTR_MASK_PAECIM) { ++ value = readl(ioaddr + MMC_RX_PACKET_ASSEMBLY_ERR); ++ netdev_info(dev, "FPE IRQ: PAEC = %d\n", value); ++ } ++ ++ /* Read PSEC to clear PSECIS bit */ ++ if ((rx_stat & MMC_FPE_RX_INTR_MASK_PSECIM) == ++ MMC_FPE_RX_INTR_MASK_PSECIM) { ++ value = readl(ioaddr + MMC_RX_PACKET_SMD_ERR); ++ netdev_info(dev, "FPE IRQ: PSEC = %d\n", value); ++ } ++ ++ /* Read PAOC to clear PAOCIS bit */ ++ if ((rx_stat & MMC_FPE_RX_INTR_MASK_PAOCIM) == ++ MMC_FPE_RX_INTR_MASK_PAOCIM) { ++ value = readl(ioaddr + MMC_RX_PACKET_ASSEMBLY_OK); ++ netdev_info(dev, "FPE IRQ: PAOC = %d\n", value); ++ } ++ ++ /* Read FFC to clear FCIS bit */ ++ if ((rx_stat & MMC_FPE_RX_INTR_MASK_FCIM) == ++ MMC_FPE_RX_INTR_MASK_FCIM) { ++ value = readl(ioaddr + MMC_RX_FPE_FRAGMENT); ++ netdev_info(dev, "FPE IRQ: RXFFC = %d\n", value); ++ } ++ } ++} ++ ++static void dwmac5_fpe_update_mmc_stat(void __iomem *ioaddr, ++ struct tsn_mmc_stat *mmc_stat) ++{ ++ mmc_stat->count[FPE_MMC_TXHRC] = readl(ioaddr + MMC_TX_HOLD_REQ); ++ mmc_stat->count[FPE_MMC_TXFFC] = readl(ioaddr + 
MMC_TX_FPE_FRAGMENT); ++ mmc_stat->count[FPE_MMC_PAEC] = readl(ioaddr + ++ MMC_RX_PACKET_ASSEMBLY_ERR); ++ mmc_stat->count[FPE_MMC_PSEC] = readl(ioaddr + MMC_RX_PACKET_SMD_ERR); ++ mmc_stat->count[FPE_MMC_PAOC] = readl(ioaddr + ++ MMC_RX_PACKET_ASSEMBLY_OK); ++ mmc_stat->count[FPE_MMC_FFC] = readl(ioaddr + MMC_RX_FPE_FRAGMENT); ++} ++ + static void dwmac5_tbs_get_max(u32 *leos_max, + u32 *legos_max, + u32 *ftos_max, +@@ -692,6 +775,8 @@ const struct tsnif_ops dwmac510_tsnif_ops = { + .fpe_set_radv = dwmac5_fpe_set_radv, + .fpe_irq_status = dwmac5_fpe_irq_status, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, ++ .fpe_update_mmc_stat = dwmac5_fpe_update_mmc_stat, ++ .fpe_mmc_irq_status = dwmac5_fpe_mmc_irq_status, + .tbs_get_max = dwmac5_tbs_get_max, + .tbs_set_estm = dwmac5_tbs_set_estm, + .tbs_set_leos = dwmac5_tbs_set_leos, +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index a1ef1f0e1322..f10143585c43 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -437,6 +437,8 @@ struct stmmac_ops { + struct est_gc_config **gcc); + void (*est_irq_status)(struct mac_device_info *hw, + struct net_device *dev); ++ void (*update_tsn_mmc_stat)(struct mac_device_info *hw, ++ struct net_device *dev); + int (*dump_tsn_mmc)(struct mac_device_info *hw, int index, + unsigned long *count, const char **desc); + int (*cbs_recal_idleslope)(struct mac_device_info *hw, +@@ -586,6 +588,8 @@ struct stmmac_ops { + stmmac_do_callback(__priv, mac, get_est_gcc, __args) + #define stmmac_est_irq_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, est_irq_status, __args) ++#define stmmac_update_tsn_mmc_stat(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, mac, update_tsn_mmc_stat, __args) + #define stmmac_dump_tsn_mmc(__priv, __args...) \ + stmmac_do_callback(__priv, mac, dump_tsn_mmc, __args) + #define stmmac_cbs_recal_idleslope(__priv, __args...) 
\ +@@ -819,6 +823,10 @@ struct tsnif_ops { + void (*fpe_send_mpacket)(void *ioaddr, enum mpacket_type type); + void (*fpe_irq_status)(void *ioaddr, struct net_device *dev, + enum fpe_event *fpe_event); ++ void (*fpe_mmc_irq_status)(void __iomem *ioaddr, ++ struct net_device *dev); ++ void (*fpe_update_mmc_stat)(void __iomem *ioaddr, ++ struct tsn_mmc_stat *mmc_stat); + /* Time-Based Scheduling (TBS) */ + void (*tbs_get_max)(u32 *leos_max, u32 *legos_max, + u32 *ftos_max, u32 *fgos_max); +@@ -893,6 +901,10 @@ struct tsnif_ops { + tsnif_do_void_callback(__hw, fpe_send_mpacket, __args) + #define tsnif_fpe_irq_status(__hw, __args...) \ + tsnif_do_void_callback(__hw, fpe_irq_status, __args) ++#define tsnif_fpe_mmc_irq_status(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_mmc_irq_status, __args) ++#define tsnif_fpe_update_mmc_stat(__hw, __args...) \ ++ tsnif_do_void_callback(__hw, fpe_update_mmc_stat, __args) + #define tsnif_tbs_get_max(__hw, __args...) \ + tsnif_do_void_callback(__hw, tbs_get_max, __args) + #define tsnif_tbs_set_estm(__hw, __args...) 
\ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index 43f506543314..c48a77f6508c 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -489,7 +489,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, + data[j++] = count; + } + } +- if (priv->hw->tsn_info.cap.est_support) { ++ if (priv->hw->tsn_info.cap.est_support || ++ priv->hw->tsn_info.cap.fpe_support) { ++ /* Update TSN MMC stats that are not refreshed in interrupt */ ++ stmmac_update_tsn_mmc_stat(priv, priv->hw, dev); ++ + for (i = 0; i < STMMAC_TSN_STAT_SIZE; i++) { + if (!stmmac_dump_tsn_mmc(priv, + priv->hw, i, +@@ -553,7 +557,8 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset) + + len += safety_len; + } +- if (priv->hw->tsn_info.cap.est_support) { ++ if (priv->hw->tsn_info.cap.est_support || ++ priv->hw->tsn_info.cap.fpe_support) { + for (i = 0; i < STMMAC_TSN_STAT_SIZE; i++) { + if (!stmmac_dump_tsn_mmc(priv, + priv->hw, i, +@@ -591,7 +596,8 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) + } + } + } +- if (priv->hw->tsn_info.cap.est_support) { ++ if (priv->hw->tsn_info.cap.est_support || ++ priv->hw->tsn_info.cap.fpe_support) { + for (i = 0; i < STMMAC_TSN_STAT_SIZE; i++) { + const char *desc; + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +index 9b7ad371d4ba..5398b7afe025 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.c +@@ -1097,6 +1097,16 @@ void tsn_est_irq_status(struct mac_device_info *hw, struct net_device *dev) + info->cap.txqcnt); + } + ++void tsn_mmc_stat_update(struct mac_device_info *hw, struct net_device *dev) ++{ ++ struct tsnif_info *info = &hw->tsn_info; ++ void __iomem *ioaddr = hw->pcsr; ++ ++ if (tsn_has_feat(hw, dev, 
TSN_FEAT_ID_FPE)) { ++ tsnif_fpe_update_mmc_stat(hw, ioaddr, &info->mmc_stat); ++ } ++} ++ + int tsn_mmc_dump(struct mac_device_info *hw, + int index, unsigned long *count, const char **desc) + { +@@ -1312,6 +1322,7 @@ void tsn_fpe_irq_status(struct mac_device_info *hw, struct net_device *dev) + lp_state = &info->fpe_cfg.lp_fpe_state; + enable = &info->fpe_cfg.enable; + ++ tsnif_fpe_mmc_irq_status(hw, ioaddr, dev); + tsnif_fpe_irq_status(hw, ioaddr, dev, event); + + if (*event == FPE_EVENT_UNKNOWN || !*enable) +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +index e0770d6b6c7f..ea64d3b2bfc0 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tsn.h +@@ -202,6 +202,7 @@ int tsn_est_gcrr_times_set(struct mac_device_info *hw, + int tsn_est_gcc_get(struct mac_device_info *hw, struct net_device *dev, + struct est_gc_config **gcc); + void tsn_est_irq_status(struct mac_device_info *hw, struct net_device *dev); ++void tsn_mmc_stat_update(struct mac_device_info *hw, struct net_device *dev); + int tsn_mmc_dump(struct mac_device_info *hw, + int index, unsigned long *count, const char **desc); + int tsn_cbs_recal_idleslope(struct mac_device_info *hw, struct net_device *dev, +-- +2.17.1 + diff --git a/patches/0057-register-suspend-callback.trusty b/patches/0057-register-suspend-callback.trusty new file mode 100644 index 0000000000..3619ded98e --- /dev/null +++ b/patches/0057-register-suspend-callback.trusty @@ -0,0 +1,98 @@ +From 70c2292b728327f99ae8240f34afd18b5901191b Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Tue, 17 Jul 2018 15:01:38 +0800 +Subject: [PATCH 57/63] register suspend callback + +Save secure world context by the hyercall + +Change-Id: I21ad1569c12f9b8dda66ab47beab273d4b3791cb +Tracked-On: OAM-67174 +Signed-off-by: Zhang, Qi +--- + drivers/trusty/trusty.c | 42 +++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 40 
insertions(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 8f80f9b84772..7bff133a4610 100755 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -27,7 +27,8 @@ + #include + + #define EVMM_SMC_HC_ID 0x74727500 +-#define ACRN_SMC_HC_ID 0x80000071 ++#define ACRN_HC_SWITCH_WORLD 0x80000071 ++#define ACRN_HC_SAVE_SWORLD_CONTEXT 0x80000072 + + struct trusty_state; + +@@ -70,7 +71,7 @@ static inline ulong smc_evmm(ulong r0, ulong r1, ulong r2, ulong r3) + + static inline ulong smc_acrn(ulong r0, ulong r1, ulong r2, ulong r3) + { +- register unsigned long smc_id asm("r8") = ACRN_SMC_HC_ID; ++ register unsigned long smc_id asm("r8") = ACRN_HC_SWITCH_WORLD; + __asm__ __volatile__( + "vmcall; \n" + : "=D"(r0) +@@ -81,6 +82,20 @@ static inline ulong smc_acrn(ulong r0, ulong r1, ulong r2, ulong r3) + return r0; + } + ++static void acrn_save_sworld_context(void *arg) ++{ ++ long *save_ret = arg; ++ register signed long result asm("rax"); ++ register unsigned long hc_id asm("r8") = ACRN_HC_SAVE_SWORLD_CONTEXT; ++ __asm__ __volatile__( ++ "vmcall; \n" ++ : "=r"(result) ++ : "r"(hc_id) ++ ); ++ ++ *save_ret = result; ++} ++ + static void trusty_fast_call32_remote(void *args) + { + struct trusty_smc_interface *p_args = args; +@@ -631,6 +646,24 @@ static int trusty_remove(struct platform_device *pdev) + return 0; + } + ++static int trusty_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++ dev_info(&pdev->dev, "%s() is called\n", __func__); ++ long ret = 0, save_ret = 0; ++ int cpu = 0; ++ ++ ret = smp_call_function_single(cpu, acrn_save_sworld_context, (void *)&save_ret, 1); ++ if (ret) { ++ pr_err("%s: smp_call_function_single failed: %d\n", __func__, ret); ++ } ++ if(save_ret < 0) { ++ dev_err(&pdev->dev, "%s(): failed to save world context!\n", __func__); ++ return -EPERM; ++ } ++ ++ return 0; ++} ++ + static const struct of_device_id trusty_of_match[] = { + { .compatible = "android,trusty-smc-v1", }, + {}, 
+@@ -764,6 +797,11 @@ static int __init trusty_driver_init(void) + printk(KERN_ERR "%s(): platform_add_devices() failed, ret %d\n", __func__, ret); + return ret; + } ++ ++ if(trusty_detect_vmm() == VMM_ID_ACRN) { ++ trusty_driver.suspend = trusty_suspend; ++ } ++ + return platform_driver_register(&trusty_driver); + } + +-- +2.17.1 + diff --git a/patches/0058-ASoC-utils-add-inputs-and-outputs-to-dummy-codec.audio b/patches/0058-ASoC-utils-add-inputs-and-outputs-to-dummy-codec.audio new file mode 100644 index 0000000000..6b3d27943e --- /dev/null +++ b/patches/0058-ASoC-utils-add-inputs-and-outputs-to-dummy-codec.audio @@ -0,0 +1,80 @@ +From 52edf807697a2044524398844589e7c26d1b8a6e Mon Sep 17 00:00:00 2001 +From: Omair Mohammed Abdullah +Date: Mon, 10 Nov 2014 21:52:55 +0530 +Subject: [PATCH 058/193] ASoC: utils: add inputs and outputs to dummy codec + +Add a dummy input and a dummy output to the codec, so that the platform side +widgets can be triggered if a backend uses a dummy codec. + +Make the dummy codec stream names explicit to avoid confusion. 
+ +Change-Id: I3891e7b670a413c74d71aae1feed9f04e00041e3 +Tracked-On: +Signed-off-by: Omair Mohammed Abdullah +Reviewed-by: Koul, Vinod +Tested-by: Koul, Vinod +Signed-off-by: Dharageswari.R +Reviewed-on: +Reviewed-by: Babu, Ramesh +Tested-by: Babu, Ramesh +--- + sound/soc/soc-utils.c | 26 ++++++++++++++++++++++++-- + 1 file changed, 24 insertions(+), 2 deletions(-) + +diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c +index 54dcece52b0c..ceb3ad4bcee1 100644 +--- a/sound/soc/soc-utils.c ++++ b/sound/soc/soc-utils.c +@@ -83,7 +83,28 @@ static const struct snd_soc_component_driver dummy_platform = { + .ops = &snd_dummy_dma_ops, + }; + ++static struct snd_soc_dapm_widget dapm_widgets[] = { ++ SND_SOC_DAPM_INPUT("Dummy Input"), ++ SND_SOC_DAPM_OUTPUT("Dummy Output"), ++}; ++ ++static struct snd_soc_dapm_route intercon[] = { ++ { "Dummy Output", NULL, "Dummy Playback"}, ++ { "Dummy Capture", NULL, "Dummy Input"}, ++}; ++ ++static int dummy_codec_probe(struct snd_soc_component *codec) ++{ ++ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(codec); ++ ++ snd_soc_dapm_new_controls(dapm, dapm_widgets, ++ ARRAY_SIZE(dapm_widgets)); ++ snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); ++ return 0; ++} ++ + static const struct snd_soc_component_driver dummy_codec = { ++ .probe = dummy_codec_probe, + .idle_bias_on = 1, + .use_pmdown_time = 1, + .endianness = 1, +@@ -109,17 +130,18 @@ static const struct snd_soc_component_driver dummy_codec = { + * which should be modelled. And the data flow graph also should be modelled + * using DAPM. 
+ */ ++ + static struct snd_soc_dai_driver dummy_dai = { + .name = "snd-soc-dummy-dai", + .playback = { +- .stream_name = "Playback", ++ .stream_name = "Dummy Playback", + .channels_min = 1, + .channels_max = 384, + .rates = STUB_RATES, + .formats = STUB_FORMATS, + }, + .capture = { +- .stream_name = "Capture", ++ .stream_name = "Dummy Capture", + .channels_min = 1, + .channels_max = 384, + .rates = STUB_RATES, +-- +2.17.1 + diff --git a/patches/0058-Fix-compile-warning-from-ISO90-and-output-format.trusty b/patches/0058-Fix-compile-warning-from-ISO90-and-output-format.trusty new file mode 100644 index 0000000000..7cc1d43766 --- /dev/null +++ b/patches/0058-Fix-compile-warning-from-ISO90-and-output-format.trusty @@ -0,0 +1,37 @@ +From 1d85f27de40e5346ee4ded8c4d15430b56af972e Mon Sep 17 00:00:00 2001 +From: Zhou Furong +Date: Fri, 10 Aug 2018 15:00:04 +0800 +Subject: [PATCH 58/63] Fix compile warning from ISO90 and output format + +Fix the warning of mixed declarations and code which are forbidded +in ISO90, and update 'long' output format. 
+ +Change-Id: I96e6e4152151f1b26d5d2243974cc85bd7fc5bdd +--- + drivers/trusty/trusty.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 7bff133a4610..f37a1a58dce8 100755 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -648,13 +648,14 @@ static int trusty_remove(struct platform_device *pdev) + + static int trusty_suspend(struct platform_device *pdev, pm_message_t state) + { +- dev_info(&pdev->dev, "%s() is called\n", __func__); + long ret = 0, save_ret = 0; + int cpu = 0; + ++ dev_info(&pdev->dev, "%s() is called\n", __func__); ++ + ret = smp_call_function_single(cpu, acrn_save_sworld_context, (void *)&save_ret, 1); + if (ret) { +- pr_err("%s: smp_call_function_single failed: %d\n", __func__, ret); ++ pr_err("%s: smp_call_function_single failed: %ld\n", __func__, ret); + } + if(save_ret < 0) { + dev_err(&pdev->dev, "%s(): failed to save world context!\n", __func__); +-- +2.17.1 + diff --git a/patches/0058-VHM-Update-cpu-id-type-as-uint16_t-for-struct-acrn_cr.acrn b/patches/0058-VHM-Update-cpu-id-type-as-uint16_t-for-struct-acrn_cr.acrn new file mode 100644 index 0000000000..9661a695a6 --- /dev/null +++ b/patches/0058-VHM-Update-cpu-id-type-as-uint16_t-for-struct-acrn_cr.acrn @@ -0,0 +1,37 @@ +From a7a876aaf3d5f8e2c543d2091054f54b0056c005 Mon Sep 17 00:00:00 2001 +From: Xiangyang Wu +Date: Fri, 31 Aug 2018 10:59:01 +0800 +Subject: [PATCH 058/150] VHM:Update cpu id type as uint16_t for struct + acrn_create_vcpu + +Update the cpu id type as uint16_t for struct acrn_create_vcpu in +the VHM driver, this structure is for data transfering between +the hypervisor and device modle in SOS. 
+ +Change-Id: I6bfb67cc25d12f24dbc423ea1a0b91d876c9812e +Tracked-On: +Signed-off-by: Xiangyang Wu +--- + include/linux/vhm/acrn_common.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index 0fa524ef2af1..a4ae0146ac39 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -184,10 +184,10 @@ struct acrn_create_vm { + */ + struct acrn_create_vcpu { + /** the virtual CPU ID for the VCPU created */ +- uint32_t vcpu_id; ++ uint16_t vcpu_id; + + /** the physical CPU ID for the VCPU created */ +- uint32_t pcpu_id; ++ uint16_t pcpu_id; + } __attribute__((aligned(8))); + + /** +-- +2.17.1 + diff --git a/patches/0058-drm-i915-selftests-Try-to-recycle-context-allocations.drm b/patches/0058-drm-i915-selftests-Try-to-recycle-context-allocations.drm new file mode 100644 index 0000000000..fda594aa50 --- /dev/null +++ b/patches/0058-drm-i915-selftests-Try-to-recycle-context-allocations.drm @@ -0,0 +1,65 @@ +From 7fad5a70e824593a8bcad60e3a8e337220518b16 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Tue, 27 Aug 2019 17:17:26 +0100 +Subject: [PATCH 058/690] drm/i915/selftests: Try to recycle context + allocations + +igt_ctx_exec allocates a new context for each iteration, keeping them +all allocated until the end. Instead, release the local ctx reference at +the end of each iteration, allowing ourselves to reap those if under +mempressure. 
+ +Signed-off-by: Chris Wilson +Reviewed-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190827161726.3640-2-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +index 63116c4fa8ba..da54a718c712 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c ++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +@@ -424,7 +424,7 @@ static int igt_ctx_exec(void *arg) + struct i915_gem_context *ctx; + struct intel_context *ce; + +- ctx = live_context(i915, file); ++ ctx = kernel_context(i915); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; +@@ -438,6 +438,7 @@ static int igt_ctx_exec(void *arg) + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + intel_context_put(ce); ++ kernel_context_close(ctx); + goto out_unlock; + } + } +@@ -449,12 +450,14 @@ static int igt_ctx_exec(void *arg) + engine->name, ctx->hw_id, + yesno(!!ctx->vm), err); + intel_context_put(ce); ++ kernel_context_close(ctx); + goto out_unlock; + } + + err = throttle(ce, tq, ARRAY_SIZE(tq)); + if (err) { + intel_context_put(ce); ++ kernel_context_close(ctx); + goto out_unlock; + } + +@@ -467,6 +470,7 @@ static int igt_ctx_exec(void *arg) + ncontexts++; + + intel_context_put(ce); ++ kernel_context_close(ctx); + } + + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", +-- +2.17.1 + diff --git a/patches/0058-mei-dal-add-character-device-for-user-space-inter.security b/patches/0058-mei-dal-add-character-device-for-user-space-inter.security new file mode 100644 index 0000000000..2f487fe552 --- /dev/null +++ b/patches/0058-mei-dal-add-character-device-for-user-space-inter.security @@ -0,0 +1,401 @@ +From c861c16ed2d4f6dbc17e28849e67fdf7bdce83e3 Mon Sep 17 00:00:00 2001 +From: Yael Samet +Date: Tue, 5 Sep 2017 14:07:32 +0300 +Subject: [PATCH 
58/65] mei: dal: add character device for user space interface + +DAL user space interface allows sending and receiving of DAL messages, +from and to user-space, usually between JHI server to DAL FW. +DAL module is in pass-through mode. +This patch adds the character device interface. + +Change-Id: I5f6d2c17744ad2481387c9a4427c8de5962bca8d +Signed-off-by: Yael Samet +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/dal/Makefile | 1 + + drivers/misc/mei/dal/dal_cdev.c | 250 +++++++++++++++++++++++++++++++ + drivers/misc/mei/dal/dal_cdev.h | 13 ++ + drivers/misc/mei/dal/dal_class.c | 31 +++- + 4 files changed, 290 insertions(+), 5 deletions(-) + create mode 100644 drivers/misc/mei/dal/dal_cdev.c + create mode 100644 drivers/misc/mei/dal/dal_cdev.h + +diff --git a/drivers/misc/mei/dal/Makefile b/drivers/misc/mei/dal/Makefile +index d43a0f599956..da08dabe3d70 100644 +--- a/drivers/misc/mei/dal/Makefile ++++ b/drivers/misc/mei/dal/Makefile +@@ -8,3 +8,4 @@ mei_dal-objs += acp_parser.o + mei_dal-objs += bh_external.o + mei_dal-objs += bh_internal.o + mei_dal-objs += dal_class.o ++mei_dal-objs += dal_cdev.o +diff --git a/drivers/misc/mei/dal/dal_cdev.c b/drivers/misc/mei/dal/dal_cdev.c +new file mode 100644 +index 000000000000..346a8be4432d +--- /dev/null ++++ b/drivers/misc/mei/dal/dal_cdev.c +@@ -0,0 +1,250 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "dal_dev.h" ++#include "dal_cdev.h" ++ ++/* KDI user space devices major and minor numbers */ ++static dev_t dal_devt; ++ ++/** ++ * dal_dev_open - dal cdev open function ++ * ++ * @inode: pointer to inode structure ++ * @fp: pointer to file structure ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int dal_dev_open(struct inode *inode, struct file *fp) ++{ ++ int ret; ++ struct dal_device *ddev; ++ ++ ddev = container_of(inode->i_cdev, struct dal_device, cdev); ++ if (!ddev) ++ return -ENODEV; ++ ++ /* single open */ ++ if (test_and_set_bit(DAL_DEV_OPENED, &ddev->status)) ++ return -EBUSY; ++ ++ ret = dal_dc_setup(ddev, DAL_INTF_CDEV); ++ if (ret) ++ goto err; ++ ++ fp->private_data = ddev->clients[DAL_INTF_CDEV]; ++ ++ return nonseekable_open(inode, fp); ++ ++err: ++ clear_bit(DAL_DEV_OPENED, &ddev->status); ++ return ret; ++} ++ ++/** ++ * dal_dev_release - dal cdev release function ++ * ++ * @inode: pointer to inode structure ++ * @fp: pointer to file structure ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int dal_dev_release(struct inode *inode, struct file *fp) ++{ ++ struct dal_client *dc = fp->private_data; ++ struct dal_device *ddev = dc->ddev; ++ ++ if (mutex_lock_interruptible(&ddev->context_lock)) { ++ dev_dbg(&ddev->dev, "signal interrupted\n"); ++ return -ERESTARTSYS; ++ } ++ ++ dal_dc_destroy(ddev, dc->intf); ++ ++ mutex_unlock(&ddev->context_lock); ++ ++ clear_bit(DAL_DEV_OPENED, &ddev->status); ++ ++ return 0; ++} ++ ++/** ++ * dal_dev_read - dal cdev read function ++ * ++ * @fp: pointer to file structure ++ * @buf: pointer to user buffer ++ * @count: buffer length ++ * @off: data offset in buffer ++ * ++ * Return: >=0 data length on success ++ * <0 on failure ++ */ ++static ssize_t dal_dev_read(struct file *fp, char __user *buf, ++ size_t 
count, loff_t *off) ++{ ++ struct dal_client *dc = fp->private_data; ++ struct dal_device *ddev = dc->ddev; ++ int ret; ++ size_t r_len, len; ++ unsigned int copied; ++ ++ if (!buf) ++ return -EINVAL; ++ ++ ret = dal_wait_for_read(dc); ++ if (ret) ++ return ret; ++ ++ if (kfifo_is_empty(&dc->read_queue)) ++ return 0; ++ ++ r_len = kfifo_out(&dc->read_queue, &len, sizeof(len)); ++ if (r_len != sizeof(len) || len > count) { ++ dev_dbg(&ddev->dev, "could not copy buffer: src size = %zd, dest size = %zu\n", ++ len, count); ++ return -EFAULT; ++ } ++ ++ /** ++ * count is the user buffer size, len is the msg size, ++ * if we reach here then len <= count, ++ * we can copy the whole msg to the user because his ++ * buffer is big enough ++ */ ++ ret = kfifo_to_user(&dc->read_queue, buf, len, &copied); ++ if (ret) { ++ dev_dbg(&ddev->dev, "copy_to_user() failed\n"); ++ return -EFAULT; ++ } ++ ++ return copied; ++} ++ ++/** ++ * dal_dev_write - dal cdev write function ++ * ++ * @fp: pointer to file structure ++ * @buff: pointer to user buffer ++ * @count: buffer length ++ * @off: data offset in buffer ++ * ++ * Return: >=0 data length on success ++ * <0 on failure ++ */ ++static ssize_t dal_dev_write(struct file *fp, const char __user *buff, ++ size_t count, loff_t *off) ++{ ++ struct dal_device *ddev; ++ struct dal_client *dc = fp->private_data; ++ void *data; ++ int ret; ++ ++ ddev = dc->ddev; ++ ++ if (count > DAL_MAX_BUFFER_SIZE) { ++ dev_dbg(&ddev->dev, "count is too big, count = %zu\n", count); ++ return -EMSGSIZE; ++ } ++ ++ if (count == 0) ++ return 0; ++ ++ if (!buff) ++ return -EINVAL; ++ ++ data = memdup_user(buff, count); ++ if (IS_ERR(data)) ++ return PTR_ERR(data); ++ ++ ret = dal_write(dc, data, count, 0); ++ ++ kfree(data); ++ ++ return ret; ++} ++ ++static const struct file_operations mei_dal_fops = { ++ .owner = THIS_MODULE, ++ .open = dal_dev_open, ++ .release = dal_dev_release, ++ .read = dal_dev_read, ++ .write = dal_dev_write, ++ .llseek = no_llseek, 
++}; ++ ++/** ++ * dal_dev_del - delete dal cdev ++ * ++ * @ddev: dal device ++ */ ++void dal_dev_del(struct dal_device *ddev) ++{ ++ cdev_del(&ddev->cdev); ++} ++ ++/** ++ * dal_dev_setup - initialize dal cdev ++ * ++ * @ddev: dal device ++ */ ++void dal_dev_setup(struct dal_device *ddev) ++{ ++ dev_t devno; ++ ++ cdev_init(&ddev->cdev, &mei_dal_fops); ++ devno = MKDEV(MAJOR(dal_devt), ddev->device_id); ++ ddev->cdev.owner = THIS_MODULE; ++ ddev->dev.devt = devno; ++ ddev->cdev.kobj.parent = &ddev->dev.kobj; ++} ++ ++/** ++ * dal_dev_add - add dal cdev ++ * ++ * @ddev: dal device ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++int dal_dev_add(struct dal_device *ddev) ++{ ++ return cdev_add(&ddev->cdev, ddev->dev.devt, 1); ++} ++ ++/** ++ * dal_dev_init - allocate dev_t number ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++int __init dal_dev_init(void) ++{ ++ return alloc_chrdev_region(&dal_devt, 0, DAL_MEI_DEVICE_MAX, "dal"); ++} ++ ++/** ++ * dal_dev_exit - unregister allocated dev_t number ++ */ ++void dal_dev_exit(void) ++{ ++ unregister_chrdev_region(dal_devt, DAL_MEI_DEVICE_MAX); ++} +diff --git a/drivers/misc/mei/dal/dal_cdev.h b/drivers/misc/mei/dal/dal_cdev.h +new file mode 100644 +index 000000000000..8364511fc9d3 +--- /dev/null ++++ b/drivers/misc/mei/dal/dal_cdev.h +@@ -0,0 +1,13 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++ ++#ifndef __MEI_DAL_DEV_H__ ++#define __MEI_DAL_DEV_H__ ++void dal_dev_del(struct dal_device *ddev); ++void dal_dev_setup(struct dal_device *ddev); ++int dal_dev_add(struct dal_device *ddev); ++int __init dal_dev_init(void); ++void dal_dev_exit(void); ++#endif /* __MEI_DAL_DEV_H__ */ +diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c +index 1c8e429ef0c6..04a5d866208d 100644 +--- a/drivers/misc/mei/dal/dal_class.c ++++ b/drivers/misc/mei/dal/dal_class.c +@@ -23,6 +23,7 @@ + #include "bh_cmd_defs.h" + #include "bh_errcode.h" + #include "dal_dev.h" ++#include "dal_cdev.h" + + /* + * this class contains the 3 mei_cl_device, ivm, sdm, rtm. +@@ -613,6 +614,8 @@ static int dal_remove(struct mei_cl_device *cldev) + if (!ddev) + return 0; + ++ dal_dev_del(ddev); ++ + ddev->is_device_removed = 1; + /* make sure the above is set */ + smp_mb(); +@@ -675,25 +678,34 @@ static int dal_probe(struct mei_cl_device *cldev, + ddev->dev.release = dal_device_release; + dev_set_name(&ddev->dev, "dal%d", ddev->device_id); + ++ dal_dev_setup(ddev); ++ + ret = device_register(&ddev->dev); + if (ret) { + dev_err(pdev, "unable to register device\n"); +- goto err; ++ goto err_unregister; + } + + ddev->bh_fw_msg.msg = kzalloc(DAL_MAX_BUFFER_SIZE, GFP_KERNEL); + if (!ddev->bh_fw_msg.msg) { + ret = -ENOMEM; +- goto err; ++ goto err_unregister; + } + + ret = dal_mei_enable(ddev); + if (ret < 0) +- goto err; ++ goto err_unregister; ++ ++ ret = dal_dev_add(ddev); ++ if (ret) ++ goto err_disable; + + return 0; + +-err: ++err_disable: ++ mei_cldev_set_drvdata(cldev, NULL); ++ mei_cldev_disable(cldev); ++err_unregister: + device_unregister(&ddev->dev); + return ret; + } +@@ -739,6 +751,7 @@ static void __exit mei_dal_exit(void) + { + mei_cldev_driver_unregister(&dal_driver); + ++ dal_dev_exit(); + class_destroy(dal_class); + } + +@@ -758,14 +771,22 @@ static int __init mei_dal_init(void) + return PTR_ERR(dal_class); + } + ++ ret = dal_dev_init(); ++ if (ret < 0) 
{ ++ pr_err("failed allocate chrdev region = %d\n", ret); ++ goto err_class; ++ } ++ + ret = mei_cldev_driver_register(&dal_driver); + if (ret < 0) { + pr_err("mei_cl_driver_register failed with status = %d\n", ret); +- goto err_class; ++ goto err_dev; + } + + return 0; + ++err_dev: ++ dal_dev_exit(); + err_class: + class_destroy(dal_class); + return ret; +-- +2.17.1 + diff --git a/patches/0058-net-stmmac-Add-hardware-supported-cross-times.connectivity b/patches/0058-net-stmmac-Add-hardware-supported-cross-times.connectivity new file mode 100644 index 0000000000..b718ce4cd2 --- /dev/null +++ b/patches/0058-net-stmmac-Add-hardware-supported-cross-times.connectivity @@ -0,0 +1,472 @@ +From 8c0b67246e06e4abd6e000406870b730b54c877e Mon Sep 17 00:00:00 2001 +From: "Tan, Tee Min" +Date: Thu, 11 Oct 2018 14:27:00 +0800 +Subject: [PATCH 058/108] net: stmmac: Add hardware supported cross-timestamp + +This lets get_device_system_crosststamp() to use +stmmac_get_syncdevicetime() which uses hardware crosstimestamping +to synchronously capture the system (ART - Always Running Timer) +and device (PTP) timestamp + +The hardware cross-timestamp result is made available to +applications through the PTP_SYS_OFFSET_PRECISE ioctl +which calls stmmac_getcrosststamp(). + +Device time is stored in MAC Auxiliary register and System time +(ART time) is stored in a 64-bit latch and is mapped to ART MDIO +device address space (PCE_ART_Value0-3). 
+ +Signed-off-by: Tan, Tee Min +Signed-off-by: Voon Weifeng +--- + drivers/net/ethernet/stmicro/stmmac/Kconfig | 9 ++ + drivers/net/ethernet/stmicro/stmmac/common.h | 2 + + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 14 ++ + .../net/ethernet/stmicro/stmmac/dwmac4_dma.c | 4 + + drivers/net/ethernet/stmicro/stmmac/hwif.h | 9 ++ + .../ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 34 +++++ + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 3 + + .../net/ethernet/stmicro/stmmac/stmmac_ptp.c | 127 ++++++++++++++++++ + .../net/ethernet/stmicro/stmmac/stmmac_ptp.h | 26 ++++ + include/linux/stmmac.h | 2 + + 10 files changed, 230 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig +index 3425d37b6ae9..3ed5b2785e53 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig ++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig +@@ -211,3 +211,12 @@ config STMMAC_PCI + + If unsure, say N. + endif ++ ++config STMMAC_HWTS ++ bool "Support HW cross-timestamp on Ethernet devices" ++ default n ++ depends on STMMAC_ETH && X86 ++ help ++ Say Y to enable hardware cross-timestamping on supported PCH ++ devices. The cross-timestamp is accessible through the ioctl ++ call PTP_SYS_OFFSET_PRECISE. 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index 357ea781b6ce..7796a0f5b861 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -384,6 +384,8 @@ struct dma_features { + unsigned int dvlan; + unsigned int l3l4fnum; + unsigned int arpoffsel; ++ /* Number of Auxiliary Snapshot Inputs */ ++ unsigned int aux_snapshot_n; + }; + + /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +index b7a7967b376a..9afd57996232 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -44,6 +44,7 @@ + #define GMAC_ARP_ADDR 0x00000210 + #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) + #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) ++#define GMAC_TIMESTAMP_STATUS 0x00000b20 + + /* RX Queues Routing */ + #define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0) +@@ -136,11 +137,17 @@ + #define GMAC_INT_PCS_PHYIS BIT(3) + #define GMAC_INT_PMT_EN BIT(4) + #define GMAC_INT_LPI_EN BIT(5) ++#define GMAC_INT_TSIE BIT(12) + + #define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ + GMAC_INT_PCS_ANE) + ++#ifdef CONFIG_STMMAC_HWTS ++#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN |\ ++ GMAC_INT_TSIE) ++#else + #define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN) ++#endif + + enum dwmac4_irq_status { + time_stamp_irq = 0x00001000, +@@ -213,6 +220,7 @@ enum power_event { + + /* MAC HW features0 bitmap */ + #define GMAC_HW_FEAT_SAVLANINS BIT(27) ++#define GMAC_HW_FEAT_TSSTSSEL GENMASK(26, 25) + #define GMAC_HW_FEAT_ADDMAC BIT(18) + #define GMAC_HW_FEAT_RXCOESEL BIT(16) + #define GMAC_HW_FEAT_TXCOSEL BIT(14) +@@ -237,6 +245,7 @@ enum power_event { + #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) + + /* MAC HW features2 bitmap */ ++#define GMAC_HW_FEAT_AUXSNAPNUM GENMASK(30, 28) + #define 
GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24) + #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) + #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) +@@ -256,6 +265,11 @@ enum power_event { + #define GMAC_HI_DCS_SHIFT 16 + #define GMAC_HI_REG_AE BIT(31) + ++/* MAC Timestamp Status */ ++#define GMAC_TIMESTAMP_AUXTSTRIG BIT(2) ++#define GMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) ++#define GMAC_TIMESTAMP_ATSNS_SHIFT 25 ++ + /* MTL registers */ + #define MTL_OPERATION_MODE 0x00000c00 + #define MTL_FRPE BIT(15) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +index 87c283b88408..b55d31349d61 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +@@ -380,6 +380,10 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, + /* IEEE 1588-2002 */ + dma_cap->time_stamp = 0; + ++ /* Number of Auxiliary Snapshot Inputs */ ++ dma_cap->aux_snapshot_n = ++ (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28; ++ + /* MAC HW feature3 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE3); + +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index f10143585c43..f2606823c3ae 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -623,6 +623,8 @@ struct stmmac_serdes_ops { + #define stmmac_speed_mode_2500(__priv, __args...) 
\ + stmmac_do_callback(__priv, serdes, speed_mode_2500, __args) + ++struct mii_bus; ++ + /* PTP and HW Timer helpers */ + struct stmmac_hwtimestamp { + void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); +@@ -633,6 +635,9 @@ struct stmmac_hwtimestamp { + int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4); + void (*get_systime) (void __iomem *ioaddr, u64 *systime); ++ void (*get_arttime)(struct mii_bus *mii, int intel_adhoc_addr, ++ u64 *art_time); ++ void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); + }; + + #define stmmac_config_hw_tstamping(__priv, __args...) \ +@@ -647,6 +652,10 @@ struct stmmac_hwtimestamp { + stmmac_do_callback(__priv, ptp, adjust_systime, __args) + #define stmmac_get_systime(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, get_systime, __args) ++#define stmmac_get_arttime(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, ptp, get_arttime, __args) ++#define stmmac_get_ptptime(__priv, __args...) 
\ ++ stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) + + /* Helpers to manage the descriptors for chain and ring modes */ + struct stmmac_mode_ops { +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +index 020159622559..6a4dcaa3f501 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +@@ -13,6 +13,12 @@ + #include + #include "common.h" + #include "stmmac_ptp.h" ++#ifdef CONFIG_STMMAC_HWTS ++#include ++#endif ++#include "stmmac.h" ++#include "dwmac4.h" ++#include + + static void config_hw_tstamping(void __iomem *ioaddr, u32 data) + { +@@ -156,6 +162,32 @@ static void get_systime(void __iomem *ioaddr, u64 *systime) + *systime = ns; + } + ++static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr, ++ u64 *art_time) ++{ ++ u64 ns; ++ ++ ns = mii->read(mii, intel_adhoc_addr, PMC_ART_VALUE3); ++ ns <<= GMAC4_ART_TIME_SHIFT; ++ ns |= mii->read(mii, intel_adhoc_addr, PMC_ART_VALUE2); ++ ns <<= GMAC4_ART_TIME_SHIFT; ++ ns |= mii->read(mii, intel_adhoc_addr, PMC_ART_VALUE1); ++ ns <<= GMAC4_ART_TIME_SHIFT; ++ ns |= mii->read(mii, intel_adhoc_addr, PMC_ART_VALUE0); ++ ++ *art_time = ns; ++} ++ ++static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) ++{ ++ u64 ns; ++ ++ ns = readl(ptpaddr + PTP_ATNR); ++ ns += readl(ptpaddr + PTP_ATSR) * 1000000000ULL; ++ ++ *ptp_time = ns; ++} ++ + const struct stmmac_hwtimestamp stmmac_ptp = { + .config_hw_tstamping = config_hw_tstamping, + .init_systime = init_systime, +@@ -163,4 +195,6 @@ const struct stmmac_hwtimestamp stmmac_ptp = { + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, ++ .get_arttime = get_arttime, ++ .get_ptptime = get_ptptime, + }; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index fca6c3392eb2..99a3dafe4d68 100644 +--- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -16,6 +16,7 @@ + #include + #include "stmmac.h" + #include "dwmac4.h" ++#include "stmmac_ptp.h" + + /* + * This struct is used to associate PCI Function of MAC controller on a board, +@@ -275,6 +276,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + plat->ptov = 0; + plat->tils = 0; + ++ plat->int_snapshot_num = AUX_SNAPSHOT1; ++ + return 0; + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +index df638b18b72c..18355e819075 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +@@ -9,6 +9,8 @@ + *******************************************************************************/ + #include "stmmac.h" + #include "stmmac_ptp.h" ++#include "dwmac4.h" ++#include + + /** + * stmmac_adjust_freq +@@ -161,6 +163,116 @@ static int stmmac_enable(struct ptp_clock_info *ptp, + return ret; + } + ++#ifdef CONFIG_STMMAC_HWTS ++static int stmmac_cross_ts_isr(struct stmmac_priv *priv) ++{ ++ return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE); ++} ++ ++/** ++ * stmmac_get_syncdevicetime - Callback given to timekeeping code ++ * reads system/device registers ++ * @device: current device time ++ * @system: system counter value read synchronously with device time ++ * @ctx: context provided by timekeeping code ++ * ++ * Read device and system (ART) clock simultaneously and return the corrected ++ * clock values in ns. 
++ **/ ++static int stmmac_get_syncdevicetime(ktime_t *device, ++ struct system_counterval_t *system, ++ void *ctx) ++{ ++ struct stmmac_priv *priv = (struct stmmac_priv *)ctx; ++ void __iomem *ptpaddr = priv->ptpaddr; ++ void __iomem *ioaddr = priv->hw->pcsr; ++ unsigned long flags; ++ u32 num_snapshot; ++ u32 gpio_value; ++ u32 acr_value; ++ u64 art_time; ++ u64 ptp_time; ++ u32 v; ++ int i; ++ ++ /* Enable Internal snapshot trigger */ ++ acr_value = readl(ptpaddr + PTP_ACR); ++ acr_value &= ~PTP_ACR_MASK; ++ switch (priv->plat->int_snapshot_num) { ++ case AUX_SNAPSHOT0: ++ acr_value |= PTP_ACR_ATSEN0; ++ break; ++ case AUX_SNAPSHOT1: ++ acr_value |= PTP_ACR_ATSEN1; ++ break; ++ case AUX_SNAPSHOT2: ++ acr_value |= PTP_ACR_ATSEN2; ++ break; ++ case AUX_SNAPSHOT3: ++ acr_value |= PTP_ACR_ATSEN3; ++ break; ++ default: ++ return -EINVAL; ++ } ++ writel(acr_value, ptpaddr + PTP_ACR); ++ ++ /* Clear FIFO */ ++ acr_value = readl(ptpaddr + PTP_ACR); ++ acr_value |= PTP_ACR_ATSFC; ++ writel(acr_value, ptpaddr + PTP_ACR); ++ ++ /** Trigger Internal snapshot signal ++ * Create a rising edge by just toggle the GPO1 to low ++ * and back to high. 
++ */ ++ gpio_value = readl(ioaddr + GMAC_GPIO_STATUS); ++ gpio_value &= ~GPO1; ++ writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); ++ gpio_value |= GPO1; ++ writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); ++ ++ /* Time sync done Indication - Interrupt method */ ++ if (priv->hw->mdio_intr_en) { ++ if (!wait_event_timeout(priv->hw->mdio_busy_wait, ++ stmmac_cross_ts_isr(priv), HZ / 100)) ++ return -ETIMEDOUT; ++ } else if (readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v, ++ (v & GMAC_INT_TSIE), 100, 10000)) ++ return -ETIMEDOUT; ++ ++ num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) & ++ GMAC_TIMESTAMP_ATSNS_MASK) >> ++ GMAC_TIMESTAMP_ATSNS_SHIFT; ++ ++ /* Repeat until the timestamps are from the FIFO last segment */ ++ for (i = 0; i < num_snapshot; i++) { ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ stmmac_get_ptptime(priv, ptpaddr, &ptp_time); ++ *device = ns_to_ktime(ptp_time); ++ spin_unlock_irqrestore(&priv->ptp_lock, flags); ++ ++ stmmac_get_arttime(priv, priv->mii, ++ priv->plat->intel_adhoc_addr, &art_time); ++ *system = convert_art_to_tsc(art_time); ++ } ++ ++ return 0; ++} ++ ++static int stmmac_getcrosststamp(struct ptp_clock_info *ptp, ++ struct system_device_crosststamp *xtstamp) ++{ ++ struct stmmac_priv *priv = ++ container_of(ptp, struct stmmac_priv, ptp_clock_ops); ++ ++ if (!boot_cpu_has(X86_FEATURE_ART)) ++ return -EOPNOTSUPP; ++ ++ return get_device_system_crosststamp(stmmac_get_syncdevicetime, ++ priv, NULL, xtstamp); ++} ++#endif ++ + /* structure describing a PTP hardware clock */ + static struct ptp_clock_info stmmac_ptp_clock_ops = { + .owner = THIS_MODULE, +@@ -176,6 +288,9 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { + .gettime64 = stmmac_get_time, + .settime64 = stmmac_set_time, + .enable = stmmac_enable, ++#ifdef CONFIG_STMMAC_HWTS ++ .getcrosststamp = stmmac_getcrosststamp, ++#endif + }; + + /** +@@ -186,7 +301,17 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { + */ + void stmmac_ptp_register(struct 
stmmac_priv *priv) + { ++ int aux_snapshot_n; + int i; ++#ifdef CONFIG_STMMAC_HWTS ++ void __iomem *ioaddr = priv->hw->pcsr; ++ u32 gpio_value; ++ ++ /* set 200 Mhz xtal clock for Hammock Harbor */ ++ gpio_value = readl(ioaddr + GMAC_GPIO_STATUS); ++ gpio_value &= ~GPO0; ++ writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); ++#endif + + for (i = 0; i < priv->dma_cap.pps_out_num; i++) { + if (i >= STMMAC_PPS_MAX) +@@ -202,6 +327,8 @@ void stmmac_ptp_register(struct stmmac_priv *priv) + spin_lock_init(&priv->ptp_lock); + priv->ptp_clock_ops = stmmac_ptp_clock_ops; + ++ aux_snapshot_n = priv->dma_cap.aux_snapshot_n; ++ + priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, + priv->device); + if (IS_ERR(priv->ptp_clock)) { +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +index 7abb1d47e7da..c3dbb6364778 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +@@ -23,6 +23,9 @@ + #define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ + #define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ + #define PTP_TAR 0x18 /* Timestamp Addend Reg */ ++#define PTP_ACR 0x40 /* Auxiliary Control Reg */ ++#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */ ++#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */ + + #define PTP_STNSUR_ADDSUB_SHIFT 31 + #define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ +@@ -64,4 +67,27 @@ + #define PTP_SSIR_SSINC_MASK 0xff + #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 + ++/* Auxiliary Control defines */ ++#define PTP_ACR_ATSFC BIT(0) /* Auxiliary Snapshot FIFO Clear */ ++#define PTP_ACR_ATSEN0 BIT(4) /* Auxiliary Snapshot 0 Enable */ ++#define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ ++#define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ ++#define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ ++#define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot 
Mask */ ++#define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ ++#define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ ++#define PMC_ART_VALUE2 0x03 /* PMC_ART[47:32] timer value */ ++#define PMC_ART_VALUE3 0x04 /* PMC_ART[63:48] timer value */ ++#define GMAC_GPIO_STATUS 0x20c /* MAC GPIO Status */ ++#define GPO0 BIT(16) /* GPO0 clock type for HH */ ++#define GPO1 BIT(17) /* GPO1 high-active signal */ ++#define GMAC4_ART_TIME_SHIFT 16 /* ART TIME 16-bits shift */ ++ ++enum aux_snapshot { ++ AUX_SNAPSHOT0 = 0x10, ++ AUX_SNAPSHOT1 = 0x20, ++ AUX_SNAPSHOT2 = 0x40, ++ AUX_SNAPSHOT3 = 0x80, ++}; ++ + #endif /* __STMMAC_PTP_H__ */ +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 525ea9686efa..092f8720c79e 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -218,5 +218,7 @@ struct plat_stmmacenet_data { + u32 legos; + u32 ftos; + u32 fgos; ++ bool has_art; ++ int int_snapshot_num; + }; + #endif +-- +2.17.1 + diff --git a/patches/0059-ASoC-Intel-Skylake-Add-NHLT-override-control.audio b/patches/0059-ASoC-Intel-Skylake-Add-NHLT-override-control.audio new file mode 100644 index 0000000000..488a7ba2dd --- /dev/null +++ b/patches/0059-ASoC-Intel-Skylake-Add-NHLT-override-control.audio @@ -0,0 +1,122 @@ +From a5c29658cfc3943b78d5149012d3bfffafee41d0 Mon Sep 17 00:00:00 2001 +From: Vinod Koul +Date: Mon, 31 Aug 2015 14:05:10 +0530 +Subject: [PATCH 059/193] ASoC: Intel: Skylake: Add NHLT override control + +For debugging purposes we may want to not use the BIOS values and +test our own values, so allow the override by adding a control +file for override method + +Change-Id: I7b00275a3d99b5abc663b827626c1db88f14d5b2 +Signed-off-by: Vinod Koul +Signed-off-by: Guneshwor Singh +--- + sound/soc/intel/skylake/skl-debug.c | 65 +++++++++++++++++++++++++++++ + sound/soc/intel/skylake/skl.h | 1 + + 2 files changed, 66 insertions(+) + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index 
e2884b66891a..a63926d00ecd 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -26,6 +26,7 @@ struct skl_debug { + struct dentry *fs; + struct dentry *ipc; + struct dentry *modules; ++ struct dentry *nhlt; + u8 fw_read_buff[FW_REG_BUF]; + }; + +@@ -459,6 +460,61 @@ static int skl_debugfs_init_ipc(struct skl_debug *d) + return 0; + } + ++static ssize_t nhlt_control_read(struct file *file, ++ char __user *user_buf, size_t count, loff_t *ppos) ++{ ++ struct skl_debug *d = file->private_data; ++ char *state; ++ ++ state = d->skl->nhlt_override ? "enable\n" : "disable\n"; ++ return simple_read_from_buffer(user_buf, count, ppos, ++ state, strlen(state)); ++} ++ ++static ssize_t nhlt_control_write(struct file *file, ++ const char __user *user_buf, size_t count, loff_t *ppos) ++{ ++ struct skl_debug *d = file->private_data; ++ char buf[16]; ++ int len = min(count, (sizeof(buf) - 1)); ++ ++ ++ if (copy_from_user(buf, user_buf, len)) ++ return -EFAULT; ++ buf[len] = 0; ++ ++ if (!strncmp(buf, "enable\n", len)) ++ d->skl->nhlt_override = true; ++ else if (!strncmp(buf, "disable\n", len)) ++ d->skl->nhlt_override = false; ++ else ++ return -EINVAL; ++ ++ /* Userspace has been fiddling around behind the kernel's back */ ++ add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE); ++ ++ return len; ++} ++ ++static const struct file_operations ssp_cntrl_nhlt_fops = { ++ .open = simple_open, ++ .read = nhlt_control_read, ++ .write = nhlt_control_write, ++ .llseek = default_llseek, ++}; ++ ++static int skl_init_nhlt(struct skl_debug *d) ++{ ++ if (!debugfs_create_file("control", ++ 0644, d->nhlt, ++ d, &ssp_cntrl_nhlt_fops)) { ++ dev_err(d->dev, "nhlt control debugfs init failed\n"); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ + struct skl_debug *skl_debugfs_init(struct skl_dev *skl) + { + struct skl_debug *d; +@@ -485,6 +541,15 @@ struct skl_debug *skl_debugfs_init(struct skl_dev *skl) + debugfs_create_file("fw_soft_regs_rd", 0444, d->fs, d, + 
&soft_regs_ctrl_fops); + ++ /* now create the NHLT dir */ ++ d->nhlt = debugfs_create_dir("nhlt", d->fs); ++ if (IS_ERR(d->nhlt) || !d->nhlt) { ++ dev_err(&skl->pci->dev, "nhlt debugfs create failed\n"); ++ return NULL; ++ } ++ ++ skl_init_nhlt(d); ++ + return d; + } + +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index e0fc736ad619..df3d38639d13 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -89,6 +89,7 @@ struct skl_dev { + bool use_tplg_pcm; + struct skl_fw_config cfg; + struct snd_soc_acpi_mach *mach; ++ bool nhlt_override; + + struct device *dev; + struct sst_dsp *dsp; +-- +2.17.1 + diff --git a/patches/0059-check-return-value-of-hypercall.trusty b/patches/0059-check-return-value-of-hypercall.trusty new file mode 100644 index 0000000000..bb85f05590 --- /dev/null +++ b/patches/0059-check-return-value-of-hypercall.trusty @@ -0,0 +1,42 @@ +From 49e7a0f17979c0dfc3f3eb59ec498819f9fe1609 Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Thu, 16 Aug 2018 18:04:44 +0800 +Subject: [PATCH 59/63] check return value of hypercall + +exit from probe if acrn does not enable trusty + +Change-Id: I99271cd96c6df46e141b4e57a2af378119a1c25c +Tracked-On: OAM-67637 +Signed-off-by: Zhang, Qi +--- + drivers/trusty/trusty.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index f37a1a58dce8..881924f88e4f 100755 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -72,13 +72,18 @@ static inline ulong smc_evmm(ulong r0, ulong r1, ulong r2, ulong r3) + static inline ulong smc_acrn(ulong r0, ulong r1, ulong r2, ulong r3) + { + register unsigned long smc_id asm("r8") = ACRN_HC_SWITCH_WORLD; ++ register signed long ret asm("rax"); + __asm__ __volatile__( + "vmcall; \n" +- : "=D"(r0) ++ : "=D"(r0), "=r"(ret) + : "r"(smc_id), "D"(r0), "S"(r1), "d"(r2), "b"(r3) +- : "rax" + ); + ++ if(ret < 0) { ++ pr_err("trusty: %s: hypercall 
failed: %ld\n", __func__, ret); ++ r0 = (ulong)SM_ERR_NOT_SUPPORTED; ++ } ++ + return r0; + } + +-- +2.17.1 + diff --git a/patches/0059-drm-virtio-module_param_named-requires-linux-modulepar.drm b/patches/0059-drm-virtio-module_param_named-requires-linux-modulepar.drm new file mode 100644 index 0000000000..fa88015909 --- /dev/null +++ b/patches/0059-drm-virtio-module_param_named-requires-linux-modulepar.drm @@ -0,0 +1,30 @@ +From 4d7225f436ff38146b87049852bf2aee023265d4 Mon Sep 17 00:00:00 2001 +From: Stephen Rothwell +Date: Wed, 28 Aug 2019 18:55:16 +1000 +Subject: [PATCH 059/690] drm/virtio: module_param_named() requires + linux/moduleparam.h + +Fixes: 3e93bc2a58aa ("drm/virtio: make resource id workaround runtime switchable.") +Signed-off-by: Stephen Rothwell +Link: http://patchwork.freedesktop.org/patch/msgid/20190828185516.22b03da8@canb.auug.org.au +Signed-off-by: Gerd Hoffmann +--- + drivers/gpu/drm/virtio/virtgpu_object.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c +index aab5534056ec..b5f8923ac674 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_object.c ++++ b/drivers/gpu/drm/virtio/virtgpu_object.c +@@ -23,6 +23,8 @@ + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + ++#include ++ + #include + + #include "virtgpu_drv.h" +-- +2.17.1 + diff --git a/patches/0059-mei-dal-add-kernel-space-interface.security b/patches/0059-mei-dal-add-kernel-space-interface.security new file mode 100644 index 0000000000..782ef65343 --- /dev/null +++ b/patches/0059-mei-dal-add-kernel-space-interface.security @@ -0,0 +1,733 @@ +From de65588080c3e5bab1fc3f063a974a7299f0266c Mon Sep 17 00:00:00 2001 +From: Yael Samet +Date: Tue, 5 Sep 2017 14:09:42 +0300 +Subject: [PATCH 59/65] mei: dal: add kernel space interface + +DAL kernel space interface exposes in-kernel API +to access trusted execution service in DAL. +The API is defined in header file. 
+ +Change-Id: Ief3de92c3674ce8a21375bc11da38f1ae5fc3cc6 +Signed-off-by: Yael Samet +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/dal/Makefile | 1 + + drivers/misc/mei/dal/bh_external.c | 4 +- + drivers/misc/mei/dal/bh_internal.c | 38 ++- + drivers/misc/mei/dal/dal_class.c | 11 +- + drivers/misc/mei/dal/dal_dev.h | 6 + + drivers/misc/mei/dal/dal_kdi.c | 478 +++++++++++++++++++++++++++++ + include/linux/dal.h | 57 ++++ + 7 files changed, 592 insertions(+), 3 deletions(-) + create mode 100644 drivers/misc/mei/dal/dal_kdi.c + create mode 100644 include/linux/dal.h + +diff --git a/drivers/misc/mei/dal/Makefile b/drivers/misc/mei/dal/Makefile +index da08dabe3d70..829e0e3727fe 100644 +--- a/drivers/misc/mei/dal/Makefile ++++ b/drivers/misc/mei/dal/Makefile +@@ -9,3 +9,4 @@ mei_dal-objs += bh_external.o + mei_dal-objs += bh_internal.o + mei_dal-objs += dal_class.o + mei_dal-objs += dal_cdev.o ++mei_dal-objs += dal_kdi.o +diff --git a/drivers/misc/mei/dal/bh_external.c b/drivers/misc/mei/dal/bh_external.c +index e403d3348d8b..0c48693af91d 100644 +--- a/drivers/misc/mei/dal/bh_external.c ++++ b/drivers/misc/mei/dal/bh_external.c +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + + #include "bh_errcode.h" + #include "bh_external.h" +@@ -62,7 +63,7 @@ static void uuid_normalize_hyphenless(const char *uuid_hl, char *uuid_str) + * Return: 0 on success + * <0 on failure + */ +-static int dal_uuid_parse(const char *uuid_str, uuid_t *uuid) ++int dal_uuid_parse(const char *uuid_str, uuid_t *uuid) + { + char __uuid_str[UUID_STRING_LEN + 1]; + +@@ -76,6 +77,7 @@ static int dal_uuid_parse(const char *uuid_str, uuid_t *uuid) + + return uuid_parse(uuid_str, uuid); + } ++EXPORT_SYMBOL(dal_uuid_parse); + + /** + * bh_msg_is_response - check if message is response +diff --git a/drivers/misc/mei/dal/bh_internal.c b/drivers/misc/mei/dal/bh_internal.c +index 498e4f269381..c8739a417ecb 100644 +--- a/drivers/misc/mei/dal/bh_internal.c ++++ b/drivers/misc/mei/dal/bh_internal.c 
+@@ -11,6 +11,7 @@ + #include "bh_errcode.h" + #include "bh_external.h" + #include "bh_internal.h" ++#include "dal_dev.h" + + /* BH initialization state */ + static atomic_t bh_state = ATOMIC_INIT(0); +@@ -177,6 +178,7 @@ static struct bh_request_cmd *bh_request_alloc(const void *hdr, + return request; + } + ++static char skip_buffer[DAL_MAX_BUFFER_SIZE] = {0}; + /** + * bh_transport_recv - receive message from DAL FW. + * +@@ -189,6 +191,31 @@ static struct bh_request_cmd *bh_request_alloc(const void *hdr, + */ + static int bh_transport_recv(unsigned int conn_idx, void *buffer, size_t size) + { ++ size_t got; ++ unsigned int count; ++ char *buf = buffer; ++ int ret; ++ ++ if (conn_idx > DAL_MEI_DEVICE_MAX) ++ return -ENODEV; ++ ++ for (count = 0; count < size; count += got) { ++ got = min_t(size_t, size - count, DAL_MAX_BUFFER_SIZE); ++ if (buf) ++ ret = dal_kdi_recv(conn_idx, buf + count, &got); ++ else ++ ret = dal_kdi_recv(conn_idx, skip_buffer, &got); ++ ++ if (!got) ++ return -EFAULT; ++ ++ if (ret) ++ return ret; ++ } ++ ++ if (count != size) ++ return -EFAULT; ++ + return 0; + } + +@@ -307,7 +334,16 @@ static int bh_recv_message(struct bh_request_cmd *request) + static int bh_transport_send(unsigned int conn_idx, const void *buffer, + unsigned int size, u64 host_id) + { +- return 0; ++ size_t chunk_sz = DAL_MAX_BUFFER_SIZE; ++ size_t count; ++ int ret; ++ ++ for (ret = 0, count = 0; count < size && !ret; count += chunk_sz) { ++ chunk_sz = min_t(size_t, size - count, DAL_MAX_BUFFER_SIZE); ++ ret = dal_kdi_send(conn_idx, buffer + count, chunk_sz, host_id); ++ } ++ ++ return ret; + } + + /** +diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c +index 04a5d866208d..6a82f7d2daa9 100644 +--- a/drivers/misc/mei/dal/dal_class.c ++++ b/drivers/misc/mei/dal/dal_class.c +@@ -752,6 +752,9 @@ static void __exit mei_dal_exit(void) + mei_cldev_driver_unregister(&dal_driver); + + dal_dev_exit(); ++ ++ dal_kdi_exit(); ++ + 
class_destroy(dal_class); + } + +@@ -777,14 +780,20 @@ static int __init mei_dal_init(void) + goto err_class; + } + ++ ret = dal_kdi_init(); ++ if (ret) ++ goto err_dev; ++ + ret = mei_cldev_driver_register(&dal_driver); + if (ret < 0) { + pr_err("mei_cl_driver_register failed with status = %d\n", ret); +- goto err_dev; ++ goto err; + } + + return 0; + ++err: ++ dal_kdi_exit(); + err_dev: + dal_dev_exit(); + err_class: +diff --git a/drivers/misc/mei/dal/dal_dev.h b/drivers/misc/mei/dal/dal_dev.h +index 30779c91b854..d5480bd0e43d 100644 +--- a/drivers/misc/mei/dal/dal_dev.h ++++ b/drivers/misc/mei/dal/dal_dev.h +@@ -146,4 +146,10 @@ void dal_dc_print(struct device *dev, struct dal_client *dc); + int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf); + void dal_dc_destroy(struct dal_device *ddev, enum dal_intf intf); + ++int dal_kdi_send(unsigned int handle, const unsigned char *buf, ++ size_t len, u64 seq); ++int dal_kdi_recv(unsigned int handle, unsigned char *buf, size_t *count); ++int dal_kdi_init(void); ++void dal_kdi_exit(void); ++ + #endif /* _DAL_KDI_H_ */ +diff --git a/drivers/misc/mei/dal/dal_kdi.c b/drivers/misc/mei/dal/dal_kdi.c +new file mode 100644 +index 000000000000..92d571c2af39 +--- /dev/null ++++ b/drivers/misc/mei/dal/dal_kdi.c +@@ -0,0 +1,478 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. 
++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "bh_external.h" ++#include "bh_errcode.h" ++#include "acp_parser.h" ++#include "dal_dev.h" ++ ++static DEFINE_MUTEX(dal_kdi_lock); ++ ++/** ++ * to_kdi_err - converts error number to kdi error ++ * ++ * Beihai errors (>0) are converted to DAL_KDI errors (those errors came ++ * from DAL FW) ++ * system errors and success value (<=0) stay as is ++ * ++ * @err: error code to convert (either bh err or system err) ++ * ++ * Return: the converted kdi error number or system error ++ */ ++static int to_kdi_err(int err) ++{ ++ if (err) ++ pr_debug("Error: %d\n", err); ++ ++ if (err <= 0) ++ return err; ++ ++ /* err > 0: is error from DAL FW */ ++ switch (err) { ++ case BPE_INTERNAL_ERROR: ++ return DAL_KDI_STATUS_INTERNAL_ERROR; ++ case BPE_INVALID_PARAMS: ++ case BHE_INVALID_PARAMS: ++ return DAL_KDI_STATUS_INVALID_PARAMS; ++ case BHE_INVALID_HANDLE: ++ return DAL_KDI_STATUS_INVALID_HANDLE; ++ case BPE_NOT_INIT: ++ return DAL_KDI_STATUS_NOT_INITIALIZED; ++ case BPE_OUT_OF_MEMORY: ++ case BHE_OUT_OF_MEMORY: ++ return DAL_KDI_STATUS_OUT_OF_MEMORY; ++ case BHE_INSUFFICIENT_BUFFER: ++ case BHE_APPLET_SMALL_BUFFER: ++ return DAL_KDI_STATUS_BUFFER_TOO_SMALL; ++ case BPE_OUT_OF_RESOURCE: ++ case BHE_VM_INSTANCE_INIT_FAIL: ++ return DAL_KDI_STATUS_OUT_OF_RESOURCE; ++ case BHE_SESSION_NUM_EXCEED: ++ return DAL_KDI_STATUS_MAX_SESSIONS_REACHED; ++ case BHE_UNCAUGHT_EXCEPTION: ++ return DAL_KDI_STATUS_UNCAUGHT_EXCEPTION; ++ case BHE_WD_TIMEOUT: ++ return DAL_KDI_STATUS_WD_TIMEOUT; ++ case BHE_APPLET_CRASHED: ++ return DAL_KDI_STATUS_APPLET_CRASHED; ++ case BHE_TA_PACKAGE_HASH_VERIFY_FAIL: ++ return DAL_KDI_STATUS_INVALID_ACP; ++ case BHE_PACKAGE_NOT_FOUND: ++ return DAL_KDI_STATUS_TA_NOT_FOUND; ++ case BHE_PACKAGE_EXIST: ++ return DAL_KDI_STATUS_TA_EXIST; ++ default: ++ return DAL_KDI_STATUS_INTERNAL_ERROR; ++ } 
++} ++ ++/** ++ * dal_kdi_send - a callback which is called from bhp to send msg over mei ++ * ++ * @dev_idx: DAL device type ++ * @buf: message buffer ++ * @len: buffer length ++ * @seq: message sequence ++ * ++ * Return: 0 on success ++ * -EINVAL on incorrect input ++ * -ENODEV when the device can't be found ++ * -EFAULT if client is NULL ++ * <0 on dal_write failure ++ */ ++int dal_kdi_send(unsigned int dev_idx, const unsigned char *buf, ++ size_t len, u64 seq) ++{ ++ enum dal_dev_type mei_device; ++ struct dal_device *ddev; ++ struct dal_client *dc; ++ struct device *dev; ++ ssize_t wr; ++ int ret; ++ ++ if (!buf) ++ return -EINVAL; ++ ++ if (dev_idx >= DAL_MEI_DEVICE_MAX) ++ return -EINVAL; ++ ++ if (!len) ++ return 0; ++ ++ if (len > DAL_MAX_BUFFER_SIZE) ++ return -EMSGSIZE; ++ ++ mei_device = (enum dal_dev_type)dev_idx; ++ dev = dal_find_dev(mei_device); ++ if (!dev) { ++ dev_dbg(dev, "can't find device\n"); ++ return -ENODEV; ++ } ++ ++ ddev = to_dal_device(dev); ++ dc = ddev->clients[DAL_INTF_KDI]; ++ if (!dc) { ++ dev_dbg(dev, "client is NULL\n"); ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ wr = dal_write(dc, buf, len, seq); ++ if (wr > 0) ++ ret = 0; ++ else ++ ret = wr; ++out: ++ put_device(dev); ++ return ret; ++} ++ ++/** ++ * dal_kdi_recv - a callback which is called from bhp to recv msg from DAL FW ++ * ++ * @dev_idx: DAL device type ++ * @buf: buffer of received message ++ * @count: input and output param - ++ * - input: buffer length ++ * - output: size of the received message ++ * ++ * Return: 0 on success ++ * -EINVAL on incorrect input ++ * -ENODEV when the device can't be found ++ * -EFAULT when client is NULL or copy failed ++ * -EMSGSIZE when buffer is too small ++ * <0 on dal_wait_for_read failure ++ */ ++int dal_kdi_recv(unsigned int dev_idx, unsigned char *buf, size_t *count) ++{ ++ enum dal_dev_type mei_device; ++ struct dal_device *ddev; ++ struct dal_client *dc; ++ struct device *dev; ++ size_t r_len, len; ++ int ret; ++ ++ if (!buf || 
!count) ++ return -EINVAL; ++ ++ if (dev_idx >= DAL_MEI_DEVICE_MAX) ++ return -EINVAL; ++ ++ mei_device = (enum dal_dev_type)dev_idx; ++ dev = dal_find_dev(mei_device); ++ if (!dev) ++ return -ENODEV; ++ ++ ddev = to_dal_device(dev); ++ dc = ddev->clients[DAL_INTF_KDI]; ++ if (!dc) { ++ dev_dbg(dev, "client is NULL\n"); ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ ret = dal_wait_for_read(dc); ++ if (ret) ++ goto out; ++ ++ if (kfifo_is_empty(&dc->read_queue)) { ++ *count = 0; ++ goto out; ++ } ++ ++ r_len = kfifo_out(&dc->read_queue, &len, sizeof(len)); ++ if (r_len != sizeof(len)) { ++ dev_err(&ddev->dev, "could not copy buffer: cannot fetch size\n"); ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ if (len > *count) { ++ dev_dbg(&ddev->dev, "could not copy buffer: src size = %zd > dest size = %zd\n", ++ len, *count); ++ ret = -EMSGSIZE; ++ goto out; ++ } ++ ++ r_len = kfifo_out(&dc->read_queue, buf, len); ++ if (r_len != len) { ++ dev_err(&ddev->dev, "could not copy buffer: src size = %zd, dest size = %d\n", ++ len, ret); ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ *count = len; ++ ret = 0; ++out: ++ put_device(dev); ++ return ret; ++} ++ ++/** ++ * dal_create_session - create session to an installed trusted application. 
++ * ++ * @session_handle: output param to hold the session handle ++ * @ta_id: trusted application (ta) id ++ * @acp_pkg: acp file of the ta ++ * @acp_pkg_len: acp file length ++ * @init_param: init parameters to the session (optional) ++ * @init_param_len: length of the init parameters ++ * ++ * Return: 0 on success ++ * <0 on system failure ++ * >0 on DAL FW failure ++ */ ++int dal_create_session(u64 *session_handle, const char *ta_id, ++ const u8 *acp_pkg, size_t acp_pkg_len, ++ const u8 *init_param, size_t init_param_len) ++{ ++ struct ac_ins_jta_pack_ext pack; ++ char *ta_pkg; ++ int ta_pkg_size; ++ int ret; ++ ++ if (!ta_id || !acp_pkg || !acp_pkg_len || !session_handle) ++ return -EINVAL; ++ ++ /* init_param are optional, if they exists the length shouldn't be 0 */ ++ if (!init_param && init_param_len != 0) { ++ pr_debug("INVALID_PARAMS init_param %p init_param_len %zu\n", ++ init_param, init_param_len); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dal_kdi_lock); ++ ++ ret = acp_pload_ins_jta(acp_pkg, acp_pkg_len, &pack); ++ if (ret) { ++ pr_debug("acp_pload_ins_jta() return %d\n", ret); ++ goto out; ++ } ++ ++ ta_pkg = pack.ta_pack; ++ if (!ta_pkg) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ta_pkg_size = ta_pkg - (char *)acp_pkg; ++ ++ if (ta_pkg_size < 0 || (unsigned int)ta_pkg_size > acp_pkg_len) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ta_pkg_size = acp_pkg_len - ta_pkg_size; ++ ++ ret = bh_ta_session_open(session_handle, ta_id, ta_pkg, ta_pkg_size, ++ init_param, init_param_len); ++ ++ if (ret) ++ pr_debug("bh_ta_session_open failed = %d\n", ret); ++ ++out: ++ mutex_unlock(&dal_kdi_lock); ++ ++ return to_kdi_err(ret); ++} ++EXPORT_SYMBOL(dal_create_session); ++ ++/** ++ * dal_send_and_receive - send and receive data to/from ta ++ * ++ * @session_handle: session handle ++ * @command_id: command id ++ * @input: message to be sent ++ * @input_len: sent message size ++ * @output: output param to hold a pointer to the buffer which ++ * will contain the 
received message. ++ * This buffer is allocated by DAL KDI module and freed by the user ++ * @output_len: input and output param - ++ * - input: the expected maximum length of the received message ++ * - output: size of the received message ++ * @response_code: An optional output param to hold the return value ++ * from the applet. Can be NULL. ++ * ++ * Return: 0 on success ++ * < 0 on system failure ++ * > 0 on DAL FW failure ++ */ ++int dal_send_and_receive(u64 session_handle, int command_id, const u8 *input, ++ size_t input_len, u8 **output, size_t *output_len, ++ int *response_code) ++{ ++ int ret; ++ ++ mutex_lock(&dal_kdi_lock); ++ ++ ret = bh_ta_session_command(session_handle, command_id, ++ input, input_len, ++ (void **)output, output_len, ++ response_code); ++ ++ if (ret) ++ pr_debug("bh_ta_session_command failed, status = %d\n", ret); ++ ++ mutex_unlock(&dal_kdi_lock); ++ ++ return to_kdi_err(ret); ++} ++EXPORT_SYMBOL(dal_send_and_receive); ++ ++/** ++ * dal_close_session - close ta session ++ * ++ * @session_handle: session handle ++ * ++ * Return: 0 on success ++ * <0 on system failure ++ * >0 on DAL FW failure ++ */ ++int dal_close_session(u64 session_handle) ++{ ++ int ret; ++ ++ mutex_lock(&dal_kdi_lock); ++ ++ ret = bh_ta_session_close(session_handle); ++ ++ if (ret) ++ pr_debug("hp_close_ta_session failed = %d\n", ret); ++ ++ mutex_unlock(&dal_kdi_lock); ++ ++ return to_kdi_err(ret); ++} ++EXPORT_SYMBOL(dal_close_session); ++ ++#define KDI_MAJOR_VER "1" ++#define KDI_MINOR_VER "0" ++#define KDI_HOTFIX_VER "0" ++ ++#define KDI_VERSION KDI_MAJOR_VER "." \ ++ KDI_MINOR_VER "." 
\ ++ KDI_HOTFIX_VER ++ ++/** ++ * dal_get_version_info - return DAL driver version ++ * ++ * @version_info: output param to hold DAL driver version information ++ * ++ * Return: 0 on success ++ * -EINVAL on incorrect input ++ */ ++int dal_get_version_info(struct dal_version_info *version_info) ++{ ++ if (!version_info) ++ return -EINVAL; ++ ++ memset(version_info, 0, sizeof(*version_info)); ++ snprintf(version_info->version, DAL_VERSION_LEN, "%s", KDI_VERSION); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dal_get_version_info); ++ ++/** ++ * dal_kdi_add_dev - add new dal device (one of dal_dev_type) ++ * ++ * @dev: device object which is associated with dal device ++ * @class_intf: class interface ++ * ++ * Return: 0 on success ++ * <0 on failure ++ * ++ * When new dal device is added, a new client is created for ++ * this device in kernel space interface ++ */ ++static int dal_kdi_add_dev(struct device *dev, ++ struct class_interface *class_intf) ++{ ++ int ret; ++ struct dal_device *ddev; ++ ++ ddev = to_dal_device(dev); ++ ++ mutex_lock(&ddev->context_lock); ++ ret = dal_dc_setup(ddev, DAL_INTF_KDI); ++ mutex_unlock(&ddev->context_lock); ++ return ret; ++} ++ ++/** ++ * dal_kdi_rm_dev - rm dal device (one of dal_dev_type) ++ * ++ * @dev: device object which is associated with dal device ++ * @class_intf: class interface ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static void dal_kdi_rm_dev(struct device *dev, ++ struct class_interface *class_intf) ++{ ++ struct dal_device *ddev; ++ ++ ddev = to_dal_device(dev); ++ ++ mutex_lock(&ddev->context_lock); ++ dal_dc_destroy(ddev, DAL_INTF_KDI); ++ mutex_unlock(&ddev->context_lock); ++} ++ ++/* ++ * dal_kdi_interface handles addition/removal of dal devices ++ */ ++static struct class_interface dal_kdi_interface __refdata = { ++ .add_dev = dal_kdi_add_dev, ++ .remove_dev = dal_kdi_rm_dev, ++}; ++ ++/** ++ * dal_kdi_init - initialize dal kdi ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++int 
dal_kdi_init(void) ++{ ++ int ret; ++ ++ bh_init_internal(); ++ ++ dal_kdi_interface.class = dal_class; ++ ret = class_interface_register(&dal_kdi_interface); ++ if (ret) { ++ pr_err("failed to register class interface = %d\n", ret); ++ goto err; ++ } ++ ++ return 0; ++ ++err: ++ bh_deinit_internal(); ++ return ret; ++} ++ ++/** ++ * dal_kdi_exit - dal kdi exit function ++ */ ++void dal_kdi_exit(void) ++{ ++ bh_deinit_internal(); ++ class_interface_unregister(&dal_kdi_interface); ++} +diff --git a/include/linux/dal.h b/include/linux/dal.h +new file mode 100644 +index 000000000000..e36f66eca0ce +--- /dev/null ++++ b/include/linux/dal.h +@@ -0,0 +1,57 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. ++ */ ++ ++#ifndef _DAL_H_ ++#define _DAL_H_ ++ ++#include ++#include ++ ++#define DAL_VERSION_LEN 32 ++ ++/** ++ * struct dal_version_info - dal version ++ * ++ * @version: current dal version ++ * @reserved: reserved bytes for future use ++ */ ++struct dal_version_info { ++ char version[DAL_VERSION_LEN]; ++ u32 reserved[4]; ++}; ++ ++#define DAL_KDI_SUCCESS 0x000 ++#define DAL_KDI_STATUS_INTERNAL_ERROR 0xA00 ++#define DAL_KDI_STATUS_INVALID_PARAMS 0xA01 ++#define DAL_KDI_STATUS_INVALID_HANDLE 0xA02 ++#define DAL_KDI_STATUS_NOT_INITIALIZED 0xA03 ++#define DAL_KDI_STATUS_OUT_OF_MEMORY 0xA04 ++#define DAL_KDI_STATUS_BUFFER_TOO_SMALL 0xA05 ++#define DAL_KDI_STATUS_OUT_OF_RESOURCE 0xA06 ++#define DAL_KDI_STATUS_MAX_SESSIONS_REACHED 0xA07 ++#define DAL_KDI_STATUS_UNCAUGHT_EXCEPTION 0xA08 ++#define DAL_KDI_STATUS_WD_TIMEOUT 0xA09 ++#define DAL_KDI_STATUS_APPLET_CRASHED 0xA0A ++#define DAL_KDI_STATUS_TA_NOT_FOUND 0xA0B ++#define DAL_KDI_STATUS_TA_EXIST 0xA0C ++#define DAL_KDI_STATUS_INVALID_ACP 0xA0D ++ ++#define DAL_KDI_INVALID_HANDLE 0 ++ ++int dal_get_version_info(struct dal_version_info *version_info); ++ ++int dal_create_session(u64 *session_handle, const char *app_id, ++ const u8 *acp_pkg, size_t acp_pkg_len, ++ const u8 
*init_param, size_t init_param_len); ++ ++int dal_send_and_receive(u64 session_handle, int command_id, const u8 *input, ++ size_t input_len, u8 **output, size_t *output_len, ++ int *response_code); ++ ++int dal_close_session(u64 session_handle); ++ ++int dal_uuid_parse(const char *uuid_str, uuid_t *uuid); ++ ++#endif /* _DAL_H_ */ +-- +2.17.1 + diff --git a/patches/0059-net-stmmac-introduce-external-trigger-hw-time.connectivity b/patches/0059-net-stmmac-introduce-external-trigger-hw-time.connectivity new file mode 100644 index 0000000000..2de36a4f05 --- /dev/null +++ b/patches/0059-net-stmmac-introduce-external-trigger-hw-time.connectivity @@ -0,0 +1,217 @@ +From 231ea87e164173bffb1010212adc31c0018df653 Mon Sep 17 00:00:00 2001 +From: "Tan, Tee Min" +Date: Thu, 11 Jul 2019 19:21:28 +0800 +Subject: [PATCH 059/108] net: stmmac: introduce external trigger hw + timestamping + +This adds support for External Trigger hardware timestamping. +While interrupt has been fired, ptp_time will being read and store +in ptp_clock_event with type of PTP_CLOCK_EXTTS. 
+ +1) Enable external trigger (auxts) + # echo /sys/class/ptp/ptpX/extts_enable + +Signed-off-by: Tan, Tee Min +--- + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1 + + drivers/net/ethernet/stmicro/stmmac/hwif.h | 5 +++- + .../ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 30 +++++++++++++++++++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 4 +++ + .../net/ethernet/stmicro/stmmac/stmmac_pci.c | 2 ++ + .../net/ethernet/stmicro/stmmac/stmmac_ptp.c | 20 +++++++++++++ + include/linux/stmmac.h | 2 ++ + 7 files changed, 63 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +index 9afd57996232..f0d10ec18fca 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -269,6 +269,7 @@ enum power_event { + #define GMAC_TIMESTAMP_AUXTSTRIG BIT(2) + #define GMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) + #define GMAC_TIMESTAMP_ATSNS_SHIFT 25 ++#define GMAC_TIMESTAMP_ATSSTM BIT(24) + + /* MTL registers */ + #define MTL_OPERATION_MODE 0x00000c00 +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index f2606823c3ae..3b9f12e1ea88 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -624,6 +624,7 @@ struct stmmac_serdes_ops { + stmmac_do_callback(__priv, serdes, speed_mode_2500, __args) + + struct mii_bus; ++struct stmmac_priv; + + /* PTP and HW Timer helpers */ + struct stmmac_hwtimestamp { +@@ -638,6 +639,7 @@ struct stmmac_hwtimestamp { + void (*get_arttime)(struct mii_bus *mii, int intel_adhoc_addr, + u64 *art_time); + void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); ++ void (*tstamp_interrupt)(struct stmmac_priv *priv); + }; + + #define stmmac_config_hw_tstamping(__priv, __args...) 
\ +@@ -656,6 +658,8 @@ struct stmmac_hwtimestamp { + stmmac_do_void_callback(__priv, ptp, get_arttime, __args) + #define stmmac_get_ptptime(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) ++#define stmmac_tstamp_interrupt(__priv, __args...) \ ++ stmmac_do_void_callback(__priv, ptp, tstamp_interrupt, __args) + + /* Helpers to manage the descriptors for chain and ring modes */ + struct stmmac_mode_ops { +@@ -684,7 +688,6 @@ struct stmmac_mode_ops { + #define stmmac_clean_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, clean_desc3, __args) + +-struct stmmac_priv; + struct tc_cls_u32_offload; + struct tc_cbs_qopt_offload; + struct flow_cls_offload; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +index 6a4dcaa3f501..acc742caadc4 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +@@ -188,6 +188,35 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) + *ptp_time = ns; + } + ++static void tstamp_interrupt(struct stmmac_priv *priv) ++{ ++ struct ptp_clock_event event; ++ u32 num_snapshot; ++ u32 tsync_int; ++ u64 ptp_time; ++ int i; ++ ++ tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & ++ GMAC_INT_TSIE; ++ ++ if (!tsync_int) ++ return; ++ ++ if (priv->plat->ext_snapshot_en) { ++ num_snapshot = (readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS) & ++ GMAC_TIMESTAMP_ATSNS_MASK) >> ++ GMAC_TIMESTAMP_ATSNS_SHIFT; ++ ++ for (i = 0; i < num_snapshot; i++) { ++ get_ptptime(priv->ptpaddr, &ptp_time); ++ event.type = PTP_CLOCK_EXTTS; ++ event.index = 0; ++ event.timestamp = ptp_time; ++ ptp_clock_event(priv->ptp_clock, &event); ++ } ++ } ++} ++ + const struct stmmac_hwtimestamp stmmac_ptp = { + .config_hw_tstamping = config_hw_tstamping, + .init_systime = init_systime, +@@ -197,4 +226,5 @@ const struct stmmac_hwtimestamp stmmac_ptp = { + .get_systime = get_systime, + 
.get_arttime = get_arttime, + .get_ptptime = get_ptptime, ++ .tstamp_interrupt = tstamp_interrupt, + }; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 58da7a5955cf..9427a9363e35 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -4327,6 +4327,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) + else + netif_carrier_off(priv->dev); + } ++ ++ stmmac_tstamp_interrupt(priv, priv); + } + } + +@@ -4471,6 +4473,8 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) + + stmmac_napi_check(priv, chan, DMA_DIR_RX); + ++ stmmac_tstamp_interrupt(priv, priv); ++ + return IRQ_HANDLED; + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +index 99a3dafe4d68..ac7bde9ec082 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +@@ -277,6 +277,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, + plat->tils = 0; + + plat->int_snapshot_num = AUX_SNAPSHOT1; ++ plat->ext_snapshot_num = AUX_SNAPSHOT0; ++ plat->ext_snapshot_en = 0; + + return 0; + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +index 18355e819075..dd8c891c018a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +@@ -136,9 +136,11 @@ static int stmmac_enable(struct ptp_clock_info *ptp, + { + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); ++ void __iomem *ptpaddr = priv->ptpaddr; + struct stmmac_pps_cfg *cfg; + int ret = -EOPNOTSUPP; + unsigned long flags; ++ u32 acr_value; + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: +@@ -156,6 +158,23 @@ static int stmmac_enable(struct ptp_clock_info *ptp, + priv->systime_flags); + 
spin_unlock_irqrestore(&priv->ptp_lock, flags); + break; ++ case PTP_CLK_REQ_EXTTS: ++ priv->plat->ext_snapshot_en = on; ++ acr_value = readl(ptpaddr + PTP_ACR); ++ acr_value &= ~PTP_ACR_MASK; ++ if (on) { ++ /* Enable External snapshot trigger */ ++ acr_value |= priv->plat->ext_snapshot_num; ++ acr_value |= PTP_ACR_ATSFC; ++ pr_info("Auxiliary Snapshot %d enable\n", ++ priv->plat->ext_snapshot_num >> 5); ++ } else { ++ pr_info("Auxiliary Snapshot %d disable\n", ++ priv->plat->ext_snapshot_num >> 5); ++ } ++ writel(acr_value, ptpaddr + PTP_ACR); ++ ret = 0; ++ break; + default: + break; + } +@@ -323,6 +342,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv) + stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; + + stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; ++ stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; + + spin_lock_init(&priv->ptp_lock); + priv->ptp_clock_ops = stmmac_ptp_clock_ops; +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 092f8720c79e..a184ce71d1d9 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -220,5 +220,7 @@ struct plat_stmmacenet_data { + u32 fgos; + bool has_art; + int int_snapshot_num; ++ int ext_snapshot_num; ++ int ext_snapshot_en; + }; + #endif +-- +2.17.1 + diff --git a/patches/0059-vhm-add-sos-offline-cpu-support.acrn b/patches/0059-vhm-add-sos-offline-cpu-support.acrn new file mode 100644 index 0000000000..0cff101306 --- /dev/null +++ b/patches/0059-vhm-add-sos-offline-cpu-support.acrn @@ -0,0 +1,131 @@ +From f21ff8c7e6d49ecfd9a05be2046c01a9b0e19f28 Mon Sep 17 00:00:00 2001 +From: Jason Chen CJ +Date: Fri, 31 Aug 2018 10:59:01 +0800 +Subject: [PATCH 059/150] vhm: add sos offline cpu support + +add sysfs with attr "offline_cpu", use + echo cpu_id > /sys/class/vhm/acrn_vhm/offline_cpu +to do the hypercall offline/destroy according vcpu. 
+before doing it, please make sure you already did cpu offline with +standard flow like below: + echo 0 > /sys/devices/system/cpu/cpuX/online + +Signed-off-by: Jason Chen CJ +Reviewed-by: Zhao Yakui +Acked-by: Eddie Dong +--- + drivers/char/vhm/vhm_dev.c | 41 +++++++++++++++++++++++++++++++ + drivers/vhm/vhm_hypercall.c | 5 ++++ + include/linux/vhm/acrn_hv_defs.h | 1 + + include/linux/vhm/vhm_hypercall.h | 1 + + 4 files changed, 48 insertions(+) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 4eb5a1636d7a..dceeeee90220 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -556,6 +556,41 @@ static const struct file_operations fops = { + .poll = vhm_dev_poll, + }; + ++static ssize_t ++store_offline_cpu(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++#ifdef CONFIG_X86 ++ u64 cpu, lapicid; ++ ++ if (kstrtoull(buf, 0, &cpu) < 0) ++ return -EINVAL; ++ ++ if (cpu_possible(cpu)) { ++ lapicid = cpu_data(cpu).apicid; ++ pr_info("vhm: try to offline cpu %lld with lapicid %lld\n", ++ cpu, lapicid); ++ if (hcall_sos_offline_cpu(lapicid) < 0) { ++ pr_err("vhm: failed to offline cpu from Hypervisor!\n"); ++ return -EINVAL; ++ } ++ } ++#endif ++ return count; ++} ++ ++static DEVICE_ATTR(offline_cpu, S_IWUSR, NULL, store_offline_cpu); ++ ++static struct attribute *vhm_attrs[] = { ++ &dev_attr_offline_cpu.attr, ++ NULL ++}; ++ ++static struct attribute_group vhm_attr_group = { ++ .attrs = vhm_attrs, ++}; ++ + #define SUPPORT_HV_API_VERSION_MAJOR 1 + #define SUPPORT_HV_API_VERSION_MINOR 0 + static int __init vhm_init(void) +@@ -619,6 +654,11 @@ static int __init vhm_init(void) + x86_platform_ipi_callback = vhm_intr_handler; + local_irq_restore(flag); + ++ if (sysfs_create_group(&vhm_device->kobj, &vhm_attr_group)) { ++ pr_warn("vhm: sysfs create failed\n"); ++ return -EINVAL; ++ } ++ + pr_info("vhm: Virtio & Hypervisor service module initialized\n"); + return 0; + } +@@ -629,6 +669,7 
@@ static void __exit vhm_exit(void) + class_unregister(vhm_class); + class_destroy(vhm_class); + unregister_chrdev(major, DEVICE_NAME); ++ sysfs_remove_group(&vhm_device->kobj, &vhm_attr_group); + pr_info("vhm: exit\n"); + } + +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index 9819ab95beaa..639ea60472a7 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -52,6 +52,11 @@ + #include + #include + ++inline long hcall_sos_offline_cpu(unsigned long cpu) ++{ ++ return acrn_hypercall1(HC_SOS_OFFLINE_CPU, cpu); ++} ++ + inline long hcall_get_api_version(unsigned long api_version) + { + return acrn_hypercall1(HC_GET_API_VERSION, api_version); +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 8873f67dac40..f20f3afb8e89 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -68,6 +68,7 @@ + /* general */ + #define HC_ID_GEN_BASE 0x0UL + #define HC_GET_API_VERSION _HC_ID(HC_ID, HC_ID_GEN_BASE + 0x00) ++#define HC_SOS_OFFLINE_CPU _HC_ID(HC_ID, HC_ID_GEN_BASE + 0x01) + + /* VM management */ + #define HC_ID_VM_BASE 0x10UL +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index be60df1c6f66..5d2dc5a7a1af 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -137,6 +137,7 @@ static inline long acrn_hypercall4(unsigned long hcall_id, unsigned long param1, + return result; + } + ++inline long hcall_sos_offline_cpu(unsigned long cpu); + inline long hcall_get_api_version(unsigned long api_version); + inline long hcall_create_vm(unsigned long vminfo); + inline long hcall_start_vm(unsigned long vmid); +-- +2.17.1 + diff --git a/patches/0060-ASoC-Intel-Skylake-Add-debugfs-NHLT-ssp-override.audio b/patches/0060-ASoC-Intel-Skylake-Add-debugfs-NHLT-ssp-override.audio new file mode 100644 index 0000000000..7d9b5c2d0a --- /dev/null +++ 
b/patches/0060-ASoC-Intel-Skylake-Add-debugfs-NHLT-ssp-override.audio @@ -0,0 +1,169 @@ +From 6ed7ae1d59f632b1bfdfb0d2fe8602736d292561 Mon Sep 17 00:00:00 2001 +From: Vinod Koul +Date: Mon, 31 Aug 2015 14:12:14 +0530 +Subject: [PATCH 060/193] ASoC: Intel: Skylake: Add debugfs NHLT ssp override + +Add debugfs entries for reading and writing SSP blobs which +driver can use to program DSP + +Change-Id: I144a779bba4679b653c2a17b7e875a66700e97a2 +Signed-off-by: Vinod Koul +--- + sound/soc/intel/skylake/skl-debug.c | 92 +++++++++++++++++++++++++++-- + 1 file changed, 88 insertions(+), 4 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index a63926d00ecd..0266744e9022 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -14,10 +14,17 @@ + #include "skl-topology.h" + #include "../common/sst-dsp.h" + #include "../common/sst-dsp-priv.h" ++#include "skl-nhlt.h" + + #define MOD_BUF PAGE_SIZE + #define FW_REG_BUF PAGE_SIZE + #define FW_REG_SIZE 0x60 ++#define MAX_SSP 6 ++ ++struct nhlt_blob { ++ size_t size; ++ struct nhlt_specific_cfg *cfg; ++}; + + struct skl_debug { + struct skl_dev *skl; +@@ -28,6 +35,7 @@ struct skl_debug { + struct dentry *modules; + struct dentry *nhlt; + u8 fw_read_buff[FW_REG_BUF]; ++ struct nhlt_blob ssp_blob[MAX_SSP]; + }; + + /** +@@ -216,7 +224,6 @@ static const struct file_operations mcfg_fops = { + .llseek = default_llseek, + }; + +- + void skl_debug_init_module(struct skl_debug *d, + struct snd_soc_dapm_widget *w, + struct skl_module_cfg *mconfig) +@@ -460,6 +467,70 @@ static int skl_debugfs_init_ipc(struct skl_debug *d) + return 0; + } + ++static ssize_t nhlt_read(struct file *file, char __user *user_buf, ++ size_t count, loff_t *ppos) ++{ ++ struct nhlt_blob *blob = file->private_data; ++ ++ if (!blob->cfg) ++ return -EIO; ++ ++ return simple_read_from_buffer(user_buf, count, ppos, ++ blob->cfg, blob->size); ++} ++ ++static ssize_t 
nhlt_write(struct file *file, ++ const char __user *user_buf, size_t count, loff_t *ppos) ++{ ++ struct nhlt_blob *blob = file->private_data; ++ struct nhlt_specific_cfg *new_cfg; ++ ssize_t written; ++ size_t size = blob->size; ++ ++ if (!blob->cfg) { ++ /* allocate mem for blob */ ++ blob->cfg = kzalloc(count, GFP_KERNEL); ++ if (!blob->cfg) ++ return -ENOMEM; ++ size = count; ++ } else if (blob->size < count) { ++ /* size if different, so relloc */ ++ new_cfg = krealloc(blob->cfg, count, GFP_KERNEL); ++ if (!new_cfg) ++ return -ENOMEM; ++ size = count; ++ blob->cfg = new_cfg; ++ } ++ ++ written = simple_write_to_buffer(blob->cfg, size, ppos, ++ user_buf, count); ++ blob->size = written; ++ ++ /* Userspace has been fiddling around behind the kernel's back */ ++ add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE); ++ ++ print_hex_dump(KERN_DEBUG, "Debugfs Blob:", DUMP_PREFIX_OFFSET, 8, 4, ++ blob->cfg, blob->size, false); ++ ++ return written; ++} ++ ++static const struct file_operations nhlt_fops = { ++ .open = simple_open, ++ .read = nhlt_read, ++ .write = nhlt_write, ++ .llseek = default_llseek, ++}; ++ ++static void skl_exit_nhlt(struct skl_debug *d) ++{ ++ int i; ++ ++ /* free blob memory, if allocated */ ++ for (i = 0; i < MAX_SSP; i++) ++ kfree(d->ssp_blob[i].cfg); ++} ++ + static ssize_t nhlt_control_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) + { +@@ -483,12 +554,14 @@ static ssize_t nhlt_control_write(struct file *file, + return -EFAULT; + buf[len] = 0; + +- if (!strncmp(buf, "enable\n", len)) ++ if (!strncmp(buf, "enable\n", len)) { + d->skl->nhlt_override = true; +- else if (!strncmp(buf, "disable\n", len)) ++ } else if (!strncmp(buf, "disable\n", len)) { + d->skl->nhlt_override = false; +- else ++ skl_exit_nhlt(d); ++ } else { + return -EINVAL; ++ } + + /* Userspace has been fiddling around behind the kernel's back */ + add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE); +@@ -505,6 +578,9 @@ static const struct file_operations 
ssp_cntrl_nhlt_fops = { + + static int skl_init_nhlt(struct skl_debug *d) + { ++ int i; ++ char name[12]; ++ + if (!debugfs_create_file("control", + 0644, d->nhlt, + d, &ssp_cntrl_nhlt_fops)) { +@@ -512,6 +588,14 @@ static int skl_init_nhlt(struct skl_debug *d) + return -EIO; + } + ++ for (i = 0; i < MAX_SSP; i++) { ++ snprintf(name, (sizeof(name)-1), "ssp%d", i); ++ if (!debugfs_create_file(name, ++ 0644, d->nhlt, ++ &d->ssp_blob[i], &nhlt_fops)) ++ dev_err(d->dev, "%s: debugfs init failed\n", name); ++ } ++ + return 0; + } + +-- +2.17.1 + diff --git a/patches/0060-Fix-compilation-errors-when-rebase-to-v4.19-rc1.trusty b/patches/0060-Fix-compilation-errors-when-rebase-to-v4.19-rc1.trusty new file mode 100644 index 0000000000..00f61a39e9 --- /dev/null +++ b/patches/0060-Fix-compilation-errors-when-rebase-to-v4.19-rc1.trusty @@ -0,0 +1,62 @@ +From 931f2b6123fded76ebae2bbf326856b1f9ffb399 Mon Sep 17 00:00:00 2001 +From: Zhou Furong +Date: Mon, 27 Aug 2018 15:15:51 +0800 +Subject: [PATCH 60/63] Fix compilation errors when rebase to v4.19-rc1. + +Include header file of_platform.h when update kernel to v4.19-rc1. 
+ +Change-Id: I732913061fed8ab14edddb40544df370e19edc54 +--- + drivers/trusty/trusty-log.c | 4 ++++ + drivers/trusty/trusty-timer.c | 4 ++++ + drivers/trusty/trusty-wall.c | 4 ++++ + 3 files changed, 12 insertions(+) + +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +index d2446a1f34c9..48883439dce2 100755 +--- a/drivers/trusty/trusty-log.c ++++ b/drivers/trusty/trusty-log.c +@@ -12,6 +12,10 @@ + * + */ + #include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) ++#include ++#endif + #include + #include + #include +diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c +index 6783a30b4a11..ca6ea5799eeb 100644 +--- a/drivers/trusty/trusty-timer.c ++++ b/drivers/trusty/trusty-timer.c +@@ -15,6 +15,10 @@ + #include + #include + #include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) ++#include ++#endif + #include + #include + #include +diff --git a/drivers/trusty/trusty-wall.c b/drivers/trusty/trusty-wall.c +index 2345f56a6405..812ac2a3ea98 100644 +--- a/drivers/trusty/trusty-wall.c ++++ b/drivers/trusty/trusty-wall.c +@@ -13,6 +13,10 @@ + * + */ + #include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) ++#include ++#endif + #include + #include + #include +-- +2.17.1 + diff --git a/patches/0060-drm-meson-dw_hdmi-add-resume-suspend-hooks.drm b/patches/0060-drm-meson-dw_hdmi-add-resume-suspend-hooks.drm new file mode 100644 index 0000000000..c05a3f0581 --- /dev/null +++ b/patches/0060-drm-meson-dw_hdmi-add-resume-suspend-hooks.drm @@ -0,0 +1,174 @@ +From 4cdb350fa28b63ddc8a331a692a82b3318eef684 Mon Sep 17 00:00:00 2001 +From: Neil Armstrong +Date: Tue, 27 Aug 2019 11:58:24 +0200 +Subject: [PATCH 060/690] drm/meson: dw_hdmi: add resume/suspend hooks + +Add the suspend and resume hooks to: +- reset the whole HDMI glue and HDMI controller on suspend +- re-init the HDMI glue and HDMI controller on resume + +The HDMI glue init is refactored to be re-used from the resume hook. 
+ +It makes usage of dw_hdmi_resume() to recover a functionnal DDC bus. + +Signed-off-by: Neil Armstrong +Acked-by: Daniel Vetter +Tested-by: Kevin Hilman +[narmstrong: fixed typo in commit log, and rebased on drm-misc-next] +Link: https://patchwork.freedesktop.org/patch/msgid/20190827095825.21015-2-narmstrong@baylibre.com +--- + drivers/gpu/drm/meson/meson_dw_hdmi.c | 110 ++++++++++++++++++-------- + 1 file changed, 76 insertions(+), 34 deletions(-) + +diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c +index 68bbd987147b..022286dc6ab2 100644 +--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c ++++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c +@@ -802,6 +802,47 @@ static bool meson_hdmi_connector_is_available(struct device *dev) + return false; + } + ++static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) ++{ ++ struct meson_drm *priv = meson_dw_hdmi->priv; ++ ++ /* Enable clocks */ ++ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); ++ ++ /* Bring HDMITX MEM output of power down */ ++ regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0); ++ ++ /* Reset HDMITX APB & TX & PHY */ ++ reset_control_reset(meson_dw_hdmi->hdmitx_apb); ++ reset_control_reset(meson_dw_hdmi->hdmitx_ctrl); ++ reset_control_reset(meson_dw_hdmi->hdmitx_phy); ++ ++ /* Enable APB3 fail on error */ ++ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { ++ writel_bits_relaxed(BIT(15), BIT(15), ++ meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG); ++ writel_bits_relaxed(BIT(15), BIT(15), ++ meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG); ++ } ++ ++ /* Bring out of reset */ ++ meson_dw_hdmi->data->top_write(meson_dw_hdmi, ++ HDMITX_TOP_SW_RESET, 0); ++ ++ msleep(20); ++ ++ meson_dw_hdmi->data->top_write(meson_dw_hdmi, ++ HDMITX_TOP_CLK_CNTL, 0xff); ++ ++ /* Enable HDMI-TX Interrupt */ ++ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, ++ HDMITX_TOP_INTR_CORE); ++ ++ meson_dw_hdmi->data->top_write(meson_dw_hdmi, 
HDMITX_TOP_INTR_MASKN, ++ HDMITX_TOP_INTR_CORE); ++ ++} ++ + static int meson_dw_hdmi_bind(struct device *dev, struct device *master, + void *data) + { +@@ -925,40 +966,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, + + DRM_DEBUG_DRIVER("encoder initialized\n"); + +- /* Enable clocks */ +- regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); +- +- /* Bring HDMITX MEM output of power down */ +- regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0); +- +- /* Reset HDMITX APB & TX & PHY */ +- reset_control_reset(meson_dw_hdmi->hdmitx_apb); +- reset_control_reset(meson_dw_hdmi->hdmitx_ctrl); +- reset_control_reset(meson_dw_hdmi->hdmitx_phy); +- +- /* Enable APB3 fail on error */ +- if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { +- writel_bits_relaxed(BIT(15), BIT(15), +- meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG); +- writel_bits_relaxed(BIT(15), BIT(15), +- meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG); +- } +- +- /* Bring out of reset */ +- meson_dw_hdmi->data->top_write(meson_dw_hdmi, +- HDMITX_TOP_SW_RESET, 0); +- +- msleep(20); +- +- meson_dw_hdmi->data->top_write(meson_dw_hdmi, +- HDMITX_TOP_CLK_CNTL, 0xff); +- +- /* Enable HDMI-TX Interrupt */ +- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, +- HDMITX_TOP_INTR_CORE); +- +- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN, +- HDMITX_TOP_INTR_CORE); ++ meson_dw_hdmi_init(meson_dw_hdmi); + + /* Bridge / Connector */ + +@@ -994,6 +1002,34 @@ static const struct component_ops meson_dw_hdmi_ops = { + .unbind = meson_dw_hdmi_unbind, + }; + ++static int __maybe_unused meson_dw_hdmi_pm_suspend(struct device *dev) ++{ ++ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev); ++ ++ if (!meson_dw_hdmi) ++ return 0; ++ ++ /* Reset TOP */ ++ meson_dw_hdmi->data->top_write(meson_dw_hdmi, ++ HDMITX_TOP_SW_RESET, 0); ++ ++ return 0; ++} ++ ++static int __maybe_unused meson_dw_hdmi_pm_resume(struct device *dev) ++{ ++ 
struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev); ++ ++ if (!meson_dw_hdmi) ++ return 0; ++ ++ meson_dw_hdmi_init(meson_dw_hdmi); ++ ++ dw_hdmi_resume(meson_dw_hdmi->hdmi); ++ ++ return 0; ++} ++ + static int meson_dw_hdmi_probe(struct platform_device *pdev) + { + return component_add(&pdev->dev, &meson_dw_hdmi_ops); +@@ -1006,6 +1042,11 @@ static int meson_dw_hdmi_remove(struct platform_device *pdev) + return 0; + } + ++static const struct dev_pm_ops meson_dw_hdmi_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(meson_dw_hdmi_pm_suspend, ++ meson_dw_hdmi_pm_resume) ++}; ++ + static const struct of_device_id meson_dw_hdmi_of_table[] = { + { .compatible = "amlogic,meson-gxbb-dw-hdmi", + .data = &meson_dw_hdmi_gx_data }, +@@ -1025,6 +1066,7 @@ static struct platform_driver meson_dw_hdmi_platform_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = meson_dw_hdmi_of_table, ++ .pm = &meson_dw_hdmi_pm_ops, + }, + }; + module_platform_driver(meson_dw_hdmi_platform_driver); +-- +2.17.1 + diff --git a/patches/0060-mei-dal-add-exclusive-access-management.security b/patches/0060-mei-dal-add-exclusive-access-management.security new file mode 100644 index 0000000000..02e6ad0157 --- /dev/null +++ b/patches/0060-mei-dal-add-exclusive-access-management.security @@ -0,0 +1,466 @@ +From 09784d0036e40da4f4f7d7a350adf94a0728f4fc Mon Sep 17 00:00:00 2001 +From: Yael Samet +Date: Mon, 11 Sep 2017 09:24:28 +0300 +Subject: [PATCH 60/65] mei: dal: add exclusive access management + +The exclusive access option allows a kernel space client to restrict +access to specified trusted application. +When some applet was marked as exclusive by a kernel space client, +no user space client will be allowed to create a session with this applet. 
+ +Change-Id: Ib9397bf9c5a0cd911cf9ca577302c99384782629 +Signed-off-by: Yael Samet +--- + drivers/misc/mei/dal/Makefile | 1 + + drivers/misc/mei/dal/dal_class.c | 39 +++++ + drivers/misc/mei/dal/dal_dev.h | 9 ++ + drivers/misc/mei/dal/dal_kdi.c | 79 +++++++++ + drivers/misc/mei/dal/dal_ta_access.c | 234 +++++++++++++++++++++++++++ + include/linux/dal.h | 2 + + 6 files changed, 364 insertions(+) + create mode 100644 drivers/misc/mei/dal/dal_ta_access.c + +diff --git a/drivers/misc/mei/dal/Makefile b/drivers/misc/mei/dal/Makefile +index 829e0e3727fe..e4e552953265 100644 +--- a/drivers/misc/mei/dal/Makefile ++++ b/drivers/misc/mei/dal/Makefile +@@ -10,3 +10,4 @@ mei_dal-objs += bh_internal.o + mei_dal-objs += dal_class.o + mei_dal-objs += dal_cdev.o + mei_dal-objs += dal_kdi.o ++mei_dal-objs += dal_ta_access.o +diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c +index 6a82f7d2daa9..b5a0201a1f73 100644 +--- a/drivers/misc/mei/dal/dal_class.c ++++ b/drivers/misc/mei/dal/dal_class.c +@@ -299,6 +299,39 @@ static int dal_send_error_access_denied(struct dal_client *dc, const void *cmd) + return ret; + } + ++/** ++ * dal_validate_access - validate that the access is permitted. 
++ * ++ * in case of open session command, validate that the client has the permissions ++ * to open session to the requested ta ++ * ++ * @hdr: command header ++ * @count: message size ++ * @ctx: context (not used) ++ * ++ * Return: 0 when command is permitted ++ * -EINVAL when message is invalid ++ * -EPERM when access is not permitted ++ * ++ * Locking: called under "ddev->write_lock" lock ++ */ ++static int dal_validate_access(const struct bh_command_header *hdr, ++ size_t count, void *ctx) ++{ ++ struct dal_client *dc = ctx; ++ struct dal_device *ddev = dc->ddev; ++ const uuid_t *ta_id; ++ ++ if (!bh_msg_is_cmd_open_session(hdr)) ++ return 0; ++ ++ ta_id = bh_open_session_ta_id(hdr, count); ++ if (!ta_id) ++ return -EINVAL; ++ ++ return dal_access_policy_allowed(ddev, ta_id, dc); ++} ++ + /** + * dal_is_kdi_msg - check if sequence is in kernel space sequence range + * +@@ -346,6 +379,7 @@ static int dal_validate_seq(const struct bh_command_header *hdr, + * has the permissions to send it + */ + static const bh_filter_func dal_write_filter_tbl[] = { ++ dal_validate_access, + dal_validate_seq, + NULL, + }; +@@ -641,6 +675,7 @@ static void dal_device_release(struct device *dev) + { + struct dal_device *ddev = to_dal_device(dev); + ++ dal_access_list_free(ddev); + kfree(ddev->bh_fw_msg.msg); + kfree(ddev); + } +@@ -692,6 +727,10 @@ static int dal_probe(struct mei_cl_device *cldev, + goto err_unregister; + } + ++ ret = dal_access_list_init(ddev); ++ if (ret) ++ goto err_unregister; ++ + ret = dal_mei_enable(ddev); + if (ret < 0) + goto err_unregister; +diff --git a/drivers/misc/mei/dal/dal_dev.h b/drivers/misc/mei/dal/dal_dev.h +index d5480bd0e43d..1cf178786f42 100644 +--- a/drivers/misc/mei/dal/dal_dev.h ++++ b/drivers/misc/mei/dal/dal_dev.h +@@ -152,4 +152,13 @@ int dal_kdi_recv(unsigned int handle, unsigned char *buf, size_t *count); + int dal_kdi_init(void); + void dal_kdi_exit(void); + ++int dal_access_policy_add(struct dal_device *ddev, ++ const uuid_t *ta_id, 
void *owner); ++int dal_access_policy_remove(struct dal_device *ddev, ++ const uuid_t *ta_id, void *owner); ++int dal_access_policy_allowed(struct dal_device *ddev, ++ const uuid_t *ta_id, void *owner); ++void dal_access_list_free(struct dal_device *ddev); ++int dal_access_list_init(struct dal_device *ddev); ++ + #endif /* _DAL_KDI_H_ */ +diff --git a/drivers/misc/mei/dal/dal_kdi.c b/drivers/misc/mei/dal/dal_kdi.c +index 92d571c2af39..e9805596c91a 100644 +--- a/drivers/misc/mei/dal/dal_kdi.c ++++ b/drivers/misc/mei/dal/dal_kdi.c +@@ -359,6 +359,85 @@ int dal_close_session(u64 session_handle) + } + EXPORT_SYMBOL(dal_close_session); + ++/** ++ * dal_set_ta_exclusive_access - set client to be owner of the ta, ++ * so no one else (especially user space client) ++ * will be able to open session to it ++ * ++ * @ta_id: trusted application (ta) id ++ * ++ * Return: 0 on success ++ * -ENODEV when the device can't be found ++ * -ENOMEM on memory allocation failure ++ * -EPERM when ta is owned by another client ++ * -EEXIST when ta is already owned by current client ++ */ ++int dal_set_ta_exclusive_access(const uuid_t *ta_id) ++{ ++ struct dal_device *ddev; ++ struct device *dev; ++ struct dal_client *dc; ++ int ret; ++ ++ mutex_lock(&dal_kdi_lock); ++ ++ dev = dal_find_dev(DAL_MEI_DEVICE_IVM); ++ if (!dev) { ++ dev_dbg(dev, "can't find device\n"); ++ ret = -ENODEV; ++ goto unlock; ++ } ++ ++ ddev = to_dal_device(dev); ++ dc = ddev->clients[DAL_INTF_KDI]; ++ ++ ret = dal_access_policy_add(ddev, ta_id, dc); ++ ++ put_device(dev); ++unlock: ++ mutex_unlock(&dal_kdi_lock); ++ return ret; ++} ++EXPORT_SYMBOL(dal_set_ta_exclusive_access); ++ ++/** ++ * dal_unset_ta_exclusive_access - unset client from owning ta ++ * ++ * @ta_id: trusted application (ta) id ++ * ++ * Return: 0 on success ++ * -ENODEV when the device can't be found ++ * -ENOENT when ta isn't found in exclusiveness ta list ++ * -EPERM when ta is owned by another client ++ */ ++int dal_unset_ta_exclusive_access(const 
uuid_t *ta_id) ++{ ++ struct dal_device *ddev; ++ struct device *dev; ++ struct dal_client *dc; ++ int ret; ++ ++ mutex_lock(&dal_kdi_lock); ++ ++ dev = dal_find_dev(DAL_MEI_DEVICE_IVM); ++ if (!dev) { ++ dev_dbg(dev, "can't find device\n"); ++ ret = -ENODEV; ++ goto unlock; ++ } ++ ++ ddev = to_dal_device(dev); ++ dc = ddev->clients[DAL_INTF_KDI]; ++ ++ ret = dal_access_policy_remove(ddev, ta_id, dc); ++ ++ put_device(dev); ++unlock: ++ mutex_unlock(&dal_kdi_lock); ++ return ret; ++} ++EXPORT_SYMBOL(dal_unset_ta_exclusive_access); ++ + #define KDI_MAJOR_VER "1" + #define KDI_MINOR_VER "0" + #define KDI_HOTFIX_VER "0" +diff --git a/drivers/misc/mei/dal/dal_ta_access.c b/drivers/misc/mei/dal/dal_ta_access.c +new file mode 100644 +index 000000000000..23f5b424de62 +--- /dev/null ++++ b/drivers/misc/mei/dal/dal_ta_access.c +@@ -0,0 +1,234 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2016-2019, Intel Corporation. ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include "dal_dev.h" ++ ++/* Spooler UUID */ ++static const uuid_t spooler_ta_id = UUID_INIT(0xba8d1643, 0x50b6, 0x49cc, ++ 0x86, 0x1d, 0x2c, 0x01, ++ 0xbe, 0xd1, 0x4b, 0xe8); ++ ++/** ++ * struct dal_access_policy - ta access information node ++ * ++ * @list: link in access list ++ * @ta_id: trusted application id ++ * @owner: owner of ta ++ */ ++struct dal_access_policy { ++ struct list_head list; ++ uuid_t ta_id; ++ void *owner; ++}; ++ ++/** ++ * dal_dev_get_access_list - get access list of dal device ++ * ++ * @ddev: dal device ++ * ++ * Return: pointer to access list ++ */ ++static struct list_head *dal_dev_get_access_list(struct dal_device *ddev) ++{ ++ return dev_get_drvdata(&ddev->dev); ++} ++ ++/** ++ * dal_access_policy_alloc - allocate memory and initialize access list node ++ * ++ * @ta_id: trusted application id ++ * @owner: owner of ta ++ * ++ * Return: pointer to the new initialized access list node ++ * ++ * Locking: called under "kdi_lock" lock ++ */ ++static struct 
dal_access_policy * ++dal_access_policy_alloc(const uuid_t *ta_id, void *owner) ++{ ++ struct dal_access_policy *e; ++ ++ e = kzalloc(sizeof(*e), GFP_KERNEL); ++ if (!e) ++ return NULL; ++ ++ INIT_LIST_HEAD(&e->list); ++ e->ta_id = *ta_id; ++ e->owner = owner; ++ ++ return e; ++} ++ ++/** ++ * dal_access_policy_find - find ta id in access list ++ * ++ * @access_list: access list ++ * @ta_id: trusted application id ++ * ++ * Return: pointer to access list node of ta ++ * NULL if ta is not found in access list ++ */ ++static struct dal_access_policy * ++dal_access_policy_find(struct list_head *access_list, const uuid_t *ta_id) ++{ ++ struct dal_access_policy *e; ++ ++ list_for_each_entry(e, access_list, list) { ++ if (uuid_equal(&e->ta_id, ta_id)) ++ return e; ++ } ++ return NULL; ++} ++ ++/** ++ * dal_access_policy_add - add access information of ta and its owner ++ * ++ * @ddev: dal device ++ * @ta_id: trusted application id ++ * @owner: owner of ta ++ * ++ * Return: 0 on success ++ * -ENOMEM on memory allocation failure ++ * -EPERM when ta already has another owner ++ * -EEXIST when access information already exists (same ta and owner) ++ * ++ * Locking: called under "kdi_lock" lock ++ */ ++int dal_access_policy_add(struct dal_device *ddev, ++ const uuid_t *ta_id, void *owner) ++{ ++ struct list_head *access_list = dal_dev_get_access_list(ddev); ++ struct dal_access_policy *e; ++ ++ e = dal_access_policy_find(access_list, ta_id); ++ if (e) { ++ if (!e->owner) ++ return -EPERM; ++ ++ return -EEXIST; ++ } ++ ++ e = dal_access_policy_alloc(ta_id, owner); ++ if (!e) ++ return -ENOMEM; ++ ++ list_add_tail(&e->list, access_list); ++ return 0; ++} ++ ++/** ++ * dal_access_policy_remove - remove access information of ta and its owner ++ * ++ * @ddev: dal device ++ * @ta_id: trusted application id ++ * @owner: owner of ta ++ * ++ * Return: 0 on success ++ * -ENOENT when ta isn't found in access list ++ * -EPERM when ta has another owner ++ * ++ * Locking: called under 
"kdi_lock" lock ++ */ ++int dal_access_policy_remove(struct dal_device *ddev, ++ const uuid_t *ta_id, void *owner) ++{ ++ struct list_head *access_list = dal_dev_get_access_list(ddev); ++ struct dal_access_policy *e; ++ ++ e = dal_access_policy_find(access_list, ta_id); ++ if (!e) ++ return -ENOENT; ++ ++ if (!e->owner || e->owner != owner) ++ return -EPERM; ++ ++ list_del(&e->list); ++ kfree(e); ++ return 0; ++} ++ ++/** ++ * dal_access_policy_allowed - check if owner is allowed to use ta ++ * ++ * @ddev: dal device ++ * @ta_id: trusted application id ++ * @owner: owner ++ * ++ * Return: 0 on success ++ * -EPERM when owner is not allowed to use ta ++ * ++ * Locking: called under "ddev->write_lock" lock ++ */ ++int dal_access_policy_allowed(struct dal_device *ddev, ++ const uuid_t *ta_id, void *owner) ++{ ++ struct list_head *access_list = dal_dev_get_access_list(ddev); ++ struct dal_access_policy *e; ++ ++ e = dal_access_policy_find(access_list, ta_id); ++ if (!e) ++ return 0; ++ ++ if (e->owner && e->owner != owner) ++ return -EPERM; ++ ++ return 0; ++} ++ ++/** ++ * dal_access_list_free - free memory of access list ++ * ++ * @ddev: dal device ++ */ ++void dal_access_list_free(struct dal_device *ddev) ++{ ++ struct list_head *access_list = dal_dev_get_access_list(ddev); ++ struct dal_access_policy *e, *n; ++ ++ if (!access_list) ++ return; ++ ++ list_for_each_entry_safe(e, n, access_list, list) { ++ list_del(&e->list); ++ kfree(e); ++ } ++ ++ kfree(access_list); ++ dev_set_drvdata(&ddev->dev, NULL); ++} ++ ++/** ++ * dal_access_list_init - initialize an empty access list ++ * ++ * @ddev: dal device ++ * ++ * Note: Add spooler ta id with blank owner to the list. ++ * This will prevent any user from setting itself owner of the spooler, ++ * which will block others from openning session to it. 
++ * ++ * Return: 0 on success ++ * -ENOMEM on memory allocation failure ++ */ ++int dal_access_list_init(struct dal_device *ddev) ++{ ++ struct list_head *access_list; ++ ++ access_list = kzalloc(sizeof(*access_list), GFP_KERNEL); ++ if (!access_list) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(access_list); ++ dev_set_drvdata(&ddev->dev, access_list); ++ ++ /* Nobody can own SPOOLER TA */ ++ dal_access_policy_add(ddev, &spooler_ta_id, NULL); ++ ++ return 0; ++} +diff --git a/include/linux/dal.h b/include/linux/dal.h +index e36f66eca0ce..4f5310655668 100644 +--- a/include/linux/dal.h ++++ b/include/linux/dal.h +@@ -52,6 +52,8 @@ int dal_send_and_receive(u64 session_handle, int command_id, const u8 *input, + + int dal_close_session(u64 session_handle); + ++int dal_set_ta_exclusive_access(const uuid_t *ta_id); ++int dal_unset_ta_exclusive_access(const uuid_t *ta_id); + int dal_uuid_parse(const char *uuid_str, uuid_t *uuid); + + #endif /* _DAL_H_ */ +-- +2.17.1 + diff --git a/patches/0060-net-stmmac-Enabling-64-bits-DMA-addressing.connectivity b/patches/0060-net-stmmac-Enabling-64-bits-DMA-addressing.connectivity new file mode 100644 index 0000000000..33191bcb15 --- /dev/null +++ b/patches/0060-net-stmmac-Enabling-64-bits-DMA-addressing.connectivity @@ -0,0 +1,248 @@ +From dbfe5868bb554ca34d8b47ed67cd85e5731b5312 Mon Sep 17 00:00:00 2001 +From: Aashish Verma +Date: Sat, 3 Aug 2019 18:13:11 +0800 +Subject: [PATCH 060/108] net: stmmac: Enabling 64-bits DMA addressing + +Currently, stmmac only supports 32 bits DMA addressing. Enable the +support for upto 64 bits addressing. Add #define of high address for +tx and rx in dwmac4_dma.h. Enable register DMA_SysBus_Mode(bit: EAME) +for 64 bit addressing. Implement the 64-bit address register handling +under #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT. 
+ +Signed-off-by: Aashish Verma +Signed-off-by: Voon Weifeng +--- + drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1 + + .../ethernet/stmicro/stmmac/dwmac4_descs.c | 13 +++++++- + .../net/ethernet/stmicro/stmmac/dwmac4_dma.c | 33 +++++++++++++++++++ + .../net/ethernet/stmicro/stmmac/dwmac4_dma.h | 5 +++ + .../ethernet/stmicro/stmmac/dwxgmac2_descs.c | 2 +- + .../net/ethernet/stmicro/stmmac/enh_desc.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/hwif.h | 2 +- + .../net/ethernet/stmicro/stmmac/norm_desc.c | 2 +- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 7 ++++ + 9 files changed, 62 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +index f0d10ec18fca..138299222149 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +@@ -241,6 +241,7 @@ enum power_event { + #define GMAC_HW_HASH_TB_SZ GENMASK(25, 24) + #define GMAC_HW_FEAT_AVSEL BIT(20) + #define GMAC_HW_TSOEN BIT(18) ++#define GMAC_HW_FEAT_ADDR64 GENMASK(15, 14) + #define GMAC_HW_TXFIFOSIZE GENMASK(10, 6) + #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +index 082cb1dce13f..7b8f93c0ff06 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +@@ -435,15 +435,26 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss) + p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV); + } + +-static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr) ++static void dwmac4_get_addr(struct dma_desc *p, dma_addr_t *addr) + { ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ *addr = le32_to_cpu(p->des1); ++ *addr <<= 32; ++ *addr |= le32_to_cpu(p->des0); ++#else + *addr = le32_to_cpu(p->des0); ++#endif + } + + static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr) + { 
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ p->des0 = cpu_to_le32(lower_32_bits(addr)); ++ p->des1 = cpu_to_le32(upper_32_bits(addr)); ++#else + p->des0 = cpu_to_le32(addr); + p->des1 = 0; ++#endif + } + + static void dwmac4_clear(struct dma_desc *p) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +index b55d31349d61..c4c1297f72df 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +@@ -80,6 +80,10 @@ static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr, + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); + ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ writel(upper_32_bits(dma_rx_phy), ++ ioaddr + DMA_CHAN_RX_BASE_HI_ADDR(chan)); ++#endif + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan)); + } + +@@ -98,6 +102,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr, + + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ writel(upper_32_bits(dma_tx_phy), ++ ioaddr + DMA_CHAN_TX_BASE_HI_ADDR(chan)); ++#endif + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); + } + +@@ -133,6 +141,10 @@ static void dwmac4_dma_init(void __iomem *ioaddr, + if (dma_cfg->aal) + value |= DMA_SYS_BUS_AAL; + ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ value |= DMA_SYS_BUS_EAME; ++#endif ++ + writel(value, ioaddr + DMA_SYS_BUS_MODE); + } + +@@ -167,8 +179,12 @@ static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, + readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)); + reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)); ++ reg_space[DMA_CHAN_CUR_TX_BUF_HI_ADDR(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_CUR_TX_BUF_HI_ADDR(channel)); + reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)); ++ 
reg_space[DMA_CHAN_CUR_RX_BUF_HI_ADDR(channel) / 4] = ++ readl(ioaddr + DMA_CHAN_CUR_RX_BUF_HI_ADDR(channel)); + reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)); + reg_space[DMA_CHAN_STATUS(channel) / 4] = +@@ -357,6 +373,23 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, + dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24; + dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; + dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; ++ ++ dma_cap->addr64 = (hw_cap & GMAC_HW_FEAT_ADDR64) >> 14; ++ switch (dma_cap->addr64) { ++ case 0: ++ dma_cap->addr64 = 32; ++ break; ++ case 1: ++ dma_cap->addr64 = 40; ++ break; ++ case 2: ++ dma_cap->addr64 = 48; ++ break; ++ default: ++ dma_cap->addr64 = 32; ++ break; ++ } ++ + /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by + * shifting and store the sizes in bytes. + */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +index 153bac8bdb23..00e9c185f2e9 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +@@ -65,6 +65,7 @@ + #define DMA_SYS_BUS_MB BIT(14) + #define DMA_AXI_1KBBE BIT(13) + #define DMA_SYS_BUS_AAL BIT(12) ++#define DMA_SYS_BUS_EAME BIT(11) + #define DMA_AXI_BLEN256 BIT(7) + #define DMA_AXI_BLEN128 BIT(6) + #define DMA_AXI_BLEN64 BIT(5) +@@ -91,7 +92,9 @@ + #define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x) + #define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4) + #define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8) ++#define DMA_CHAN_TX_BASE_HI_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x10) + #define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14) ++#define DMA_CHAN_RX_BASE_HI_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x18) + #define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c) + #define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20) + #define DMA_CHAN_RX_END_ADDR(x) 
(DMA_CHANX_BASE_ADDR(x) + 0x28) +@@ -102,7 +105,9 @@ + #define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c) + #define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44) + #define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c) ++#define DMA_CHAN_CUR_TX_BUF_HI_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x50) + #define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54) ++#define DMA_CHAN_CUR_RX_BUF_HI_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x58) + #define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c) + #define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +index ae48154f933c..3408a89d85fc 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +@@ -239,7 +239,7 @@ static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss) + p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV); + } + +-static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr) ++static void dwxgmac2_get_addr(struct dma_desc *p, dma_addr_t *addr) + { + *addr = le32_to_cpu(p->des0); + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +index d02cec296f51..eb5c8e52df58 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +@@ -437,7 +437,7 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx) + pr_info("\n"); + } + +-static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr) ++static void enh_desc_get_addr(struct dma_desc *p, dma_addr_t *addr) + { + *addr = le32_to_cpu(p->des2); + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 3b9f12e1ea88..b6fbeebb1df0 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ 
b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -85,7 +85,7 @@ struct stmmac_desc_ops { + /* set MSS via context descriptor */ + void (*set_mss)(struct dma_desc *p, unsigned int mss); + /* get descriptor skbuff address */ +- void (*get_addr)(struct dma_desc *p, unsigned int *addr); ++ void (*get_addr)(struct dma_desc *p, dma_addr_t *addr); + /* set descriptor skbuff address */ + void (*set_addr)(struct dma_desc *p, dma_addr_t addr); + /* clear descriptor */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +index f083360e4ba6..473e0bdf2829 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +@@ -289,7 +289,7 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx) + pr_info("\n"); + } + +-static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr) ++static void ndesc_get_addr(struct dma_desc *p, dma_addr_t *addr) + { + *addr = le32_to_cpu(p->des2); + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 9427a9363e35..f529473e10ac 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -4019,6 +4019,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) + priv->xstats.rx_split_hdr_pkt_n++; + } + ++ if (netif_msg_rx_status(priv)) { ++ netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%llx\n", ++ p, entry, buf->addr); ++ netdev_dbg(priv->dev, "frame size %d, COE: %d\n", ++ len, status); ++ } ++ + skb = napi_alloc_skb(&ch->rx_napi, len); + if (!skb) { + priv->dev->stats.rx_dropped++; +-- +2.17.1 + diff --git a/patches/0060-vhm-Fix-kernel-doc-issues.acrn b/patches/0060-vhm-Fix-kernel-doc-issues.acrn new file mode 100644 index 0000000000..66988efc9d --- /dev/null +++ b/patches/0060-vhm-Fix-kernel-doc-issues.acrn @@ -0,0 +1,186 @@ +From 
f9ab78aac70f64b164ced69f408f7c9514f66d40 Mon Sep 17 00:00:00 2001 +From: Xinyun Liu +Date: Fri, 31 Aug 2018 10:59:01 +0800 +Subject: [PATCH 060/150] vhm: Fix kernel-doc issues + +Some comments are not in kernel-doc format so got error like: + +include/linux/vhm/vhm_vm_mngt.h:128: error: Cannot parse struct or +union! + +Some are typo or not updated,eg: + +include/linux/vhm/acrn_vhm_mm.h:93: warning: Excess function parameter +'uos_phy' description in 'map_guest_phys' + +V2: More typo fix and re-wording on Geoffroy's suggestion +V1: Fixed kernel-doc format issue + +Signed-off-by: Xinyun Liu +Reviewed-by: Geoffroy Van Cutsem +Reviewed-by: Eddie Dong +--- + include/linux/vhm/acrn_vhm_mm.h | 12 +++++------- + include/linux/vhm/vhm_ioctl_defs.h | 10 +++++----- + include/linux/vhm/vhm_vm_mngt.h | 16 ++++++++-------- + 3 files changed, 18 insertions(+), 20 deletions(-) + +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index 712860b5f5af..87d668f735dc 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -80,12 +80,10 @@ static inline unsigned long acrn_hpa2gpa(unsigned long hpa) + } + + /** +- * map_guest_phys - map guest physical address +- * +- * to SOS kernel virtual address ++ * map_guest_phys - map guest physical address to SOS kernel virtual address + * + * @vmid: guest vmid +- * @uos_phy: phsical address in guest ++ * @uos_phys: physical address in guest + * @size: the memory size mapped + * + * Return: SOS kernel virtual address, NULL on error +@@ -96,7 +94,7 @@ void *map_guest_phys(unsigned long vmid, u64 uos_phys, size_t size); + * unmap_guest_phys - unmap guest physical address + * + * @vmid: guest vmid +- * @uos_phy: phsical address in guest ++ * @uos_phys: physical address in guest + * + * Return: 0 on success, <0 for error. 
+ */ +@@ -109,7 +107,7 @@ int unmap_guest_phys(unsigned long vmid, u64 uos_phys); + * @guest_gpa: gpa of UOS + * @host_gpa: gpa of SOS + * @len: memory mapped length +- * @mem_type: memory mapping type. Possilble value could be: ++ * @mem_type: memory mapping type. Possible value could be: + * MEM_TYPE_WB + * MEM_TYPE_WT + * MEM_TYPE_UC +@@ -147,7 +145,7 @@ int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, + * @guest_gpa: gpa of UOS + * @host_gpa: gpa of SOS + * @len: memory mapped length +- * @mem_type: memory mapping type. Possilble value could be: ++ * @mem_type: memory mapping type. Possible value could be: + * MEM_TYPE_WB + * MEM_TYPE_WT + * MEM_TYPE_UC +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index 822fa4305f44..ad158f8949ba 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -52,11 +52,11 @@ + #ifndef _VHM_IOCTL_DEFS_H_ + #define _VHM_IOCTL_DEFS_H_ + +-/* Commmon structures for ACRN/VHM/DM */ ++/* Common structures for ACRN/VHM/DM */ + #include "acrn_common.h" + + /* +- * Commmon IOCTL ID defination for VHM/DM ++ * Common IOCTL ID definition for VHM/DM + */ + #define _IC_ID(x, y) (((x)<<24)|(y)) + #define IC_ID 0x43UL +@@ -161,7 +161,7 @@ struct ic_ptdev_irq { + uint32_t type; + /** @virt_bdf: virtual bdf description of pass thru device */ + uint16_t virt_bdf; /* IN: Device virtual BDF# */ +- /** @phy_bdf: physical bdf description of pass thru device */ ++ /** @phys_bdf: physical bdf description of pass thru device */ + uint16_t phys_bdf; /* IN: Device physical BDF# */ + /** union */ + union { +@@ -171,7 +171,7 @@ struct ic_ptdev_irq { + uint32_t virt_pin; + /** @phys_pin: physical IOAPIC pin */ + uint32_t phys_pin; +- /** @pic_pin: PIC pin */ ++ /** @is_pic_pin: PIC pin */ + uint32_t is_pic_pin; + } intx; + +@@ -191,7 +191,7 @@ struct ic_ptdev_irq { + }; + + /** +- * struct ioreq_notify - data strcture to notify hypervisor ioreq is handled ++ * struct 
ioreq_notify - data structure to notify hypervisor ioreq is handled + * + * @client_id: client id to identify ioreq client + * @vcpu: identify the ioreq submitter +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index 00ee5c9ec300..2f19153fb6af 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -90,6 +90,7 @@ extern struct mutex vhm_vm_list_lock; + * @ioreq_client_list: list of ioreq clients + * @req_buf: request buffer shared between HV, SOS and UOS + * @pg: pointer to linux page which holds req_buf ++ * @hugetlb_enabled: flag to enable/disable hugetlb page ept mapping + */ + struct vhm_vm { + struct device *dev; +@@ -121,7 +122,7 @@ struct vm_info { + }; + + /** +- * struct find_get_vm - find and hold vhm_vm of guest according to guest vmid ++ * find_get_vm() - find and keep guest vhm_vm based on the vmid + * + * @vmid: guest vmid + * +@@ -130,17 +131,16 @@ struct vm_info { + struct vhm_vm *find_get_vm(unsigned long vmid); + + /** +- * struct put_vm - release vhm_vm of guest according to guest vmid ++ * put_vm() - release vhm_vm of guest according to guest vmid + * If the latest reference count drops to zero, free vhm_vm as well +- * +- * @vm: pointer to vhm_vm which identrify specific guest ++ * @vm: pointer to vhm_vm which identify specific guest + * + * Return: + */ + void put_vm(struct vhm_vm *vm); + + /** +- * struct vhm_get_vm_info - get vm_info of specific guest ++ * vhm_get_vm_info() - get vm_info of specific guest + * + * @vmid: guest vmid + * @info: pointer to vm_info for returned vm_info +@@ -150,7 +150,7 @@ void put_vm(struct vhm_vm *vm); + int vhm_get_vm_info(unsigned long vmid, struct vm_info *info); + + /** +- * struct vhm_inject_msi - inject MSI interrupt to guest ++ * vhm_inject_msi() - inject MSI interrupt to guest + * + * @vmid: guest vmid + * @msi_addr: MSI addr matches MSI spec +@@ -162,11 +162,11 @@ int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, + unsigned 
long msi_data); + + /** +- * struct vhm_vm_gpa2hpa - convert guest physical address to ++ * vhm_vm_gpa2hpa() - convert guest physical address to + * host physical address + * + * @vmid: guest vmid +- * @gap: guest physical address ++ * @gpa: guest physical address + * + * Return: host physical address, <0 on error + */ +-- +2.17.1 + diff --git a/patches/0061-ASoC-Intel-Skylake-Add-debugfs-NHLT-dmic-override.audio b/patches/0061-ASoC-Intel-Skylake-Add-debugfs-NHLT-dmic-override.audio new file mode 100644 index 0000000000..1bcafc8072 --- /dev/null +++ b/patches/0061-ASoC-Intel-Skylake-Add-debugfs-NHLT-dmic-override.audio @@ -0,0 +1,45 @@ +From 20da5da1adc19ee5b4c4dc20fdc8fae325a9668a Mon Sep 17 00:00:00 2001 +From: Vinod Koul +Date: Wed, 26 Aug 2015 13:13:56 +0530 +Subject: [PATCH 061/193] ASoC: Intel: Skylake: Add debugfs NHLT dmic override + +Add debugfs entries for reading and writing DMIC blobs which +driver can use to program DSP + +Signed-off-by: Vinod Koul + +ASoC: Intel: Skylake: Increase the SSP count in debugFS + +Some of Broxton SKUs has 6 SSP ports, hence +support them in debugFS. 
+--- + sound/soc/intel/skylake/skl-debug.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index 0266744e9022..88db0aee1188 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -36,6 +36,7 @@ struct skl_debug { + struct dentry *nhlt; + u8 fw_read_buff[FW_REG_BUF]; + struct nhlt_blob ssp_blob[MAX_SSP]; ++ struct nhlt_blob dmic_blob; + }; + + /** +@@ -596,6 +597,11 @@ static int skl_init_nhlt(struct skl_debug *d) + dev_err(d->dev, "%s: debugfs init failed\n", name); + } + ++ if (!debugfs_create_file("dmic", 0644, ++ d->nhlt, &d->dmic_blob, ++ &nhlt_fops)) ++ dev_err(d->dev, "%s: debugfs init failed\n", name); ++ + return 0; + } + +-- +2.17.1 + diff --git a/patches/0061-drm-meson-add-resume-suspend-hooks.drm b/patches/0061-drm-meson-add-resume-suspend-hooks.drm new file mode 100644 index 0000000000..a703292248 --- /dev/null +++ b/patches/0061-drm-meson-add-resume-suspend-hooks.drm @@ -0,0 +1,77 @@ +From 5aef1d8ce48e6a358634fd080f2b8de1cedffb1c Mon Sep 17 00:00:00 2001 +From: Neil Armstrong +Date: Tue, 27 Aug 2019 11:58:25 +0200 +Subject: [PATCH 061/690] drm/meson: add resume/suspend hooks + +Add the suspend and resume hooks to: +- save and disable the entire DRM driver on suspend +- re-init the entire VPU subsystem on resume, to recover CRTC and pixel +generator functionnal usage after DDR suspend, then recover DRM driver +state + +Signed-off-by: Neil Armstrong +Acked-by: Daniel Vetter +Tested-by: Kevin Hilman +Link: https://patchwork.freedesktop.org/patch/msgid/20190827095825.21015-3-narmstrong@baylibre.com +--- + drivers/gpu/drm/meson/meson_drv.c | 32 +++++++++++++++++++++++++++++++ + 1 file changed, 32 insertions(+) + +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c +index a24f8dec5adc..397c33182f4f 100644 +--- a/drivers/gpu/drm/meson/meson_drv.c ++++ b/drivers/gpu/drm/meson/meson_drv.c +@@ -372,6 
+372,33 @@ static const struct component_master_ops meson_drv_master_ops = { + .unbind = meson_drv_unbind, + }; + ++static int __maybe_unused meson_drv_pm_suspend(struct device *dev) ++{ ++ struct meson_drm *priv = dev_get_drvdata(dev); ++ ++ if (!priv) ++ return 0; ++ ++ return drm_mode_config_helper_suspend(priv->drm); ++} ++ ++static int __maybe_unused meson_drv_pm_resume(struct device *dev) ++{ ++ struct meson_drm *priv = dev_get_drvdata(dev); ++ ++ if (!priv) ++ return 0; ++ ++ meson_vpu_init(priv); ++ meson_venc_init(priv); ++ meson_vpp_init(priv); ++ meson_viu_init(priv); ++ ++ drm_mode_config_helper_resume(priv->drm); ++ ++ return 0; ++} ++ + static int compare_of(struct device *dev, void *data) + { + DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n", +@@ -467,11 +494,16 @@ static const struct of_device_id dt_match[] = { + }; + MODULE_DEVICE_TABLE(of, dt_match); + ++static const struct dev_pm_ops meson_drv_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(meson_drv_pm_suspend, meson_drv_pm_resume) ++}; ++ + static struct platform_driver meson_drm_platform_driver = { + .probe = meson_drv_probe, + .driver = { + .name = "meson-drm", + .of_match_table = dt_match, ++ .pm = &meson_drv_pm_ops, + }, + }; + +-- +2.17.1 + diff --git a/patches/0061-mei-dal-make-access-list-global-instead-of-per-de.security b/patches/0061-mei-dal-make-access-list-global-instead-of-per-de.security new file mode 100644 index 0000000000..019068608c --- /dev/null +++ b/patches/0061-mei-dal-make-access-list-global-instead-of-per-de.security @@ -0,0 +1,373 @@ +From 4650494e0cfbc82600b7c9055b0ba992da13218c Mon Sep 17 00:00:00 2001 +From: Yael Samet +Date: Mon, 22 Jul 2019 21:36:09 +0300 +Subject: [PATCH 61/65] mei: dal: make access list global instead of per + device. + +Instead of a TA access list for each dal +device, there is a global one, exported via +kernel interface. 
+ +Change-Id: I75005f37491b7b0daf551b23a265ad29773d935c +Signed-off-by: Yael Samet +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/dal/dal_class.c | 8 +-- + drivers/misc/mei/dal/dal_dev.h | 15 +++--- + drivers/misc/mei/dal/dal_kdi.c | 36 ++----------- + drivers/misc/mei/dal/dal_ta_access.c | 81 +++++++++------------------- + 4 files changed, 37 insertions(+), 103 deletions(-) + +diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c +index b5a0201a1f73..3ed80393a337 100644 +--- a/drivers/misc/mei/dal/dal_class.c ++++ b/drivers/misc/mei/dal/dal_class.c +@@ -319,7 +319,6 @@ static int dal_validate_access(const struct bh_command_header *hdr, + size_t count, void *ctx) + { + struct dal_client *dc = ctx; +- struct dal_device *ddev = dc->ddev; + const uuid_t *ta_id; + + if (!bh_msg_is_cmd_open_session(hdr)) +@@ -329,7 +328,7 @@ static int dal_validate_access(const struct bh_command_header *hdr, + if (!ta_id) + return -EINVAL; + +- return dal_access_policy_allowed(ddev, ta_id, dc); ++ return dal_access_policy_allowed(ta_id, dc->intf); + } + + /** +@@ -675,7 +674,6 @@ static void dal_device_release(struct device *dev) + { + struct dal_device *ddev = to_dal_device(dev); + +- dal_access_list_free(ddev); + kfree(ddev->bh_fw_msg.msg); + kfree(ddev); + } +@@ -727,10 +725,6 @@ static int dal_probe(struct mei_cl_device *cldev, + goto err_unregister; + } + +- ret = dal_access_list_init(ddev); +- if (ret) +- goto err_unregister; +- + ret = dal_mei_enable(ddev); + if (ret < 0) + goto err_unregister; +diff --git a/drivers/misc/mei/dal/dal_dev.h b/drivers/misc/mei/dal/dal_dev.h +index 1cf178786f42..80630cc846f7 100644 +--- a/drivers/misc/mei/dal/dal_dev.h ++++ b/drivers/misc/mei/dal/dal_dev.h +@@ -27,6 +27,8 @@ extern struct class *dal_class; + enum dal_intf { + DAL_INTF_KDI, + DAL_INTF_CDEV, ++ ++ DAL_INTF_MAX + }; + + /** +@@ -152,13 +154,10 @@ int dal_kdi_recv(unsigned int handle, unsigned char *buf, size_t *count); + int dal_kdi_init(void); + void 
dal_kdi_exit(void); + +-int dal_access_policy_add(struct dal_device *ddev, +- const uuid_t *ta_id, void *owner); +-int dal_access_policy_remove(struct dal_device *ddev, +- const uuid_t *ta_id, void *owner); +-int dal_access_policy_allowed(struct dal_device *ddev, +- const uuid_t *ta_id, void *owner); +-void dal_access_list_free(struct dal_device *ddev); +-int dal_access_list_init(struct dal_device *ddev); ++int dal_access_policy_add(const uuid_t *ta_id, enum dal_intf owner); ++int dal_access_policy_remove(const uuid_t *ta_id, enum dal_intf owner); ++int dal_access_policy_allowed(const uuid_t *ta_id, enum dal_intf owner); ++void dal_access_list_free(void); ++int dal_access_list_init(void); + + #endif /* _DAL_KDI_H_ */ +diff --git a/drivers/misc/mei/dal/dal_kdi.c b/drivers/misc/mei/dal/dal_kdi.c +index e9805596c91a..41091ebe6c79 100644 +--- a/drivers/misc/mei/dal/dal_kdi.c ++++ b/drivers/misc/mei/dal/dal_kdi.c +@@ -374,27 +374,12 @@ EXPORT_SYMBOL(dal_close_session); + */ + int dal_set_ta_exclusive_access(const uuid_t *ta_id) + { +- struct dal_device *ddev; +- struct device *dev; +- struct dal_client *dc; + int ret; + + mutex_lock(&dal_kdi_lock); + +- dev = dal_find_dev(DAL_MEI_DEVICE_IVM); +- if (!dev) { +- dev_dbg(dev, "can't find device\n"); +- ret = -ENODEV; +- goto unlock; +- } ++ ret = dal_access_policy_add(ta_id, DAL_INTF_KDI); + +- ddev = to_dal_device(dev); +- dc = ddev->clients[DAL_INTF_KDI]; +- +- ret = dal_access_policy_add(ddev, ta_id, dc); +- +- put_device(dev); +-unlock: + mutex_unlock(&dal_kdi_lock); + return ret; + } +@@ -412,27 +397,12 @@ EXPORT_SYMBOL(dal_set_ta_exclusive_access); + */ + int dal_unset_ta_exclusive_access(const uuid_t *ta_id) + { +- struct dal_device *ddev; +- struct device *dev; +- struct dal_client *dc; + int ret; + + mutex_lock(&dal_kdi_lock); + +- dev = dal_find_dev(DAL_MEI_DEVICE_IVM); +- if (!dev) { +- dev_dbg(dev, "can't find device\n"); +- ret = -ENODEV; +- goto unlock; +- } ++ ret = dal_access_policy_remove(ta_id, 
DAL_INTF_KDI); + +- ddev = to_dal_device(dev); +- dc = ddev->clients[DAL_INTF_KDI]; +- +- ret = dal_access_policy_remove(ddev, ta_id, dc); +- +- put_device(dev); +-unlock: + mutex_unlock(&dal_kdi_lock); + return ret; + } +@@ -532,6 +502,7 @@ int dal_kdi_init(void) + int ret; + + bh_init_internal(); ++ dal_access_list_init(); + + dal_kdi_interface.class = dal_class; + ret = class_interface_register(&dal_kdi_interface); +@@ -553,5 +524,6 @@ int dal_kdi_init(void) + void dal_kdi_exit(void) + { + bh_deinit_internal(); ++ dal_access_list_free(); + class_interface_unregister(&dal_kdi_interface); + } +diff --git a/drivers/misc/mei/dal/dal_ta_access.c b/drivers/misc/mei/dal/dal_ta_access.c +index 23f5b424de62..cb19ce7a1458 100644 +--- a/drivers/misc/mei/dal/dal_ta_access.c ++++ b/drivers/misc/mei/dal/dal_ta_access.c +@@ -15,6 +15,9 @@ static const uuid_t spooler_ta_id = UUID_INIT(0xba8d1643, 0x50b6, 0x49cc, + 0x86, 0x1d, 0x2c, 0x01, + 0xbe, 0xd1, 0x4b, 0xe8); + ++/* dal_access_list, list of TAs with access restriction */ ++static struct list_head dal_access_list; ++ + /** + * struct dal_access_policy - ta access information node + * +@@ -25,21 +28,9 @@ static const uuid_t spooler_ta_id = UUID_INIT(0xba8d1643, 0x50b6, 0x49cc, + struct dal_access_policy { + struct list_head list; + uuid_t ta_id; +- void *owner; ++ enum dal_intf owner; + }; + +-/** +- * dal_dev_get_access_list - get access list of dal device +- * +- * @ddev: dal device +- * +- * Return: pointer to access list +- */ +-static struct list_head *dal_dev_get_access_list(struct dal_device *ddev) +-{ +- return dev_get_drvdata(&ddev->dev); +-} +- + /** + * dal_access_policy_alloc - allocate memory and initialize access list node + * +@@ -51,7 +42,7 @@ static struct list_head *dal_dev_get_access_list(struct dal_device *ddev) + * Locking: called under "kdi_lock" lock + */ + static struct dal_access_policy * +-dal_access_policy_alloc(const uuid_t *ta_id, void *owner) ++dal_access_policy_alloc(const uuid_t *ta_id, enum 
dal_intf owner) + { + struct dal_access_policy *e; + +@@ -90,7 +81,6 @@ dal_access_policy_find(struct list_head *access_list, const uuid_t *ta_id) + /** + * dal_access_policy_add - add access information of ta and its owner + * +- * @ddev: dal device + * @ta_id: trusted application id + * @owner: owner of ta + * +@@ -101,15 +91,13 @@ dal_access_policy_find(struct list_head *access_list, const uuid_t *ta_id) + * + * Locking: called under "kdi_lock" lock + */ +-int dal_access_policy_add(struct dal_device *ddev, +- const uuid_t *ta_id, void *owner) ++int dal_access_policy_add(const uuid_t *ta_id, enum dal_intf owner) + { +- struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e; + +- e = dal_access_policy_find(access_list, ta_id); ++ e = dal_access_policy_find(&dal_access_list, ta_id); + if (e) { +- if (!e->owner) ++ if (e->owner != owner) + return -EPERM; + + return -EEXIST; +@@ -119,14 +107,13 @@ int dal_access_policy_add(struct dal_device *ddev, + if (!e) + return -ENOMEM; + +- list_add_tail(&e->list, access_list); ++ list_add_tail(&e->list, &dal_access_list); + return 0; + } + + /** + * dal_access_policy_remove - remove access information of ta and its owner + * +- * @ddev: dal device + * @ta_id: trusted application id + * @owner: owner of ta + * +@@ -136,17 +123,15 @@ int dal_access_policy_add(struct dal_device *ddev, + * + * Locking: called under "kdi_lock" lock + */ +-int dal_access_policy_remove(struct dal_device *ddev, +- const uuid_t *ta_id, void *owner) ++int dal_access_policy_remove(const uuid_t *ta_id, enum dal_intf owner) + { +- struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e; + +- e = dal_access_policy_find(access_list, ta_id); ++ e = dal_access_policy_find(&dal_access_list, ta_id); + if (!e) + return -ENOENT; + +- if (!e->owner || e->owner != owner) ++ if (e->owner != owner) + return -EPERM; + + list_del(&e->list); +@@ -157,7 +142,6 @@ int dal_access_policy_remove(struct 
dal_device *ddev, + /** + * dal_access_policy_allowed - check if owner is allowed to use ta + * +- * @ddev: dal device + * @ta_id: trusted application id + * @owner: owner + * +@@ -166,17 +150,19 @@ int dal_access_policy_remove(struct dal_device *ddev, + * + * Locking: called under "ddev->write_lock" lock + */ +-int dal_access_policy_allowed(struct dal_device *ddev, +- const uuid_t *ta_id, void *owner) ++int dal_access_policy_allowed(const uuid_t *ta_id, enum dal_intf owner) + { +- struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e; + +- e = dal_access_policy_find(access_list, ta_id); ++ e = dal_access_policy_find(&dal_access_list, ta_id); + if (!e) + return 0; + +- if (e->owner && e->owner != owner) ++ /* ++ * owner is DAL_INTF_MAX if the ta cannot be blocked ++ * (currently only the spooler ta) ++ */ ++ if (e->owner != DAL_INTF_MAX && e->owner != owner) + return -EPERM; + + return 0; +@@ -184,51 +170,34 @@ int dal_access_policy_allowed(struct dal_device *ddev, + + /** + * dal_access_list_free - free memory of access list +- * +- * @ddev: dal device + */ +-void dal_access_list_free(struct dal_device *ddev) ++void dal_access_list_free(void) + { +- struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e, *n; + +- if (!access_list) +- return; +- +- list_for_each_entry_safe(e, n, access_list, list) { ++ list_for_each_entry_safe(e, n, &dal_access_list, list) { + list_del(&e->list); + kfree(e); + } + +- kfree(access_list); +- dev_set_drvdata(&ddev->dev, NULL); ++ INIT_LIST_HEAD(&dal_access_list); + } + + /** + * dal_access_list_init - initialize an empty access list + * +- * @ddev: dal device +- * + * Note: Add spooler ta id with blank owner to the list. + * This will prevent any user from setting itself owner of the spooler, + * which will block others from openning session to it. 
+ * + * Return: 0 on success +- * -ENOMEM on memory allocation failure + */ +-int dal_access_list_init(struct dal_device *ddev) ++int dal_access_list_init(void) + { +- struct list_head *access_list; +- +- access_list = kzalloc(sizeof(*access_list), GFP_KERNEL); +- if (!access_list) +- return -ENOMEM; +- +- INIT_LIST_HEAD(access_list); +- dev_set_drvdata(&ddev->dev, access_list); ++ INIT_LIST_HEAD(&dal_access_list); + + /* Nobody can own SPOOLER TA */ +- dal_access_policy_add(ddev, &spooler_ta_id, NULL); ++ dal_access_policy_add(&spooler_ta_id, DAL_INTF_MAX); + + return 0; + } +-- +2.17.1 + diff --git a/patches/0061-net-stmmac-ethtool-statistic-to-show-irq-for-.connectivity b/patches/0061-net-stmmac-ethtool-statistic-to-show-irq-for-.connectivity new file mode 100644 index 0000000000..bd11291e0e --- /dev/null +++ b/patches/0061-net-stmmac-ethtool-statistic-to-show-irq-for-.connectivity @@ -0,0 +1,155 @@ +From 5d1ba9d62700145c348207b61952635295799231 Mon Sep 17 00:00:00 2001 +From: Weifeng Voon +Date: Wed, 18 Jul 2018 07:27:02 +0800 +Subject: [PATCH 061/108] net: stmmac: ethtool statistic to show irq for each + tx/rx ch + +Adding new statistics for DMA tx and rx IRQ. Up to 8 channels are +supported, incremented by dwmac4_dma_interrupt. 
+ +Signed-off-by: Weifeng Voon +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 16 +++++ + .../net/ethernet/stmicro/stmmac/dwmac4_lib.c | 66 ++++++++++++++++++- + .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 16 +++++ + 3 files changed, 96 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index 7796a0f5b861..f8fd91cd5806 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -99,6 +99,22 @@ struct stmmac_extra_stats { + unsigned long tx_clean; + unsigned long tx_set_ic_bit; + unsigned long irq_receive_pmt_irq_n; ++ unsigned long q0_rx_irq_n; ++ unsigned long q1_rx_irq_n; ++ unsigned long q2_rx_irq_n; ++ unsigned long q3_rx_irq_n; ++ unsigned long q4_rx_irq_n; ++ unsigned long q5_rx_irq_n; ++ unsigned long q6_rx_irq_n; ++ unsigned long q7_rx_irq_n; ++ unsigned long q0_tx_irq_n; ++ unsigned long q1_tx_irq_n; ++ unsigned long q2_tx_irq_n; ++ unsigned long q3_tx_irq_n; ++ unsigned long q4_tx_irq_n; ++ unsigned long q5_tx_irq_n; ++ unsigned long q6_tx_irq_n; ++ unsigned long q7_tx_irq_n; + /* MMC info */ + unsigned long mmc_tx_irq_n; + unsigned long mmc_rx_irq_n; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +index 211931bc814f..8f1830d795cd 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +@@ -149,12 +149,74 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, + if (likely(intr_status & DMA_CHAN_STATUS_NIS)) { + x->normal_irq_n++; + if (likely(intr_status & DMA_CHAN_STATUS_RI)) { +- x->rx_normal_irq_n++; +- ret |= handle_rx; ++ u32 value; ++ ++ value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); ++ /* to schedule NAPI on real RIE event. 
*/ ++ if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { ++ x->rx_normal_irq_n++; ++ switch (chan) { ++ case 0x0: ++ x->q0_rx_irq_n++; ++ break; ++ case 0x1: ++ x->q1_rx_irq_n++; ++ break; ++ case 0x2: ++ x->q2_rx_irq_n++; ++ break; ++ case 0x3: ++ x->q3_rx_irq_n++; ++ break; ++ case 0x4: ++ x->q4_rx_irq_n++; ++ break; ++ case 0x5: ++ x->q5_rx_irq_n++; ++ break; ++ case 0x6: ++ x->q6_rx_irq_n++; ++ break; ++ case 0x7: ++ x->q7_rx_irq_n++; ++ break; ++ default: ++ break; ++ } ++ ret |= handle_rx; ++ } + } + if (likely(intr_status & (DMA_CHAN_STATUS_TI | + DMA_CHAN_STATUS_TBU))) { + x->tx_normal_irq_n++; ++ switch (chan) { ++ case 0x0: ++ x->q0_tx_irq_n++; ++ break; ++ case 0x1: ++ x->q1_tx_irq_n++; ++ break; ++ case 0x2: ++ x->q2_tx_irq_n++; ++ break; ++ case 0x3: ++ x->q3_tx_irq_n++; ++ break; ++ case 0x4: ++ x->q4_tx_irq_n++; ++ break; ++ case 0x5: ++ x->q5_tx_irq_n++; ++ break; ++ case 0x6: ++ x->q6_tx_irq_n++; ++ break; ++ case 0x7: ++ x->q7_tx_irq_n++; ++ break; ++ default: ++ break; ++ } + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index c48a77f6508c..058877a9fcd2 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -90,6 +90,22 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { + STMMAC_STAT(tx_clean), + STMMAC_STAT(tx_set_ic_bit), + STMMAC_STAT(irq_receive_pmt_irq_n), ++ STMMAC_STAT(q0_rx_irq_n), ++ STMMAC_STAT(q1_rx_irq_n), ++ STMMAC_STAT(q2_rx_irq_n), ++ STMMAC_STAT(q3_rx_irq_n), ++ STMMAC_STAT(q4_rx_irq_n), ++ STMMAC_STAT(q5_rx_irq_n), ++ STMMAC_STAT(q6_rx_irq_n), ++ STMMAC_STAT(q7_rx_irq_n), ++ STMMAC_STAT(q0_tx_irq_n), ++ STMMAC_STAT(q1_tx_irq_n), ++ STMMAC_STAT(q2_tx_irq_n), ++ STMMAC_STAT(q3_tx_irq_n), ++ STMMAC_STAT(q4_tx_irq_n), ++ STMMAC_STAT(q5_tx_irq_n), ++ STMMAC_STAT(q6_tx_irq_n), ++ STMMAC_STAT(q7_tx_irq_n), + 
/* MMC info */ + STMMAC_STAT(mmc_tx_irq_n), + STMMAC_STAT(mmc_rx_irq_n), +-- +2.17.1 + diff --git a/patches/0061-trusty-Fix-possible-memory-leak.trusty b/patches/0061-trusty-Fix-possible-memory-leak.trusty new file mode 100644 index 0000000000..0233c06ed7 --- /dev/null +++ b/patches/0061-trusty-Fix-possible-memory-leak.trusty @@ -0,0 +1,39 @@ +From 6d85e86e34573d9139118cbfa1fbec340af8e984 Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Mon, 12 Nov 2018 14:50:33 +0800 +Subject: [PATCH 61/63] trusty: Fix possible memory leak + +Destroy workqueue for failed case + +Change-Id: Ibc88fb85a81579fc0f584340b35797843644d1cf +Tracked-On: PKT-1551 +Signed-off-by: Zhang, Qi +--- + drivers/trusty/trusty-timer.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c +index ca6ea5799eeb..7359cf8bd3d9 100644 +--- a/drivers/trusty/trusty-timer.c ++++ b/drivers/trusty/trusty-timer.c +@@ -143,8 +143,7 @@ static int trusty_timer_probe(struct platform_device *pdev) + ret = trusty_call_notifier_register(s->trusty_dev, &s->call_notifier); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register call notifier\n"); +- kfree(s); +- return ret; ++ goto error_call_notifier; + } + + INIT_WORK(&s->timer.work, timer_work_func); +@@ -153,6 +152,7 @@ static int trusty_timer_probe(struct platform_device *pdev) + + return 0; + ++error_call_notifier: + destroy_workqueue(s->workqueue); + err_allocate_work_queue: + kfree(s); +-- +2.17.1 + diff --git a/patches/0061-vhm-add-trusty-init-de-init-support.acrn b/patches/0061-vhm-add-trusty-init-de-init-support.acrn new file mode 100644 index 0000000000..6e06836524 --- /dev/null +++ b/patches/0061-vhm-add-trusty-init-de-init-support.acrn @@ -0,0 +1,112 @@ +From aabfebb587219c82efa2ba6cd0e857def38479bf Mon Sep 17 00:00:00 2001 +From: Mingqiang Chi +Date: Fri, 31 Aug 2018 10:59:01 +0800 +Subject: [PATCH 061/150] vhm: add trusty init/de-init support + +vhm will allocate trusty 
memory from cma then do ept map for a VM with trusty. +vhm will de-init trusty for a VM during its destroying. + +Signed-off-by: Mingqiang Chi +Signed-off-by: Jason Chen CJ +Reviewed-by: Zhao Yakui +Acked-by: Eddie Dong +--- + drivers/char/vhm/vhm_dev.c | 10 ++++++++++ + drivers/vhm/vhm_mm.c | 25 +++++++++++++++++++++++++ + include/linux/vhm/acrn_vhm_mm.h | 3 +++ + include/linux/vhm/vhm_vm_mngt.h | 1 + + 4 files changed, 39 insertions(+) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index dceeeee90220..0e6807db5db6 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -201,6 +201,14 @@ static long vhm_dev_ioctl(struct file *filep, + + vm->vmid = created_vm.vmid; + ++ if (created_vm.vm_flag & SECURE_WORLD_ENABLED) { ++ ret = init_trusty(vm); ++ if (ret < 0) { ++ pr_err("vhm: failed to init trusty for VM!\n"); ++ return ret; ++ } ++ } ++ + pr_info("vhm: VM %d created\n", created_vm.vmid); + break; + } +@@ -233,6 +241,8 @@ static long vhm_dev_ioctl(struct file *filep, + } + + case IC_DESTROY_VM: { ++ if (vm->trusty_host_gpa) ++ deinit_trusty(vm); + ret = hcall_destroy_vm(vm->vmid); + if (ret < 0) { + pr_err("failed to destroy VM %ld\n", vm->vmid); +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index fb09ed2f994f..bff448208836 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -305,6 +305,31 @@ int check_guest_mem(struct vhm_vm *vm) + return 0; + } + ++#define TRUSTY_MEM_GPA_BASE (511UL * 1024UL * 1024UL * 1024UL) ++#define TRUSTY_MEM_SIZE (0x01000000) ++int init_trusty(struct vhm_vm *vm) ++{ ++ unsigned long host_gpa, guest_gpa = TRUSTY_MEM_GPA_BASE; ++ unsigned long len = TRUSTY_MEM_SIZE; ++ ++ host_gpa = _alloc_memblk(vm->dev, TRUSTY_MEM_SIZE); ++ if (host_gpa == 0ULL) ++ return -ENOMEM; ++ ++ vm->trusty_host_gpa = host_gpa; ++ ++ pr_info("VHM: set ept for trusty memory [host_gpa=0x%lx, " ++ "guest_gpa=0x%lx, len=0x%lx]", host_gpa, guest_gpa, len); ++ return _mem_set_memmap(vm->vmid, 
guest_gpa, host_gpa, len, ++ MEM_TYPE_WB, MEM_ACCESS_RWX, MAP_MEM); ++} ++ ++void deinit_trusty(struct vhm_vm *vm) ++{ ++ _free_memblk(vm->dev, vm->trusty_host_gpa, TRUSTY_MEM_SIZE); ++ vm->trusty_host_gpa = 0; ++} ++ + static void guest_vm_open(struct vm_area_struct *vma) + { + struct vhm_vm *vm = vma->vm_file->private_data; +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index 87d668f735dc..5ff9af92f81f 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -198,6 +198,9 @@ int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg); + */ + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap); + ++int init_trusty(struct vhm_vm *vm); ++void deinit_trusty(struct vhm_vm *vm); ++ + int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned int mem_access_right, +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index 2f19153fb6af..c47d425f1537 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -96,6 +96,7 @@ struct vhm_vm { + struct device *dev; + struct list_head list; + unsigned long vmid; ++ unsigned long trusty_host_gpa; + int ioreq_fallback_client; + long refcnt; + struct mutex seg_lock; +-- +2.17.1 + diff --git a/patches/0062-ASoC-Intel-Skylake-Read-blobs-from-debugfs-on-overri.audio b/patches/0062-ASoC-Intel-Skylake-Read-blobs-from-debugfs-on-overri.audio new file mode 100644 index 0000000000..98c327a861 --- /dev/null +++ b/patches/0062-ASoC-Intel-Skylake-Read-blobs-from-debugfs-on-overri.audio @@ -0,0 +1,107 @@ +From 4887c516ce5f1204989bbb1a1678aaae6c1683cb Mon Sep 17 00:00:00 2001 +From: Vinod Koul +Date: Wed, 26 Aug 2015 13:13:56 +0530 +Subject: [PATCH 062/193] ASoC: Intel: Skylake: Read blobs from debugfs on + override + +Add API to read blobs from debugfs when override is enabled and +use that API when sending IPCs to 
DSP + +Change-Id: Id624a179f447e590c9cded684bd687d91dc0975e +Signed-off-by: Vinod Koul +Signed-off-by: Jeeja KP +--- + sound/soc/intel/skylake/skl-debug.c | 21 +++++++++++++++++++++ + sound/soc/intel/skylake/skl-topology.c | 14 +++++++++++++- + sound/soc/intel/skylake/skl.h | 8 ++++++++ + 3 files changed, 42 insertions(+), 1 deletion(-) + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index 88db0aee1188..b150087249b1 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -468,6 +468,27 @@ static int skl_debugfs_init_ipc(struct skl_debug *d) + return 0; + } + ++struct nhlt_specific_cfg ++*skl_nhlt_get_debugfs_blob(struct skl_debug *d, u8 link_type, u32 instance) ++{ ++ switch (link_type) { ++ case NHLT_LINK_DMIC: ++ return d->dmic_blob.cfg; ++ ++ case NHLT_LINK_SSP: ++ if (instance >= MAX_SSP) ++ return NULL; ++ ++ return d->ssp_blob[instance].cfg; ++ ++ default: ++ break; ++ } ++ ++ dev_err(d->dev, "NHLT debugfs query failed\n"); ++ return NULL; ++} ++ + static ssize_t nhlt_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) + { +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index ed30cb454794..94a99b9089fa 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ b/sound/soc/intel/skylake/skl-topology.c +@@ -1730,10 +1730,22 @@ static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai, + return 0; + + /* update the blob based on virtual bus_id*/ +- cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type, ++ if (!skl->nhlt_override) { ++ cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type, + params->s_fmt, params->ch, + params->s_freq, params->stream, + dev_type); ++ } else { ++ dev_warn(dai->dev, "Querying NHLT blob from Debugfs!!!!\n"); ++ cfg = skl_nhlt_get_debugfs_blob(skl->debugfs, ++ link_type, mconfig->vbus_id); ++ if (cfg->size > HDA_SST_CFG_MAX) { ++ dev_err(dai->dev, "NHLT debugfs blob is vv 
large\n"); ++ dev_err(dai->dev, "First word is size in blob!!!\n"); ++ dev_err(dai->dev, "Recieved size %d\n", cfg->size); ++ return -EIO; ++ } ++ } + if (cfg) { + mconfig->formats_config.caps_size = cfg->size; + mconfig->formats_config.caps = (u32 *) &cfg->caps; +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index df3d38639d13..73f8d94bc27b 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -190,6 +190,9 @@ void skl_debugfs_exit(struct skl_dev *skl); + void skl_debug_init_module(struct skl_debug *d, + struct snd_soc_dapm_widget *w, + struct skl_module_cfg *mconfig); ++struct nhlt_specific_cfg ++*skl_nhlt_get_debugfs_blob(struct skl_debug *d, u8 link_type, u32 instance); ++ + #else + static inline struct skl_debug *skl_debugfs_init(struct skl_dev *skl) + { +@@ -203,6 +206,11 @@ static inline void skl_debug_init_module(struct skl_debug *d, + struct snd_soc_dapm_widget *w, + struct skl_module_cfg *mconfig) + {} ++static inline struct nhlt_specific_cfg ++*skl_nhlt_get_debugfs_blob(struct skl_debug *d, u8 link_type, u32 instance) ++{ ++ return NULL; ++} + #endif + + #endif /* __SOUND_SOC_SKL_H */ +-- +2.17.1 + diff --git a/patches/0062-drm-i915-execlists-Flush-the-post-sync-breadcrumb-writ.drm b/patches/0062-drm-i915-execlists-Flush-the-post-sync-breadcrumb-writ.drm new file mode 100644 index 0000000000..4b44285742 --- /dev/null +++ b/patches/0062-drm-i915-execlists-Flush-the-post-sync-breadcrumb-writ.drm @@ -0,0 +1,37 @@ +From 591855df7c5d527494def3c22e14c12b35c04d00 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Tue, 27 Aug 2019 13:06:15 +0100 +Subject: [PATCH 062/690] drm/i915/execlists: Flush the post-sync breadcrumb + write harder + +Quite rarely we see that the CS completion event fires before the +breadcrumb is coherent, which presumably is a result of the CS_STALL not +waiting for the post-sync operation. 
Try throwing in a DC_FLUSH into +the following pipecontrol to see if that makes any difference. + +Signed-off-by: Chris Wilson +Cc: Mika Kuoppala +Acked-by: Mika Kuoppala +Link: https://patchwork.freedesktop.org/patch/msgid/20190827120615.31390-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gt/intel_lrc.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c +index 4ef8b5fd5d0c..f15ec7c8fb62 100644 +--- a/drivers/gpu/drm/i915/gt/intel_lrc.c ++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c +@@ -2949,8 +2949,10 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) + PIPE_CONTROL_DC_FLUSH_ENABLE); + + /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ ++ /* XXX DC_FLUSH for post-sync write? (cf early context-switch bug) */ + cs = gen8_emit_pipe_control(cs, + PIPE_CONTROL_FLUSH_ENABLE | ++ PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_CS_STALL, + 0); + +-- +2.17.1 + diff --git a/patches/0062-mei-dal-fix-check-for-valid-request-pointer.security b/patches/0062-mei-dal-fix-check-for-valid-request-pointer.security new file mode 100644 index 0000000000..e396ebfe18 --- /dev/null +++ b/patches/0062-mei-dal-fix-check-for-valid-request-pointer.security @@ -0,0 +1,46 @@ +From 58b233faf271debe0b02adaa430f3c171833bc2f Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Tue, 27 Aug 2019 11:00:08 +0300 +Subject: [PATCH 62/65] mei: dal: fix check for valid request pointer. + +bh_request_alloc returns error pointer, hence checking just for NULL won't +work, we need to check IS_ERR_OR_NULL(). The NULL check is still required +as NULL might be assigned during search on list. 
+ +Change-Id: I3d817b5f94d59719e0e4f162102a4f92c1eb7e9d +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/dal/bh_internal.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/misc/mei/dal/bh_internal.c b/drivers/misc/mei/dal/bh_internal.c +index c8739a417ecb..14933bf3c295 100644 +--- a/drivers/misc/mei/dal/bh_internal.c ++++ b/drivers/misc/mei/dal/bh_internal.c +@@ -125,12 +125,12 @@ void bh_session_remove(unsigned int conn_idx, u64 host_id) + + static void bh_request_free(struct bh_request_cmd *request) + { +- if (!request) ++ if (IS_ERR_OR_NULL(request)) + return; ++ + kfree(request->cmd); + kfree(request->response); + kfree(request); +- request = NULL; + } + + static struct bh_request_cmd *bh_request_alloc(const void *hdr, +@@ -358,7 +358,7 @@ static int bh_send_message(const struct bh_request_cmd *request) + { + struct bh_command_header *h; + +- if (!request) ++ if (IS_ERR_OR_NULL(request)) + return -EINVAL; + + if (request->cmd_len < sizeof(*h) || !request->cmd) +-- +2.17.1 + diff --git a/patches/0062-net-stmmac-ethtool-statistic-to-show-rx_pkt_n.connectivity b/patches/0062-net-stmmac-ethtool-statistic-to-show-rx_pkt_n.connectivity new file mode 100644 index 0000000000..fc0cc5dca0 --- /dev/null +++ b/patches/0062-net-stmmac-ethtool-statistic-to-show-rx_pkt_n.connectivity @@ -0,0 +1,98 @@ +From 4d28d4ff41de4629e634914cdbf78b9e8d3673b9 Mon Sep 17 00:00:00 2001 +From: Weifeng Voon +Date: Wed, 18 Jul 2018 07:31:42 +0800 +Subject: [PATCH 062/108] net: stmmac: ethtool statistic to show rx_pkt_n for + each rx ch + +Adding new statistics for packets received per queue/channel. Up to 8 +queue/channels are supported. These counters are incremented by stmmac_rx. 
+ +Signed-off-by: Weifeng Voon +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 8 +++++ + .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 8 +++++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 29 +++++++++++++++++++ + 3 files changed, 45 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index f8fd91cd5806..e6469e9e5dfa 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -92,6 +92,14 @@ struct stmmac_extra_stats { + unsigned long threshold; + unsigned long tx_pkt_n; + unsigned long rx_pkt_n; ++ unsigned long q0_rx_pkt_n; ++ unsigned long q1_rx_pkt_n; ++ unsigned long q2_rx_pkt_n; ++ unsigned long q3_rx_pkt_n; ++ unsigned long q4_rx_pkt_n; ++ unsigned long q5_rx_pkt_n; ++ unsigned long q6_rx_pkt_n; ++ unsigned long q7_rx_pkt_n; + unsigned long normal_irq_n; + unsigned long rx_normal_irq_n; + unsigned long napi_poll; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index 058877a9fcd2..8a580746ab92 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -83,6 +83,14 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { + STMMAC_STAT(threshold), + STMMAC_STAT(tx_pkt_n), + STMMAC_STAT(rx_pkt_n), ++ STMMAC_STAT(q0_rx_pkt_n), ++ STMMAC_STAT(q1_rx_pkt_n), ++ STMMAC_STAT(q2_rx_pkt_n), ++ STMMAC_STAT(q3_rx_pkt_n), ++ STMMAC_STAT(q4_rx_pkt_n), ++ STMMAC_STAT(q5_rx_pkt_n), ++ STMMAC_STAT(q6_rx_pkt_n), ++ STMMAC_STAT(q7_rx_pkt_n), + STMMAC_STAT(normal_irq_n), + STMMAC_STAT(rx_normal_irq_n), + STMMAC_STAT(napi_poll), +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index f529473e10ac..2538f3ab33b4 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -4113,6 +4113,35 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) + + priv->xstats.rx_pkt_n += count; + ++ switch (queue) { ++ case 0x0: ++ priv->xstats.q0_rx_pkt_n += count; ++ break; ++ case 0x1: ++ priv->xstats.q1_rx_pkt_n += count; ++ break; ++ case 0x2: ++ priv->xstats.q2_rx_pkt_n += count; ++ break; ++ case 0x3: ++ priv->xstats.q3_rx_pkt_n += count; ++ break; ++ case 0x4: ++ priv->xstats.q4_rx_pkt_n += count; ++ break; ++ case 0x5: ++ priv->xstats.q5_rx_pkt_n += count; ++ break; ++ case 0x6: ++ priv->xstats.q6_rx_pkt_n += count; ++ break; ++ case 0x7: ++ priv->xstats.q7_rx_pkt_n += count; ++ break; ++ default: ++ break; ++ } ++ + return count; + } + +-- +2.17.1 + diff --git a/patches/0062-trusty-disable-va-printing-in-dmesg-on-user-build.trusty b/patches/0062-trusty-disable-va-printing-in-dmesg-on-user-build.trusty new file mode 100644 index 0000000000..6c143a9eaf --- /dev/null +++ b/patches/0062-trusty-disable-va-printing-in-dmesg-on-user-build.trusty @@ -0,0 +1,30 @@ +From 3f0ea9bc4599c72ce3c01d78eb6bbee5e821a0a7 Mon Sep 17 00:00:00 2001 +From: "Zhang, Qi" +Date: Tue, 27 Nov 2018 13:19:07 +0800 +Subject: [PATCH 62/63] trusty: disable va printing in dmesg on user build + +The va of trusty memory printing may leak security info + +Change-Id: I01380d4995892a91027384217ea6f0acdeb05fdc +Tracked-On: PKT-1560 +Signed-off-by: Zhang, Qi +--- + drivers/trusty/trusty-virtio.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index df066dda80d3..113dc2fed2ef 100644 +--- a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -335,7 +335,7 @@ static struct virtqueue *_find_vq(struct virtio_device *vdev, + */ + tvr->vr_descr->pa = (u32)HIULINT(pa); + +- dev_info(&vdev->dev, "vring%d: va(pa) %p(%llx) qsz %d notifyid %d\n", ++ dev_dbg(&vdev->dev, "vring%d: va(pa) %p(%llx) qsz %d notifyid 
%d\n", + id, tvr->vaddr, (u64)tvr->paddr, tvr->elem_num, tvr->notifyid); + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +-- +2.17.1 + diff --git a/patches/0062-vhm-fix-kerneldoc-format.acrn b/patches/0062-vhm-fix-kerneldoc-format.acrn new file mode 100644 index 0000000000..929b8a2517 --- /dev/null +++ b/patches/0062-vhm-fix-kerneldoc-format.acrn @@ -0,0 +1,72 @@ +From 88aad788a8835ec10ea281635e2cc3d64402446b Mon Sep 17 00:00:00 2001 +From: Xinyun Liu +Date: Fri, 31 Aug 2018 10:59:01 +0800 +Subject: [PATCH 062/150] vhm: fix kerneldoc format + +remove doxygen commands and add missing description + +Signed-off-by: Xinyun Liu +Reviewed-by: Mingqiang Chi +Acked-by: Yakui Zhao +--- + include/linux/vhm/acrn_vhm_ioreq.h | 4 +--- + include/linux/vhm/acrn_vhm_mm.h | 4 +--- + include/linux/vhm/vhm_vm_mngt.h | 5 ++--- + 3 files changed, 4 insertions(+), 9 deletions(-) + +diff --git a/include/linux/vhm/acrn_vhm_ioreq.h b/include/linux/vhm/acrn_vhm_ioreq.h +index de3a8aa4eaf6..fbf69b37d356 100644 +--- a/include/linux/vhm/acrn_vhm_ioreq.h ++++ b/include/linux/vhm/acrn_vhm_ioreq.h +@@ -52,9 +52,7 @@ + */ + + /** +- * @file acrn_vhm_ioreq.h +- * +- * @brief Virtio and Hypervisor Module(VHM) ioreq APIs ++ * DOC: Virtio and Hypervisor Module(VHM) ioreq APIs + */ + + #ifndef __ACRN_VHM_IOREQ_H__ +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index 5ff9af92f81f..21269e47b26a 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -52,9 +52,7 @@ + */ + + /** +- * @file acrn_vhm_mm.h +- * +- * @brief Virtio and Hypervisor Module memory manager APIs ++ * DOC: Virtio and Hypervisor Module memory manager APIs + */ + + #ifndef __ACRN_VHM_MM_H__ +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index c47d425f1537..4fed61229ad9 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -55,9 +55,7 @@ + */ + + /** +- * @file vhm_vm_mngt.h +- * +- * @brief 
Virtio and Hypervisor Module(VHM) management APIs ++ * DOC: brief Virtio and Hypervisor Module(VHM) management APIs + */ + #ifndef VHM_VM_MNGT_H + #define VHM_VM_MNGT_H +@@ -79,6 +77,7 @@ extern struct mutex vhm_vm_list_lock; + * @dev: pointer to dev of linux device mode + * @list: list of vhm_vm + * @vmid: guest vmid ++ * @trusty_host_gpa: host physical address of continuous memory for Trusty + * @ioreq_fallback_client: default ioreq client + * @refcnt: reference count of guest + * @seg_lock: mutex to protect memseg_list +-- +2.17.1 + diff --git a/patches/0063-ASoC-Intel-Skylake-NHLT-override-check-cfg-size-in-d.audio b/patches/0063-ASoC-Intel-Skylake-NHLT-override-check-cfg-size-in-d.audio new file mode 100644 index 0000000000..7ed24d77c4 --- /dev/null +++ b/patches/0063-ASoC-Intel-Skylake-NHLT-override-check-cfg-size-in-d.audio @@ -0,0 +1,52 @@ +From b8ad8ac70843b4ca21d515f4969ac80ab118a18d Mon Sep 17 00:00:00 2001 +From: Jeeja KP +Date: Tue, 8 Sep 2015 22:16:08 +0530 +Subject: [PATCH 063/193] ASoC: Intel: Skylake: NHLT override, check cfg size + in debugfs blob write + +When blob is updated, check the cfg size. If cfg size exceeds maximum, +return error from debugfs in write. +Removed check in update_params(), we will pass the pointer to cfg +param instead of memcpy. 
+ +Change-Id: Ia459167e28ab37e688ca531e55e24d1faa6a2471 +Signed-off-by: Jeeja KP +--- + sound/soc/intel/skylake/skl-debug.c | 3 +++ + sound/soc/intel/skylake/skl-topology.c | 6 ------ + 2 files changed, 3 insertions(+), 6 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index b150087249b1..6764908a146d 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -509,6 +509,9 @@ static ssize_t nhlt_write(struct file *file, + ssize_t written; + size_t size = blob->size; + ++ if (count > 2 * HDA_SST_CFG_MAX) ++ return -EIO; ++ + if (!blob->cfg) { + /* allocate mem for blob */ + blob->cfg = kzalloc(count, GFP_KERNEL); +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index 94a99b9089fa..88ca072bb5cd 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ b/sound/soc/intel/skylake/skl-topology.c +@@ -1739,12 +1739,6 @@ static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai, + dev_warn(dai->dev, "Querying NHLT blob from Debugfs!!!!\n"); + cfg = skl_nhlt_get_debugfs_blob(skl->debugfs, + link_type, mconfig->vbus_id); +- if (cfg->size > HDA_SST_CFG_MAX) { +- dev_err(dai->dev, "NHLT debugfs blob is vv large\n"); +- dev_err(dai->dev, "First word is size in blob!!!\n"); +- dev_err(dai->dev, "Recieved size %d\n", cfg->size); +- return -EIO; +- } + } + if (cfg) { + mconfig->formats_config.caps_size = cfg->size; +-- +2.17.1 + diff --git a/patches/0063-drm-i915-Extend-non-readable-mcr-range.drm b/patches/0063-drm-i915-Extend-non-readable-mcr-range.drm new file mode 100644 index 0000000000..56f997260e --- /dev/null +++ b/patches/0063-drm-i915-Extend-non-readable-mcr-range.drm @@ -0,0 +1,33 @@ +From 16ed95ec8a68c40d91bc1c9e3fef7f766a9ea42b Mon Sep 17 00:00:00 2001 +From: Mika Kuoppala +Date: Fri, 9 Aug 2019 17:56:53 +0300 +Subject: [PATCH 063/690] drm/i915: Extend non readable mcr range + +Our current avoidance of non readable mcr range 
was not +inclusive enough. Extend the start and end. + +References: HSDES#1405586840 +Cc: Tvrtko Ursulin +Signed-off-by: Mika Kuoppala +Acked-by: Chris Wilson +Link: https://patchwork.freedesktop.org/patch/msgid/20190809145653.2279-1-mika.kuoppala@linux.intel.com +--- + drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c +index d1b68c868ecd..41d0f786e06d 100644 +--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c ++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c +@@ -1451,7 +1451,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset) + * which only controls CPU initiated MMIO. Routing does not + * work for CS access so we cannot verify them on this path. + */ +- if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff)) ++ if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff)) + return true; + + return false; +-- +2.17.1 + diff --git a/patches/0063-mei-dal-initialize-request-link-list-upon-allocat.security b/patches/0063-mei-dal-initialize-request-link-list-upon-allocat.security new file mode 100644 index 0000000000..2438ce9de3 --- /dev/null +++ b/patches/0063-mei-dal-initialize-request-link-list-upon-allocat.security @@ -0,0 +1,53 @@ +From d06bb0a37acc6b0f30b5460de72bb1e0b66aa63c Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Tue, 3 Sep 2019 13:48:40 +0300 +Subject: [PATCH 63/65] mei: dal: initialize request link list upon allocation + +bh_request_alloc is also the initialization point, +in order to prevent any corruption during release, +it is safer to initialize the link list entry in allocation and +always delete the entry from the list during release, +than relay on zeroed memory allocation. 
+ +Change-Id: Ibdff8cca1354e172091d013ba4ee44f9b98dd822 +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/dal/bh_internal.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/misc/mei/dal/bh_internal.c b/drivers/misc/mei/dal/bh_internal.c +index 14933bf3c295..2842b3c84147 100644 +--- a/drivers/misc/mei/dal/bh_internal.c ++++ b/drivers/misc/mei/dal/bh_internal.c +@@ -128,6 +128,7 @@ static void bh_request_free(struct bh_request_cmd *request) + if (IS_ERR_OR_NULL(request)) + return; + ++ list_del(&request->link); + kfree(request->cmd); + kfree(request->response); + kfree(request); +@@ -162,6 +163,8 @@ static struct bh_request_cmd *bh_request_alloc(const void *hdr, + return ERR_PTR(-ENOMEM); + } + ++ INIT_LIST_HEAD(&request->link); ++ + memcpy(request->cmd, hdr, hdr_len); + request->cmd_len = hdr_len; + +@@ -819,10 +822,8 @@ static void bh_request_list_free(struct list_head *request_list) + { + struct bh_request_cmd *pos, *next; + +- list_for_each_entry_safe(pos, next, request_list, link) { +- list_del(&pos->link); ++ list_for_each_entry_safe(pos, next, request_list, link) + bh_request_free(pos); +- } + + INIT_LIST_HEAD(request_list); + } +-- +2.17.1 + diff --git a/patches/0063-net-stmmac-ethtool-statistic-to-show-tx_pkt_n.connectivity b/patches/0063-net-stmmac-ethtool-statistic-to-show-tx_pkt_n.connectivity new file mode 100644 index 0000000000..228945af9c --- /dev/null +++ b/patches/0063-net-stmmac-ethtool-statistic-to-show-tx_pkt_n.connectivity @@ -0,0 +1,98 @@ +From 185e20ee3edb637988dde2a56d6b00d0af593e4e Mon Sep 17 00:00:00 2001 +From: "Tan, Tee Min" +Date: Thu, 15 Aug 2019 01:05:00 +0800 +Subject: [PATCH 063/108] net: stmmac: ethtool statistic to show tx_pkt_n for + each TX ch + +Adding new statistics for packets transmitted per queue. Up to 8 +queues are supported for now. These counters are incremented by +stmmac_tx_clean. 
+ +Signed-off-by: Tan, Tee Min +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 8 +++++ + .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 8 +++++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 29 +++++++++++++++++++ + 3 files changed, 45 insertions(+) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index e6469e9e5dfa..a6e156205d78 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -91,6 +91,14 @@ struct stmmac_extra_stats { + unsigned long rx_early_irq; + unsigned long threshold; + unsigned long tx_pkt_n; ++ unsigned long q0_tx_pkt_n; ++ unsigned long q1_tx_pkt_n; ++ unsigned long q2_tx_pkt_n; ++ unsigned long q3_tx_pkt_n; ++ unsigned long q4_tx_pkt_n; ++ unsigned long q5_tx_pkt_n; ++ unsigned long q6_tx_pkt_n; ++ unsigned long q7_tx_pkt_n; + unsigned long rx_pkt_n; + unsigned long q0_rx_pkt_n; + unsigned long q1_rx_pkt_n; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index 8a580746ab92..583d4b442f61 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -82,6 +82,14 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { + STMMAC_STAT(rx_early_irq), + STMMAC_STAT(threshold), + STMMAC_STAT(tx_pkt_n), ++ STMMAC_STAT(q0_tx_pkt_n), ++ STMMAC_STAT(q1_tx_pkt_n), ++ STMMAC_STAT(q2_tx_pkt_n), ++ STMMAC_STAT(q3_tx_pkt_n), ++ STMMAC_STAT(q4_tx_pkt_n), ++ STMMAC_STAT(q5_tx_pkt_n), ++ STMMAC_STAT(q6_tx_pkt_n), ++ STMMAC_STAT(q7_tx_pkt_n), + STMMAC_STAT(rx_pkt_n), + STMMAC_STAT(q0_rx_pkt_n), + STMMAC_STAT(q1_rx_pkt_n), +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 2538f3ab33b4..1340bec3c6b8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ 
-1977,6 +1977,35 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) + } else { + priv->dev->stats.tx_packets++; + priv->xstats.tx_pkt_n++; ++ ++ switch (queue) { ++ case 0x0: ++ priv->xstats.q0_tx_pkt_n++; ++ break; ++ case 0x1: ++ priv->xstats.q1_tx_pkt_n++; ++ break; ++ case 0x2: ++ priv->xstats.q2_tx_pkt_n++; ++ break; ++ case 0x3: ++ priv->xstats.q3_tx_pkt_n++; ++ break; ++ case 0x4: ++ priv->xstats.q4_tx_pkt_n++; ++ break; ++ case 0x5: ++ priv->xstats.q5_tx_pkt_n++; ++ break; ++ case 0x6: ++ priv->xstats.q6_tx_pkt_n++; ++ break; ++ case 0x7: ++ priv->xstats.q7_tx_pkt_n++; ++ break; ++ default: ++ break; ++ } + } + stmmac_get_tx_hwtstamp(priv, p, skb); + } +-- +2.17.1 + diff --git a/patches/0063-sos-vhm-remove-set-guest-memory-map-by-CMA.acrn b/patches/0063-sos-vhm-remove-set-guest-memory-map-by-CMA.acrn new file mode 100644 index 0000000000..96dee059a9 --- /dev/null +++ b/patches/0063-sos-vhm-remove-set-guest-memory-map-by-CMA.acrn @@ -0,0 +1,459 @@ +From 549830ddcfdb51bba785b61570cd8d7e32794da5 Mon Sep 17 00:00:00 2001 +From: "Li, Fei1" +Date: Fri, 31 Aug 2018 10:59:02 +0800 +Subject: [PATCH 063/150] sos: vhm: remove set guest memory map by CMA + +We removed CMA Device Manager memory allocation mechanisms and use +hugetlb as the only Device Manager memory allocation mechanism. +So there is no needs to support set guest vm memory by CMA any more. 
+ +Signed-off-by: Li, Fei1 +Acked-by: Anthony Xu +--- + drivers/char/vhm/vhm_dev.c | 14 -- + drivers/vhm/vhm_mm.c | 252 ++--------------------------- + include/linux/vhm/acrn_vhm_mm.h | 13 -- + include/linux/vhm/vhm_ioctl_defs.h | 1 + + include/linux/vhm/vhm_vm_mngt.h | 4 - + 5 files changed, 15 insertions(+), 269 deletions(-) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index 0e6807db5db6..d26cfca96de2 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -110,9 +110,6 @@ static int vhm_dev_open(struct inode *inodep, struct file *filep) + vm->vmid = ACRN_INVALID_VMID; + vm->dev = vhm_device; + +- INIT_LIST_HEAD(&vm->memseg_list); +- mutex_init(&vm->seg_lock); +- + for (i = 0; i < HUGEPAGE_HLIST_ARRAY_SIZE; i++) + INIT_HLIST_HEAD(&vm->hugepage_hlist[i]); + mutex_init(&vm->hugepage_lock); +@@ -269,16 +266,6 @@ static long vhm_dev_ioctl(struct file *filep, + return ret; + } + +- case IC_ALLOC_MEMSEG: { +- struct vm_memseg memseg; +- +- if (copy_from_user(&memseg, (void *)ioctl_param, +- sizeof(struct vm_memseg))) +- return -EFAULT; +- +- return alloc_guest_memseg(vm, &memseg); +- } +- + case IC_SET_MEMSEG: { + struct vm_memmap memmap; + +@@ -560,7 +547,6 @@ static const struct file_operations fops = { + .open = vhm_dev_open, + .read = vhm_dev_read, + .write = vhm_dev_write, +- .mmap = vhm_dev_mmap, + .release = vhm_dev_release, + .unlocked_ioctl = vhm_dev_ioctl, + .poll = vhm_dev_poll, +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index bff448208836..3c0c2acbe522 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -76,14 +76,6 @@ + #include + #include + +-struct guest_memseg { +- struct list_head list; +- u64 vm0_gpa; +- size_t len; +- u64 gpa; +- long vma_count; +-}; +- + static u64 _alloc_memblk(struct device *dev, size_t len) + { + unsigned int count; +@@ -110,52 +102,6 @@ static bool _free_memblk(struct device *dev, u64 vm0_gpa, size_t len) + return dma_release_from_contiguous(dev, 
page, count); + } + +-static int add_guest_memseg(struct vhm_vm *vm, unsigned long vm0_gpa, +- unsigned long guest_gpa, unsigned long len) +-{ +- struct guest_memseg *seg; +- int max_gfn; +- +- seg = kzalloc(sizeof(struct guest_memseg), GFP_KERNEL); +- if (seg == NULL) +- return -ENOMEM; +- +- seg->vm0_gpa = vm0_gpa; +- seg->gpa = guest_gpa; +- seg->len = len; +- +- max_gfn = (seg->gpa + seg->len) >> PAGE_SHIFT; +- if (vm->max_gfn < max_gfn) +- vm->max_gfn = max_gfn; +- +- pr_info("VHM: add memseg with len=0x%lx, vm0_gpa=0x%llx," +- " and its guest gpa = 0x%llx, vm max_gfn 0x%x\n", +- seg->len, seg->vm0_gpa, seg->gpa, vm->max_gfn); +- +- seg->vma_count = 0; +- mutex_lock(&vm->seg_lock); +- list_add(&seg->list, &vm->memseg_list); +- mutex_unlock(&vm->seg_lock); +- +- return 0; +-} +- +-int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) +-{ +- unsigned long vm0_gpa; +- int ret; +- +- vm0_gpa = _alloc_memblk(vm->dev, memseg->len); +- if (vm0_gpa == 0ULL) +- return -ENOMEM; +- +- ret = add_guest_memseg(vm, vm0_gpa, memseg->gpa, memseg->len); +- if (ret < 0) +- _free_memblk(vm->dev, vm0_gpa, memseg->len); +- +- return ret; +-} +- + int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned int mem_access_right, +@@ -223,7 +169,6 @@ int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + { +- struct guest_memseg *seg = NULL; + unsigned int type; + unsigned int mem_type, mem_access_right; + unsigned long guest_gpa, host_gpa; +@@ -232,77 +177,31 @@ int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + if (memmap->type == VM_MEMMAP_SYSMEM && memmap->using_vma) + return hugepage_map_guest(vm, memmap); + +- mutex_lock(&vm->seg_lock); +- +- /* cma or mmio */ +- if (memmap->type == VM_MEMMAP_SYSMEM) { +- list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->gpa == 
memmap->gpa +- && seg->len == memmap->len) +- break; +- } +- if (&seg->list == &vm->memseg_list) { +- mutex_unlock(&vm->seg_lock); +- return -EINVAL; +- } +- guest_gpa = seg->gpa; +- host_gpa = seg->vm0_gpa; +- mem_type = MEM_TYPE_WB; +- mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); +- type = MAP_MEM; +- } else { +- guest_gpa = memmap->gpa; +- host_gpa = acrn_hpa2gpa(memmap->hpa); +- mem_type = MEM_TYPE_UC; +- mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); +- type = MAP_MMIO; ++ /* mmio */ ++ if (memmap->type != VM_MEMMAP_MMIO) { ++ pr_err("vhm: %s invalid memmap type: %d\n", ++ __func__, memmap->type); ++ return -EINVAL; + } ++ guest_gpa = memmap->gpa; ++ host_gpa = acrn_hpa2gpa(memmap->hpa); ++ mem_type = MEM_TYPE_UC; ++ mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); ++ type = MAP_MMIO; + + if (_mem_set_memmap(vm->vmid, guest_gpa, host_gpa, memmap->len, + mem_type, mem_access_right, type) < 0) { + pr_err("vhm: failed to set memmap %ld!\n", vm->vmid); +- mutex_unlock(&vm->seg_lock); + return -EFAULT; + } + +- mutex_unlock(&vm->seg_lock); +- + return 0; + } + + void free_guest_mem(struct vhm_vm *vm) + { +- struct guest_memseg *seg; +- + if (vm->hugetlb_enabled) + return hugepage_free_guest(vm); +- +- mutex_lock(&vm->seg_lock); +- while (!list_empty(&vm->memseg_list)) { +- seg = list_first_entry(&vm->memseg_list, +- struct guest_memseg, list); +- if (!_free_memblk(vm->dev, seg->vm0_gpa, seg->len)) +- pr_warn("failed to free memblk\n"); +- list_del(&seg->list); +- kfree(seg); +- } +- mutex_unlock(&vm->seg_lock); +-} +- +-int check_guest_mem(struct vhm_vm *vm) +-{ +- struct guest_memseg *seg; +- +- mutex_lock(&vm->seg_lock); +- list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->vma_count == 0) +- continue; +- +- mutex_unlock(&vm->seg_lock); +- return -EAGAIN; +- } +- mutex_unlock(&vm->seg_lock); +- return 0; + } + + #define TRUSTY_MEM_GPA_BASE (511UL * 1024UL * 1024UL * 1024UL) +@@ -330,121 +229,17 @@ void 
deinit_trusty(struct vhm_vm *vm) + vm->trusty_host_gpa = 0; + } + +-static void guest_vm_open(struct vm_area_struct *vma) +-{ +- struct vhm_vm *vm = vma->vm_file->private_data; +- struct guest_memseg *seg = vma->vm_private_data; +- +- mutex_lock(&vm->seg_lock); +- seg->vma_count++; +- mutex_unlock(&vm->seg_lock); +-} +- +-static void guest_vm_close(struct vm_area_struct *vma) +-{ +- struct vhm_vm *vm = vma->vm_file->private_data; +- struct guest_memseg *seg = vma->vm_private_data; +- +- mutex_lock(&vm->seg_lock); +- seg->vma_count--; +- BUG_ON(seg->vma_count < 0); +- mutex_unlock(&vm->seg_lock); +-} +- +-static const struct vm_operations_struct guest_vm_ops = { +- .open = guest_vm_open, +- .close = guest_vm_close, +-}; +- +-static int do_mmap_guest(struct file *file, +- struct vm_area_struct *vma, struct guest_memseg *seg) +-{ +- struct page *page; +- size_t size = seg->len; +- unsigned long pfn; +- unsigned long start_addr; +- +- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTCOPY; +- pfn = seg->vm0_gpa >> PAGE_SHIFT; +- start_addr = vma->vm_start; +- while (size > 0) { +- page = pfn_to_page(pfn); +- if (vm_insert_page(vma, start_addr, page)) +- return -EINVAL; +- size -= PAGE_SIZE; +- start_addr += PAGE_SIZE; +- pfn++; +- } +- seg->vma_count++; +- vma->vm_ops = &guest_vm_ops; +- vma->vm_private_data = (void *)seg; +- +- pr_info("VHM: mmap for memseg [seg vm0_gpa=0x%llx, gpa=0x%llx] " +- "to start addr 0x%lx\n", +- seg->vm0_gpa, seg->gpa, start_addr); +- +- return 0; +-} +- +-int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma) +-{ +- struct vhm_vm *vm = file->private_data; +- struct guest_memseg *seg; +- u64 offset = vma->vm_pgoff << PAGE_SHIFT; +- size_t len = vma->vm_end - vma->vm_start; +- int ret; +- +- if (vm->hugetlb_enabled) +- return -EINVAL; +- +- mutex_lock(&vm->seg_lock); +- list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->gpa != offset || seg->len != len) +- continue; +- +- ret = do_mmap_guest(file, vma, seg); +- 
mutex_unlock(&vm->seg_lock); +- return ret; +- } +- mutex_unlock(&vm->seg_lock); +- return -EINVAL; +-} +- +-static void *do_map_guest_phys(struct vhm_vm *vm, u64 guest_phys, size_t size) +-{ +- struct guest_memseg *seg; +- +- mutex_lock(&vm->seg_lock); +- list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->gpa > guest_phys || +- guest_phys >= seg->gpa + seg->len) +- continue; +- +- if (guest_phys + size > seg->gpa + seg->len) { +- mutex_unlock(&vm->seg_lock); +- return NULL; +- } +- +- mutex_unlock(&vm->seg_lock); +- return phys_to_virt(seg->vm0_gpa + guest_phys - seg->gpa); +- } +- mutex_unlock(&vm->seg_lock); +- return NULL; +-} +- + void *map_guest_phys(unsigned long vmid, u64 guest_phys, size_t size) + { + struct vhm_vm *vm; +- void *ret; ++ void *ret = NULL; + + vm = find_get_vm(vmid); + if (vm == NULL) +- return NULL; ++ return ret; + + if (vm->hugetlb_enabled) + ret = hugepage_map_guest_phys(vm, guest_phys, size); +- else +- ret = do_map_guest_phys(vm, guest_phys, size); + + put_vm(vm); + +@@ -452,38 +247,19 @@ void *map_guest_phys(unsigned long vmid, u64 guest_phys, size_t size) + } + EXPORT_SYMBOL(map_guest_phys); + +-static int do_unmap_guest_phys(struct vhm_vm *vm, u64 guest_phys) +-{ +- struct guest_memseg *seg; +- +- mutex_lock(&vm->seg_lock); +- list_for_each_entry(seg, &vm->memseg_list, list) { +- if (seg->gpa <= guest_phys && +- guest_phys < seg->gpa + seg->len) { +- mutex_unlock(&vm->seg_lock); +- return 0; +- } +- } +- mutex_unlock(&vm->seg_lock); +- +- return -ESRCH; +-} +- + int unmap_guest_phys(unsigned long vmid, u64 guest_phys) + { + struct vhm_vm *vm; +- int ret; ++ int ret = -ESRCH; + + vm = find_get_vm(vmid); + if (vm == NULL) { + pr_warn("vm_list corrupted\n"); +- return -ESRCH; ++ return ret; + } + + if (vm->hugetlb_enabled) + ret = hugepage_unmap_guest_phys(vm, guest_phys); +- else +- ret = do_unmap_guest_phys(vm, guest_phys); + + put_vm(vm); + return ret; +diff --git a/include/linux/vhm/acrn_vhm_mm.h 
b/include/linux/vhm/acrn_vhm_mm.h +index 21269e47b26a..645a8a56531e 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -163,8 +163,6 @@ int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + + int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma); + +-int check_guest_mem(struct vhm_vm *vm); +- + /** + * free_guest_mem - free memory of guest + * +@@ -174,17 +172,6 @@ int check_guest_mem(struct vhm_vm *vm); + */ + void free_guest_mem(struct vhm_vm *vm); + +-/** +- * alloc_guest_memseg - alloc memory of guest according to pre-defined +- * memory segment info +- * +- * @vm: pointer to guest vm +- * @memseg: pointer to guest memory segment info +- * +- * Return: +- */ +-int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg); +- + /** + * map_guest_memseg - set guest mmapping of memory according to + * pre-defined memory mapping info +diff --git a/include/linux/vhm/vhm_ioctl_defs.h b/include/linux/vhm/vhm_ioctl_defs.h +index ad158f8949ba..3b9b4afc7d82 100644 +--- a/include/linux/vhm/vhm_ioctl_defs.h ++++ b/include/linux/vhm/vhm_ioctl_defs.h +@@ -91,6 +91,7 @@ + + /* Guest memory management */ + #define IC_ID_MEM_BASE 0x40UL ++/* IC_ALLOC_MEMSEG not used */ + #define IC_ALLOC_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x00) + #define IC_SET_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x01) + +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index 4fed61229ad9..fe0ab90fc425 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -80,8 +80,6 @@ extern struct mutex vhm_vm_list_lock; + * @trusty_host_gpa: host physical address of continuous memory for Trusty + * @ioreq_fallback_client: default ioreq client + * @refcnt: reference count of guest +- * @seg_lock: mutex to protect memseg_list +- * @memseg_list: list of memseg + * @hugepage_lock: mutex to protect hugepage_hlist + * @hugepage_hlist: hash list of hugepage + * @max_gfn: maximum guest page 
frame number +@@ -98,8 +96,6 @@ struct vhm_vm { + unsigned long trusty_host_gpa; + int ioreq_fallback_client; + long refcnt; +- struct mutex seg_lock; +- struct list_head memseg_list; + struct mutex hugepage_lock; + struct hlist_head hugepage_hlist[HUGEPAGE_HLIST_ARRAY_SIZE]; + int max_gfn; +-- +2.17.1 + diff --git a/patches/0063-trusty-Add-dependent-header-files-in-header-file.trusty b/patches/0063-trusty-Add-dependent-header-files-in-header-file.trusty new file mode 100644 index 0000000000..d38bb948f7 --- /dev/null +++ b/patches/0063-trusty-Add-dependent-header-files-in-header-file.trusty @@ -0,0 +1,48 @@ +From ff007c4ac2901936642010e865f1cb4d17e53794 Mon Sep 17 00:00:00 2001 +From: Zhou Furong +Date: Mon, 22 Jul 2019 08:35:05 +0800 +Subject: [PATCH 63/63] trusty: Add dependent header files in header file + +Some dependent header files included in source files before other +header file that depend them. In the case of dependent headers +updated, we need include more header files earlier in source file. +The other way is add these headers to header file to satisfy all +source file need. 
+ +Change-Id: I1e78698de0b4503e3ad1ed8b538632d3887c5f61 +Tracked-On: PKT-2464 +--- + include/linux/trusty/smwall.h | 2 ++ + include/linux/trusty/trusty_ipc.h | 3 +++ + 2 files changed, 5 insertions(+) + +diff --git a/include/linux/trusty/smwall.h b/include/linux/trusty/smwall.h +index 66368de8c137..1d2916061d2f 100644 +--- a/include/linux/trusty/smwall.h ++++ b/include/linux/trusty/smwall.h +@@ -23,6 +23,8 @@ + #ifndef __LINUX_TRUSTY_SMWALL_H + #define __LINUX_TRUSTY_SMWALL_H + ++#include ++ + /** + * DOC: Introduction + * +diff --git a/include/linux/trusty/trusty_ipc.h b/include/linux/trusty/trusty_ipc.h +index 4ca15938a854..4d393f918eac 100644 +--- a/include/linux/trusty/trusty_ipc.h ++++ b/include/linux/trusty/trusty_ipc.h +@@ -14,6 +14,9 @@ + #ifndef __LINUX_TRUSTY_TRUSTY_IPC_H + #define __LINUX_TRUSTY_TRUSTY_IPC_H + ++#include ++#include ++ + struct tipc_chan; + + struct tipc_msg_buf { +-- +2.17.1 + diff --git a/patches/0064-ASoC-Intel-Skylake-add-ssp-blob-override-support-for.audio b/patches/0064-ASoC-Intel-Skylake-add-ssp-blob-override-support-for.audio new file mode 100644 index 0000000000..5a0fd7400f --- /dev/null +++ b/patches/0064-ASoC-Intel-Skylake-add-ssp-blob-override-support-for.audio @@ -0,0 +1,121 @@ +From 466d834a0aa761a449dc009a8270b793cc4c01ba Mon Sep 17 00:00:00 2001 +From: Omair M Abdullah +Date: Tue, 15 Sep 2015 17:46:57 +0530 +Subject: [PATCH 064/193] ASoC: Intel: Skylake: add ssp blob override support + for capture + +Capture on SSP can have different blob, so add support for different blobs for +PB/CAP on same SSP. 
+ +Change-Id: I2bfc6aad78ba03bcc9ee05c9c4633314ada3dd69 +Signed-off-by: Omair M Abdullah +--- + sound/soc/intel/skylake/skl-debug.c | 19 ++++++++++++++----- + sound/soc/intel/skylake/skl-topology.c | 3 ++- + sound/soc/intel/skylake/skl.h | 7 ++++--- + 3 files changed, 20 insertions(+), 9 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index 6764908a146d..9e3d871fb46f 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ b/sound/soc/intel/skylake/skl-debug.c +@@ -35,7 +35,7 @@ struct skl_debug { + struct dentry *modules; + struct dentry *nhlt; + u8 fw_read_buff[FW_REG_BUF]; +- struct nhlt_blob ssp_blob[MAX_SSP]; ++ struct nhlt_blob ssp_blob[2*MAX_SSP]; + struct nhlt_blob dmic_blob; + }; + +@@ -469,7 +469,8 @@ static int skl_debugfs_init_ipc(struct skl_debug *d) + } + + struct nhlt_specific_cfg +-*skl_nhlt_get_debugfs_blob(struct skl_debug *d, u8 link_type, u32 instance) ++*skl_nhlt_get_debugfs_blob(struct skl_debug *d, u8 link_type, u32 instance, ++ u8 stream) + { + switch (link_type) { + case NHLT_LINK_DMIC: +@@ -479,7 +480,10 @@ struct nhlt_specific_cfg + if (instance >= MAX_SSP) + return NULL; + +- return d->ssp_blob[instance].cfg; ++ if (stream == SNDRV_PCM_STREAM_PLAYBACK) ++ return d->ssp_blob[instance].cfg; ++ else ++ return d->ssp_blob[MAX_SSP + instance].cfg; + + default: + break; +@@ -553,7 +557,7 @@ static void skl_exit_nhlt(struct skl_debug *d) + + /* free blob memory, if allocated */ + for (i = 0; i < MAX_SSP; i++) +- kfree(d->ssp_blob[i].cfg); ++ kfree(d->ssp_blob[MAX_SSP + i].cfg); + } + + static ssize_t nhlt_control_read(struct file *file, +@@ -614,11 +618,16 @@ static int skl_init_nhlt(struct skl_debug *d) + } + + for (i = 0; i < MAX_SSP; i++) { +- snprintf(name, (sizeof(name)-1), "ssp%d", i); ++ snprintf(name, (sizeof(name)-1), "ssp%dp", i); + if (!debugfs_create_file(name, + 0644, d->nhlt, + &d->ssp_blob[i], &nhlt_fops)) + dev_err(d->dev, "%s: debugfs init failed\n", name); ++ snprintf(name, 
(sizeof(name)-1), "ssp%dc", i); ++ if (!debugfs_create_file(name, ++ 0644, d->nhlt, ++ &d->ssp_blob[MAX_SSP + i], &nhlt_fops)) ++ dev_err(d->dev, "%s: debugfs init failed\n", name); + } + + if (!debugfs_create_file("dmic", 0644, +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index 88ca072bb5cd..7229263fb04c 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ b/sound/soc/intel/skylake/skl-topology.c +@@ -1738,7 +1738,8 @@ static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai, + } else { + dev_warn(dai->dev, "Querying NHLT blob from Debugfs!!!!\n"); + cfg = skl_nhlt_get_debugfs_blob(skl->debugfs, +- link_type, mconfig->vbus_id); ++ link_type, mconfig->vbus_id, ++ params->stream); + } + if (cfg) { + mconfig->formats_config.caps_size = cfg->size; +diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h +index 73f8d94bc27b..ea57d72c9300 100644 +--- a/sound/soc/intel/skylake/skl.h ++++ b/sound/soc/intel/skylake/skl.h +@@ -191,8 +191,8 @@ void skl_debug_init_module(struct skl_debug *d, + struct snd_soc_dapm_widget *w, + struct skl_module_cfg *mconfig); + struct nhlt_specific_cfg +-*skl_nhlt_get_debugfs_blob(struct skl_debug *d, u8 link_type, u32 instance); +- ++*skl_nhlt_get_debugfs_blob(struct skl_debug *d, u8 link_type, u32 instance, ++ u8 stream); + #else + static inline struct skl_debug *skl_debugfs_init(struct skl_dev *skl) + { +@@ -207,7 +207,8 @@ static inline void skl_debug_init_module(struct skl_debug *d, + struct skl_module_cfg *mconfig) + {} + static inline struct nhlt_specific_cfg +-*skl_nhlt_get_debugfs_blob(struct skl_debug *d, u8 link_type, u32 instance) ++*skl_nhlt_get_debugfs_blob(struct skl_debug *d, u8 link_type, u32 instance, ++ u8 stream) + { + return NULL; + } +-- +2.17.1 + diff --git a/patches/0064-drm-i915-selftests-Ignore-coherency-failures-on-Broadw.drm b/patches/0064-drm-i915-selftests-Ignore-coherency-failures-on-Broadw.drm new file mode 100644 index 
0000000000..3ce4baa59a --- /dev/null +++ b/patches/0064-drm-i915-selftests-Ignore-coherency-failures-on-Broadw.drm @@ -0,0 +1,36 @@ +From f1ecb54283c392b2d3dcceb60896fc7056dfff7c Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Mon, 26 Aug 2019 14:38:37 +0100 +Subject: [PATCH 064/690] drm/i915/selftests: Ignore coherency failures on + Broadwater + +We've been ignoring similar coherency issues in IGT for Broadwater, and +specifically Broadwater (original gen4) and not, for example, Crestline +(same generation as Broadwater, but the mobile variant). Without any +means to reproduce locally (I have a 965GM but alas no 965G), fixing will +be slow, so tell CI to ignore any failure until we are ready with a fix. + +Signed-off-by: Chris Wilson +Cc: Joonas Lahtinen +Acked-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190826133837.6784-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c +index e4945ae23e63..626f352e9c3d 100644 +--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c ++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c +@@ -1119,6 +1119,8 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine) + case 3: + /* maybe only uses physical not virtual addresses */ + return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); ++ case 4: ++ return !IS_I965G(engine->i915); /* who knows! 
*/ + case 6: + return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ + default: +-- +2.17.1 + diff --git a/patches/0064-mei-dal-add-test-module.security b/patches/0064-mei-dal-add-test-module.security new file mode 100644 index 0000000000..57a871f053 --- /dev/null +++ b/patches/0064-mei-dal-add-test-module.security @@ -0,0 +1,1024 @@ +From e421cefe95a9ba44a0a5306f33ff1b6edffeb010 Mon Sep 17 00:00:00 2001 +From: Yael Samet +Date: Mon, 14 Aug 2017 11:32:37 +0300 +Subject: [PATCH 64/65] mei: dal: add test module + +DAL test module allows the user space to exercise the DAL kernel space +API. +It exposes a character device to the user space, and calls DAL api's +according to the protocol which is defined in kdi_test.h header file. + +To enable the sample driver select +CONFIG_SAMPLE_INTEL_MEI_DAL_TEST + +Change-Id: Id7ec2e7d3f22b07c41941dc5bada6edaf16e893b +Signed-off-by: Tomas Winkler +Signed-off-by: Yael Samet +--- + samples/Kconfig | 9 + + samples/mei/Makefile | 3 + + samples/mei/dal_test.c | 776 ++++++++++++++++++++++++++++++++++++ + samples/mei/uapi/dal_test.h | 176 ++++++++ + 4 files changed, 964 insertions(+) + create mode 100644 samples/mei/dal_test.c + create mode 100644 samples/mei/uapi/dal_test.h + +diff --git a/samples/Kconfig b/samples/Kconfig +index b663d9d24114..58df5431ce7c 100644 +--- a/samples/Kconfig ++++ b/samples/Kconfig +@@ -175,5 +175,14 @@ config SAMPLE_INTEL_MEI + help + Build a sample program to work with mei device. + ++config SAMPLE_INTEL_MEI_DAL_TEST ++ tristate "Test Module for Dynamic Application Loader for ME" ++ depends on INTEL_MEI ++ depends on INTEL_MEI_DAL ++ select SAMPLE_INTEL_MEI ++ help ++ Testing Module for Dynamic Application Loader, to test the ++ kernel space api from a user space client. The test module ++ calls the kernel space api functions of DAL module. 
+ + endif # SAMPLES +diff --git a/samples/mei/Makefile b/samples/mei/Makefile +index 27f37efdadb4..03fe47b085df 100644 +--- a/samples/mei/Makefile ++++ b/samples/mei/Makefile +@@ -1,5 +1,8 @@ + # SPDX-License-Identifier: GPL-2.0 + # Copyright (c) 2012-2019, Intel Corporation. All rights reserved. ++# ++ ++obj-$(CONFIG_SAMPLE_INTEL_MEI_DAL_TEST) := dal_test.o + + hostprogs-y := mei-amt-version + +diff --git a/samples/mei/dal_test.c b/samples/mei/dal_test.c +new file mode 100644 +index 000000000000..f58dc84354d3 +--- /dev/null ++++ b/samples/mei/dal_test.c +@@ -0,0 +1,776 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(c) 2016-2019 Intel Corporation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "uapi/dal_test.h" ++ ++#define KDI_MODULE "mei_dal" ++ ++/** ++ * this is the max data size possible: ++ * there is no actually max size for acp file, ++ * but for testing 512k is good enough ++ */ ++#define MAX_DATA_SIZE SZ_512K ++ ++#define KDI_TEST_OPENED 0 ++ ++/** ++ * struct dal_test_data - dal test cmd and response data ++ * ++ * @cmd_data_size: size of cmd got from user space ++ * @cmd_data: the cmd got from user space ++ * @cmd_lock: protects cmd_data buffer ++ * ++ * @resp_data_size: size of response from kdi ++ * @resp_data: the response from kdi ++ * @resp_lock: protects resp_data buffer ++ */ ++struct dal_test_data { ++ u32 cmd_data_size; ++ u8 *cmd_data; ++ struct mutex cmd_lock; /* protects cmd_data buffer */ ++ ++ u32 resp_data_size; ++ u8 *resp_data; ++ struct mutex resp_lock; /* protects resp_data buffer */ ++}; ++ ++/** ++ * struct dal_test_device - dal test private data ++ * ++ * @dev: the device structure ++ * @cdev: character device ++ * ++ * @kdi_test_status: status of test module ++ * @data: cmd and response data ++ */ ++static struct dal_test_device { ++ struct device *dev; 
++ struct cdev cdev; ++ ++ unsigned long kdi_test_status; ++ struct dal_test_data *data; ++} dal_test_dev; ++ ++#if IS_MODULE(CONFIG_INTEL_MEI_DAL) ++/** ++ * dal_test_find_module - find the given module ++ * ++ * @mod_name: the module name to find ++ * ++ * Return: pointer to the module if it is found ++ * NULL otherwise ++ */ ++static struct module *dal_test_find_module(const char *mod_name) ++{ ++ struct module *mod; ++ ++ mutex_lock(&module_mutex); ++ mod = find_module(mod_name); ++ mutex_unlock(&module_mutex); ++ ++ return mod; ++} ++ ++/** ++ * dal_test_load_kdi - load kdi module ++ * ++ * @dev: dal test device ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int dal_test_load_kdi(struct dal_test_device *dev) ++{ ++ struct module *mod; ++ ++ /* load KDI if it wasn't loaded */ ++ request_module(KDI_MODULE); ++ ++ mod = dal_test_find_module(KDI_MODULE); ++ if (!mod) { ++ dev_err(dev->dev, "failed to find KDI module: %s\n", ++ KDI_MODULE); ++ return -ENODEV; ++ } ++ ++ if (!try_module_get(mod)) { ++ dev_err(dev->dev, "failed to get KDI module\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++/** ++ * dal_test_unload_kdi - unload kdi module ++ * ++ * @dev: dal test device ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int dal_test_unload_kdi(struct dal_test_device *dev) ++{ ++ struct module *mod; ++ ++ mod = dal_test_find_module(KDI_MODULE); ++ if (!mod) { ++ dev_err(dev->dev, "failed to find KDI module: %s\n", ++ KDI_MODULE); ++ return -ENODEV; ++ } ++ module_put(mod); ++ ++ return 0; ++} ++#else ++static inline int dal_test_load_kdi(struct dal_test_device *dev) { return 0; } ++static inline int dal_test_unload_kdi(struct dal_test_device *dev) { return 0; } ++#endif ++ ++/** ++ * dal_test_result_set - set data to the result buffer ++ * ++ * @test_data: test command and response buffers ++ * @data: new data ++ * @size: size of the data buffer ++ */ ++static void dal_test_result_set(struct dal_test_data *test_data, ++ void 
*data, u32 size) ++{ ++ memcpy(test_data->resp_data, data, size); ++ test_data->resp_data_size = size; ++} ++ ++/** ++ * dal_test_result_append - append data to the result buffer ++ * ++ * @test_data: test command and response buffers ++ * @data: new data ++ * @size: size of the data buffer ++ */ ++static void dal_test_result_append(struct dal_test_data *test_data, ++ void *data, u32 size) ++{ ++ size_t offset = test_data->resp_data_size; ++ ++ memcpy(test_data->resp_data + offset, data, size); ++ test_data->resp_data_size += size; ++} ++ ++/** ++ * dal_test_send_and_recv - call send and receive function of kdi ++ * ++ * @dev: dal test device ++ * @t_cmd: the command to send kdi ++ * @t_data: test command and response buffers ++ */ ++static void dal_test_send_and_recv(struct dal_test_device *dev, ++ struct kdi_test_command *t_cmd, ++ struct dal_test_data *t_data) ++{ ++ struct send_and_rcv_cmd *cmd; ++ struct send_and_rcv_resp resp; ++ ssize_t data_size; ++ size_t output_len; ++ s32 response_code; ++ u8 *input; ++ u8 *output; ++ s32 status; ++ ++ memset(&resp, 0, sizeof(resp)); ++ ++ cmd = (struct send_and_rcv_cmd *)t_cmd->data; ++ data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - ++ sizeof(*cmd); ++ if (data_size < 0) { ++ dev_dbg(dev->dev, "malformed command struct: data_size = %zu\n", ++ data_size); ++ resp.test_mod_status = -EINVAL; ++ ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ mutex_unlock(&t_data->resp_lock); ++ return; ++ } ++ ++ response_code = 0; ++ output = NULL; ++ input = (data_size) ? cmd->input : NULL; ++ output_len = (cmd->is_output_len_ptr) ? cmd->output_buf_len : 0; ++ ++ dev_dbg(dev->dev, "call dal_send_and_receive: handle=%llu command_id=%d input_len=%zd\n", ++ cmd->session_handle, cmd->command_id, data_size); ++ ++ status = dal_send_and_receive(cmd->session_handle, cmd->command_id, ++ input, data_size, ++ cmd->is_output_buf ? &output : NULL, ++ cmd->is_output_len_ptr ? 
++ &output_len : NULL, ++ cmd->is_response_code_ptr ? ++ &response_code : NULL); ++ ++ dev_dbg(dev->dev, "dal_send_and_receive return: status=%d output_len=%zu response_code=%d\n", ++ status, output_len, response_code); ++ ++ resp.output_len = (u32)output_len; ++ resp.response_code = response_code; ++ resp.status = status; ++ resp.test_mod_status = 0; ++ ++ /* in case the call failed we don't copy the data */ ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ if (output && resp.output_len) ++ dal_test_result_append(t_data, output, resp.output_len); ++ mutex_unlock(&t_data->resp_lock); ++ ++ kfree(output); ++} ++ ++/** ++ * dal_test_create_session - call create session function of kdi ++ * ++ * @dev: dal test device ++ * @t_cmd: the command to send kdi ++ * @t_data: test command and response buffers ++ */ ++static void dal_test_create_session(struct dal_test_device *dev, ++ struct kdi_test_command *t_cmd, ++ struct dal_test_data *t_data) ++{ ++ struct session_create_cmd *cmd; ++ struct session_create_resp resp; ++ u32 data_size; ++ u64 handle; ++ char *app_id; ++ u8 *acp_pkg; ++ u8 *init_params; ++ u32 offset; ++ s32 status; ++ ++ memset(&resp, 0, sizeof(resp)); ++ ++ cmd = (struct session_create_cmd *)t_cmd->data; ++ data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - ++ sizeof(*cmd); ++ ++ if (cmd->app_id_len + cmd->acp_pkg_len + cmd->init_param_len != ++ data_size) { ++ dev_dbg(dev->dev, "malformed command struct: data_size = %d\n", ++ data_size); ++ resp.test_mod_status = -EINVAL; ++ ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ mutex_unlock(&t_data->resp_lock); ++ return; ++ } ++ ++ handle = 0; ++ ++ offset = 0; ++ app_id = (cmd->app_id_len) ? cmd->data + offset : NULL; ++ offset += cmd->app_id_len; ++ ++ acp_pkg = (cmd->acp_pkg_len) ? cmd->data + offset : NULL; ++ offset += cmd->acp_pkg_len; ++ ++ init_params = (cmd->init_param_len) ? 
cmd->data + offset : NULL; ++ offset += cmd->init_param_len; ++ ++ dev_dbg(dev->dev, "call dal_create_session params: app_id = %s, app_id len = %d, acp pkg len = %d, init params len = %d\n", ++ app_id, cmd->app_id_len, cmd->acp_pkg_len, cmd->init_param_len); ++ ++ status = dal_create_session(cmd->is_session_handle_ptr ? ++ &handle : NULL, ++ app_id, acp_pkg, ++ cmd->acp_pkg_len, ++ init_params, ++ cmd->init_param_len); ++ dev_dbg(dev->dev, "dal_create_session return: status = %d, handle = %llu\n", ++ status, handle); ++ ++ resp.session_handle = handle; ++ resp.status = status; ++ resp.test_mod_status = 0; ++ ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ mutex_unlock(&t_data->resp_lock); ++} ++ ++/** ++ * dal_test_close_session - call close session function of kdi ++ * ++ * @dev: dal test device ++ * @t_cmd: the command to send kdi ++ * @t_data: test command and response buffers ++ */ ++static void dal_test_close_session(struct dal_test_device *dev, ++ struct kdi_test_command *t_cmd, ++ struct dal_test_data *t_data) ++{ ++ struct session_close_cmd *cmd; ++ struct session_close_resp resp; ++ ++ memset(&resp, 0, sizeof(resp)); ++ ++ cmd = (struct session_close_cmd *)t_cmd->data; ++ if (t_data->cmd_data_size != sizeof(t_cmd->cmd_id) + sizeof(*cmd)) { ++ dev_dbg(dev->dev, "malformed command struct\n"); ++ resp.test_mod_status = -EINVAL; ++ ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ mutex_unlock(&t_data->resp_lock); ++ return; ++ } ++ ++ resp.status = dal_close_session(cmd->session_handle); ++ resp.test_mod_status = 0; ++ ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ mutex_unlock(&t_data->resp_lock); ++} ++ ++/** ++ * dal_test_version_info - call get version function of kdi ++ * ++ * @dev: dal test device ++ * @t_cmd: the command to send kdi ++ * @t_data: test command and response buffers ++ */ ++static void 
dal_test_version_info(struct dal_test_device *dev, ++ struct kdi_test_command *t_cmd, ++ struct dal_test_data *t_data) ++{ ++ struct version_get_info_cmd *cmd; ++ struct version_get_info_resp resp; ++ struct dal_version_info *version; ++ ++ memset(&resp, 0, sizeof(resp)); ++ ++ cmd = (struct version_get_info_cmd *)t_cmd->data; ++ if (t_data->cmd_data_size != sizeof(t_cmd->cmd_id) + sizeof(*cmd)) { ++ dev_dbg(dev->dev, "malformed command struct\n"); ++ resp.test_mod_status = -EINVAL; ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ mutex_unlock(&t_data->resp_lock); ++ return; ++ } ++ ++ version = (cmd->is_version_ptr) ? ++ (struct dal_version_info *)resp.kdi_version : NULL; ++ ++ resp.status = dal_get_version_info(version); ++ resp.test_mod_status = 0; ++ ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ mutex_unlock(&t_data->resp_lock); ++} ++ ++/** ++ * dal_test_set_ex_access - call set/remove access function of kdi ++ * ++ * @dev: dal test device ++ * @t_cmd: the command to send kdi ++ * @t_data: test command and response buffers ++ * @set_access: true when calling set access function ++ * false when calling remove access function ++ */ ++static void dal_test_set_ex_access(struct dal_test_device *dev, ++ struct kdi_test_command *t_cmd, ++ struct dal_test_data *t_data, ++ bool set_access) ++{ ++ struct ta_access_set_remove_cmd *cmd; ++ struct ta_access_set_remove_resp resp; ++ u32 data_size; ++ uuid_t app_uuid; ++ char *app_id; ++ s32 status; ++ ++ memset(&resp, 0, sizeof(resp)); ++ ++ cmd = (struct ta_access_set_remove_cmd *)t_cmd->data; ++ data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - ++ sizeof(*cmd); ++ ++ if (cmd->app_id_len != data_size) { ++ dev_dbg(dev->dev, "malformed command struct\n"); ++ resp.test_mod_status = -EINVAL; ++ ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ mutex_unlock(&t_data->resp_lock); ++ return; ++ 
} ++ ++ app_id = (cmd->app_id_len) ? cmd->data : NULL; ++ ++ status = dal_uuid_parse(app_id, &app_uuid); ++ if (status < 0) ++ goto out; ++ ++ if (set_access) ++ status = dal_set_ta_exclusive_access(&app_uuid); ++ else ++ status = dal_unset_ta_exclusive_access(&app_uuid); ++ ++out: ++ resp.status = status; ++ resp.test_mod_status = 0; ++ ++ mutex_lock(&t_data->resp_lock); ++ dal_test_result_set(t_data, &resp, sizeof(resp)); ++ mutex_unlock(&t_data->resp_lock); ++} ++ ++/** ++ * dal_test_kdi_command - parse and invoke the requested command ++ * ++ * @dev: dal test device ++ */ ++static void dal_test_kdi_command(struct dal_test_device *dev) ++{ ++ struct dal_test_data *test_data; ++ struct kdi_test_command *cmd; ++ s32 status; ++ ++ test_data = dev->data; ++ cmd = (struct kdi_test_command *)test_data->cmd_data; ++ ++ if (test_data->cmd_data_size < sizeof(cmd->cmd_id)) { ++ dev_dbg(dev->dev, "malformed command struct\n"); ++ status = -EINVAL; ++ goto prep_err_test_mod; ++ } ++ ++ switch (cmd->cmd_id) { ++ case KDI_SESSION_CREATE: { ++ dev_dbg(dev->dev, "KDI_CREATE_SESSION[%d]\n", cmd->cmd_id); ++ dal_test_create_session(dev, cmd, test_data); ++ break; ++ } ++ case KDI_SESSION_CLOSE: { ++ dev_dbg(dev->dev, "KDI_CLOSE_SESSION[%d]\n", cmd->cmd_id); ++ dal_test_close_session(dev, cmd, test_data); ++ break; ++ } ++ case KDI_SEND_AND_RCV: { ++ dev_dbg(dev->dev, "KDI_SEND_AND_RCV[%d]\n", cmd->cmd_id); ++ dal_test_send_and_recv(dev, cmd, test_data); ++ break; ++ } ++ case KDI_VERSION_GET_INFO: { ++ dev_dbg(dev->dev, "KDI_GET_VERSION_INFO[%d]\n", cmd->cmd_id); ++ dal_test_version_info(dev, cmd, test_data); ++ break; ++ } ++ case KDI_EXCLUSIVE_ACCESS_SET: ++ case KDI_EXCLUSIVE_ACCESS_REMOVE: { ++ dev_dbg(dev->dev, "KDI_SET_EXCLUSIVE_ACCESS or KDI_REMOVE_EXCLUSIVE_ACCESS[%d]\n", ++ cmd->cmd_id); ++ dal_test_set_ex_access(dev, cmd, test_data, ++ cmd->cmd_id == KDI_EXCLUSIVE_ACCESS_SET); ++ break; ++ } ++ default: ++ dev_dbg(dev->dev, "unknown command %d\n", cmd->cmd_id); ++ 
status = -EINVAL; ++ goto prep_err_test_mod; ++ } ++ ++ return; ++ ++prep_err_test_mod: ++ mutex_lock(&test_data->resp_lock); ++ dal_test_result_set(test_data, &status, sizeof(status)); ++ mutex_unlock(&test_data->resp_lock); ++} ++ ++/** ++ * dal_test_read - dal test read function ++ * ++ * @filp: pointer to file structure ++ * @buff: pointer to user buffer ++ * @count: buffer length ++ * @offp: data offset in buffer ++ * ++ * Return: >=0 data length on success ++ * <0 on failure ++ */ ++static ssize_t dal_test_read(struct file *filp, char __user *buff, size_t count, ++ loff_t *offp) ++{ ++ struct dal_test_device *dev; ++ struct dal_test_data *test_data; ++ int ret; ++ ++ dev = filp->private_data; ++ test_data = dev->data; ++ ++ mutex_lock(&test_data->resp_lock); ++ ++ if (test_data->resp_data_size > count) { ++ ret = -EMSGSIZE; ++ goto unlock; ++ } ++ ++ dev_dbg(dev->dev, "copying %d bytes to userspace\n", ++ test_data->resp_data_size); ++ if (copy_to_user(buff, test_data->resp_data, ++ test_data->resp_data_size)) { ++ dev_dbg(dev->dev, "copy_to_user failed\n"); ++ ret = -EFAULT; ++ goto unlock; ++ } ++ ret = test_data->resp_data_size; ++ ++unlock: ++ mutex_unlock(&test_data->resp_lock); ++ ++ return ret; ++} ++ ++/** ++ * dal_test_write - dal test write function ++ * ++ * @filp: pointer to file structure ++ * @buff: pointer to user buffer ++ * @count: buffer length ++ * @offp: data offset in buffer ++ * ++ * Return: >=0 data length on success ++ * <0 on failure ++ */ ++static ssize_t dal_test_write(struct file *filp, const char __user *buff, ++ size_t count, loff_t *offp) ++{ ++ struct dal_test_device *dev; ++ struct dal_test_data *test_data; ++ ++ dev = filp->private_data; ++ test_data = dev->data; ++ ++ if (count > MAX_DATA_SIZE) ++ return -EMSGSIZE; ++ ++ mutex_lock(&test_data->cmd_lock); ++ ++ if (copy_from_user(test_data->cmd_data, buff, count)) { ++ mutex_unlock(&test_data->cmd_lock); ++ dev_dbg(dev->dev, "copy_from_user failed\n"); ++ return -EFAULT; ++ } 
++ ++ test_data->cmd_data_size = count; ++ dev_dbg(dev->dev, "write %zu bytes\n", count); ++ ++ dal_test_kdi_command(dev); ++ ++ mutex_unlock(&test_data->cmd_lock); ++ ++ return count; ++} ++ ++/** ++ * dal_test_open - dal test open function ++ * ++ * @inode: pointer to inode structure ++ * @filp: pointer to file structure ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int dal_test_open(struct inode *inode, struct file *filp) ++{ ++ struct dal_test_device *dev; ++ struct dal_test_data *test_data; ++ int ret; ++ ++ dev = container_of(inode->i_cdev, struct dal_test_device, cdev); ++ if (!dev) ++ return -ENODEV; ++ ++ /* single open */ ++ if (test_and_set_bit(KDI_TEST_OPENED, &dev->kdi_test_status)) ++ return -EBUSY; ++ ++ test_data = kzalloc(sizeof(*test_data), GFP_KERNEL); ++ if (!test_data) { ++ ret = -ENOMEM; ++ goto err_clear_bit; ++ } ++ ++ test_data->cmd_data = kzalloc(MAX_DATA_SIZE, GFP_KERNEL); ++ test_data->resp_data = kzalloc(MAX_DATA_SIZE, GFP_KERNEL); ++ if (!test_data->cmd_data || !test_data->resp_data) { ++ ret = -ENOMEM; ++ goto err_free; ++ } ++ ++ mutex_init(&test_data->cmd_lock); ++ mutex_init(&test_data->resp_lock); ++ ++ ret = dal_test_load_kdi(dev); ++ if (ret) ++ goto err_free; ++ ++ dev->data = test_data; ++ filp->private_data = dev; ++ ++ return nonseekable_open(inode, filp); ++ ++err_free: ++ kfree(test_data->cmd_data); ++ kfree(test_data->resp_data); ++ kfree(test_data); ++ ++err_clear_bit: ++ clear_bit(KDI_TEST_OPENED, &dev->kdi_test_status); ++ ++ return ret; ++} ++ ++/** ++ * dal_test_release - dal test release function ++ * ++ * @inode: pointer to inode structure ++ * @filp: pointer to file structure ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int dal_test_release(struct inode *inode, struct file *filp) ++{ ++ struct dal_test_device *dev; ++ struct dal_test_data *test_data; ++ ++ dev = filp->private_data; ++ if (!dev) ++ return -ENODEV; ++ ++ dal_test_unload_kdi(dev); ++ ++ test_data = dev->data; ++ 
if (test_data) { ++ kfree(test_data->cmd_data); ++ kfree(test_data->resp_data); ++ kfree(test_data); ++ } ++ ++ clear_bit(KDI_TEST_OPENED, &dev->kdi_test_status); ++ ++ filp->private_data = NULL; ++ ++ return 0; ++} ++ ++static const struct file_operations dal_test_fops = { ++ .owner = THIS_MODULE, ++ .open = dal_test_open, ++ .release = dal_test_release, ++ .read = dal_test_read, ++ .write = dal_test_write, ++ .llseek = no_llseek, ++}; ++ ++/** ++ * dal_test_exit - destroy dal test device ++ */ ++static void __exit dal_test_exit(void) ++{ ++ struct dal_test_device *dev = &dal_test_dev; ++ struct class *dal_test_class; ++ static dev_t devt; ++ ++ dal_test_class = dev->dev->class; ++ devt = dev->dev->devt; ++ ++ cdev_del(&dev->cdev); ++ unregister_chrdev_region(devt, MINORMASK); ++ device_destroy(dal_test_class, devt); ++ class_destroy(dal_test_class); ++} ++ ++/** ++ * dal_test_init - initiallize dal test device ++ * ++ * Return: 0 on success ++ * <0 on failure ++ */ ++static int __init dal_test_init(void) ++{ ++ struct dal_test_device *dev = &dal_test_dev; ++ struct class *dal_test_class; ++ static dev_t devt; ++ int ret; ++ ++ ret = alloc_chrdev_region(&devt, 0, 1, "mei_dal_test"); ++ if (ret) ++ return ret; ++ ++ dal_test_class = class_create(THIS_MODULE, "mei_dal_test"); ++ if (IS_ERR(dal_test_class)) { ++ ret = PTR_ERR(dal_test_class); ++ dal_test_class = NULL; ++ goto err_unregister_cdev; ++ } ++ ++ dev->dev = device_create(dal_test_class, NULL, devt, dev, "dal_test0"); ++ if (IS_ERR(dev->dev)) { ++ ret = PTR_ERR(dev->dev); ++ goto err_class_destroy; ++ } ++ ++ cdev_init(&dev->cdev, &dal_test_fops); ++ dev->cdev.owner = THIS_MODULE; ++ ret = cdev_add(&dev->cdev, devt, 1); ++ if (ret) ++ goto err_device_destroy; ++ ++ return 0; ++ ++err_device_destroy: ++ device_destroy(dal_test_class, devt); ++err_class_destroy: ++ class_destroy(dal_test_class); ++err_unregister_cdev: ++ unregister_chrdev_region(devt, 1); ++ ++ return ret; ++} ++ ++module_init(dal_test_init); 
++module_exit(dal_test_exit); ++ ++MODULE_AUTHOR("Intel Corporation"); ++MODULE_DESCRIPTION("Intel(R) DAL test"); ++MODULE_LICENSE("GPL v2"); +diff --git a/samples/mei/uapi/dal_test.h b/samples/mei/uapi/dal_test.h +new file mode 100644 +index 000000000000..fc5d88c3f4dd +--- /dev/null ++++ b/samples/mei/uapi/dal_test.h +@@ -0,0 +1,176 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright(c) 2016-2019 Intel Corporation. ++ */ ++ ++#ifndef KDI_CMD_DEFS_H ++#define KDI_CMD_DEFS_H ++ ++/** ++ * enum kdi_command_id - cmd id to invoke in kdi module ++ * ++ * @KDI_SESSION_CREATE: call kdi "create session" function ++ * @KDI_SESSION_CLOSE: call kdi "close session" function ++ * @KDI_SEND_AND_RCV: call kdi "send and receive" function ++ * @KDI_VERSION_GET_INFO: call kdi "get version" function ++ * @KDI_EXCLUSIVE_ACCESS_SET: call kdi "set exclusive access" function ++ * @KDI_EXCLUSIVE_ACCESS_REMOVE: call kdi "unset exclusive access" function ++ */ ++enum kdi_command_id { ++ KDI_SESSION_CREATE, ++ KDI_SESSION_CLOSE, ++ KDI_SEND_AND_RCV, ++ KDI_VERSION_GET_INFO, ++ KDI_EXCLUSIVE_ACCESS_SET, ++ KDI_EXCLUSIVE_ACCESS_REMOVE ++}; ++ ++/** ++ * struct kdi_test_command - contains the command received from user space ++ * ++ * @cmd_id: the command id ++ * @data: the command data ++ */ ++struct kdi_test_command { ++ __u8 cmd_id; ++ unsigned char data[0]; ++} __packed; ++ ++/** ++ * struct session_create_cmd - create session cmd data ++ * ++ * @app_id_len: length of app_id arg ++ * @acp_pkg_len: length of the acp_pkg arg ++ * @init_param_len: length of init param arg ++ * @is_session_handle_ptr: either send kdi a valid ptr to hold the ++ * session handle or NULL ++ * @data: buffer to hold the cmd arguments ++ */ ++struct session_create_cmd { ++ __u32 app_id_len; ++ __u32 acp_pkg_len; ++ __u32 init_param_len; ++ __u8 is_session_handle_ptr; ++ unsigned char data[0]; ++} __packed; ++ ++/** ++ * struct session_create_resp - create session response ++ * ++ * @session_handle: the 
session handle ++ * @test_mod_status: status returned from the test module ++ * @status: status returned from kdi ++ */ ++struct session_create_resp { ++ __u64 session_handle; ++ __s32 test_mod_status; ++ __s32 status; ++} __packed; ++ ++/** ++ * struct session_close_cmd - close session cmd ++ * ++ * @session_handle: the session handle to close ++ */ ++struct session_close_cmd { ++ __u64 session_handle; ++} __packed; ++ ++/** ++ * struct session_close_resp - close session response ++ * ++ * @test_mod_status: status returned from the test module ++ * @status: status returned from kdi ++ */ ++struct session_close_resp { ++ __s32 test_mod_status; ++ __s32 status; ++} __packed; ++ ++/** ++ * struct send_and_rcv_cmd - send and receive cmd ++ * ++ * @session_handle: the session handle ++ * @command_id: the cmd id to send the applet ++ * @output_buf_len: the size of the output buffer ++ * @is_output_buf: either send kdi a valid ptr to hold the output buffer or NULL ++ * @is_output_len_ptr: either send kdi a valid ptr to hold ++ * the output len or NULL ++ * @is_response_code_ptr: either send kdi a valid ptr to hold ++ * the applet response code or NULL ++ * @input: the input data to send the applet ++ */ ++struct send_and_rcv_cmd { ++ __u64 session_handle; ++ __u32 command_id; ++ __u32 output_buf_len; ++ __u8 is_output_buf; ++ __u8 is_output_len_ptr; ++ __u8 is_response_code_ptr; ++ unsigned char input[0]; ++} __packed; ++ ++/** ++ * struct send_and_rcv_resp - send and receive response ++ * ++ * @test_mod_status: status returned from the test module ++ * @status: status returned from kdi ++ * @response_code: response code returned from the applet ++ * @output_len: length of output from the applet ++ * @output: the output got from the applet ++ */ ++struct send_and_rcv_resp { ++ __s32 test_mod_status; ++ __s32 status; ++ __s32 response_code; ++ __u32 output_len; ++ unsigned char output[0]; ++} __packed; ++ ++/** ++ * struct version_get_info_cmd - get version cmd ++ * ++ * 
@is_version_ptr: either send kdi a valid ptr to hold the version info or NULL ++ */ ++struct version_get_info_cmd { ++ __u8 is_version_ptr; ++} __packed; ++ ++/** ++ * struct version_get_info_resp - get version response ++ * ++ * @kdi_version: kdi version ++ * @reserved: reserved bytes ++ * @test_mod_status: status returned from the test module ++ * @status: status returned from kdi ++ */ ++struct version_get_info_resp { ++ char kdi_version[32]; ++ __u32 reserved[4]; ++ __s32 test_mod_status; ++ __s32 status; ++} __packed; ++ ++/** ++ * struct ta_access_set_remove_cmd - set/remove access cmd ++ * ++ * @app_id_len: length of app_id arg ++ * @data: the cmd data. contains the app_id ++ */ ++struct ta_access_set_remove_cmd { ++ __u32 app_id_len; ++ unsigned char data[0]; ++} __packed; ++ ++/** ++ * struct ta_access_set_remove_resp - set/remove access response ++ * ++ * @test_mod_status: status returned from the test module ++ * @status: status returned from kdi ++ */ ++struct ta_access_set_remove_resp { ++ __s32 test_mod_status; ++ __s32 status; ++} __packed; ++ ++#endif /* KDI_CMD_DEFS_H */ +-- +2.17.1 + diff --git a/patches/0064-net-stmmac-change-the-intr-status-check-logic.connectivity b/patches/0064-net-stmmac-change-the-intr-status-check-logic.connectivity new file mode 100644 index 0000000000..f49d65fe45 --- /dev/null +++ b/patches/0064-net-stmmac-change-the-intr-status-check-logic.connectivity @@ -0,0 +1,156 @@ +From 0962ed8d9eb7df3df6ebf23eaaf08ae6236e77f9 Mon Sep 17 00:00:00 2001 +From: Weifeng Voon +Date: Fri, 8 Mar 2019 22:52:58 +0800 +Subject: [PATCH 064/108] net: stmmac: change the intr status check logic + +This change is to accommodate interrupt mode(INTM) MODE1. +As the NIS status bit is not asserted for any RI/TI events. 
+ +Signed-off-by: Weifeng Voon +--- + .../net/ethernet/stmicro/stmmac/dwmac4_lib.c | 106 +++++++++--------- + 1 file changed, 53 insertions(+), 53 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +index 8f1830d795cd..deef04cccea1 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +@@ -146,82 +146,82 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, + } + } + /* TX/RX NORMAL interrupts */ +- if (likely(intr_status & DMA_CHAN_STATUS_NIS)) { ++ if (likely(intr_status & DMA_CHAN_STATUS_NIS)) + x->normal_irq_n++; +- if (likely(intr_status & DMA_CHAN_STATUS_RI)) { +- u32 value; +- +- value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); +- /* to schedule NAPI on real RIE event. */ +- if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { +- x->rx_normal_irq_n++; +- switch (chan) { +- case 0x0: +- x->q0_rx_irq_n++; +- break; +- case 0x1: +- x->q1_rx_irq_n++; +- break; +- case 0x2: +- x->q2_rx_irq_n++; +- break; +- case 0x3: +- x->q3_rx_irq_n++; +- break; +- case 0x4: +- x->q4_rx_irq_n++; +- break; +- case 0x5: +- x->q5_rx_irq_n++; +- break; +- case 0x6: +- x->q6_rx_irq_n++; +- break; +- case 0x7: +- x->q7_rx_irq_n++; +- break; +- default: +- break; +- } +- ret |= handle_rx; +- } +- } +- if (likely(intr_status & (DMA_CHAN_STATUS_TI | +- DMA_CHAN_STATUS_TBU))) { +- x->tx_normal_irq_n++; ++ ++ if (likely(intr_status & DMA_CHAN_STATUS_RI)) { ++ u32 value; ++ ++ value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); ++ /* to schedule NAPI on real RIE event. 
*/ ++ if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { ++ x->rx_normal_irq_n++; + switch (chan) { + case 0x0: +- x->q0_tx_irq_n++; ++ x->q0_rx_irq_n++; + break; + case 0x1: +- x->q1_tx_irq_n++; ++ x->q1_rx_irq_n++; + break; + case 0x2: +- x->q2_tx_irq_n++; ++ x->q2_rx_irq_n++; + break; + case 0x3: +- x->q3_tx_irq_n++; ++ x->q3_rx_irq_n++; + break; + case 0x4: +- x->q4_tx_irq_n++; ++ x->q4_rx_irq_n++; + break; + case 0x5: +- x->q5_tx_irq_n++; ++ x->q5_rx_irq_n++; + break; + case 0x6: +- x->q6_tx_irq_n++; ++ x->q6_rx_irq_n++; + break; + case 0x7: +- x->q7_tx_irq_n++; ++ x->q7_rx_irq_n++; + break; + default: + break; + } +- ret |= handle_tx; ++ ret |= handle_rx; ++ } ++ } ++ if (likely(intr_status & (DMA_CHAN_STATUS_TI | ++ DMA_CHAN_STATUS_TBU))) { ++ x->tx_normal_irq_n++; ++ switch (chan) { ++ case 0x0: ++ x->q0_tx_irq_n++; ++ break; ++ case 0x1: ++ x->q1_tx_irq_n++; ++ break; ++ case 0x2: ++ x->q2_tx_irq_n++; ++ break; ++ case 0x3: ++ x->q3_tx_irq_n++; ++ break; ++ case 0x4: ++ x->q4_tx_irq_n++; ++ break; ++ case 0x5: ++ x->q5_tx_irq_n++; ++ break; ++ case 0x6: ++ x->q6_tx_irq_n++; ++ break; ++ case 0x7: ++ x->q7_tx_irq_n++; ++ break; ++ default: ++ break; + } +- if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) +- x->rx_early_irq++; ++ ret |= handle_tx; + } ++ if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) ++ x->rx_early_irq++; + + writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan)); + return ret; +-- +2.17.1 + diff --git a/patches/0064-sos-vhm-remove-hugetlb_enabled-flag.acrn b/patches/0064-sos-vhm-remove-hugetlb_enabled-flag.acrn new file mode 100644 index 0000000000..426ca5ceda --- /dev/null +++ b/patches/0064-sos-vhm-remove-hugetlb_enabled-flag.acrn @@ -0,0 +1,115 @@ +From 9885c336bca52c4f1556ab8aab612be88e90f0ce Mon Sep 17 00:00:00 2001 +From: "Li, Fei1" +Date: Fri, 31 Aug 2018 10:59:02 +0800 +Subject: [PATCH 064/150] sos: vhm: remove hugetlb_enabled flag + +Since we only have hugetlb memory allocation mechanism, there no needs +hugetlb_enabled to 
indicate we're using hugetlb. + +Signed-off-by: Li, Fei1 +--- + drivers/char/vhm/vhm_dev.c | 1 - + drivers/vhm/vhm_hugetlb.c | 1 - + drivers/vhm/vhm_mm.c | 17 +++++++---------- + include/linux/vhm/vhm_vm_mngt.h | 2 -- + 4 files changed, 7 insertions(+), 14 deletions(-) + +diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c +index d26cfca96de2..5dca224d47f0 100644 +--- a/drivers/char/vhm/vhm_dev.c ++++ b/drivers/char/vhm/vhm_dev.c +@@ -119,7 +119,6 @@ static int vhm_dev_open(struct inode *inodep, struct file *filep) + + vm_mutex_lock(&vhm_vm_list_lock); + vm->refcnt = 1; +- vm->hugetlb_enabled = 0; + vm_list_add(&vm->list); + vm_mutex_unlock(&vhm_vm_list_lock); + filep->private_data = vm; +diff --git a/drivers/vhm/vhm_hugetlb.c b/drivers/vhm/vhm_hugetlb.c +index 9c39f9167f77..a83f00ad2e9d 100644 +--- a/drivers/vhm/vhm_hugetlb.c ++++ b/drivers/vhm/vhm_hugetlb.c +@@ -194,7 +194,6 @@ int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap) + } + + __free_page(memmaps_buf_pg); +- vm->hugetlb_enabled = 1; + + return 0; + err: +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index 3c0c2acbe522..c7ca10255064 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -200,8 +200,7 @@ int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + + void free_guest_mem(struct vhm_vm *vm) + { +- if (vm->hugetlb_enabled) +- return hugepage_free_guest(vm); ++ return hugepage_free_guest(vm); + } + + #define TRUSTY_MEM_GPA_BASE (511UL * 1024UL * 1024UL * 1024UL) +@@ -232,14 +231,13 @@ void deinit_trusty(struct vhm_vm *vm) + void *map_guest_phys(unsigned long vmid, u64 guest_phys, size_t size) + { + struct vhm_vm *vm; +- void *ret = NULL; ++ void *ret; + + vm = find_get_vm(vmid); + if (vm == NULL) +- return ret; ++ return NULL; + +- if (vm->hugetlb_enabled) +- ret = hugepage_map_guest_phys(vm, guest_phys, size); ++ ret = hugepage_map_guest_phys(vm, guest_phys, size); + + put_vm(vm); + +@@ -250,16 +248,15 @@ 
EXPORT_SYMBOL(map_guest_phys); + int unmap_guest_phys(unsigned long vmid, u64 guest_phys) + { + struct vhm_vm *vm; +- int ret = -ESRCH; ++ int ret; + + vm = find_get_vm(vmid); + if (vm == NULL) { + pr_warn("vm_list corrupted\n"); +- return ret; ++ return -ESRCH; + } + +- if (vm->hugetlb_enabled) +- ret = hugepage_unmap_guest_phys(vm, guest_phys); ++ ret = hugepage_unmap_guest_phys(vm, guest_phys); + + put_vm(vm); + return ret; +diff --git a/include/linux/vhm/vhm_vm_mngt.h b/include/linux/vhm/vhm_vm_mngt.h +index fe0ab90fc425..774cf2ddfe1f 100644 +--- a/include/linux/vhm/vhm_vm_mngt.h ++++ b/include/linux/vhm/vhm_vm_mngt.h +@@ -87,7 +87,6 @@ extern struct mutex vhm_vm_list_lock; + * @ioreq_client_list: list of ioreq clients + * @req_buf: request buffer shared between HV, SOS and UOS + * @pg: pointer to linux page which holds req_buf +- * @hugetlb_enabled: flag to enable/disable hugetlb page ept mapping + */ + struct vhm_vm { + struct device *dev; +@@ -103,7 +102,6 @@ struct vhm_vm { + struct list_head ioreq_client_list; + struct vhm_request_buffer *req_buf; + struct page *pg; +- int hugetlb_enabled; + }; + + /** +-- +2.17.1 + diff --git a/patches/0065-WORKAROUND-Remove-size-check-for-DMIC-blob.audio b/patches/0065-WORKAROUND-Remove-size-check-for-DMIC-blob.audio new file mode 100644 index 0000000000..7ca565fe13 --- /dev/null +++ b/patches/0065-WORKAROUND-Remove-size-check-for-DMIC-blob.audio @@ -0,0 +1,29 @@ +From 6bfff1bdedc71ff27393298f94251f70a6dfeb92 Mon Sep 17 00:00:00 2001 +From: Ramesh Babu +Date: Mon, 2 Nov 2015 07:06:38 +0530 +Subject: [PATCH 065/193] WORKAROUND: Remove size check for DMIC blob + +Change-Id: Ic7c70d4f0b1bf137c8bfbfbb9ef9962fdad8daf9 +Signed-off-by: Ramesh Babu +Reviewed-on: +--- + sound/soc/intel/skylake/skl-debug.c | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c +index 9e3d871fb46f..b7d98f6c5877 100644 +--- a/sound/soc/intel/skylake/skl-debug.c ++++ 
b/sound/soc/intel/skylake/skl-debug.c +@@ -513,9 +513,6 @@ static ssize_t nhlt_write(struct file *file, + ssize_t written; + size_t size = blob->size; + +- if (count > 2 * HDA_SST_CFG_MAX) +- return -EIO; +- + if (!blob->cfg) { + /* allocate mem for blob */ + blob->cfg = kzalloc(count, GFP_KERNEL); +-- +2.17.1 + diff --git a/patches/0065-drm-i915-Align-power-domain-names-with-port-names.drm b/patches/0065-drm-i915-Align-power-domain-names-with-port-names.drm new file mode 100644 index 0000000000..d2fd113ae3 --- /dev/null +++ b/patches/0065-drm-i915-Align-power-domain-names-with-port-names.drm @@ -0,0 +1,828 @@ +From 9b7dd3bca982dc52d816dab73b9d0942cc792158 Mon Sep 17 00:00:00 2001 +From: Imre Deak +Date: Fri, 23 Aug 2019 13:07:11 +0300 +Subject: [PATCH 065/690] drm/i915: Align power domain names with port names +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +There is a difference in BSpec's and the driver's designation of DDI +ports. BSpec uses the following names: +- before GEN11: + BSpec/driver: + port A/B/C/D etc +- GEN11: + BSpec/driver: + port A-F +- GEN12: + BSpec: + port A/B/C for combo PHY ports + port TC1-6 for Type C PHY ports + driver: + port A-I. + The driver's port D name matches BSpec's TC1 port name. + +So far power domains were named according to the BSpec designation, to +make it easier to match the code against the specification. That however +can be confusing when a power domain needs to be matched to a port on +GEN12+. To resolve that use the driver's port A-I designation for power +domain names too and rename the corresponding power wells so that they +reflect the mapping from the driver's to BSpec's port name. 
+ +Cc: Lucas De Marchi +Cc: Ville Syrjälä +Signed-off-by: Imre Deak +Reviewed-by: Stanislav Lisovskiy +Link: https://patchwork.freedesktop.org/patch/msgid/20190823100711.27833-1-imre.deak@intel.com +--- + drivers/gpu/drm/i915/display/intel_display.c | 10 +- + .../drm/i915/display/intel_display_power.c | 361 +++++++++--------- + .../drm/i915/display/intel_display_power.h | 40 +- + drivers/gpu/drm/i915/i915_debugfs.c | 3 +- + 4 files changed, 198 insertions(+), 216 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c +index 822581deaaac..15a61a858b59 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.c ++++ b/drivers/gpu/drm/i915/display/intel_display.c +@@ -6750,16 +6750,16 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) + dig_port->tc_mode == TC_PORT_TBT_ALT) { + switch (dig_port->aux_ch) { + case AUX_CH_C: +- return POWER_DOMAIN_AUX_TBT1; ++ return POWER_DOMAIN_AUX_C_TBT; + case AUX_CH_D: +- return POWER_DOMAIN_AUX_TBT2; ++ return POWER_DOMAIN_AUX_D_TBT; + case AUX_CH_E: +- return POWER_DOMAIN_AUX_TBT3; ++ return POWER_DOMAIN_AUX_E_TBT; + case AUX_CH_F: +- return POWER_DOMAIN_AUX_TBT4; ++ return POWER_DOMAIN_AUX_F_TBT; + default: + MISSING_CASE(dig_port->aux_ch); +- return POWER_DOMAIN_AUX_TBT1; ++ return POWER_DOMAIN_AUX_C_TBT; + } + } + +diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c +index 12099760d99e..ce88a27229ef 100644 +--- a/drivers/gpu/drm/i915/display/intel_display_power.c ++++ b/drivers/gpu/drm/i915/display/intel_display_power.c +@@ -24,11 +24,8 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, + enum i915_power_well_id power_well_id); + + const char * +-intel_display_power_domain_str(struct drm_i915_private *i915, +- enum intel_display_power_domain domain) ++intel_display_power_domain_str(enum intel_display_power_domain domain) + { +- bool ddi_tc_ports = IS_GEN(i915, 
12); +- + switch (domain) { + case POWER_DOMAIN_DISPLAY_CORE: + return "DISPLAY_CORE"; +@@ -71,23 +68,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915, + case POWER_DOMAIN_PORT_DDI_C_LANES: + return "PORT_DDI_C_LANES"; + case POWER_DOMAIN_PORT_DDI_D_LANES: +- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES != +- POWER_DOMAIN_PORT_DDI_TC1_LANES); +- return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES"; ++ return "PORT_DDI_D_LANES"; + case POWER_DOMAIN_PORT_DDI_E_LANES: +- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES != +- POWER_DOMAIN_PORT_DDI_TC2_LANES); +- return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES"; ++ return "PORT_DDI_E_LANES"; + case POWER_DOMAIN_PORT_DDI_F_LANES: +- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES != +- POWER_DOMAIN_PORT_DDI_TC3_LANES); +- return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES"; +- case POWER_DOMAIN_PORT_DDI_TC4_LANES: +- return "PORT_DDI_TC4_LANES"; +- case POWER_DOMAIN_PORT_DDI_TC5_LANES: +- return "PORT_DDI_TC5_LANES"; +- case POWER_DOMAIN_PORT_DDI_TC6_LANES: +- return "PORT_DDI_TC6_LANES"; ++ return "PORT_DDI_F_LANES"; ++ case POWER_DOMAIN_PORT_DDI_G_LANES: ++ return "PORT_DDI_G_LANES"; ++ case POWER_DOMAIN_PORT_DDI_H_LANES: ++ return "PORT_DDI_H_LANES"; ++ case POWER_DOMAIN_PORT_DDI_I_LANES: ++ return "PORT_DDI_I_LANES"; + case POWER_DOMAIN_PORT_DDI_A_IO: + return "PORT_DDI_A_IO"; + case POWER_DOMAIN_PORT_DDI_B_IO: +@@ -95,23 +86,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915, + case POWER_DOMAIN_PORT_DDI_C_IO: + return "PORT_DDI_C_IO"; + case POWER_DOMAIN_PORT_DDI_D_IO: +- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO != +- POWER_DOMAIN_PORT_DDI_TC1_IO); +- return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO"; ++ return "PORT_DDI_D_IO"; + case POWER_DOMAIN_PORT_DDI_E_IO: +- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO != +- POWER_DOMAIN_PORT_DDI_TC2_IO); +- return ddi_tc_ports ? 
"PORT_DDI_TC2_IO" : "PORT_DDI_E_IO"; ++ return "PORT_DDI_E_IO"; + case POWER_DOMAIN_PORT_DDI_F_IO: +- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO != +- POWER_DOMAIN_PORT_DDI_TC3_IO); +- return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO"; +- case POWER_DOMAIN_PORT_DDI_TC4_IO: +- return "PORT_DDI_TC4_IO"; +- case POWER_DOMAIN_PORT_DDI_TC5_IO: +- return "PORT_DDI_TC5_IO"; +- case POWER_DOMAIN_PORT_DDI_TC6_IO: +- return "PORT_DDI_TC6_IO"; ++ return "PORT_DDI_F_IO"; ++ case POWER_DOMAIN_PORT_DDI_G_IO: ++ return "PORT_DDI_G_IO"; ++ case POWER_DOMAIN_PORT_DDI_H_IO: ++ return "PORT_DDI_H_IO"; ++ case POWER_DOMAIN_PORT_DDI_I_IO: ++ return "PORT_DDI_I_IO"; + case POWER_DOMAIN_PORT_DSI: + return "PORT_DSI"; + case POWER_DOMAIN_PORT_CRT: +@@ -129,34 +114,33 @@ intel_display_power_domain_str(struct drm_i915_private *i915, + case POWER_DOMAIN_AUX_C: + return "AUX_C"; + case POWER_DOMAIN_AUX_D: +- BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1); +- return ddi_tc_ports ? "AUX_TC1" : "AUX_D"; ++ return "AUX_D"; + case POWER_DOMAIN_AUX_E: +- BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2); +- return ddi_tc_ports ? "AUX_TC2" : "AUX_E"; ++ return "AUX_E"; + case POWER_DOMAIN_AUX_F: +- BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3); +- return ddi_tc_ports ? 
"AUX_TC3" : "AUX_F"; +- case POWER_DOMAIN_AUX_TC4: +- return "AUX_TC4"; +- case POWER_DOMAIN_AUX_TC5: +- return "AUX_TC5"; +- case POWER_DOMAIN_AUX_TC6: +- return "AUX_TC6"; ++ return "AUX_F"; ++ case POWER_DOMAIN_AUX_G: ++ return "AUX_G"; ++ case POWER_DOMAIN_AUX_H: ++ return "AUX_H"; ++ case POWER_DOMAIN_AUX_I: ++ return "AUX_I"; + case POWER_DOMAIN_AUX_IO_A: + return "AUX_IO_A"; +- case POWER_DOMAIN_AUX_TBT1: +- return "AUX_TBT1"; +- case POWER_DOMAIN_AUX_TBT2: +- return "AUX_TBT2"; +- case POWER_DOMAIN_AUX_TBT3: +- return "AUX_TBT3"; +- case POWER_DOMAIN_AUX_TBT4: +- return "AUX_TBT4"; +- case POWER_DOMAIN_AUX_TBT5: +- return "AUX_TBT5"; +- case POWER_DOMAIN_AUX_TBT6: +- return "AUX_TBT6"; ++ case POWER_DOMAIN_AUX_C_TBT: ++ return "AUX_C_TBT"; ++ case POWER_DOMAIN_AUX_D_TBT: ++ return "AUX_D_TBT"; ++ case POWER_DOMAIN_AUX_E_TBT: ++ return "AUX_E_TBT"; ++ case POWER_DOMAIN_AUX_F_TBT: ++ return "AUX_F_TBT"; ++ case POWER_DOMAIN_AUX_G_TBT: ++ return "AUX_G_TBT"; ++ case POWER_DOMAIN_AUX_H_TBT: ++ return "AUX_H_TBT"; ++ case POWER_DOMAIN_AUX_I_TBT: ++ return "AUX_I_TBT"; + case POWER_DOMAIN_GMBUS: + return "GMBUS"; + case POWER_DOMAIN_INIT: +@@ -1718,15 +1702,12 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) + static void print_power_domains(struct i915_power_domains *power_domains, + const char *prefix, u64 mask) + { +- struct drm_i915_private *i915 = +- container_of(power_domains, struct drm_i915_private, +- power_domains); + enum intel_display_power_domain domain; + + DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask)); + for_each_power_domain(domain, mask) + DRM_DEBUG_DRIVER("%s use_count %d\n", +- intel_display_power_domain_str(i915, domain), ++ intel_display_power_domain_str(domain), + power_domains->domain_use_count[domain]); + } + +@@ -1896,7 +1877,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, + { + struct i915_power_domains *power_domains; + struct i915_power_well *power_well; +- const char *name = 
intel_display_power_domain_str(dev_priv, domain); ++ const char *name = intel_display_power_domain_str(domain); + + power_domains = &dev_priv->power_domains; + +@@ -2487,10 +2468,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +@@ -2530,22 +2511,22 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, + BIT_ULL(POWER_DOMAIN_AUX_A)) + #define ICL_AUX_B_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_B)) +-#define ICL_AUX_C_IO_POWER_DOMAINS ( \ ++#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C)) +-#define ICL_AUX_D_IO_POWER_DOMAINS ( \ ++#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D)) +-#define ICL_AUX_E_IO_POWER_DOMAINS ( \ ++#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E)) +-#define ICL_AUX_F_IO_POWER_DOMAINS ( \ ++#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F)) +-#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT1)) +-#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT2)) +-#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT3)) +-#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT4)) ++#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) ++#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) ++#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) ++#define 
ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) + + #define TGL_PW_5_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_D) | \ +@@ -2565,24 +2546,24 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TC1) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TC2) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TC3) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TC4) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TC5) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TC6) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_D) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_E) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_F) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_G) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_H) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_I) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +@@ -2598,35 +2579,50 @@ void intel_display_power_put(struct drm_i915_private 
*dev_priv, + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +-#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO)) +-#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO)) +-#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO)) +-#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO)) +-#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO)) +-#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO)) +- +-#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TC1)) +-#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TC2)) +-#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TC3)) +-#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TC4)) +-#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TC5)) +-#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TC6)) +-#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT5)) +-#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \ +- BIT_ULL(POWER_DOMAIN_AUX_TBT6)) ++#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) ++#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) ++#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) ++#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO)) ++#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO)) ++#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO)) ++ ++#define TGL_AUX_A_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ ++ BIT_ULL(POWER_DOMAIN_AUX_A)) ++#define TGL_AUX_B_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_B)) ++#define TGL_AUX_C_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_C)) ++#define 
TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_D)) ++#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_E)) ++#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_F)) ++#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_G)) ++#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_H)) ++#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_I)) ++#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) ++#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) ++#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) ++#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_G_TBT)) ++#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_H_TBT)) ++#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \ ++ BIT_ULL(POWER_DOMAIN_AUX_I_TBT)) + + static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, +@@ -3484,8 +3480,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { + }, + }, + { +- .name = "AUX C", +- .domains = ICL_AUX_C_IO_POWER_DOMAINS, ++ .name = "AUX C TC1", ++ .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3495,8 +3491,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { + }, + }, + { +- .name = "AUX D", +- .domains = ICL_AUX_D_IO_POWER_DOMAINS, ++ .name = "AUX D TC2", ++ .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3506,8 +3502,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { + }, + }, + { +- .name = "AUX E", +- .domains = ICL_AUX_E_IO_POWER_DOMAINS, ++ .name = "AUX E TC3", ++ .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3517,8 +3513,8 @@ 
static const struct i915_power_well_desc icl_power_wells[] = { + }, + }, + { +- .name = "AUX F", +- .domains = ICL_AUX_F_IO_POWER_DOMAINS, ++ .name = "AUX F TC4", ++ .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3528,8 +3524,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT1", +- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, ++ .name = "AUX C TBT1", ++ .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3539,8 +3535,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT2", +- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, ++ .name = "AUX D TBT2", ++ .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3550,8 +3546,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT3", +- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, ++ .name = "AUX E TBT3", ++ .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3561,8 +3557,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT4", +- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, ++ .name = "AUX F TBT4", ++ .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3667,8 +3663,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + } + }, + { +- .name = "DDI TC1 IO", +- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, ++ .name = "DDI D TC1 IO", ++ .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3677,8 +3673,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "DDI TC2 IO", +- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, ++ .name = 
"DDI E TC2 IO", ++ .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3687,8 +3683,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "DDI TC3 IO", +- .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, ++ .name = "DDI F TC3 IO", ++ .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3697,8 +3693,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "DDI TC4 IO", +- .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, ++ .name = "DDI G TC4 IO", ++ .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3707,8 +3703,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "DDI TC5 IO", +- .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, ++ .name = "DDI H TC5 IO", ++ .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3717,8 +3713,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "DDI TC6 IO", +- .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, ++ .name = "DDI I TC6 IO", ++ .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3728,7 +3724,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + { + .name = "AUX A", +- .domains = ICL_AUX_A_IO_POWER_DOMAINS, ++ .domains = TGL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3738,7 +3734,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + { + .name = "AUX B", +- .domains = ICL_AUX_B_IO_POWER_DOMAINS, ++ .domains = TGL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3748,7 +3744,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + { + .name = "AUX C", +- .domains = 
ICL_AUX_C_IO_POWER_DOMAINS, ++ .domains = TGL_AUX_C_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3757,8 +3753,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TC1", +- .domains = TGL_AUX_TC1_IO_POWER_DOMAINS, ++ .name = "AUX D TC1", ++ .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3768,8 +3764,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TC2", +- .domains = TGL_AUX_TC2_IO_POWER_DOMAINS, ++ .name = "AUX E TC2", ++ .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3779,8 +3775,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TC3", +- .domains = TGL_AUX_TC3_IO_POWER_DOMAINS, ++ .name = "AUX F TC3", ++ .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3790,8 +3786,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TC4", +- .domains = TGL_AUX_TC4_IO_POWER_DOMAINS, ++ .name = "AUX G TC4", ++ .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3801,8 +3797,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TC5", +- .domains = TGL_AUX_TC5_IO_POWER_DOMAINS, ++ .name = "AUX H TC5", ++ .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3812,8 +3808,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TC6", +- .domains = TGL_AUX_TC6_IO_POWER_DOMAINS, ++ .name = "AUX I TC6", ++ .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3823,8 +3819,8 
@@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT1", +- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, ++ .name = "AUX D TBT1", ++ .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3834,8 +3830,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT2", +- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, ++ .name = "AUX E TBT2", ++ .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3845,8 +3841,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT3", +- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, ++ .name = "AUX F TBT3", ++ .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3856,8 +3852,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT4", +- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, ++ .name = "AUX G TBT4", ++ .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3867,8 +3863,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT5", +- .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS, ++ .name = "AUX H TBT5", ++ .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -3878,8 +3874,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { + }, + }, + { +- .name = "AUX TBT6", +- .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS, ++ .name = "AUX I TBT6", ++ .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { +@@ -5104,8 +5100,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915) + + for_each_power_domain(domain, power_well->desc->domains) + DRM_DEBUG_DRIVER(" %-23s %d\n", +- 
intel_display_power_domain_str(i915, +- domain), ++ intel_display_power_domain_str(domain), + power_domains->domain_use_count[domain]); + } + } +diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h +index a50605b8b1ad..737b5def7fc6 100644 +--- a/drivers/gpu/drm/i915/display/intel_display_power.h ++++ b/drivers/gpu/drm/i915/display/intel_display_power.h +@@ -36,29 +36,20 @@ enum intel_display_power_domain { + POWER_DOMAIN_PORT_DDI_B_LANES, + POWER_DOMAIN_PORT_DDI_C_LANES, + POWER_DOMAIN_PORT_DDI_D_LANES, +- POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES, + POWER_DOMAIN_PORT_DDI_E_LANES, +- POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES, + POWER_DOMAIN_PORT_DDI_F_LANES, +- POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES, +- POWER_DOMAIN_PORT_DDI_TC4_LANES, +- POWER_DOMAIN_PORT_DDI_TC5_LANES, +- POWER_DOMAIN_PORT_DDI_TC6_LANES, ++ POWER_DOMAIN_PORT_DDI_G_LANES, ++ POWER_DOMAIN_PORT_DDI_H_LANES, ++ POWER_DOMAIN_PORT_DDI_I_LANES, + POWER_DOMAIN_PORT_DDI_A_IO, + POWER_DOMAIN_PORT_DDI_B_IO, + POWER_DOMAIN_PORT_DDI_C_IO, + POWER_DOMAIN_PORT_DDI_D_IO, +- POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO, + POWER_DOMAIN_PORT_DDI_E_IO, +- POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO, + POWER_DOMAIN_PORT_DDI_F_IO, +- POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO, + POWER_DOMAIN_PORT_DDI_G_IO, +- POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO, + POWER_DOMAIN_PORT_DDI_H_IO, +- POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO, + POWER_DOMAIN_PORT_DDI_I_IO, +- POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO, + POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_PORT_CRT, + POWER_DOMAIN_PORT_OTHER, +@@ -68,21 +59,19 @@ enum intel_display_power_domain { + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_AUX_D, +- POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D, + POWER_DOMAIN_AUX_E, +- POWER_DOMAIN_AUX_TC2 = 
POWER_DOMAIN_AUX_E, + POWER_DOMAIN_AUX_F, +- POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F, +- POWER_DOMAIN_AUX_TC4, +- POWER_DOMAIN_AUX_TC5, +- POWER_DOMAIN_AUX_TC6, ++ POWER_DOMAIN_AUX_G, ++ POWER_DOMAIN_AUX_H, ++ POWER_DOMAIN_AUX_I, + POWER_DOMAIN_AUX_IO_A, +- POWER_DOMAIN_AUX_TBT1, +- POWER_DOMAIN_AUX_TBT2, +- POWER_DOMAIN_AUX_TBT3, +- POWER_DOMAIN_AUX_TBT4, +- POWER_DOMAIN_AUX_TBT5, +- POWER_DOMAIN_AUX_TBT6, ++ POWER_DOMAIN_AUX_C_TBT, ++ POWER_DOMAIN_AUX_D_TBT, ++ POWER_DOMAIN_AUX_E_TBT, ++ POWER_DOMAIN_AUX_F_TBT, ++ POWER_DOMAIN_AUX_G_TBT, ++ POWER_DOMAIN_AUX_H_TBT, ++ POWER_DOMAIN_AUX_I_TBT, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_GT_IRQ, +@@ -269,8 +258,7 @@ void intel_display_power_suspend(struct drm_i915_private *i915); + void intel_display_power_resume(struct drm_i915_private *i915); + + const char * +-intel_display_power_domain_str(struct drm_i915_private *i915, +- enum intel_display_power_domain domain); ++intel_display_power_domain_str(enum intel_display_power_domain domain); + + bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index 5c1a2b1e7d34..5e81c4fc13ae 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -2365,8 +2365,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) + + for_each_power_domain(power_domain, power_well->desc->domains) + seq_printf(m, " %-23s %d\n", +- intel_display_power_domain_str(dev_priv, +- power_domain), ++ intel_display_power_domain_str(power_domain), + power_domains->domain_use_count[power_domain]); + } + +-- +2.17.1 + diff --git a/patches/0065-mei-dal-put-message-into-buffer-in-single-operati.security b/patches/0065-mei-dal-put-message-into-buffer-in-single-operati.security new file mode 100644 index 0000000000..a79a701172 --- /dev/null +++ 
b/patches/0065-mei-dal-put-message-into-buffer-in-single-operati.security @@ -0,0 +1,171 @@ +From 0e8967b9b0cd29bfdd2038dfbab9031001cea95d Mon Sep 17 00:00:00 2001 +From: Tomas Winkler +Date: Mon, 7 Oct 2019 18:01:11 +0300 +Subject: [PATCH 65/65] mei: dal: put message into buffer in single operation + +For internal communication between the receive handler and reader +the message is preceded with length of the message. The message was +pushed in two stages first the length and than the actual message. +The reader that is waiting on !kfifo_empty() condition may wake up +between the two operations and get out of the sync. +Pushing the message as a whole can provide solution w/o introducing +additional locking. + +Change-Id: I2c319140c8404b35c31f1ca66671ca059e777827 +Signed-off-by: Tomas Winkler +--- + drivers/misc/mei/dal/dal_class.c | 48 +++++++++++++++++--------------- + drivers/misc/mei/dal/dal_dev.h | 4 +-- + 2 files changed, 27 insertions(+), 25 deletions(-) + +diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c +index 3ed80393a337..3f3c92f2aac3 100644 +--- a/drivers/misc/mei/dal/dal_class.c ++++ b/drivers/misc/mei/dal/dal_class.c +@@ -66,11 +66,12 @@ void dal_dc_print(struct device *dev, struct dal_client *dc) + static void dal_dc_update_read_state(struct dal_client *dc, ssize_t len) + { + struct dal_device *ddev = dc->ddev; ++ struct dal_bh_msg *bh_msg = ddev->bh_fw_msg; + + /* check BH msg magic, if it exists this is the header */ +- if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) { ++ if (bh_msg_is_response(bh_msg->msg, len)) { + struct bh_response_header *hdr = +- (struct bh_response_header *)dc->ddev->bh_fw_msg.msg; ++ (struct bh_response_header *)bh_msg->msg; + + dc->expected_msg_size_from_fw = hdr->h.length; + dev_dbg(&ddev->dev, "expected_msg_size_from_fw = %d bytes read = %zd\n", +@@ -99,7 +100,7 @@ static enum dal_intf dal_get_client_by_squence_number(struct dal_device *ddev) + if (!ddev->clients[DAL_INTF_KDI]) + return 
DAL_INTF_CDEV; + +- head = (struct bh_response_header *)ddev->bh_fw_msg.msg; ++ head = (struct bh_response_header *)ddev->bh_fw_msg->msg; + + dev_dbg(&ddev->dev, "msg seq = %llu\n", head->seq); + +@@ -128,7 +129,7 @@ static void dal_recv_cb(struct mei_cl_device *cldev) + /* + * read the msg from MEI + */ +- len = mei_cldev_recv(cldev, ddev->bh_fw_msg.msg, DAL_MAX_BUFFER_SIZE); ++ len = mei_cldev_recv(cldev, ddev->bh_fw_msg->msg, DAL_MAX_BUFFER_SIZE); + if (len < 0) { + dev_err(&cldev->dev, "recv failed %zd\n", len); + return; +@@ -141,10 +142,10 @@ static void dal_recv_cb(struct mei_cl_device *cldev) + mutex_lock(&ddev->context_lock); + + /* save msg len */ +- ddev->bh_fw_msg.len = len; ++ ddev->bh_fw_msg->len = len; + + /* set to which interface the msg should be sent */ +- if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) { ++ if (bh_msg_is_response(ddev->bh_fw_msg->msg, len)) { + intf = dal_get_client_by_squence_number(ddev); + dev_dbg(&ddev->dev, "recv_cb(): Client set by sequence number\n"); + dc = ddev->clients[intf]; +@@ -171,10 +172,11 @@ static void dal_recv_cb(struct mei_cl_device *cldev) + * save new msg in queue, + * if the queue is full all new messages will be thrown + */ +- ret = kfifo_in(&dc->read_queue, &ddev->bh_fw_msg.len, sizeof(len)); +- ret += kfifo_in(&dc->read_queue, ddev->bh_fw_msg.msg, len); +- if (ret < len + sizeof(len)) ++ ret = kfifo_in(&dc->read_queue, ddev->bh_fw_msg, sizeof(len) + len); ++ if (ret < len + sizeof(len)) { ++ /* FIXME: need to take care of partial message */ + dev_dbg(&ddev->dev, "queue is full - MSG THROWN\n"); ++ } + + dal_dc_update_read_state(dc, len); + +@@ -274,21 +276,19 @@ static int dal_wait_for_write(struct dal_device *ddev, struct dal_client *dc) + static int dal_send_error_access_denied(struct dal_client *dc, const void *cmd) + { + struct dal_device *ddev = dc->ddev; +- struct bh_response_header res; +- size_t len; ++ struct bh_response_header *res; ++ struct dal_bh_msg *bh_msg; ++ u8 buf[sizeof(*bh_msg) + 
sizeof(*res)]; + int ret; + + mutex_lock(&ddev->context_lock); + +- bh_prep_access_denied_response(cmd, &res); +- len = sizeof(res); +- +- if (kfifo_in(&dc->read_queue, &len, sizeof(len)) != sizeof(len)) { +- ret = -ENOMEM; +- goto out; +- } ++ bh_msg = (struct dal_bh_msg *)buf; ++ res = (struct bh_response_header *)bh_msg->msg; ++ bh_msg->len = sizeof(*res); + +- if (kfifo_in(&dc->read_queue, &res, len) != len) { ++ bh_prep_access_denied_response(cmd, res); ++ if (kfifo_in(&dc->read_queue, buf, sizeof(buf)) != sizeof(buf)) { + ret = -ENOMEM; + goto out; + } +@@ -583,7 +583,7 @@ int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf) + return -ENOMEM; + + /* each buffer contains data and length */ +- readq_sz = (DAL_MAX_BUFFER_SIZE + sizeof(ddev->bh_fw_msg.len)) * ++ readq_sz = (DAL_MAX_BUFFER_SIZE + sizeof(ddev->bh_fw_msg->len)) * + DAL_BUFFERS_PER_CLIENT; + ret = kfifo_alloc(&dc->read_queue, readq_sz, GFP_KERNEL); + if (ret) { +@@ -674,7 +674,7 @@ static void dal_device_release(struct device *dev) + { + struct dal_device *ddev = to_dal_device(dev); + +- kfree(ddev->bh_fw_msg.msg); ++ kfree(ddev->bh_fw_msg); + kfree(ddev); + } + +@@ -719,8 +719,10 @@ static int dal_probe(struct mei_cl_device *cldev, + goto err_unregister; + } + +- ddev->bh_fw_msg.msg = kzalloc(DAL_MAX_BUFFER_SIZE, GFP_KERNEL); +- if (!ddev->bh_fw_msg.msg) { ++ ddev->bh_fw_msg = kzalloc(DAL_MAX_BUFFER_SIZE + ++ sizeof(*ddev->bh_fw_msg), ++ GFP_KERNEL); ++ if (!ddev->bh_fw_msg) { + ret = -ENOMEM; + goto err_unregister; + } +diff --git a/drivers/misc/mei/dal/dal_dev.h b/drivers/misc/mei/dal/dal_dev.h +index 80630cc846f7..8cc42f89d350 100644 +--- a/drivers/misc/mei/dal/dal_dev.h ++++ b/drivers/misc/mei/dal/dal_dev.h +@@ -88,7 +88,7 @@ struct dal_client { + */ + struct dal_bh_msg { + size_t len; +- char *msg; ++ char msg[0]; + }; + + /** +@@ -126,7 +126,7 @@ struct dal_device { + wait_queue_head_t wq; + struct list_head writers; + struct dal_client *clients[DAL_CLIENTS_PER_DEVICE]; +- struct 
dal_bh_msg bh_fw_msg; ++ struct dal_bh_msg *bh_fw_msg; + struct dal_client *current_read_client; + + struct mei_cl_device *cldev; +-- +2.17.1 + diff --git a/patches/0065-net-stmmac-fix-INTR-TBU-status-affecting-irq-.connectivity b/patches/0065-net-stmmac-fix-INTR-TBU-status-affecting-irq-.connectivity new file mode 100644 index 0000000000..eec86ebe68 --- /dev/null +++ b/patches/0065-net-stmmac-fix-INTR-TBU-status-affecting-irq-.connectivity @@ -0,0 +1,42 @@ +From f75b032157ab316e4c7064fba32601a786280e24 Mon Sep 17 00:00:00 2001 +From: Weifeng Voon +Date: Sat, 9 Mar 2019 03:14:39 +0800 +Subject: [PATCH 065/108] net: stmmac: fix INTR TBU status affecting irq count + statistic + +DMA channel status "Transmit buffer unavailable(TBU)" is not +considered as a successful dma tx. Hence, it should not affect +all the irq count statistic. + +Fixes: 1103d3a5531c ("net: stmmac: dwmac4: Also use TBU interrupt to clean TX path") +Signed-off-by: Weifeng Voon +--- + drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +index deef04cccea1..aa5c13ed0d54 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +@@ -187,8 +187,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, + ret |= handle_rx; + } + } +- if (likely(intr_status & (DMA_CHAN_STATUS_TI | +- DMA_CHAN_STATUS_TBU))) { ++ if (likely(intr_status & DMA_CHAN_STATUS_TI)) { + x->tx_normal_irq_n++; + switch (chan) { + case 0x0: +@@ -220,6 +219,8 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, + } + ret |= handle_tx; + } ++ if (unlikely(intr_status & DMA_CHAN_STATUS_TBU)) ++ ret |= handle_tx; + if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) + x->rx_early_irq++; + +-- +2.17.1 + diff --git a/patches/0065-sos-vhm-remove-MAP_MMIO.acrn b/patches/0065-sos-vhm-remove-MAP_MMIO.acrn new file mode 100644 
index 0000000000..926119440f --- /dev/null +++ b/patches/0065-sos-vhm-remove-MAP_MMIO.acrn @@ -0,0 +1,52 @@ +From d80857aba9c147490058b671668bbb316b71deb5 Mon Sep 17 00:00:00 2001 +From: "Li, Fei1" +Date: Fri, 31 Aug 2018 10:59:02 +0800 +Subject: [PATCH 065/150] sos: vhm: remove MAP_MMIO + +Now the MAP_MMIO has no difference with MAP_MEM. So there's no needs +to keep it. + +Signed-off-by: Li, Fei1 +Acked-by: Eddie Dong +--- + drivers/vhm/vhm_mm.c | 4 ++-- + include/linux/vhm/acrn_hv_defs.h | 1 - + 2 files changed, 2 insertions(+), 3 deletions(-) + +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index c7ca10255064..f663558ae943 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -135,7 +135,7 @@ int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, + unsigned int mem_type, unsigned mem_access_right) + { + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, +- mem_type, mem_access_right, MAP_MMIO); ++ mem_type, mem_access_right, MAP_MEM); + } + + int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, +@@ -187,7 +187,7 @@ int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) + host_gpa = acrn_hpa2gpa(memmap->hpa); + mem_type = MEM_TYPE_UC; + mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); +- type = MAP_MMIO; ++ type = MAP_MEM; + + if (_mem_set_memmap(vm->vmid, guest_gpa, host_gpa, memmap->len, + mem_type, mem_access_right, type) < 0) { +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index f20f3afb8e89..135910baeab0 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -133,7 +133,6 @@ + + struct vm_set_memmap { + #define MAP_MEM 0 +-#define MAP_MMIO 1 + #define MAP_UNMAP 2 + uint32_t type; + +-- +2.17.1 + diff --git a/patches/0066-ALSA-core-let-low-level-driver-or-userspace-disable-.audio b/patches/0066-ALSA-core-let-low-level-driver-or-userspace-disable-.audio new file mode 100644 index 0000000000..3b69f22698 --- /dev/null +++ 
b/patches/0066-ALSA-core-let-low-level-driver-or-userspace-disable-.audio @@ -0,0 +1,80 @@ +From fabbf013f0e1a817eaf73f1e6e618d59ca98d9ef Mon Sep 17 00:00:00 2001 +From: Pierre-Louis Bossart +Date: Tue, 17 May 2016 14:00:15 +0530 +Subject: [PATCH 066/193] ALSA: core: let low-level driver or userspace disable + rewinds + +Add new hw_params flag to explicitly tell driver that rewinds will never +be used. This can be used by low-level driver to optimize DMA operations +and reduce power consumption. Use this flag only when data written in +ring buffer will never be invalidated, e.g. any update of appl_ptr is +final. + +Note that the update of appl_ptr include both a read/write data +operation as well as snd_pcm_forward() whose behavior is not modified. + +Signed-off-by: Pierre-Louis Bossart +Signed-off-by: Ramesh Babu +Signed-off-by: Subhransu S. Prusty +--- + include/sound/pcm.h | 1 + + include/uapi/sound/asound.h | 1 + + sound/core/pcm_native.c | 6 ++++++ + 3 files changed, 8 insertions(+) + +diff --git a/include/sound/pcm.h b/include/sound/pcm.h +index bbe6eb1ff5d2..36e847ffb556 100644 +--- a/include/sound/pcm.h ++++ b/include/sound/pcm.h +@@ -370,6 +370,7 @@ struct snd_pcm_runtime { + unsigned int rate_num; + unsigned int rate_den; + unsigned int no_period_wakeup: 1; ++ unsigned int no_rewinds:1; + + /* -- SW params -- */ + int tstamp_mode; /* mmap timestamp is updated */ +diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h +index df1153cea0b7..ffc53dd7ac44 100644 +--- a/include/uapi/sound/asound.h ++++ b/include/uapi/sound/asound.h +@@ -378,6 +378,7 @@ typedef int snd_pcm_hw_param_t; + #define SNDRV_PCM_HW_PARAMS_NORESAMPLE (1<<0) /* avoid rate resampling */ + #define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER (1<<1) /* export buffer */ + #define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP (1<<2) /* disable period wakeups */ ++#define SNDRV_PCM_HW_PARAMS_NO_REWINDS (1<<3) /* disable rewinds */ + + struct snd_interval { + unsigned int min, max; +diff --git 
a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 91c6ad58729f..078e4bcc01c4 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -680,6 +680,8 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, + runtime->no_period_wakeup = + (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) && + (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP); ++ runtime->no_rewinds = ++ (params->flags & SNDRV_PCM_HW_PARAMS_NO_REWINDS) ? 1 : 0; + + bits = snd_pcm_format_physical_width(runtime->format); + runtime->sample_bits = bits; +@@ -2680,11 +2682,15 @@ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream, + static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream, + snd_pcm_uframes_t frames) + { ++ struct snd_pcm_runtime *runtime = substream->runtime; + snd_pcm_sframes_t ret; + + if (frames == 0) + return 0; + ++ if (runtime->no_rewinds) ++ return 0; ++ + snd_pcm_stream_lock_irq(substream); + ret = do_pcm_hwsync(substream); + if (!ret) +-- +2.17.1 + diff --git a/patches/0066-drm-i915-Protect-our-local-workers-against-I915_FENCE_.drm b/patches/0066-drm-i915-Protect-our-local-workers-against-I915_FENCE_.drm new file mode 100644 index 0000000000..172f9391fc --- /dev/null +++ b/patches/0066-drm-i915-Protect-our-local-workers-against-I915_FENCE_.drm @@ -0,0 +1,34 @@ +From 1db4586868bb938b23c71becee424b1b29d0520f Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Mon, 26 Aug 2019 08:21:27 +0100 +Subject: [PATCH 066/690] drm/i915: Protect our local workers against + I915_FENCE_TIMEOUT + +Trust our own workers to not cause unnecessary delays and disable the +automatic timeout on their asynchronous fence waits. (Along the same +lines that we trust our own requests to complete eventually, if +necessary by force.) 
+ +Signed-off-by: Chris Wilson +Reviewed-by: Matthew Auld +Link: https://patchwork.freedesktop.org/patch/msgid/20190826072149.9447-6-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/i915_request.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c +index eef70dc68934..99eff4fd311b 100644 +--- a/drivers/gpu/drm/i915/i915_request.c ++++ b/drivers/gpu/drm/i915/i915_request.c +@@ -963,7 +963,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) + ret = i915_request_await_request(rq, to_request(fence)); + else + ret = i915_sw_fence_await_dma_fence(&rq->submit, fence, +- I915_FENCE_TIMEOUT, ++ fence->context ? I915_FENCE_TIMEOUT : 0, + I915_FENCE_GFP); + if (ret < 0) + return ret; +-- +2.17.1 + diff --git a/patches/0066-net-stmmac-add-ethtool-support-for-get-set-ch.connectivity b/patches/0066-net-stmmac-add-ethtool-support-for-get-set-ch.connectivity new file mode 100644 index 0000000000..5fdbab3f20 --- /dev/null +++ b/patches/0066-net-stmmac-add-ethtool-support-for-get-set-ch.connectivity @@ -0,0 +1,202 @@ +From 84834e436b03a43a89751fa433c21a8891cd8044 Mon Sep 17 00:00:00 2001 +From: Ong Boon Leong +Date: Fri, 27 Jul 2018 14:52:21 +0800 +Subject: [PATCH 066/108] net: stmmac: add ethtool support for get/set channels + +Restructure NAPI add and delete process so that we can call them +accordingly in open() and ethtool_set_channels() accordingly. + +Introduced stmmac_reinit_queues() to handle the transition needed +for changing Rx & Tx channels accordingly. 
+ +Signed-off-by: Ong Boon Leong +--- + drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + + .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 26 ++++++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 92 +++++++++++++------ + 3 files changed, 93 insertions(+), 26 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +index 78d6c5dbda15..2e1a52a9b3e1 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -279,6 +279,7 @@ int stmmac_dvr_probe(struct device *device, + struct stmmac_resources *res); + void stmmac_disable_eee_mode(struct stmmac_priv *priv); + bool stmmac_eee_init(struct stmmac_priv *priv); ++int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); + + #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) + void stmmac_selftest_run(struct net_device *dev, +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index 583d4b442f61..54d39ad471b3 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -906,6 +906,30 @@ static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir, + priv->plat->rx_queues_to_use); + } + ++static void stmmac_get_channels(struct net_device *dev, ++ struct ethtool_channels *chan) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ ++ chan->rx_count = priv->plat->rx_queues_to_use; ++ chan->tx_count = priv->plat->tx_queues_to_use; ++ chan->max_rx = priv->dma_cap.number_rx_queues; ++ chan->max_tx = priv->dma_cap.number_tx_queues; ++} ++ ++static int stmmac_set_channels(struct net_device *dev, ++ struct ethtool_channels *chan) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ ++ if (chan->rx_count > priv->dma_cap.number_rx_queues || ++ chan->tx_count > priv->dma_cap.number_tx_queues || ++ !chan->rx_count || !chan->tx_count) ++ return -EINVAL; ++ ++ 
return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count); ++} ++ + static int stmmac_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) + { +@@ -1005,6 +1029,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = { + .get_ts_info = stmmac_get_ts_info, + .get_coalesce = stmmac_get_coalesce, + .set_coalesce = stmmac_set_coalesce, ++ .get_channels = stmmac_get_channels, ++ .set_channels = stmmac_set_channels, + .get_tunable = stmmac_get_tunable, + .set_tunable = stmmac_set_tunable, + .get_link_ksettings = stmmac_ethtool_get_link_ksettings, +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 1340bec3c6b8..2e4d49db35b2 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -5105,6 +5105,69 @@ static int stmmac_hw_init(struct stmmac_priv *priv) + return 0; + } + ++static void stmmac_napi_add(struct net_device *dev) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ u32 queue, maxq; ++ ++ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); ++ ++ for (queue = 0; queue < maxq; queue++) { ++ struct stmmac_channel *ch = &priv->channel[queue]; ++ ++ ch->priv_data = priv; ++ ch->index = queue; ++ ++ if (queue < priv->plat->rx_queues_to_use) { ++ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, ++ NAPI_POLL_WEIGHT); ++ } ++ if (queue < priv->plat->tx_queues_to_use) { ++ netif_tx_napi_add(dev, &ch->tx_napi, ++ stmmac_napi_poll_tx, ++ NAPI_POLL_WEIGHT); ++ } ++ } ++} ++ ++static void stmmac_napi_del(struct net_device *dev) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ u32 queue, maxq; ++ ++ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); ++ ++ for (queue = 0; queue < maxq; queue++) { ++ struct stmmac_channel *ch = &priv->channel[queue]; ++ ++ if (queue < priv->plat->rx_queues_to_use) ++ netif_napi_del(&ch->rx_napi); ++ if (queue < 
priv->plat->tx_queues_to_use) ++ netif_napi_del(&ch->tx_napi); ++ } ++} ++ ++int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ int ret = 0; ++ ++ if (netif_running(dev)) ++ stmmac_release(dev); ++ ++ stmmac_napi_del(dev); ++ ++ priv->plat->rx_queues_to_use = rx_cnt; ++ priv->plat->tx_queues_to_use = tx_cnt; ++ ++ stmmac_napi_add(dev); ++ ++ if (netif_running(dev)) ++ ret = stmmac_open(dev); ++ ++ return ret; ++} ++ + /** + * stmmac_dvr_probe + * @device: device pointer +@@ -5121,7 +5184,7 @@ int stmmac_dvr_probe(struct device *device, + { + struct net_device *ndev = NULL; + struct stmmac_priv *priv; +- u32 queue, rxq, maxq; ++ u32 rxq; + int i, ret = 0; + + ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), +@@ -5300,24 +5363,7 @@ int stmmac_dvr_probe(struct device *device, + priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ + + /* Setup channels NAPI */ +- maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); +- +- for (queue = 0; queue < maxq; queue++) { +- struct stmmac_channel *ch = &priv->channel[queue]; +- +- ch->priv_data = priv; +- ch->index = queue; +- +- if (queue < priv->plat->rx_queues_to_use) { +- netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx, +- NAPI_POLL_WEIGHT); +- } +- if (queue < priv->plat->tx_queues_to_use) { +- netif_tx_napi_add(ndev, &ch->tx_napi, +- stmmac_napi_poll_tx, +- NAPI_POLL_WEIGHT); +- } +- } ++ stmmac_napi_add(ndev); + + mutex_init(&priv->lock); + +@@ -5377,14 +5423,8 @@ int stmmac_dvr_probe(struct device *device, + priv->hw->pcs != STMMAC_PCS_RTBI) + stmmac_mdio_unregister(ndev); + error_mdio_register: +- for (queue = 0; queue < maxq; queue++) { +- struct stmmac_channel *ch = &priv->channel[queue]; ++ stmmac_napi_del(ndev); + +- if (queue < priv->plat->rx_queues_to_use) +- netif_napi_del(&ch->rx_napi); +- if (queue < priv->plat->tx_queues_to_use) +- netif_napi_del(&ch->tx_napi); +- } + error_hw_init: + 
destroy_workqueue(priv->wq); + +-- +2.17.1 + diff --git a/patches/0066-vhm-revisit-types-in-structure-parameters-of-hypercal.acrn b/patches/0066-vhm-revisit-types-in-structure-parameters-of-hypercal.acrn new file mode 100644 index 0000000000..e82569eb85 --- /dev/null +++ b/patches/0066-vhm-revisit-types-in-structure-parameters-of-hypercal.acrn @@ -0,0 +1,215 @@ +From 777dc601c6f9f201fbacb49357016e7c56c2d0d6 Mon Sep 17 00:00:00 2001 +From: Junjie Mao +Date: Fri, 31 Aug 2018 10:59:02 +0800 +Subject: [PATCH 066/150] vhm: revisit types in structure parameters of + hypercalls + +While fixing the MISRA C violations related to integral types, we have unified +the type of the following data: + + uint8_t: + phys_pin, virt_pin, vpic_pin, ioapic_pin, vioapic_pin + + uint16_t: + vm_id, pcpu_id, vcpu_id, vpid + + uint32_t: + vector, irq + +This patch revisits the types of the fields in vhm_request as well as the +structures used as parameters in the hypercalls, and make them aligned with the +types the hypervisor uses for such data. Reserved fields are added to keep the +size and layout of the structures. Implicit paddings are also made explicit as +reserved fields. + +This is the update on the VHM side in correspondance to the same changes in the +hypervisor and device model. + +v1 -> v2: + + * Make reserved fields unsigned. + * Combine continuous reserved fields using proper arrays. + * Make msix_entry_index unsigned as it is used in this way in both the + hypervisor and kernel. 
+ +Signed-off-by: Junjie Mao +Reviewed-by: Zhao Yakui +--- + include/linux/vhm/acrn_common.h | 54 ++++++++++++++++++++------------ + include/linux/vhm/acrn_hv_defs.h | 17 +++++++--- + 2 files changed, 46 insertions(+), 25 deletions(-) + +diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h +index a4ae0146ac39..7a1b17fd53bd 100644 +--- a/include/linux/vhm/acrn_common.h ++++ b/include/linux/vhm/acrn_common.h +@@ -93,17 +93,17 @@ + struct mmio_request { + uint32_t direction; + uint32_t reserved; +- int64_t address; +- int64_t size; +- int64_t value; ++ uint64_t address; ++ uint64_t size; ++ uint64_t value; + } __attribute__((aligned(8))); + + struct pio_request { + uint32_t direction; + uint32_t reserved; +- int64_t address; +- int64_t size; +- int32_t value; ++ uint64_t address; ++ uint64_t size; ++ uint32_t value; + } __attribute__((aligned(8))); + + struct pci_request { +@@ -120,16 +120,15 @@ struct pci_request { + /* vhm_request are 256Bytes aligned */ + struct vhm_request { + /* offset: 0bytes - 63bytes */ +- union { +- uint32_t type; +- int32_t reserved0[16]; +- }; ++ uint32_t type; ++ uint32_t reserved0[15]; ++ + /* offset: 64bytes-127bytes */ + union { + struct pio_request pio_request; + struct pci_request pci_request; + struct mmio_request mmio_request; +- int64_t reserved1[8]; ++ uint64_t reserved1[8]; + } reqs; + + /* True: valid req which need VHM to process. +@@ -151,7 +150,7 @@ struct vhm_request { + struct vhm_request_buffer { + union { + struct vhm_request req_queue[VHM_REQUEST_MAX]; +- int8_t reserved[4096]; ++ uint8_t reserved[4096]; + }; + } __attribute__((aligned(4096))); + +@@ -160,10 +159,16 @@ struct vhm_request_buffer { + */ + struct acrn_create_vm { + /** created vmid return to VHM. 
Keep it first field */ +- int32_t vmid; ++ uint16_t vmid; ++ ++ /** Reserved */ ++ uint16_t reserved0; + + /** VCPU numbers this VM want to create */ +- uint32_t vcpu_num; ++ uint16_t vcpu_num; ++ ++ /** Reserved */ ++ uint16_t reserved1; + + /** the GUID of this VM */ + uint8_t GUID[16]; +@@ -174,7 +179,7 @@ struct acrn_create_vm { + uint64_t vm_flag; + + /** Reserved for future use*/ +- uint8_t reserved[24]; ++ uint8_t reserved2[24]; + } __attribute__((aligned(8))); + + /** +@@ -220,12 +225,18 @@ struct acrn_irqline { + uint32_t reserved; + + /** pic IRQ for ISA type */ +- uint64_t pic_irq; ++ uint32_t pic_irq; ++ ++ /** Reserved */ ++ uint32_t reserved0; + + /** ioapic IRQ for IOAPIC & ISA TYPE, +- * if -1 then this IRQ will not be injected ++ * if ~0U then this IRQ will not be injected + */ +- uint64_t ioapic_irq; ++ uint32_t ioapic_irq; ++ ++ /** Reserved */ ++ uint32_t reserved1; + } __attribute__((aligned(8))); + + /** +@@ -246,7 +257,10 @@ struct acrn_msi_entry { + */ + struct acrn_nmi_entry { + /** virtual CPU ID to inject */ +- int64_t vcpu_id; ++ uint16_t vcpu_id; ++ ++ /** Reserved */ ++ uint16_t reserved[3]; + } __attribute__((aligned(8))); + + /** +@@ -285,7 +299,7 @@ struct acrn_vm_pci_msix_remap { + /** if the pass-through PCI device is MSI-X, this field contains + * the MSI-X entry table index + */ +- int32_t msix_entry_index; ++ uint32_t msix_entry_index; + + /** if the pass-through PCI device is MSI-X, this field contains + * Vector Control for MSI-X Entry, field defined in MSI-X spec +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 135910baeab0..0e2586f9d955 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -169,7 +169,10 @@ struct memory_map { + + struct set_memmaps { + /*IN: vmid for this hypercall */ +- uint64_t vmid; ++ uint16_t vmid; ++ ++ /** Reserved */ ++ uint16_t reserved[3]; + + /* IN: multi memmaps numbers */ + uint32_t memmaps_num; +@@ -183,7 +186,8 @@ 
struct set_memmaps { + } __attribute__((aligned(8))); + + struct sbuf_setup_param { +- uint32_t pcpu_id; ++ uint16_t pcpu_id; ++ uint16_t reserved; + uint32_t sbuf_id; + uint64_t gpa; + } __attribute__((aligned(8))); +@@ -202,9 +206,12 @@ struct hc_ptdev_irq { + uint16_t phys_bdf; /* IN: Device physical BDF# */ + union { + struct { +- uint32_t virt_pin; /* IN: virtual IOAPIC pin */ +- uint32_t phys_pin; /* IN: physical IOAPIC pin */ +- uint32_t pic_pin; /* IN: pin from PIC? */ ++ uint8_t virt_pin; /* IN: virtual IOAPIC pin */ ++ uint8_t reserved0[3]; /* Reserved */ ++ uint8_t phys_pin; /* IN: physical IOAPIC pin */ ++ uint8_t reserved1[3]; /* Reserved */ ++ bool pic_pin; /* IN: pin from PIC? */ ++ uint8_t reserved2[3]; /* Reserved */ + } intx; + struct { + /* IN: vector count of MSI/MSIX */ +-- +2.17.1 + diff --git a/patches/0067-ALSA-pcm-conditionally-avoid-mmap-of-control-data.audio b/patches/0067-ALSA-pcm-conditionally-avoid-mmap-of-control-data.audio new file mode 100644 index 0000000000..c9c2d92c45 --- /dev/null +++ b/patches/0067-ALSA-pcm-conditionally-avoid-mmap-of-control-data.audio @@ -0,0 +1,89 @@ +From 5590a7a615c66322d92575c43ab830de84aa1b33 Mon Sep 17 00:00:00 2001 +From: Pierre-Louis Bossart +Date: Thu, 16 Jun 2016 16:01:00 +0530 +Subject: [PATCH 067/193] ALSA: pcm: conditionally avoid mmap of control data. + +In case of mmap, by default alsa-lib mmaps both control and status data. + +If driver subscribes for application pointer update, driver needs to get +notification whenever appl ptr changes. With the above case driver won't +get appl ptr notifications. + +This patch check on a hw info flag and returns error when user land asks +for mmaping control & status data, thus forcing user to issue +IOCTL_SYNC_PTR. + +Change-Id: I05b83f630812face322c474d9bbb6d56cbdc08fb +Suggested-by: Takashi Iwai +Signed-off-by: Pierre-Louis Bossart +Signed-off-by: Ramesh Babu +Signed-off-by: Jaikrishna Nemallapudi +Signed-off-by: Subhransu S. 
Prusty +Signed-off-by: Mallikarjun, chippalkatti +Reviewed-on: +Reviewed-by: audio_build +Reviewed-by: Sm, Bhadur A +Tested-by: Sm, Bhadur A +--- + include/uapi/sound/asound.h | 1 + + sound/core/pcm_native.c | 17 +++++++++++++++++ + 2 files changed, 18 insertions(+) + +diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h +index ffc53dd7ac44..02a3847fac5c 100644 +--- a/include/uapi/sound/asound.h ++++ b/include/uapi/sound/asound.h +@@ -297,6 +297,7 @@ typedef int __bitwise snd_pcm_subformat_t; + #define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */ + #define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */ + #define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */ ++#define SNDRV_PCM_INFO_NO_STATUS_MMAP 0x10000000 /* status and control mmap not supported */ + + #define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */ + #define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */ +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 078e4bcc01c4..22317f045bf5 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -3498,21 +3498,38 @@ static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area) + struct snd_pcm_file * pcm_file; + struct snd_pcm_substream *substream; + unsigned long offset; ++ unsigned int info; + + pcm_file = file->private_data; + substream = pcm_file->substream; + if (PCM_RUNTIME_CHECK(substream)) + return -ENXIO; ++ info = substream->runtime->hw.info; + + offset = area->vm_pgoff << PAGE_SHIFT; + switch (offset) { + case SNDRV_PCM_MMAP_OFFSET_STATUS: + if (!pcm_status_mmap_allowed(pcm_file)) + return -ENXIO; ++ /* ++ * force fallback to ioctl if driver doesn't support status ++ * and control mmap. 
++ */ ++ if (info & SNDRV_PCM_INFO_NO_STATUS_MMAP) ++ return -ENXIO; ++ + return snd_pcm_mmap_status(substream, file, area); + case SNDRV_PCM_MMAP_OFFSET_CONTROL: + if (!pcm_control_mmap_allowed(pcm_file)) + return -ENXIO; ++ ++ /* ++ * force fallback to ioctl if driver doesn't support status ++ * and control mmap. ++ */ ++ if (info & SNDRV_PCM_INFO_NO_STATUS_MMAP) ++ return -ENXIO; ++ + return snd_pcm_mmap_control(substream, file, area); + default: + return snd_pcm_mmap_data(substream, file, area); +-- +2.17.1 + diff --git a/patches/0067-drm-i915-tgl-Enabling-DSC-on-Pipe-A-for-TGL.drm b/patches/0067-drm-i915-tgl-Enabling-DSC-on-Pipe-A-for-TGL.drm new file mode 100644 index 0000000000..f07bbc910f --- /dev/null +++ b/patches/0067-drm-i915-tgl-Enabling-DSC-on-Pipe-A-for-TGL.drm @@ -0,0 +1,65 @@ +From cd108a0d2d2166932dc6e90e223a6ece6e4dcc66 Mon Sep 17 00:00:00 2001 +From: Madhumitha Tolakanahalli Pradeep + +Date: Thu, 22 Aug 2019 17:46:55 -0700 +Subject: [PATCH 067/690] drm/i915/tgl: Enabling DSC on Pipe A for TGL + +DSC was not supported on Pipe A for previous platforms. Tigerlake onwards, +all the pipes support DSC. Hence, the DSC and FEC restriction on +Pipe A needs to be removed. 
+ +v2: Changes in the logic around removing the restriction around + Pipe A (Manasi, Lucas) + +Cc: Manasi Navare +Signed-off-by: Madhumitha Tolakanahalli Pradeep +Reviewed-by: Manasi Navare +Reviewed-by: Lucas De Marchi +Signed-off-by: Manasi Navare +Link: https://patchwork.freedesktop.org/patch/msgid/20190823004655.28905-1-madhumitha.tolakanahalli.pradeep@intel.com +--- + drivers/gpu/drm/i915/display/intel_dp.c | 21 +++++++++++++++++---- + 1 file changed, 17 insertions(+), 4 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c +index f6aefba822fb..961562176eeb 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp.c ++++ b/drivers/gpu/drm/i915/display/intel_dp.c +@@ -1828,8 +1828,14 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, + { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + +- return INTEL_GEN(dev_priv) >= 11 && +- pipe_config->cpu_transcoder != TRANSCODER_A; ++ /* On TGL, FEC is supported on all Pipes */ ++ if (INTEL_GEN(dev_priv) >= 12) ++ return true; ++ ++ if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A) ++ return true; ++ ++ return false; + } + + static bool intel_dp_supports_fec(struct intel_dp *intel_dp, +@@ -1844,8 +1850,15 @@ static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp, + { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + +- return INTEL_GEN(dev_priv) >= 10 && +- pipe_config->cpu_transcoder != TRANSCODER_A; ++ /* On TGL, DSC is supported on all Pipes */ ++ if (INTEL_GEN(dev_priv) >= 12) ++ return true; ++ ++ if (INTEL_GEN(dev_priv) >= 10 && ++ pipe_config->cpu_transcoder != TRANSCODER_A) ++ return true; ++ ++ return false; + } + + static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, +-- +2.17.1 + diff --git a/patches/0067-net-stmmac-Add-support-to-Ethtool-get-set-rin.connectivity b/patches/0067-net-stmmac-Add-support-to-Ethtool-get-set-rin.connectivity new file mode 100644 index 
0000000000..3dc2f8f488 --- /dev/null +++ b/patches/0067-net-stmmac-Add-support-to-Ethtool-get-set-rin.connectivity @@ -0,0 +1,770 @@ +From b3796a7aa2ab47edfff6ea86c2643d747f9cb9a7 Mon Sep 17 00:00:00 2001 +From: "Song, Yoong Siang" +Date: Thu, 19 Jul 2018 17:58:40 +0800 +Subject: [PATCH 067/108] net: stmmac: Add support to Ethtool get/set ring + parameters + +This patch add support to --show-ring & --set-ring Ethtool functions: +- Adding min, max, power of two check to new ring parameter's value. +- Bring down the network interface before changing the value of ring + parameters. +- Bring up the network interface after changing the value of ring + parameters. + +Signed-off-by: Song, Yoong Siang +Signed-off-by: Voon Weifeng +Signed-off-by: Ong Boon Leong +--- + .../net/ethernet/stmicro/stmmac/chain_mode.c | 7 +- + drivers/net/ethernet/stmicro/stmmac/common.h | 13 +- + .../net/ethernet/stmicro/stmmac/ring_mode.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3 + + .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 29 +++ + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 199 +++++++++++------- + .../stmicro/stmmac/stmmac_selftests.c | 2 +- + 7 files changed, 170 insertions(+), 85 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +index 52971f5293aa..d2cdc02d9f94 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) + + while (len != 0) { + tx_q->tx_skbuff[entry] = NULL; +- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); ++ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + desc = tx_q->dma_tx + entry; + + if (len > bmax) { +@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p) + */ + p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy + + (((rx_q->dirty_rx) + 1) % +- DMA_RX_SIZE) * ++ priv->dma_rx_size) * + 
sizeof(struct dma_desc))); + } + +@@ -154,7 +154,8 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p) + * to keep explicit chaining in the descriptor. + */ + p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy + +- ((tx_q->dirty_tx + 1) % DMA_TX_SIZE)) ++ ((tx_q->dirty_tx + 1) % ++ priv->dma_tx_size)) + * sizeof(struct dma_desc))); + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index a6e156205d78..f1e54d1e7969 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -37,9 +37,16 @@ + + #define STMMAC_CHAN0 0 /* Always supported and default for all chips */ + +-/* These need to be power of two, and >= 4 */ +-#define DMA_TX_SIZE 512 +-#define DMA_RX_SIZE 512 ++/* TX and RX Descriptor Length, these need to be power of two. ++ * TX descriptor length less than 64 may cause transmit queue timed out error. ++ * RX descriptor length less than 64 may cause inconsistent Rx chain error. 
++ */ ++#define DMA_MIN_TX_SIZE 64 ++#define DMA_MAX_TX_SIZE 1024 ++#define DMA_DEFAULT_TX_SIZE 512 ++#define DMA_MIN_RX_SIZE 64 ++#define DMA_MAX_RX_SIZE 1024 ++#define DMA_DEFAULT_RX_SIZE 512 + #define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1)) + + #undef FRAME_FILTER_DEBUG +diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +index 14bd5e7b9875..8ad900949dc8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, + STMMAC_RING_MODE, 0, false, skb->len); + tx_q->tx_skbuff[entry] = NULL; +- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); ++ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + + if (priv->extend_desc) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +index 2e1a52a9b3e1..30a547473104 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h +@@ -171,9 +171,11 @@ struct stmmac_priv { + + /* RX Queue */ + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; ++ unsigned int dma_rx_size; + + /* TX Queue */ + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; ++ unsigned int dma_tx_size; + + /* Generic channel for NAPI */ + struct stmmac_channel channel[STMMAC_CH_MAX]; +@@ -280,6 +282,7 @@ int stmmac_dvr_probe(struct device *device, + void stmmac_disable_eee_mode(struct stmmac_priv *priv); + bool stmmac_eee_init(struct stmmac_priv *priv); + int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); ++int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); + + #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) + void stmmac_selftest_run(struct net_device *dev, +diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index 54d39ad471b3..d85f91fabd1f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -472,6 +472,33 @@ static int stmmac_nway_reset(struct net_device *dev) + return phylink_ethtool_nway_reset(priv->phylink); + } + ++static void stmmac_get_ringparam(struct net_device *netdev, ++ struct ethtool_ringparam *ring) ++{ ++ struct stmmac_priv *priv = netdev_priv(netdev); ++ ++ ring->rx_max_pending = DMA_MAX_RX_SIZE; ++ ring->tx_max_pending = DMA_MAX_TX_SIZE; ++ ring->rx_pending = priv->dma_rx_size; ++ ring->tx_pending = priv->dma_tx_size; ++} ++ ++static int stmmac_set_ringparam(struct net_device *netdev, ++ struct ethtool_ringparam *ring) ++{ ++ if (ring->rx_mini_pending || ring->rx_jumbo_pending || ++ ring->rx_pending < DMA_MIN_RX_SIZE || ++ ring->rx_pending > DMA_MAX_RX_SIZE || ++ !is_power_of_2(ring->rx_pending) || ++ ring->tx_pending < DMA_MIN_TX_SIZE || ++ ring->tx_pending > DMA_MAX_TX_SIZE || ++ !is_power_of_2(ring->tx_pending)) ++ return -EINVAL; ++ ++ return stmmac_reinit_ringparam(netdev, ring->rx_pending, ++ ring->tx_pending); ++} ++ + static void + stmmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +@@ -1011,6 +1038,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = { + .get_regs_len = stmmac_ethtool_get_regs_len, + .get_link = ethtool_op_get_link, + .nway_reset = stmmac_nway_reset, ++ .get_ringparam = stmmac_get_ringparam, ++ .set_ringparam = stmmac_set_ringparam, + .get_pauseparam = stmmac_get_pauseparam, + .set_pauseparam = stmmac_set_pauseparam, + .self_test = stmmac_selftest_run, +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 2e4d49db35b2..e58667cd2e96 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -64,8 +64,8 @@ static int phyaddr = -1; + module_param(phyaddr, int, 0444); + MODULE_PARM_DESC(phyaddr, "Physical device address"); + +-#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) +-#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) ++#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) ++#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) + + static int flow_ctrl = FLOW_AUTO; + module_param(flow_ctrl, int, 0644); +@@ -302,7 +302,8 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) + if (tx_q->dirty_tx > tx_q->cur_tx) + avail = tx_q->dirty_tx - tx_q->cur_tx - 1; + else +- avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1; ++ avail = priv->dma_tx_size - tx_q->cur_tx + ++ tx_q->dirty_tx - 1; + + return avail; + } +@@ -320,7 +321,8 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) + if (rx_q->dirty_rx <= rx_q->cur_rx) + dirty = rx_q->cur_rx - rx_q->dirty_rx; + else +- dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx; ++ dirty = priv->dma_rx_size - rx_q->dirty_rx + ++ rx_q->cur_rx; + + return dirty; + } +@@ -1099,7 +1101,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) + head_rx = (void *)rx_q->dma_rx; + + /* Display RX ring */ +- stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true); ++ stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true); + } + } + +@@ -1122,7 +1124,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) + else + head_tx = (void *)tx_q->dma_tx; + +- stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false); ++ stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false); + } + } + +@@ -1164,16 +1166,16 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) + int i; + + /* Clear the RX descriptors */ +- for (i = 0; i < DMA_RX_SIZE; i++) ++ for (i = 0; i < priv->dma_rx_size; i++) + if (priv->extend_desc) + stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, + priv->use_riwt, priv->mode, +- (i == 
DMA_RX_SIZE - 1), ++ (i == priv->dma_rx_size - 1), + priv->dma_buf_sz); + else + stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], + priv->use_riwt, priv->mode, +- (i == DMA_RX_SIZE - 1), ++ (i == priv->dma_rx_size - 1), + priv->dma_buf_sz); + } + +@@ -1190,18 +1192,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) + int i; + + /* Clear the TX descriptors */ +- for (i = 0; i < DMA_TX_SIZE; i++) ++ for (i = 0; i < priv->dma_tx_size; i++) + if (priv->extend_desc) + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, + priv->mode, +- (i == DMA_TX_SIZE - 1)); ++ (i == priv->dma_tx_size - 1)); + else if (priv->enhanced_tx_desc) + stmmac_init_tx_desc(priv, &tx_q->dma_enhtx[i].basic, + priv->mode, +- (i == DMA_TX_SIZE - 1)); ++ (i == priv->dma_tx_size - 1)); + else + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], +- priv->mode, (i == DMA_TX_SIZE - 1)); ++ priv->mode, ++ (i == priv->dma_tx_size - 1)); + } + + /** +@@ -1354,7 +1357,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) + + stmmac_clear_rx_descriptors(priv, queue); + +- for (i = 0; i < DMA_RX_SIZE; i++) { ++ for (i = 0; i < priv->dma_rx_size; i++) { + struct dma_desc *p; + + if (priv->extend_desc) +@@ -1369,16 +1372,18 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) + } + + rx_q->cur_rx = 0; +- rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); ++ rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); + + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, rx_q->dma_erx, +- rx_q->dma_rx_phy, DMA_RX_SIZE, 1); ++ rx_q->dma_rx_phy, ++ priv->dma_rx_size, 1); + else + stmmac_mode_init(priv, rx_q->dma_rx, +- rx_q->dma_rx_phy, DMA_RX_SIZE, 0); ++ rx_q->dma_rx_phy, ++ priv->dma_rx_size, 0); + } + } + +@@ -1394,7 +1399,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) + if (queue == 0) + break; + +- i = DMA_RX_SIZE; ++ i = 
priv->dma_rx_size; + queue--; + } + +@@ -1427,17 +1432,18 @@ static int init_dma_tx_desc_rings(struct net_device *dev) + if (priv->extend_desc) + stmmac_mode_init(priv, tx_q->dma_etx, + tx_q->dma_tx_phy, +- DMA_TX_SIZE, 1); ++ priv->dma_tx_size, 1); + else if (priv->enhanced_tx_desc) + stmmac_mode_init(priv, tx_q->dma_enhtx, + tx_q->dma_tx_phy, +- DMA_TX_SIZE, 1); ++ priv->dma_tx_size, 1); + else + stmmac_mode_init(priv, tx_q->dma_tx, +- tx_q->dma_tx_phy, DMA_TX_SIZE, 0); ++ tx_q->dma_tx_phy, ++ priv->dma_tx_size, 0); + } + +- for (i = 0; i < DMA_TX_SIZE; i++) { ++ for (i = 0; i < priv->dma_tx_size; i++) { + struct dma_desc *p; + if (priv->extend_desc) + p = &((tx_q->dma_etx + i)->basic); +@@ -1501,7 +1507,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) + { + int i; + +- for (i = 0; i < DMA_RX_SIZE; i++) ++ for (i = 0; i < priv->dma_rx_size; i++) + stmmac_free_rx_buffer(priv, queue, i); + } + +@@ -1514,7 +1520,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) + { + int i; + +- for (i = 0; i < DMA_TX_SIZE; i++) ++ for (i = 0; i < priv->dma_tx_size; i++) + stmmac_free_tx_buffer(priv, queue, i); + } + +@@ -1536,11 +1542,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv) + + /* Free DMA regions of consistent memory previously allocated */ + if (!priv->extend_desc) +- dma_free_coherent(priv->device, +- DMA_RX_SIZE * sizeof(struct dma_desc), ++ dma_free_coherent(priv->device, priv->dma_rx_size * ++ sizeof(struct dma_desc), + rx_q->dma_rx, rx_q->dma_rx_phy); + else +- dma_free_coherent(priv->device, DMA_RX_SIZE * ++ dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + rx_q->dma_erx, rx_q->dma_rx_phy); + +@@ -1570,15 +1576,15 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv) + + /* Free DMA regions of consistent memory previously allocated */ + if (priv->extend_desc) +- dma_free_coherent(priv->device, DMA_TX_SIZE * ++ dma_free_coherent(priv->device, 
priv->dma_tx_size * + sizeof(struct dma_extended_desc), + tx_q->dma_etx, tx_q->dma_tx_phy); + else if (priv->enhanced_tx_desc) +- dma_free_coherent(priv->device, DMA_TX_SIZE * ++ dma_free_coherent(priv->device, priv->dma_tx_size * + sizeof(struct dma_enhanced_tx_desc), + tx_q->dma_enhtx, tx_q->dma_tx_phy); + else +- dma_free_coherent(priv->device, DMA_TX_SIZE * ++ dma_free_coherent(priv->device, priv->dma_tx_size * + sizeof(struct dma_desc), + tx_q->dma_tx, tx_q->dma_tx_phy); + +@@ -1611,7 +1617,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) + rx_q->priv_data = priv; + + pp_params.flags = PP_FLAG_DMA_MAP; +- pp_params.pool_size = DMA_RX_SIZE; ++ pp_params.pool_size = priv->dma_rx_size; + num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); + pp_params.order = ilog2(num_pages); + pp_params.nid = dev_to_node(priv->device); +@@ -1625,24 +1631,29 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) + goto err_dma; + } + +- rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool), ++ rx_q->buf_pool = kcalloc(priv->dma_rx_size, ++ sizeof(*rx_q->buf_pool), + GFP_KERNEL); + if (!rx_q->buf_pool) + goto err_dma; + + if (priv->extend_desc) { + rx_q->dma_erx = dma_alloc_coherent(priv->device, +- DMA_RX_SIZE * sizeof(struct dma_extended_desc), +- &rx_q->dma_rx_phy, +- GFP_KERNEL); ++ priv->dma_rx_size * ++ sizeof(struct ++ dma_extended_desc), ++ &rx_q->dma_rx_phy, ++ GFP_KERNEL); + if (!rx_q->dma_erx) + goto err_dma; + + } else { + rx_q->dma_rx = dma_alloc_coherent(priv->device, +- DMA_RX_SIZE * sizeof(struct dma_desc), +- &rx_q->dma_rx_phy, +- GFP_KERNEL); ++ priv->dma_rx_size * ++ sizeof(struct ++ dma_desc), ++ &rx_q->dma_rx_phy, ++ GFP_KERNEL); + if (!rx_q->dma_rx) + goto err_dma; + } +@@ -1677,13 +1688,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) + tx_q->queue_index = queue; + tx_q->priv_data = priv; + +- tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE, ++ tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, + 
sizeof(*tx_q->tx_skbuff_dma), + GFP_KERNEL); + if (!tx_q->tx_skbuff_dma) + goto err_dma; + +- tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE, ++ tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!tx_q->tx_skbuff) +@@ -1691,14 +1702,16 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) + + if (priv->extend_desc) { + tx_q->dma_etx = dma_alloc_coherent(priv->device, +- DMA_TX_SIZE * sizeof(struct dma_extended_desc), +- &tx_q->dma_tx_phy, +- GFP_KERNEL); ++ priv->dma_tx_size * ++ sizeof(struct ++ dma_extended_desc), ++ &tx_q->dma_tx_phy, ++ GFP_KERNEL); + if (!tx_q->dma_etx) + goto err_dma; + } else if (priv->enhanced_tx_desc) { + tx_q->dma_enhtx = dma_alloc_coherent(priv->device, +- DMA_TX_SIZE * ++ priv->dma_tx_size * + sizeof(struct + dma_enhanced_tx_desc), + &tx_q->dma_tx_phy, +@@ -1707,9 +1720,11 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) + goto err_dma; + } else { + tx_q->dma_tx = dma_alloc_coherent(priv->device, +- DMA_TX_SIZE * sizeof(struct dma_desc), +- &tx_q->dma_tx_phy, +- GFP_KERNEL); ++ priv->dma_tx_size * ++ sizeof(struct ++ dma_desc), ++ &tx_q->dma_tx_phy, ++ GFP_KERNEL); + if (!tx_q->dma_tx) + goto err_dma; + } +@@ -2040,7 +2055,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) + + stmmac_release_tx_desc(priv, p, priv->mode); + +- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); ++ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + } + tx_q->dirty_tx = entry; + +@@ -2049,7 +2064,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) + + if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, + queue))) && +- stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { ++ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { + + netif_dbg(priv, tx_done, priv->dev, + "%s: restart transmit\n", __func__); +@@ -2086,18 +2101,19 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) + + stmmac_stop_tx_dma(priv, 
chan); + dma_free_tx_skbufs(priv, chan); +- for (i = 0; i < DMA_TX_SIZE; i++) ++ for (i = 0; i < priv->dma_tx_size; i++) + if (priv->extend_desc) + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, + priv->mode, +- (i == DMA_TX_SIZE - 1)); ++ (i == priv->dma_tx_size - 1)); + else if (priv->enhanced_tx_desc) + stmmac_init_tx_desc(priv, &tx_q->dma_enhtx[i].basic, + priv->mode, +- (i == DMA_TX_SIZE - 1)); ++ (i == priv->dma_tx_size - 1)); + else + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], +- priv->mode, (i == DMA_TX_SIZE - 1)); ++ priv->mode, ++ (i == priv->dma_tx_size - 1)); + tx_q->dirty_tx = 0; + tx_q->cur_tx = 0; + tx_q->mss = 0; +@@ -2325,7 +2341,8 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) + rx_q->dma_rx_phy, chan); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + +- (DMA_RX_SIZE * sizeof(struct dma_desc)); ++ (priv->dma_rx_size * ++ sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); + } +@@ -2409,12 +2426,14 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv) + /* set TX ring length */ + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_set_tx_ring_len(priv, priv->ioaddr, +- (DMA_TX_SIZE - 1), chan); ++ (priv->dma_tx_size - 1), ++ chan); + + /* set RX ring length */ + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_set_rx_ring_len(priv, priv->ioaddr, +- (DMA_RX_SIZE - 1), chan); ++ (priv->dma_rx_size - 1), ++ chan); + } + + /** +@@ -2850,7 +2869,6 @@ static void stmmac_free_irq(struct net_device *dev, + if (priv->rx_irq[j] > 0) + free_irq(priv->rx_irq[j], &priv->rx_queue[j]); + } +- + if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) + free_irq(priv->sfty_ue_irq, dev); + /* fall through */ +@@ -3094,6 +3112,11 @@ static int stmmac_open(struct net_device *dev) + priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + priv->rx_copybreak = STMMAC_RX_COPYBREAK; + ++ if (!priv->dma_rx_size) ++ priv->dma_rx_size = DMA_DEFAULT_RX_SIZE; ++ if (!priv->dma_tx_size) ++ 
priv->dma_tx_size = DMA_DEFAULT_TX_SIZE; ++ + ret = alloc_dma_desc_resources(priv); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", +@@ -3240,7 +3263,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, + return false; + + stmmac_set_tx_owner(priv, p); +- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); ++ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); + return true; + } + +@@ -3268,7 +3291,8 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, + while (tmp_len > 0) { + dma_addr_t curr_addr; + +- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); ++ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, ++ priv->dma_tx_size); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); + /* TSO is not available in DWMAC v3.5 */ + if (priv->enhanced_tx_desc) +@@ -3368,7 +3392,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) + mss_desc = tx_q->dma_tx + tx_q->cur_tx; + stmmac_set_mss(priv, mss_desc, mss); + tx_q->mss = mss; +- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); ++ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, ++ priv->dma_tx_size); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); + } + +@@ -3448,7 +3473,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. 
+ */ +- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); ++ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); + + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", +@@ -3514,15 +3539,16 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, + tx_q->cur_tx, first, nfrags); + +- stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); ++ stmmac_display_ring(priv, (void *)tx_q->dma_tx, ++ priv->dma_tx_size, 0); + + /* TSO is not available in DWMAC v3.5 */ + if (priv->enhanced_tx_desc) + stmmac_display_ring(priv, (void *)tx_q->dma_enhtx, +- DMA_TX_SIZE, 0); ++ priv->dma_tx_size, 0); + else + stmmac_display_ring(priv, (void *)tx_q->dma_tx, +- DMA_TX_SIZE, 0); ++ priv->dma_tx_size, 0); + pr_info(">>> frame to be transmitted: "); + print_pkt(skb->data, skb_headlen(skb)); + } +@@ -3647,7 +3673,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + int len = skb_frag_size(frag); + bool last_segment = (i == (nfrags - 1)); + +- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); ++ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + WARN_ON(tx_q->tx_skbuff[entry]); + + if (likely(priv->extend_desc)) +@@ -3683,7 +3709,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. 
+ */ +- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); ++ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + tx_q->cur_tx = entry; + + if (netif_msg_pktdata(priv)) { +@@ -3701,7 +3727,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + else + tx_head = (void *)tx_q->dma_tx; + +- stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false); ++ stmmac_display_ring(priv, tx_head, priv->dma_tx_size, false); + + netdev_dbg(priv->dev, ">>> frame to be transmitted: "); + print_pkt(skb->data, skb->len); +@@ -3834,7 +3860,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) + + static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) + { +- if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) ++ if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH(rx_q->priv_data)) + return 0; + + return 1; +@@ -3904,7 +3930,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) + dma_wmb(); + stmmac_set_rx_owner(priv, p, use_rx_wd); + last_refill = entry; +- entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); ++ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); + } + if (last_refill != entry) { + rx_q->dirty_rx = entry; +@@ -3941,7 +3967,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) + else + rx_head = (void *)rx_q->dma_rx; + +- stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); ++ stmmac_display_ring(priv, rx_head, ++ priv->dma_rx_size, true); + } + while (count < limit) { + unsigned int hlen = 0, prev_len = 0; +@@ -3986,7 +4013,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) + + count++; + +- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); ++ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, ++ priv->dma_rx_size); + next_entry = rx_q->cur_rx; + + if (priv->extend_desc) +@@ -4201,7 +4229,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) + + priv->xstats.napi_poll++; + +- work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan); ++ 
work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan); + work_done = min(work_done, budget); + + if (work_done < budget) +@@ -4744,11 +4772,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v) + if (priv->extend_desc) { + seq_printf(seq, "Extended descriptor ring:\n"); + sysfs_display_ring((void *)rx_q->dma_erx, +- DMA_RX_SIZE, 1, seq); ++ priv->dma_rx_size, 1, seq); + } else { + seq_printf(seq, "Descriptor ring:\n"); + sysfs_display_ring((void *)rx_q->dma_rx, +- DMA_RX_SIZE, 0, seq); ++ priv->dma_rx_size, 0, seq); + } + } + +@@ -4760,15 +4788,15 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v) + if (priv->extend_desc) { + seq_printf(seq, "Extended descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_etx, +- DMA_TX_SIZE, 1, seq); ++ priv->dma_tx_size, 1, seq); + } else if (priv->enhanced_tx_desc) { + seq_printf(seq, "Enhanced descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_enhtx, +- DMA_TX_SIZE, 2, seq); ++ priv->dma_tx_size, 2, seq); + } else { + seq_printf(seq, "Descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_tx, +- DMA_TX_SIZE, 0, seq); ++ priv->dma_tx_size, 0, seq); + } + } + +@@ -5168,6 +5196,23 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) + return ret; + } + ++int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) ++{ ++ struct stmmac_priv *priv = netdev_priv(dev); ++ int ret = 0; ++ ++ if (netif_running(dev)) ++ stmmac_release(dev); ++ ++ priv->dma_rx_size = rx_size; ++ priv->dma_tx_size = tx_size; ++ ++ if (netif_running(dev)) ++ ret = stmmac_open(dev); ++ ++ return ret; ++} ++ + /** + * stmmac_dvr_probe + * @device: device pointer +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +index e4ac3c401432..07f6b1cae578 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +@@ 
-732,7 +732,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv) + u32 tail; + + tail = priv->rx_queue[i].dma_rx_phy + +- (DMA_RX_SIZE * sizeof(struct dma_desc)); ++ (priv->dma_rx_size * sizeof(struct dma_desc)); + + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i); + stmmac_start_rx(priv, priv->ioaddr, i); +-- +2.17.1 + diff --git a/patches/0067-sos-vhm-add-hcall_write_protect_page-hypercall.acrn b/patches/0067-sos-vhm-add-hcall_write_protect_page-hypercall.acrn new file mode 100644 index 0000000000..7a6cfdb21f --- /dev/null +++ b/patches/0067-sos-vhm-add-hcall_write_protect_page-hypercall.acrn @@ -0,0 +1,159 @@ +From b57fca1b60e97a78361e2026e2a9fe717aa3484a Mon Sep 17 00:00:00 2001 +From: "Li, Fei1" +Date: Fri, 31 Aug 2018 10:59:02 +0800 +Subject: [PATCH 067/150] sos: vhm: add hcall_write_protect_page hypercall + +1. add write_protect_page to set or unset one page write protect +2. replace update_memmap_attr with write_protect_page to set or +unset one page write protect +3. 
replace update_memmap_attr with set_mmio_map to add guest memory region + +Signed-off-by: Li, Fei1 +--- + drivers/vhm/vhm_hypercall.c | 5 +++++ + drivers/vhm/vhm_mm.c | 25 ++++++++++++++++++++----- + include/linux/vhm/acrn_hv_defs.h | 14 ++++++++++++++ + include/linux/vhm/acrn_vhm_mm.h | 23 +++++------------------ + include/linux/vhm/vhm_hypercall.h | 2 ++ + 5 files changed, 46 insertions(+), 23 deletions(-) + +diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c +index 639ea60472a7..4ac7e2c5624a 100644 +--- a/drivers/vhm/vhm_hypercall.c ++++ b/drivers/vhm/vhm_hypercall.c +@@ -107,6 +107,11 @@ inline long hcall_set_memmaps(unsigned long pa_memmaps) + return acrn_hypercall1(HC_VM_SET_MEMMAPS, pa_memmaps); + } + ++inline long hcall_write_protect_page(unsigned long vmid, unsigned long wp) ++{ ++ return acrn_hypercall2(HC_VM_WRITE_PROTECT_PAGE, vmid, wp); ++} ++ + inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) + { + return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); +diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c +index f663558ae943..c7ca8e99612d 100644 +--- a/drivers/vhm/vhm_mm.c ++++ b/drivers/vhm/vhm_mm.c +@@ -159,12 +159,27 @@ int set_memmaps(struct set_memmaps *memmaps) + return 0; + } + +-int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, +- unsigned int mem_type, unsigned int mem_access_right) ++/* ++ * when set is true, set page write protection, ++ * else clear page write protection. 
++ */ ++int write_protect_page(unsigned long vmid, ++ unsigned long gpa, unsigned char set) + { +- return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, +- mem_type, mem_access_right, MAP_MEM); ++ struct wp_data wp; ++ ++ wp.set = set; ++ wp.gpa = gpa; ++ ++ if (hcall_write_protect_page(vmid, ++ virt_to_phys(&wp)) < 0) { ++ pr_err("vhm: vm[%ld] %s failed !\n", vmid, __func__); ++ return -EFAULT; ++ } ++ ++ pr_debug("VHM: %s, gpa: 0x%lx, set: %d\n", __func__, gpa, set); ++ ++ return 0; + } + + int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) +diff --git a/include/linux/vhm/acrn_hv_defs.h b/include/linux/vhm/acrn_hv_defs.h +index 0e2586f9d955..80d494e168f9 100644 +--- a/include/linux/vhm/acrn_hv_defs.h ++++ b/include/linux/vhm/acrn_hv_defs.h +@@ -96,6 +96,7 @@ + #define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00) + #define HC_VM_GPA2HPA _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01) + #define HC_VM_SET_MEMMAPS _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02) ++#define HC_VM_WRITE_PROTECT_PAGE _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x03) + + /* PCI assignment*/ + #define HC_ID_PCI_BASE 0x50UL +@@ -185,6 +186,19 @@ struct set_memmaps { + uint64_t memmaps_gpa; + } __attribute__((aligned(8))); + ++struct wp_data { ++ /** set page write protect permission. 
++ * ture: set the wp; flase: clear the wp ++ */ ++ uint8_t set; ++ ++ /** Reserved */ ++ uint64_t pad:56; ++ ++ /** the guest physical address of the page to change */ ++ uint64_t gpa; ++} __aligned(8); ++ + struct sbuf_setup_param { + uint16_t pcpu_id; + uint16_t reserved; +diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h +index 645a8a56531e..0769200ea3bf 100644 +--- a/include/linux/vhm/acrn_vhm_mm.h ++++ b/include/linux/vhm/acrn_vhm_mm.h +@@ -137,29 +137,16 @@ int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len); + + /** +- * update_memmap_attr - update mmio EPT mapping between UOS gpa and SOS gpa ++ * write_protect_page - change one page write protection + * + * @vmid: guest vmid +- * @guest_gpa: gpa of UOS +- * @host_gpa: gpa of SOS +- * @len: memory mapped length +- * @mem_type: memory mapping type. Possible value could be: +- * MEM_TYPE_WB +- * MEM_TYPE_WT +- * MEM_TYPE_UC +- * MEM_TYPE_WC +- * MEM_TYPE_WP +- * @mem_access_right: memory mapping access. Possible value could be: +- * MEM_ACCESS_READ +- * MEM_ACCESS_WRITE +- * MEM_ACCESS_EXEC +- * MEM_ACCESS_RWX ++ * @gpa: gpa in guest vmid ++ * @set: set or clear page write protection + * + * Return: 0 on success, <0 for error. 
+ */ +-int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, +- unsigned long host_gpa, unsigned long len, +- unsigned int mem_type, unsigned int mem_access_right); ++int write_protect_page(unsigned long vmid, ++ unsigned long gpa, unsigned char set); + + int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma); + +diff --git a/include/linux/vhm/vhm_hypercall.h b/include/linux/vhm/vhm_hypercall.h +index 5d2dc5a7a1af..eeb1b7c54db2 100644 +--- a/include/linux/vhm/vhm_hypercall.h ++++ b/include/linux/vhm/vhm_hypercall.h +@@ -150,6 +150,8 @@ inline long hcall_get_cpu_state(unsigned long cmd, unsigned long state_pa); + inline long hcall_set_memmap(unsigned long vmid, + unsigned long memmap); + inline long hcall_set_memmaps(unsigned long pa_memmaps); ++inline long hcall_write_protect_page(unsigned long vmid, ++ unsigned long wp); + inline long hcall_set_ioreq_buffer(unsigned long vmid, + unsigned long buffer); + inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu); +-- +2.17.1 + diff --git a/patches/0068-ALSA-hda-ext-add-spib-to-stream-context.audio b/patches/0068-ALSA-hda-ext-add-spib-to-stream-context.audio new file mode 100644 index 0000000000..e888e16372 --- /dev/null +++ b/patches/0068-ALSA-hda-ext-add-spib-to-stream-context.audio @@ -0,0 +1,53 @@ +From 8fd7b1e2720254103a418b6cf7d78f746ecfb349 Mon Sep 17 00:00:00 2001 +From: Ramesh Babu +Date: Mon, 6 Jun 2016 13:13:15 +0530 +Subject: [PATCH 068/193] ALSA: hda: ext: add spib to stream context. + +Platforms like skylake support SPIB (software position index in +Buffer) capability, through which application pointer can be +programmed in DMA. This helps DMA stop rendering stale data. + +This patch saves spib values in stream context which can be +restored during resume from S3. + +Change-Id: I2992242087ee0732b6fc571b5e65eb59aa1fa251 +Signed-off-by: Ramesh Babu +Signed-off-by: Subhransu S. 
Prusty +Signed-off-by: Mallikarjun, chippalkatti +Reviewed-on: +Reviewed-by: audio_build +Reviewed-by: Sm, Bhadur A +Tested-by: Sm, Bhadur A +--- + include/sound/hdaudio_ext.h | 1 + + sound/hda/ext/hdac_ext_stream.c | 2 ++ + 2 files changed, 3 insertions(+) + +diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h +index 3e16c23a9221..d2949bb176f4 100644 +--- a/include/sound/hdaudio_ext.h ++++ b/include/sound/hdaudio_ext.h +@@ -66,6 +66,7 @@ struct hdac_ext_stream { + + u32 dpib; + u32 lpib; ++ u32 spib; + bool decoupled:1; + bool link_locked:1; + bool link_prepared; +diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c +index de3369c666cd..eea0959ab752 100644 +--- a/sound/hda/ext/hdac_ext_stream.c ++++ b/sound/hda/ext/hdac_ext_stream.c +@@ -469,6 +469,8 @@ int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus, + } + + writel(value, stream->spib_addr); ++ /* save the value in stream context */ ++ stream->spib = value; + + return 0; + } +-- +2.17.1 + diff --git a/patches/0068-drm-Stop-including-drm_bridge.h-from-drm_crtc.h.drm b/patches/0068-drm-Stop-including-drm_bridge.h-from-drm_crtc.h.drm new file mode 100644 index 0000000000..12614b7dc2 --- /dev/null +++ b/patches/0068-drm-Stop-including-drm_bridge.h-from-drm_crtc.h.drm @@ -0,0 +1,699 @@ +From 8b9f9c7bcf2d2b99531e7219c4362b4065c5178c Mon Sep 17 00:00:00 2001 +From: Boris Brezillon +Date: Mon, 26 Aug 2019 17:26:29 +0200 +Subject: [PATCH 068/690] drm: Stop including drm_bridge.h from drm_crtc.h + +We are about to add a drm_bridge_state that inherits from +drm_private_state which is defined in drm_atomic.h. Problem is, +drm_atomic.h includes drm_crtc.h which in turn includes drm_bridge.h, +leading to "drm_private_state has incomplete type" error. + +Let's force all users of the drm_bridge API to explicitly include +drm_bridge.h. 
+ +Signed-off-by: Boris Brezillon +Reviewed-by: Sam Ravnborg +Link: https://patchwork.freedesktop.org/patch/msgid/20190826152649.13820-2-boris.brezillon@collabora.com +--- + drivers/gpu/drm/arc/arcpgu_hdmi.c | 1 + + drivers/gpu/drm/bridge/analogix-anx78xx.c | 1 + + drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 1 + + drivers/gpu/drm/bridge/dumb-vga-dac.c | 1 + + drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 1 + + drivers/gpu/drm/bridge/nxp-ptn3460.c | 1 + + drivers/gpu/drm/bridge/panel.c | 1 + + drivers/gpu/drm/bridge/parade-ps8622.c | 1 + + drivers/gpu/drm/bridge/sii902x.c | 1 + + drivers/gpu/drm/bridge/sii9234.c | 1 + + drivers/gpu/drm/bridge/sil-sii8620.c | 1 + + drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 1 + + drivers/gpu/drm/bridge/tc358764.c | 1 + + drivers/gpu/drm/bridge/tc358767.c | 1 + + drivers/gpu/drm/bridge/ti-sn65dsi86.c | 1 + + drivers/gpu/drm/bridge/ti-tfp410.c | 1 + + drivers/gpu/drm/drm_atomic_helper.c | 1 + + drivers/gpu/drm/drm_crtc_helper.c | 1 + + drivers/gpu/drm/drm_encoder.c | 1 + + drivers/gpu/drm/drm_probe_helper.c | 1 + + drivers/gpu/drm/drm_simple_kms_helper.c | 1 + + drivers/gpu/drm/exynos/exynos_dp.c | 1 + + drivers/gpu/drm/exynos/exynos_drm_dsi.c | 1 + + drivers/gpu/drm/exynos/exynos_drm_mic.c | 1 + + drivers/gpu/drm/exynos/exynos_hdmi.c | 1 + + drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 1 + + drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 1 + + drivers/gpu/drm/i2c/tda998x_drv.c | 1 + + drivers/gpu/drm/imx/imx-ldb.c | 1 + + drivers/gpu/drm/imx/parallel-display.c | 1 + + drivers/gpu/drm/ingenic/ingenic-drm.c | 1 + + drivers/gpu/drm/mediatek/mtk_dpi.c | 1 + + drivers/gpu/drm/mediatek/mtk_dsi.c | 1 + + drivers/gpu/drm/mediatek/mtk_hdmi.c | 1 + + drivers/gpu/drm/msm/dsi/dsi.h | 1 + + drivers/gpu/drm/msm/edp/edp.h | 1 + + drivers/gpu/drm/msm/hdmi/hdmi.h | 2 ++ + drivers/gpu/drm/omapdrm/dss/output.c | 1 + + drivers/gpu/drm/omapdrm/omap_drv.c | 1 + + drivers/gpu/drm/omapdrm/omap_encoder.c | 1 + + 
drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 1 + + drivers/gpu/drm/rockchip/rockchip_lvds.c | 1 + + drivers/gpu/drm/rockchip/rockchip_rgb.c | 1 + + drivers/gpu/drm/sti/sti_dvo.c | 1 + + drivers/gpu/drm/sti/sti_hda.c | 1 + + drivers/gpu/drm/sti/sti_hdmi.c | 1 + + drivers/gpu/drm/sun4i/sun4i_lvds.c | 1 + + drivers/gpu/drm/sun4i/sun4i_rgb.c | 1 + + drivers/gpu/drm/sun4i/sun4i_tcon.c | 1 + + drivers/gpu/drm/tilcdc/tilcdc_external.c | 1 + + drivers/gpu/drm/vc4/vc4_dsi.c | 1 + + include/drm/drm_crtc.h | 1 - + 52 files changed, 52 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c +index 98aac743cc26..8fd7094beece 100644 +--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c ++++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c +@@ -5,6 +5,7 @@ + * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) + */ + ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c +index 3c7cc5af735c..e3f4fd2a5ad4 100644 +--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c ++++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c +@@ -19,6 +19,7 @@ + #include + + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +index 22885dceaa17..bb411fe52ae8 100644 +--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c ++++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c +index 7aa789c35882..cc33dc411b9e 100644 +--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c ++++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c +@@ -12,6 +12,7 @@ + #include + + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 
b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +index 6e81e5db57f2..e8a49f6146c6 100644 +--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c ++++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +@@ -25,6 +25,7 @@ + + #include + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c +index d4a1cc5052c3..57ff01339559 100644 +--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c ++++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c +index b12ae3a4c5f1..6cffeb4a42f2 100644 +--- a/drivers/gpu/drm/bridge/panel.c ++++ b/drivers/gpu/drm/bridge/panel.c +@@ -5,6 +5,7 @@ + */ + + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c +index 93c68e2e9484..b7a72dfdcac3 100644 +--- a/drivers/gpu/drm/bridge/parade-ps8622.c ++++ b/drivers/gpu/drm/bridge/parade-ps8622.c +@@ -17,6 +17,7 @@ + #include + + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c +index 38f75ac580df..b70e8c5cf2e1 100644 +--- a/drivers/gpu/drm/bridge/sii902x.c ++++ b/drivers/gpu/drm/bridge/sii902x.c +@@ -20,6 +20,7 @@ + #include + + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c +index 25d4ad8c7ad6..ad00d841ed9e 100644 +--- a/drivers/gpu/drm/bridge/sii9234.c ++++ b/drivers/gpu/drm/bridge/sii9234.c +@@ -13,6 +13,7 @@ + * Dharam Kumar + */ + #include ++#include + #include + #include + +diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c +index bd3165ee5354..14643923a721 100644 +--- a/drivers/gpu/drm/bridge/sil-sii8620.c ++++ 
b/drivers/gpu/drm/bridge/sil-sii8620.c +@@ -9,6 +9,7 @@ + #include + + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +index 521d689413c8..16a21460f481 100644 +--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c ++++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +@@ -26,6 +26,7 @@ + + #include + #include ++#include + #include + #include + #include +diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c +index 170f162ffa55..db298f550a5a 100644 +--- a/drivers/gpu/drm/bridge/tc358764.c ++++ b/drivers/gpu/drm/bridge/tc358764.c +@@ -16,6 +16,7 @@ + #include